Diffstat (limited to 'clang/lib')
57 files changed, 1749 insertions, 493 deletions
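For orientation, a minimal sketch of the C++ constructs this change teaches ClangIR to handle (illustrative only, not part of the patch; all names are invented): a global with a non-trivial destructor, whose teardown is registered through __cxa_atexit against __dso_handle during LoweringPrepare, and a dynamic_cast, which CIRGen emits as a cir.dynamic_cast op that is later lowered to a call to the __dynamic_cast runtime function:

    struct Base { virtual ~Base(); };
    struct Derived : Base { ~Derived() override; };

    // Global with destructor: the dtor region is now emitted, and LoweringPrepare
    // rewrites the dtor call into __cxa_atexit(&dtor, &g, &__dso_handle).
    Derived g;

    Derived *downcast(Base *b) {
      // Emitted as cir.dynamic_cast, then lowered to __dynamic_cast(ptr, src_rtti,
      // dest_rtti, offset_hint); a failed reference cast calls __cxa_bad_cast.
      return dynamic_cast<Derived *>(b);
    }
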
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp index f899b3c..597cbd8 100644 --- a/clang/lib/AST/Expr.cpp +++ b/clang/lib/AST/Expr.cpp @@ -5290,6 +5290,33 @@ QualType ArraySectionExpr::getBaseOriginalType(const Expr *Base) { return OriginalTy; } +QualType ArraySectionExpr::getElementType() const { + QualType BaseTy = getBase()->IgnoreParenImpCasts()->getType(); + // We only have to look into the array section exprs, else we will get the + // type of the base, which should already be valid. + if (auto *ASE = dyn_cast<ArraySectionExpr>(getBase()->IgnoreParenImpCasts())) + BaseTy = ASE->getElementType(); + + if (BaseTy->isAnyPointerType()) + return BaseTy->getPointeeType(); + if (BaseTy->isArrayType()) + return BaseTy->castAsArrayTypeUnsafe()->getElementType(); + + // If this isn't a pointer or array, the base is a dependent expression, so + // just return the BaseTy anyway. + assert(BaseTy->isInstantiationDependentType()); + return BaseTy; +} + +QualType ArraySectionExpr::getBaseType() const { + // We only have to look into the array section exprs, else we will get the + // type of the base, which should already be valid. + if (auto *ASE = dyn_cast<ArraySectionExpr>(getBase()->IgnoreParenImpCasts())) + return ASE->getElementType(); + + return getBase()->IgnoreParenImpCasts()->getType(); +} + RecoveryExpr::RecoveryExpr(ASTContext &Ctx, QualType T, SourceLocation BeginLoc, SourceLocation EndLoc, ArrayRef<Expr *> SubExprs) : Expr(RecoveryExprClass, T.getNonReferenceType(), diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp index f3b5478..3cd033e 100644 --- a/clang/lib/AST/StmtProfile.cpp +++ b/clang/lib/AST/StmtProfile.cpp @@ -2769,10 +2769,19 @@ void OpenACCClauseProfiler::VisitReductionClause( for (auto &Recipe : Clause.getRecipes()) { Profiler.VisitDecl(Recipe.AllocaDecl); + // TODO: OpenACC: Make sure we remember to update this when we figure out // what we're adding for the operation recipe, in the meantime, a static // assert will make sure we don't add something. - static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *)); + static_assert(sizeof(OpenACCReductionRecipe::CombinerRecipe) == + 3 * sizeof(int *)); + for (auto &CombinerRecipe : Recipe.CombinerRecipes) { + if (CombinerRecipe.Op) { + Profiler.VisitDecl(CombinerRecipe.LHS); + Profiler.VisitDecl(CombinerRecipe.RHS); + Profiler.VisitStmt(CombinerRecipe.Op); + } + } } } diff --git a/clang/lib/ASTMatchers/CMakeLists.txt b/clang/lib/ASTMatchers/CMakeLists.txt index 7769fd6..29ad27df 100644 --- a/clang/lib/ASTMatchers/CMakeLists.txt +++ b/clang/lib/ASTMatchers/CMakeLists.txt @@ -8,7 +8,6 @@ set(LLVM_LINK_COMPONENTS add_clang_library(clangASTMatchers ASTMatchFinder.cpp ASTMatchersInternal.cpp - GtestMatchers.cpp LowLevelHelpers.cpp LINK_LIBS diff --git a/clang/lib/ASTMatchers/GtestMatchers.cpp b/clang/lib/ASTMatchers/GtestMatchers.cpp deleted file mode 100644 index 7c135bb..0000000 --- a/clang/lib/ASTMatchers/GtestMatchers.cpp +++ /dev/null @@ -1,228 +0,0 @@ -//===- GtestMatchers.cpp - AST Matchers for Gtest ---------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file implements several matchers for popular gtest macros. In general, -// AST matchers cannot match calls to macros. 
However, we can simulate such -// matches if the macro definition has identifiable elements that themselves can -// be matched. In that case, we can match on those elements and then check that -// the match occurs within an expansion of the desired macro. The more uncommon -// the identified elements, the more efficient this process will be. -// -//===----------------------------------------------------------------------===// - -#include "clang/ASTMatchers/GtestMatchers.h" -#include "llvm/ADT/StringRef.h" - -namespace clang { -namespace ast_matchers { -namespace { - -enum class MacroType { - Expect, - Assert, - On, -}; - -} // namespace - -static DeclarationMatcher getComparisonDecl(GtestCmp Cmp) { - switch (Cmp) { - case GtestCmp::Eq: - return cxxMethodDecl(hasName("Compare"), - ofClass(cxxRecordDecl(isSameOrDerivedFrom( - hasName("::testing::internal::EqHelper"))))); - case GtestCmp::Ne: - return functionDecl(hasName("::testing::internal::CmpHelperNE")); - case GtestCmp::Ge: - return functionDecl(hasName("::testing::internal::CmpHelperGE")); - case GtestCmp::Gt: - return functionDecl(hasName("::testing::internal::CmpHelperGT")); - case GtestCmp::Le: - return functionDecl(hasName("::testing::internal::CmpHelperLE")); - case GtestCmp::Lt: - return functionDecl(hasName("::testing::internal::CmpHelperLT")); - } - llvm_unreachable("Unhandled GtestCmp enum"); -} - -static llvm::StringRef getMacroTypeName(MacroType Macro) { - switch (Macro) { - case MacroType::Expect: - return "EXPECT"; - case MacroType::Assert: - return "ASSERT"; - case MacroType::On: - return "ON"; - } - llvm_unreachable("Unhandled MacroType enum"); -} - -static llvm::StringRef getComparisonTypeName(GtestCmp Cmp) { - switch (Cmp) { - case GtestCmp::Eq: - return "EQ"; - case GtestCmp::Ne: - return "NE"; - case GtestCmp::Ge: - return "GE"; - case GtestCmp::Gt: - return "GT"; - case GtestCmp::Le: - return "LE"; - case GtestCmp::Lt: - return "LT"; - } - llvm_unreachable("Unhandled GtestCmp enum"); -} - -static std::string getMacroName(MacroType Macro, GtestCmp Cmp) { - return (getMacroTypeName(Macro) + "_" + getComparisonTypeName(Cmp)).str(); -} - -static std::string getMacroName(MacroType Macro, llvm::StringRef Operation) { - return (getMacroTypeName(Macro) + "_" + Operation).str(); -} - -// Under the hood, ON_CALL is expanded to a call to `InternalDefaultActionSetAt` -// to set a default action spec to the underlying function mocker, while -// EXPECT_CALL is expanded to a call to `InternalExpectedAt` to set a new -// expectation spec. -static llvm::StringRef getSpecSetterName(MacroType Macro) { - switch (Macro) { - case MacroType::On: - return "InternalDefaultActionSetAt"; - case MacroType::Expect: - return "InternalExpectedAt"; - default: - llvm_unreachable("Unhandled MacroType enum"); - } - llvm_unreachable("Unhandled MacroType enum"); -} - -// In general, AST matchers cannot match calls to macros. However, we can -// simulate such matches if the macro definition has identifiable elements that -// themselves can be matched. In that case, we can match on those elements and -// then check that the match occurs within an expansion of the desired -// macro. The more uncommon the identified elements, the more efficient this -// process will be. -// -// We use this approach to implement the derived matchers gtestAssert and -// gtestExpect. 
-static internal::BindableMatcher<Stmt> -gtestComparisonInternal(MacroType Macro, GtestCmp Cmp, StatementMatcher Left, - StatementMatcher Right) { - return callExpr(isExpandedFromMacro(getMacroName(Macro, Cmp)), - callee(getComparisonDecl(Cmp)), hasArgument(2, Left), - hasArgument(3, Right)); -} - -static internal::BindableMatcher<Stmt> -gtestThatInternal(MacroType Macro, StatementMatcher Actual, - StatementMatcher Matcher) { - return cxxOperatorCallExpr( - isExpandedFromMacro(getMacroName(Macro, "THAT")), - hasOverloadedOperatorName("()"), hasArgument(2, Actual), - hasArgument( - 0, expr(hasType(classTemplateSpecializationDecl(hasName( - "::testing::internal::PredicateFormatterFromMatcher"))), - ignoringImplicit( - callExpr(callee(functionDecl(hasName( - "::testing::internal::" - "MakePredicateFormatterFromMatcher"))), - hasArgument(0, ignoringImplicit(Matcher))))))); -} - -static internal::BindableMatcher<Stmt> -gtestCallInternal(MacroType Macro, StatementMatcher MockCall, MockArgs Args) { - // A ON_CALL or EXPECT_CALL macro expands to different AST structures - // depending on whether the mock method has arguments or not. - switch (Args) { - // For example, - // `ON_CALL(mock, TwoParamMethod)` is expanded to - // `mock.gmock_TwoArgsMethod(WithoutMatchers(), - // nullptr).InternalDefaultActionSetAt(...)`. - // EXPECT_CALL is the same except - // that it calls `InternalExpectedAt` instead of `InternalDefaultActionSetAt` - // in the end. - case MockArgs::None: - return cxxMemberCallExpr( - isExpandedFromMacro(getMacroName(Macro, "CALL")), - callee(functionDecl(hasName(getSpecSetterName(Macro)))), - onImplicitObjectArgument(ignoringImplicit(MockCall))); - // For example, - // `ON_CALL(mock, TwoParamMethod(m1, m2))` is expanded to - // `mock.gmock_TwoParamMethod(m1,m2)(WithoutMatchers(), - // nullptr).InternalDefaultActionSetAt(...)`. - // EXPECT_CALL is the same except that it calls `InternalExpectedAt` instead - // of `InternalDefaultActionSetAt` in the end. 
- case MockArgs::Some: - return cxxMemberCallExpr( - isExpandedFromMacro(getMacroName(Macro, "CALL")), - callee(functionDecl(hasName(getSpecSetterName(Macro)))), - onImplicitObjectArgument(ignoringImplicit(cxxOperatorCallExpr( - hasOverloadedOperatorName("()"), argumentCountIs(3), - hasArgument(0, ignoringImplicit(MockCall)))))); - } - llvm_unreachable("Unhandled MockArgs enum"); -} - -static internal::BindableMatcher<Stmt> -gtestCallInternal(MacroType Macro, StatementMatcher MockObject, - llvm::StringRef MockMethodName, MockArgs Args) { - return gtestCallInternal( - Macro, - cxxMemberCallExpr( - onImplicitObjectArgument(MockObject), - callee(functionDecl(hasName(("gmock_" + MockMethodName).str())))), - Args); -} - -internal::BindableMatcher<Stmt> gtestAssert(GtestCmp Cmp, StatementMatcher Left, - StatementMatcher Right) { - return gtestComparisonInternal(MacroType::Assert, Cmp, Left, Right); -} - -internal::BindableMatcher<Stmt> gtestExpect(GtestCmp Cmp, StatementMatcher Left, - StatementMatcher Right) { - return gtestComparisonInternal(MacroType::Expect, Cmp, Left, Right); -} - -internal::BindableMatcher<Stmt> gtestAssertThat(StatementMatcher Actual, - StatementMatcher Matcher) { - return gtestThatInternal(MacroType::Assert, Actual, Matcher); -} - -internal::BindableMatcher<Stmt> gtestExpectThat(StatementMatcher Actual, - StatementMatcher Matcher) { - return gtestThatInternal(MacroType::Expect, Actual, Matcher); -} - -internal::BindableMatcher<Stmt> gtestOnCall(StatementMatcher MockObject, - llvm::StringRef MockMethodName, - MockArgs Args) { - return gtestCallInternal(MacroType::On, MockObject, MockMethodName, Args); -} - -internal::BindableMatcher<Stmt> gtestOnCall(StatementMatcher MockCall, - MockArgs Args) { - return gtestCallInternal(MacroType::On, MockCall, Args); -} - -internal::BindableMatcher<Stmt> gtestExpectCall(StatementMatcher MockObject, - llvm::StringRef MockMethodName, - MockArgs Args) { - return gtestCallInternal(MacroType::Expect, MockObject, MockMethodName, Args); -} - -internal::BindableMatcher<Stmt> gtestExpectCall(StatementMatcher MockCall, - MockArgs Args) { - return gtestCallInternal(MacroType::Expect, MockCall, Args); -} - -} // end namespace ast_matchers -} // end namespace clang diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 58345b4..a6f10e6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -122,6 +122,11 @@ public: return getPointerTo(cir::VPtrType::get(getContext())); } + cir::FuncType getFuncType(llvm::ArrayRef<mlir::Type> params, mlir::Type retTy, + bool isVarArg = false) { + return cir::FuncType::get(params, retTy, isVarArg); + } + /// Get a CIR record kind from a AST declaration tag. cir::RecordType::RecordKind getRecordKind(const clang::TagTypeKind kind) { switch (kind) { @@ -314,12 +319,6 @@ public: return cir::ConstantOp::create(*this, loc, cir::IntAttr::get(sInt64Ty, c)); } - // Creates constant nullptr for pointer type ty. 
- cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { - assert(!cir::MissingFeatures::targetCodeGenInfoGetNullPointer()); - return cir::ConstantOp::create(*this, loc, getConstPtrAttr(ty, 0)); - } - mlir::Value createNeg(mlir::Value value) { if (auto intTy = mlir::dyn_cast<cir::IntType>(value.getType())) { @@ -372,6 +371,15 @@ public: return cir::BinOp::create(*this, loc, cir::BinOpKind::Div, lhs, rhs); } + mlir::Value createDynCast(mlir::Location loc, mlir::Value src, + cir::PointerType destType, bool isRefCast, + cir::DynamicCastInfoAttr info) { + auto castKind = + isRefCast ? cir::DynamicCastKind::Ref : cir::DynamicCastKind::Ptr; + return cir::DynamicCastOp::create(*this, loc, destType, castKind, src, info, + /*relative_layout=*/false); + } + Address createBaseClassAddr(mlir::Location loc, Address addr, mlir::Type destType, unsigned offset, bool assumeNotNull) { diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index d5b35c2..274d11b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenCXXABI.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" @@ -95,7 +96,63 @@ static void emitDeclDestroy(CIRGenFunction &cgf, const VarDecl *vd, return; } - cgf.cgm.errorNYI(vd->getSourceRange(), "global with destructor"); + // If not constant storage we'll emit this regardless of NeedsDtor value. + CIRGenBuilderTy &builder = cgf.getBuilder(); + + // Prepare the dtor region. + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Block *block = builder.createBlock(&addr.getDtorRegion()); + CIRGenFunction::LexicalScope lexScope{cgf, addr.getLoc(), + builder.getInsertionBlock()}; + lexScope.setAsGlobalInit(); + builder.setInsertionPointToStart(block); + + CIRGenModule &cgm = cgf.cgm; + QualType type = vd->getType(); + + // Special-case non-array C++ destructors, if they have the right signature. + // Under some ABIs, destructors return this instead of void, and cannot be + // passed directly to __cxa_atexit if the target does not allow this + // mismatch. + const CXXRecordDecl *record = type->getAsCXXRecordDecl(); + bool canRegisterDestructor = + record && (!cgm.getCXXABI().hasThisReturn( + GlobalDecl(record->getDestructor(), Dtor_Complete)) || + cgm.getCXXABI().canCallMismatchedFunctionType()); + + // If __cxa_atexit is disabled via a flag, a different helper function is + // generated elsewhere which uses atexit instead, and it takes the destructor + // directly. + cir::FuncOp fnOp; + if (record && (canRegisterDestructor || cgm.getCodeGenOpts().CXAAtExit)) { + if (vd->getTLSKind()) + cgm.errorNYI(vd->getSourceRange(), "TLS destructor"); + assert(!record->hasTrivialDestructor()); + assert(!cir::MissingFeatures::openCL()); + CXXDestructorDecl *dtor = record->getDestructor(); + // In LLVM OG codegen this is done in registerGlobalDtor, but CIRGen + // relies on LoweringPrepare for further decoupling, so build the + // call right here. 
+ auto gd = GlobalDecl(dtor, Dtor_Complete); + fnOp = cgm.getAddrAndTypeOfCXXStructor(gd).second; + cgf.getBuilder().createCallOp( + cgf.getLoc(vd->getSourceRange()), + mlir::FlatSymbolRefAttr::get(fnOp.getSymNameAttr()), + mlir::ValueRange{cgm.getAddrOfGlobalVar(vd)}); + } else { + cgm.errorNYI(vd->getSourceRange(), "array destructor"); + } + assert(fnOp && "expected cir.func"); + cgm.getCXXABI().registerGlobalDtor(vd, fnOp, nullptr); + + builder.setInsertionPointToEnd(block); + if (block->empty()) { + block->erase(); + // Don't confuse lexical cleanup. + builder.clearInsertionPoint(); + } else { + builder.create<cir::YieldOp>(addr.getLoc()); + } } cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl gd) { diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 2465a68..06f41cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -54,6 +54,12 @@ public: Address thisAddr, const CXXRecordDecl *classDecl, const CXXRecordDecl *baseClassDecl) = 0; + virtual mlir::Value emitDynamicCast(CIRGenFunction &cgf, mlir::Location loc, + QualType srcRecordTy, + QualType destRecordTy, + cir::PointerType destCIRTy, + bool isRefCast, Address src) = 0; + public: /// Similar to AddedStructorArgs, but only notes the number of additional /// arguments. @@ -149,6 +155,14 @@ public: /// Loads the incoming C++ this pointer as it was passed by the caller. mlir::Value loadIncomingCXXThis(CIRGenFunction &cgf); + /// Get the implicit (second) parameter that comes after the "this" pointer, + /// or nullptr if there isn't one. + virtual mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &cgf, + const CXXDestructorDecl *dd, + CXXDtorType type, + bool forVirtualBase, + bool delegating) = 0; + /// Emit constructor variants required by this ABI. virtual void emitCXXConstructors(const clang::CXXConstructorDecl *d) = 0; @@ -160,6 +174,14 @@ public: bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) = 0; + /// Emit code to force the execution of a destructor during global + /// teardown. The default implementation of this uses atexit. + /// + /// \param dtor - a function taking a single pointer argument + /// \param addr - a pointer to pass to the destructor function. + virtual void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor, + mlir::Value addr) = 0; + /// Checks if ABI requires extra virtual offset for vtable field. virtual bool isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf, @@ -233,6 +255,16 @@ public: return false; } + /// Returns true if the target allows calling a function through a pointer + /// with a different signature than the actual function (or equivalently, + /// bitcasting a function or function pointer to a different function type). + /// In principle, in the most general case, this could depend on the target, the + /// calling convention, and the actual types of the arguments and return + /// value. Here it just means whether the signature mismatch could *ever* be + /// allowed; in other words, does the target do strict checking of signatures + /// for all calls. + virtual bool canCallMismatchedFunctionType() const { return true; } + /// Gets the mangle context.
clang::MangleContext &getMangleContext() { return *mangleContext; } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index d9ebf19..485b2c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -126,6 +126,30 @@ static bool isInitializerOfDynamicClass(const CXXCtorInitializer *baseInit) { } namespace { +/// Call the destructor for a direct base class. +struct CallBaseDtor final : EHScopeStack::Cleanup { + const CXXRecordDecl *baseClass; + bool baseIsVirtual; + CallBaseDtor(const CXXRecordDecl *base, bool baseIsVirtual) + : baseClass(base), baseIsVirtual(baseIsVirtual) {} + + void emit(CIRGenFunction &cgf) override { + const CXXRecordDecl *derivedClass = + cast<CXXMethodDecl>(cgf.curFuncDecl)->getParent(); + + const CXXDestructorDecl *d = baseClass->getDestructor(); + // We are already inside a destructor, so presumably the object being + // destroyed should have the expected type. + QualType thisTy = d->getFunctionObjectParameterType(); + assert(cgf.currSrcLoc && "expected source location"); + Address addr = cgf.getAddressOfDirectBaseInCompleteClass( + *cgf.currSrcLoc, cgf.loadCXXThisAddress(), derivedClass, baseClass, + baseIsVirtual); + cgf.emitCXXDestructorCall(d, Dtor_Base, baseIsVirtual, + /*delegating=*/false, addr, thisTy); + } +}; + /// A visitor which checks whether an initializer uses 'this' in a /// way which requires the vtable to be properly set. struct DynamicThisUseChecker @@ -891,12 +915,6 @@ public: assert(!cir::MissingFeatures::ehCleanupFlags()); cgf.emitDestroy(lv.getAddress(), field->getType(), destroyer); } - - // This is a placeholder until EHCleanupScope is implemented. - size_t getSize() const override { - assert(!cir::MissingFeatures::ehCleanupScope()); - return sizeof(DestroyField); - } }; } // namespace @@ -928,8 +946,21 @@ void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd, if (dtorType == Dtor_Complete) { assert(!cir::MissingFeatures::sanitizers()); - if (classDecl->getNumVBases()) - cgm.errorNYI(dd->getSourceRange(), "virtual base destructor cleanups"); + // We push them in the forward order so that they'll be popped in + // the reverse order. + for (const CXXBaseSpecifier &base : classDecl->vbases()) { + auto *baseClassDecl = base.getType()->castAsCXXRecordDecl(); + + if (baseClassDecl->hasTrivialDestructor()) { + // Under SanitizeMemoryUseAfterDtor, poison the trivial base class + // memory. For non-trivial base classes the same is done in the class + // destructor. + assert(!cir::MissingFeatures::sanitizers()); + } else { + ehStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, baseClassDecl, + /*baseIsVirtual=*/true); + } + } return; } @@ -948,8 +979,8 @@ void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd, if (baseClassDecl->hasTrivialDestructor()) assert(!cir::MissingFeatures::sanitizers()); else - cgm.errorNYI(dd->getSourceRange(), - "non-trivial base destructor cleanups"); + ehStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, baseClassDecl, + /*baseIsVirtual=*/false); } assert(!cir::MissingFeatures::sanitizers()); diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 563a753..039d290 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -695,12 +695,6 @@ struct DestroyObject final : EHScopeStack::Cleanup { void emit(CIRGenFunction &cgf) override { cgf.emitDestroy(addr, type, destroyer); } - - // This is a placeholder until EHCleanupScope is implemented.
- size_t getSize() const override { - assert(!cir::MissingFeatures::ehCleanupScope()); - return sizeof(DestroyObject); - } }; } // namespace diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 6453843..f9ff37b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -64,3 +64,11 @@ void CIRGenFunction::emitAnyExprToExn(const Expr *e, Address addr) { // Deactivate the cleanup block. assert(!cir::MissingFeatures::ehCleanupScope()); } + +mlir::LogicalResult CIRGenFunction::emitCXXTryStmt(const CXXTryStmt &s) { + if (s.getTryBlock()->body_empty()) + return mlir::LogicalResult::success(); + + cgm.errorNYI("emitCXXTryStmt: CXXTryStmt with non-empty body"); + return mlir::LogicalResult::success(); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index be94890..f416571 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1185,10 +1185,16 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) { case CK_BuiltinFnToFnPtr: llvm_unreachable("builtin functions are handled elsewhere"); + case CK_Dynamic: { + LValue lv = emitLValue(e->getSubExpr()); + Address v = lv.getAddress(); + const auto *dce = cast<CXXDynamicCastExpr>(e); + return makeNaturalAlignAddrLValue(emitDynamicCast(v, dce), e->getType()); + } + // These are never l-values; just use the aggregate emission code. case CK_NonAtomicToAtomic: case CK_AtomicToNonAtomic: - case CK_Dynamic: case CK_ToUnion: case CK_BaseToDerived: case CK_AddressSpaceConversion: diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 4eb8ca8..97c0944 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -463,12 +463,6 @@ struct CallObjectDelete final : EHScopeStack::Cleanup { void emit(CIRGenFunction &cgf) override { cgf.emitDeleteCall(operatorDelete, ptr, elementType); } - - // This is a placeholder until EHCleanupScope is implemented. - size_t getSize() const override { - assert(!cir::MissingFeatures::ehCleanupScope()); - return sizeof(CallObjectDelete); - } }; } // namespace @@ -728,3 +722,43 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD, // Emit the call to delete. emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs); } + +mlir::Value CIRGenFunction::emitDynamicCast(Address thisAddr, + const CXXDynamicCastExpr *dce) { + mlir::Location loc = getLoc(dce->getSourceRange()); + + cgm.emitExplicitCastExprType(dce, this); + QualType destTy = dce->getTypeAsWritten(); + QualType srcTy = dce->getSubExpr()->getType(); + + // C++ [expr.dynamic.cast]p7: + // If T is "pointer to cv void," then the result is a pointer to the most + // derived object pointed to by v. + bool isDynCastToVoid = destTy->isVoidPointerType(); + bool isRefCast = destTy->isReferenceType(); + + QualType srcRecordTy; + QualType destRecordTy; + if (isDynCastToVoid) { + srcRecordTy = srcTy->getPointeeType(); + // No destRecordTy.
+ } else if (const PointerType *destPTy = destTy->getAs<PointerType>()) { + srcRecordTy = srcTy->castAs<PointerType>()->getPointeeType(); + destRecordTy = destPTy->getPointeeType(); + } else { + srcRecordTy = srcTy; + destRecordTy = destTy->castAs<ReferenceType>()->getPointeeType(); + } + + assert(srcRecordTy->isRecordType() && "source type must be a record type!"); + assert(!cir::MissingFeatures::emitTypeCheck()); + + if (dce->isAlwaysNull()) { + cgm.errorNYI(dce->getSourceRange(), "emitDynamicCastToNull"); + return {}; + } + + auto destCirTy = mlir::cast<cir::PointerType>(convertType(destTy)); + return cgm.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy, + destCirTy, isRefCast, thisAddr); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp index 59aa257..89e9ec4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp @@ -500,6 +500,26 @@ private: bool appendBitField(const FieldDecl *field, uint64_t fieldOffset, cir::IntAttr ci, bool allowOverwrite = false); + /// Applies zero-initialization to padding bytes before and within a field. + /// \param layout The record layout containing field offset information. + /// \param fieldNo The field index in the record. + /// \param field The field declaration. + /// \param allowOverwrite Whether to allow overwriting existing values. + /// \param sizeSoFar The current size processed, updated by this function. + /// \param zeroFieldSize Set to true if the field has zero size. + /// \returns true on success, false if padding could not be applied. + bool applyZeroInitPadding(const ASTRecordLayout &layout, unsigned fieldNo, + const FieldDecl &field, bool allowOverwrite, + CharUnits &sizeSoFar, bool &zeroFieldSize); + + /// Applies zero-initialization to trailing padding bytes in a record. + /// \param layout The record layout containing size information. + /// \param allowOverwrite Whether to allow overwriting existing values. + /// \param sizeSoFar The current size processed. + /// \returns true on success, false if padding could not be applied. 
+ bool applyZeroInitPadding(const ASTRecordLayout &layout, bool allowOverwrite, + CharUnits &sizeSoFar); + bool build(InitListExpr *ile, bool allowOverwrite); bool build(const APValue &val, const RecordDecl *rd, bool isPrimaryBase, const CXXRecordDecl *vTableClass, CharUnits baseOffset); @@ -548,6 +568,49 @@ bool ConstRecordBuilder::appendBitField(const FieldDecl *field, allowOverwrite); } +bool ConstRecordBuilder::applyZeroInitPadding( + const ASTRecordLayout &layout, unsigned fieldNo, const FieldDecl &field, + bool allowOverwrite, CharUnits &sizeSoFar, bool &zeroFieldSize) { + uint64_t startBitOffset = layout.getFieldOffset(fieldNo); + CharUnits startOffset = + cgm.getASTContext().toCharUnitsFromBits(startBitOffset); + if (sizeSoFar < startOffset) { + if (!appendBytes(sizeSoFar, computePadding(cgm, startOffset - sizeSoFar), + allowOverwrite)) + return false; + } + + if (!field.isBitField()) { + CharUnits fieldSize = + cgm.getASTContext().getTypeSizeInChars(field.getType()); + sizeSoFar = startOffset + fieldSize; + zeroFieldSize = fieldSize.isZero(); + } else { + const CIRGenRecordLayout &rl = + cgm.getTypes().getCIRGenRecordLayout(field.getParent()); + const CIRGenBitFieldInfo &info = rl.getBitFieldInfo(&field); + uint64_t endBitOffset = startBitOffset + info.size; + sizeSoFar = cgm.getASTContext().toCharUnitsFromBits(endBitOffset); + if (endBitOffset % cgm.getASTContext().getCharWidth() != 0) + sizeSoFar++; + zeroFieldSize = info.size == 0; + } + return true; +} + +bool ConstRecordBuilder::applyZeroInitPadding(const ASTRecordLayout &layout, + bool allowOverwrite, + CharUnits &sizeSoFar) { + CharUnits totalSize = layout.getSize(); + if (sizeSoFar < totalSize) { + if (!appendBytes(sizeSoFar, computePadding(cgm, totalSize - sizeSoFar), + allowOverwrite)) + return false; + } + sizeSoFar = totalSize; + return true; +} + bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) { RecordDecl *rd = ile->getType() ->castAs<clang::RecordType>() @@ -562,11 +625,9 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) { if (cxxrd->getNumBases()) return false; - if (cgm.shouldZeroInitPadding()) { - assert(!cir::MissingFeatures::recordZeroInitPadding()); - cgm.errorNYI(rd->getSourceRange(), "zero init padding"); - return false; - } + const bool zeroInitPadding = cgm.shouldZeroInitPadding(); + bool zeroFieldSize = false; + CharUnits sizeSoFar = CharUnits::Zero(); unsigned elementNo = 0; for (auto [index, field] : llvm::enumerate(rd->fields())) { @@ -596,7 +657,10 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) { continue; } - assert(!cir::MissingFeatures::recordZeroInitPadding()); + if (zeroInitPadding && + !applyZeroInitPadding(layout, index, *field, allowOverwrite, sizeSoFar, + zeroFieldSize)) + return false; // When emitting a DesignatedInitUpdateExpr, a nested InitListExpr // represents additional overwriting of our current constant value, and not @@ -641,8 +705,8 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) { } } - assert(!cir::MissingFeatures::recordZeroInitPadding()); - return true; + return !zeroInitPadding || + applyZeroInitPadding(layout, allowOverwrite, sizeSoFar); } namespace { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 7edd83e..637f9ef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1916,6 +1916,11 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) { return 
builder.createIntToPtr(middleVal, destCIRTy); } + case CK_Dynamic: { + Address v = cgf.emitPointerWithAlignment(subExpr); + const auto *dce = cast<CXXDynamicCastExpr>(ce); + return cgf.emitDynamicCast(v, dce); + } case CK_ArrayToPointerDecay: return cgf.emitArrayToPointerDecay(subExpr).getPointer(); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index db2adc2..d71de2f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1297,6 +1297,8 @@ public: void emitCXXThrowExpr(const CXXThrowExpr *e); + mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s); + void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args); @@ -1312,6 +1314,8 @@ public: mlir::LogicalResult emitDoStmt(const clang::DoStmt &s); + mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce); + /// Emit an expression as an initializer for an object (variable, field, etc.) /// at the given location. The expression is not necessarily the normal /// initializer for the object, and the address is not necessarily diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 0418174..9e490c6d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -59,7 +59,11 @@ public: void addImplicitStructorParams(CIRGenFunction &cgf, QualType &resTy, FunctionArgList ¶ms) override; - + mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &cgf, + const CXXDestructorDecl *dd, + CXXDtorType type, + bool forVirtualBase, + bool delegating) override; void emitCXXConstructors(const clang::CXXConstructorDecl *d) override; void emitCXXDestructors(const clang::CXXDestructorDecl *d) override; void emitCXXStructor(clang::GlobalDecl gd) override; @@ -68,6 +72,8 @@ public: CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) override; + void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor, + mlir::Value addr) override; void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override; void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) override; @@ -116,6 +122,16 @@ public: Address thisAddr, const CXXRecordDecl *classDecl, const CXXRecordDecl *baseClassDecl) override; + // The traditional clang CodeGen emits calls to `__dynamic_cast` directly into + // LLVM in the `emitDynamicCastCall` function. In CIR, `dynamic_cast` + // expressions are lowered to `cir.dyn_cast` ops instead of calls to runtime + // functions. So during CIRGen we don't need the `emitDynamicCastCall` + // function that clang CodeGen has. 
+ mlir::Value emitDynamicCast(CIRGenFunction &cgf, mlir::Location loc, + QualType srcRecordTy, QualType destRecordTy, + cir::PointerType destCIRTy, bool isRefCast, + Address src) override; + /**************************** RTTI Uniqueness ******************************/ protected: /// Returns true if the ABI requires RTTI type_info objects to be unique @@ -1492,11 +1508,8 @@ void CIRGenItaniumCXXABI::emitDestructorCall( CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) { GlobalDecl gd(dd, type); - if (needsVTTParameter(gd)) { - cgm.errorNYI(dd->getSourceRange(), "emitDestructorCall: VTT"); - } - - mlir::Value vtt = nullptr; + mlir::Value vtt = + getCXXDestructorImplicitParam(cgf, dd, type, forVirtualBase, delegating); ASTContext &astContext = cgm.getASTContext(); QualType vttTy = astContext.getPointerType(astContext.VoidPtrTy); assert(!cir::MissingFeatures::appleKext()); @@ -1507,6 +1520,34 @@ void CIRGenItaniumCXXABI::emitDestructorCall( vttTy, nullptr); } +void CIRGenItaniumCXXABI::registerGlobalDtor(const VarDecl *vd, + cir::FuncOp dtor, + mlir::Value addr) { + if (vd->isNoDestroy(cgm.getASTContext())) + return; + + if (vd->getTLSKind()) { + cgm.errorNYI(vd->getSourceRange(), "registerGlobalDtor: TLS"); + return; + } + + // HLSL doesn't support atexit. + if (cgm.getLangOpts().HLSL) { + cgm.errorNYI(vd->getSourceRange(), "registerGlobalDtor: HLSL"); + return; + } + + // The default behavior is to use atexit. This is handled in lowering + // prepare. Nothing to be done for CIR here. +} + +mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam( + CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type, + bool forVirtualBase, bool delegating) { + GlobalDecl gd(dd, type); + return cgf.getVTTParameter(gd, forVirtualBase, delegating); +} + // The idea here is creating a separate block for the throw with an // `UnreachableOp` as the terminator. So, we branch from the current block // to the throw block and create a block for the remaining operations. @@ -1796,3 +1837,143 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset( } return vbaseOffset; } + +static cir::FuncOp getBadCastFn(CIRGenFunction &cgf) { + // Prototype: void __cxa_bad_cast(); + + // TODO(cir): set the calling convention of the runtime function. + assert(!cir::MissingFeatures::opFuncCallingConv()); + + cir::FuncType fnTy = + cgf.getBuilder().getFuncType({}, cgf.getBuilder().getVoidTy()); + return cgf.cgm.createRuntimeFunction(fnTy, "__cxa_bad_cast"); +} + +// TODO(cir): This could be shared with classic codegen. +static CharUnits computeOffsetHint(ASTContext &astContext, + const CXXRecordDecl *src, + const CXXRecordDecl *dst) { + CXXBasePaths paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, + /*DetectVirtual=*/false); + + // If Dst is not derived from Src we can skip the whole computation below and + // return that Src is not a public base of Dst. Record all inheritance paths. + if (!dst->isDerivedFrom(src, paths)) + return CharUnits::fromQuantity(-2ULL); + + unsigned numPublicPaths = 0; + CharUnits offset; + + // Now walk all possible inheritance paths. + for (const CXXBasePath &path : paths) { + if (path.Access != AS_public) // Ignore non-public inheritance. + continue; + + ++numPublicPaths; + + for (const CXXBasePathElement &pathElement : path) { + // If the path contains a virtual base class we can't give any hint. + // -1: no hint. 
+ if (pathElement.Base->isVirtual()) + return CharUnits::fromQuantity(-1ULL); + + if (numPublicPaths > 1) // Won't use offsets, skip computation. + continue; + + // Accumulate the base class offsets. + const ASTRecordLayout &L = + astContext.getASTRecordLayout(pathElement.Class); + offset += L.getBaseClassOffset( + pathElement.Base->getType()->getAsCXXRecordDecl()); + } + } + + // -2: Src is not a public base of Dst. + if (numPublicPaths == 0) + return CharUnits::fromQuantity(-2ULL); + + // -3: Src is a multiple public base type but never a virtual base type. + if (numPublicPaths > 1) + return CharUnits::fromQuantity(-3ULL); + + // Otherwise, the Src type is a unique public nonvirtual base type of Dst. + // Return the offset of Src from the origin of Dst. + return offset; +} + +static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &cgf) { + // Prototype: + // void *__dynamic_cast(const void *sub, + // global_as const abi::__class_type_info *src, + // global_as const abi::__class_type_info *dst, + // std::ptrdiff_t src2dst_offset); + + mlir::Type voidPtrTy = cgf.getBuilder().getVoidPtrTy(); + mlir::Type rttiPtrTy = cgf.getBuilder().getUInt8PtrTy(); + mlir::Type ptrDiffTy = cgf.convertType(cgf.getContext().getPointerDiffType()); + + // TODO(cir): mark the function as nowind willreturn readonly. + assert(!cir::MissingFeatures::opFuncNoUnwind()); + assert(!cir::MissingFeatures::opFuncWillReturn()); + assert(!cir::MissingFeatures::opFuncReadOnly()); + + // TODO(cir): set the calling convention of the runtime function. + assert(!cir::MissingFeatures::opFuncCallingConv()); + + cir::FuncType FTy = cgf.getBuilder().getFuncType( + {voidPtrTy, rttiPtrTy, rttiPtrTy, ptrDiffTy}, voidPtrTy); + return cgf.cgm.createRuntimeFunction(FTy, "__dynamic_cast"); +} + +static cir::DynamicCastInfoAttr emitDynamicCastInfo(CIRGenFunction &cgf, + mlir::Location loc, + QualType srcRecordTy, + QualType destRecordTy) { + auto srcRtti = mlir::cast<cir::GlobalViewAttr>( + cgf.cgm.getAddrOfRTTIDescriptor(loc, srcRecordTy)); + auto destRtti = mlir::cast<cir::GlobalViewAttr>( + cgf.cgm.getAddrOfRTTIDescriptor(loc, destRecordTy)); + + cir::FuncOp runtimeFuncOp = getItaniumDynamicCastFn(cgf); + cir::FuncOp badCastFuncOp = getBadCastFn(cgf); + auto runtimeFuncRef = mlir::FlatSymbolRefAttr::get(runtimeFuncOp); + auto badCastFuncRef = mlir::FlatSymbolRefAttr::get(badCastFuncOp); + + const CXXRecordDecl *srcDecl = srcRecordTy->getAsCXXRecordDecl(); + const CXXRecordDecl *destDecl = destRecordTy->getAsCXXRecordDecl(); + CharUnits offsetHint = computeOffsetHint(cgf.getContext(), srcDecl, destDecl); + + mlir::Type ptrdiffTy = cgf.convertType(cgf.getContext().getPointerDiffType()); + auto offsetHintAttr = cir::IntAttr::get(ptrdiffTy, offsetHint.getQuantity()); + + return cir::DynamicCastInfoAttr::get(srcRtti, destRtti, runtimeFuncRef, + badCastFuncRef, offsetHintAttr); +} + +mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf, + mlir::Location loc, + QualType srcRecordTy, + QualType destRecordTy, + cir::PointerType destCIRTy, + bool isRefCast, Address src) { + bool isCastToVoid = destRecordTy.isNull(); + assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference"); + + if (isCastToVoid) { + cgm.errorNYI(loc, "emitDynamicCastToVoid"); + return {}; + } + + // If the destination is effectively final, the cast succeeds if and only + // if the dynamic type of the pointer is exactly the destination type. 
+ if (destRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() && + cgf.cgm.getCodeGenOpts().OptimizationLevel > 0) { + cgm.errorNYI(loc, "emitExactDynamicCast"); + return {}; + } + + cir::DynamicCastInfoAttr castInfo = + emitDynamicCastInfo(cgf, loc, srcRecordTy, destRecordTy); + return cgf.getBuilder().createDynCast(loc, src.getPointer(), destCIRTy, + isRefCast, castInfo); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 910c8a9..fe1ea56 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2079,6 +2079,29 @@ CIRGenModule::createCIRBuiltinFunction(mlir::Location loc, StringRef name, return fnOp; } +cir::FuncOp CIRGenModule::createRuntimeFunction(cir::FuncType ty, + StringRef name, mlir::ArrayAttr, + [[maybe_unused]] bool isLocal, + bool assumeConvergent) { + if (assumeConvergent) + errorNYI("createRuntimeFunction: assumeConvergent"); + if (isLocal) + errorNYI("createRuntimeFunction: local"); + + cir::FuncOp entry = getOrCreateCIRFunction(name, ty, GlobalDecl(), + /*forVtable=*/false); + + if (entry) { + // TODO(cir): set the attributes of the function. + assert(!cir::MissingFeatures::setLLVMFunctionFEnvAttributes()); + assert(!cir::MissingFeatures::opFuncCallingConv()); + assert(!cir::MissingFeatures::opGlobalDLLImportExport()); + entry.setDSOLocal(true); + } + + return entry; +} + mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibility(cir::GlobalOp op) { // MLIR doesn't accept public symbols declarations (only diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index c6a6681..f627bae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -480,6 +480,10 @@ public: cir::FuncType ty, const clang::FunctionDecl *fd); + cir::FuncOp createRuntimeFunction(cir::FuncType ty, llvm::StringRef name, + mlir::ArrayAttr = {}, bool isLocal = false, + bool assumeConvergent = false); + static constexpr const char *builtinCoroId = "__builtin_coro_id"; /// Given a builtin id for a function like "__builtin_fabsf", return a diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp index 4cf2237..5ba6bcb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp @@ -73,7 +73,7 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) { // Array sections are special, and we have to treat them that way. 
if (const auto *section = dyn_cast<ArraySectionExpr>(curVarExpr->IgnoreParenImpCasts())) - origType = ArraySectionExpr::getBaseOriginalType(section); + origType = section->getElementType(); mlir::Location exprLoc = cgm.getLoc(curVarExpr->getBeginLoc()); llvm::SmallVector<mlir::Value> bounds; @@ -84,16 +84,10 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) { e->printPretty(os, nullptr, getContext().getPrintingPolicy()); auto addBoundType = [&](const Expr *e) { - if (const auto *section = dyn_cast<ArraySectionExpr>(curVarExpr)) { - QualType baseTy = ArraySectionExpr::getBaseOriginalType( - section->getBase()->IgnoreParenImpCasts()); - if (auto *at = getContext().getAsArrayType(baseTy)) - boundTypes.push_back(at->getElementType()); - else - boundTypes.push_back(baseTy->getPointeeType()); - } else { + if (const auto *section = dyn_cast<ArraySectionExpr>(curVarExpr)) + boundTypes.push_back(section->getElementType()); + else boundTypes.push_back(curVarExpr->getType()); - } }; addBoundType(curVarExpr); @@ -113,8 +107,7 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) { if (const Expr *len = section->getLength()) { extent = emitOpenACCIntExpr(len); } else { - QualType baseTy = ArraySectionExpr::getBaseOriginalType( - section->getBase()->IgnoreParenImpCasts()); + QualType baseTy = section->getBaseType(); // We know this is the case as implicit lengths are only allowed for // array types with a constant size, or a dependent size. AND since // we are codegen we know we're not dependent. diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 0b8f8bf..cfd48a2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -154,6 +154,8 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s, return emitWhileStmt(cast<WhileStmt>(*s)); case Stmt::DoStmtClass: return emitDoStmt(cast<DoStmt>(*s)); + case Stmt::CXXTryStmtClass: + return emitCXXTryStmt(cast<CXXTryStmt>(*s)); case Stmt::CXXForRangeStmtClass: return emitCXXForRangeStmt(cast<CXXForRangeStmt>(*s), attr); case Stmt::OpenACCComputeConstructClass: @@ -199,7 +201,6 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s, case Stmt::CoroutineBodyStmtClass: return emitCoroutineBody(cast<CoroutineBodyStmt>(*s)); case Stmt::CoreturnStmtClass: - case Stmt::CXXTryStmtClass: case Stmt::IndirectGotoStmtClass: case Stmt::OMPParallelDirectiveClass: case Stmt::OMPTaskwaitDirectiveClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 94d856b..84f5977 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -327,9 +327,40 @@ cir::GlobalLinkageKind CIRGenModule::getVTableLinkage(const CXXRecordDecl *rd) { llvm_unreachable("Should not have been asked to emit this"); } } + // -fapple-kext mode does not support weak linkage, so we must use + // internal linkage. + if (astContext.getLangOpts().AppleKext) + return cir::GlobalLinkageKind::InternalLinkage; + + auto discardableODRLinkage = cir::GlobalLinkageKind::LinkOnceODRLinkage; + auto nonDiscardableODRLinkage = cir::GlobalLinkageKind::WeakODRLinkage; + if (rd->hasAttr<DLLExportAttr>()) { + // Cannot discard exported vtables. + discardableODRLinkage = nonDiscardableODRLinkage; + } else if (rd->hasAttr<DLLImportAttr>()) { + // Imported vtables are available externally. 
+ discardableODRLinkage = cir::GlobalLinkageKind::AvailableExternallyLinkage; + nonDiscardableODRLinkage = + cir::GlobalLinkageKind::AvailableExternallyLinkage; + } + + switch (rd->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ExplicitSpecialization: + case TSK_ImplicitInstantiation: + return discardableODRLinkage; + + case TSK_ExplicitInstantiationDeclaration: { + errorNYI(rd->getSourceRange(), + "getVTableLinkage: explicit instantiation declaration"); + return cir::GlobalLinkageKind::ExternalLinkage; + } + + case TSK_ExplicitInstantiationDefinition: + return nonDiscardableODRLinkage; + } - errorNYI(rd->getSourceRange(), "getVTableLinkage: no key function"); - return cir::GlobalLinkageKind::ExternalLinkage; + llvm_unreachable("Invalid TemplateSpecializationKind!"); } cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *rd) { diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h index 66c1f76..67a72f5 100644 --- a/clang/lib/CIR/CodeGen/EHScopeStack.h +++ b/clang/lib/CIR/CodeGen/EHScopeStack.h @@ -108,9 +108,6 @@ public: /// // \param flags cleanup kind. virtual void emit(CIRGenFunction &cgf) = 0; - - // This is a placeholder until EHScope is implemented. - virtual size_t getSize() const = 0; }; private: diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index df7a1a3..3fc5b06 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -4,6 +4,7 @@ add_clang_library(MLIRCIRTransforms FlattenCFG.cpp HoistAllocas.cpp LoweringPrepare.cpp + LoweringPrepareItaniumCXXABI.cpp GotoSolver.cpp DEPENDS diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index bc917d0..dbff0b9 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "LoweringPrepareCXXABI.h" #include "PassDetail.h" #include "clang/AST/ASTContext.h" #include "clang/Basic/Module.h" @@ -41,6 +42,16 @@ static SmallString<128> getTransformedFileName(mlir::ModuleOp mlirModule) { return fileName; } +/// Return the FuncOp called by `callOp`. 
+static cir::FuncOp getCalledFunction(cir::CallOp callOp) { + mlir::SymbolRefAttr sym = llvm::dyn_cast_if_present<mlir::SymbolRefAttr>( + callOp.getCallableForCallee()); + if (!sym) + return nullptr; + return dyn_cast_or_null<cir::FuncOp>( + mlir::SymbolTable::lookupNearestSymbolFrom(callOp, sym)); +} + namespace { struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> { LoweringPreparePass() = default; @@ -52,6 +63,7 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> { void lowerComplexMulOp(cir::ComplexMulOp op); void lowerUnaryOp(cir::UnaryOp op); void lowerGlobalOp(cir::GlobalOp op); + void lowerDynamicCastOp(cir::DynamicCastOp op); void lowerArrayDtor(cir::ArrayDtor op); void lowerArrayCtor(cir::ArrayCtor op); @@ -69,12 +81,21 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> { cir::FuncType type, cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage); + cir::GlobalOp buildRuntimeVariable( + mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, + mlir::Type type, + cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage, + cir::VisibilityKind visibility = cir::VisibilityKind::Default); + /// /// AST related /// ----------- clang::ASTContext *astCtx; + // Helper for lowering C++ ABI specific operations. + std::shared_ptr<cir::LoweringPrepareCXXABI> cxxABI; + /// Tracks current module. mlir::ModuleOp mlirModule; @@ -85,11 +106,47 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> { /// List of ctors and their priorities to be called before main() llvm::SmallVector<std::pair<std::string, uint32_t>, 4> globalCtorList; - void setASTContext(clang::ASTContext *c) { astCtx = c; } + void setASTContext(clang::ASTContext *c) { + astCtx = c; + switch (c->getCXXABIKind()) { + case clang::TargetCXXABI::GenericItanium: + // We'll need X86-specific support for handling vaargs lowering, but for + // now the Itanium ABI will work. + assert(!cir::MissingFeatures::loweringPrepareX86CXXABI()); + cxxABI.reset(cir::LoweringPrepareCXXABI::createItaniumABI()); + break; + case clang::TargetCXXABI::GenericAArch64: + case clang::TargetCXXABI::AppleARM64: + assert(!cir::MissingFeatures::loweringPrepareAArch64XXABI()); + cxxABI.reset(cir::LoweringPrepareCXXABI::createItaniumABI()); + break; + default: + llvm_unreachable("NYI"); + } + } }; } // namespace +cir::GlobalOp LoweringPreparePass::buildRuntimeVariable( + mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, + mlir::Type type, cir::GlobalLinkageKind linkage, + cir::VisibilityKind visibility) { + cir::GlobalOp g = dyn_cast_or_null<cir::GlobalOp>( + mlir::SymbolTable::lookupNearestSymbolFrom( + mlirModule, mlir::StringAttr::get(mlirModule->getContext(), name))); + if (!g) { + g = cir::GlobalOp::create(builder, loc, name, type); + g.setLinkageAttr( + cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage)); + mlir::SymbolTable::setSymbolVisibility( + g, mlir::SymbolTable::Visibility::Private); + g.setGlobalVisibilityAttr( + cir::VisibilityAttr::get(builder.getContext(), visibility)); + } + return g; +} + cir::FuncOp LoweringPreparePass::buildRuntimeFunction( mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, cir::FuncType type, cir::GlobalLinkageKind linkage) { @@ -640,7 +697,8 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) { // Create a variable initialization function. 
CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op); - auto fnType = cir::FuncType::get({}, builder.getVoidTy()); + cir::VoidType voidTy = builder.getVoidTy(); + auto fnType = cir::FuncType::get({}, voidTy); FuncOp f = buildRuntimeFunction(builder, fnName, op.getLoc(), fnType, cir::GlobalLinkageKind::InternalLinkage); @@ -655,8 +713,57 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) { // Register the destructor call with __cxa_atexit mlir::Region &dtorRegion = op.getDtorRegion(); if (!dtorRegion.empty()) { - assert(!cir::MissingFeatures::opGlobalDtorLowering()); - llvm_unreachable("dtor region lowering is NYI"); + assert(!cir::MissingFeatures::astVarDeclInterface()); + assert(!cir::MissingFeatures::opGlobalThreadLocal()); + // Create a variable that binds the atexit to this shared object. + builder.setInsertionPointToStart(&mlirModule.getBodyRegion().front()); + cir::GlobalOp handle = buildRuntimeVariable( + builder, "__dso_handle", op.getLoc(), builder.getI8Type(), + cir::GlobalLinkageKind::ExternalLinkage, cir::VisibilityKind::Hidden); + + // Look for the destructor call in dtorBlock + mlir::Block &dtorBlock = dtorRegion.front(); + cir::CallOp dtorCall; + for (auto op : reverse(dtorBlock.getOps<cir::CallOp>())) { + dtorCall = op; + break; + } + assert(dtorCall && "Expected a dtor call"); + cir::FuncOp dtorFunc = getCalledFunction(dtorCall); + assert(dtorFunc && "Expected a dtor call"); + + // Create a runtime helper function: + // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); + auto voidPtrTy = cir::PointerType::get(voidTy); + auto voidFnTy = cir::FuncType::get({voidPtrTy}, voidTy); + auto voidFnPtrTy = cir::PointerType::get(voidFnTy); + auto handlePtrTy = cir::PointerType::get(handle.getSymType()); + auto fnAtExitType = + cir::FuncType::get({voidFnPtrTy, voidPtrTy, handlePtrTy}, voidTy); + const char *nameAtExit = "__cxa_atexit"; + cir::FuncOp fnAtExit = + buildRuntimeFunction(builder, nameAtExit, op.getLoc(), fnAtExitType); + + // Replace the dtor call with a call to __cxa_atexit(&dtor, &var, + // &__dso_handle) + builder.setInsertionPointAfter(dtorCall); + mlir::Value args[3]; + auto dtorPtrTy = cir::PointerType::get(dtorFunc.getFunctionType()); + // dtorPtrTy + args[0] = cir::GetGlobalOp::create(builder, dtorCall.getLoc(), dtorPtrTy, + dtorFunc.getSymName()); + args[0] = cir::CastOp::create(builder, dtorCall.getLoc(), voidFnPtrTy, + cir::CastKind::bitcast, args[0]); + args[1] = + cir::CastOp::create(builder, dtorCall.getLoc(), voidPtrTy, + cir::CastKind::bitcast, dtorCall.getArgOperand(0)); + args[2] = cir::GetGlobalOp::create(builder, handle.getLoc(), handlePtrTy, + handle.getSymName()); + builder.createCallOp(dtorCall.getLoc(), fnAtExit, args); + dtorCall->erase(); + entryBB->getOperations().splice(entryBB->end(), dtorBlock.getOperations(), + dtorBlock.begin(), + std::prev(dtorBlock.end())); } // Replace cir.yield with cir.return @@ -666,11 +773,12 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) { mlir::Block &block = op.getCtorRegion().front(); yieldOp = &block.getOperations().back(); } else { - assert(!cir::MissingFeatures::opGlobalDtorLowering()); - llvm_unreachable("dtor region lowering is NYI"); + assert(!dtorRegion.empty()); + mlir::Block &block = dtorRegion.front(); + yieldOp = &block.getOperations().back(); } - assert(isa<YieldOp>(*yieldOp)); + assert(isa<cir::YieldOp>(*yieldOp)); cir::ReturnOp::create(builder, yieldOp->getLoc()); return f; } @@ -715,7 +823,10 @@ void 
LoweringPreparePass::buildGlobalCtorDtorList() { mlir::ArrayAttr::get(&getContext(), globalCtors)); } - assert(!cir::MissingFeatures::opGlobalDtorLowering()); + // We will eventually need to populate a global_dtor list, but that's not + // needed for globals with destructors. It will only be needed for functions + // that are marked as global destructors with an attribute. + assert(!cir::MissingFeatures::opGlobalDtorList()); } void LoweringPreparePass::buildCXXGlobalInitFunc() { @@ -761,6 +872,17 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { cir::ReturnOp::create(builder, f.getLoc()); } +void LoweringPreparePass::lowerDynamicCastOp(DynamicCastOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op); + + assert(astCtx && "AST context is not available during lowering prepare"); + auto loweredValue = cxxABI->lowerDynamicCast(builder, *astCtx, op); + + op.replaceAllUsesWith(loweredValue); + op.erase(); +} + static void lowerArrayDtorCtorIntoLoop(cir::CIRBaseBuilderTy &builder, clang::ASTContext *astCtx, mlir::Operation *op, mlir::Type eltTy, @@ -865,6 +987,8 @@ void LoweringPreparePass::runOnOp(mlir::Operation *op) { lowerComplexMulOp(complexMul); else if (auto glob = mlir::dyn_cast<cir::GlobalOp>(op)) lowerGlobalOp(glob); + else if (auto dynamicCast = mlir::dyn_cast<cir::DynamicCastOp>(op)) + lowerDynamicCastOp(dynamicCast); else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op)) lowerUnaryOp(unary); } @@ -878,8 +1002,8 @@ void LoweringPreparePass::runOnOperation() { op->walk([&](mlir::Operation *op) { if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp, - cir::ComplexMulOp, cir::ComplexDivOp, cir::GlobalOp, - cir::UnaryOp>(op)) + cir::ComplexMulOp, cir::ComplexDivOp, cir::DynamicCastOp, + cir::GlobalOp, cir::UnaryOp>(op)) opsToTransform.push_back(op); }); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h new file mode 100644 index 0000000..2582c332 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h @@ -0,0 +1,38 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides the LoweringPrepareCXXABI class, which is the base class +// for ABI specific functionalities that are required during LLVM lowering +// prepare.
+// +//===----------------------------------------------------------------------===// + +#ifndef CIR_DIALECT_TRANSFORMS_LOWERINGPREPARECXXABI_H +#define CIR_DIALECT_TRANSFORMS_LOWERINGPREPARECXXABI_H + +#include "mlir/IR/Value.h" +#include "clang/AST/ASTContext.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +namespace cir { + +class LoweringPrepareCXXABI { +public: + static LoweringPrepareCXXABI *createItaniumABI(); + + virtual ~LoweringPrepareCXXABI() = default; + + virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, + cir::DynamicCastOp op) = 0; +}; + +} // namespace cir + +#endif // CIR_DIALECT_TRANSFORMS_LOWERINGPREPARECXXABI_H diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp new file mode 100644 index 0000000..7d3c711 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp @@ -0,0 +1,126 @@ +//===--------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with +// LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===--------------------------------------------------------------------===// +// +// This file provides Itanium C++ ABI-specific code +// that is used during LLVM IR lowering prepare. +// +//===--------------------------------------------------------------------===// + +#include "LoweringPrepareCXXABI.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Value.h" +#include "mlir/IR/ValueRange.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/MissingFeatures.h" + +class LoweringPrepareItaniumCXXABI : public cir::LoweringPrepareCXXABI { +public: + mlir::Value lowerDynamicCast(cir::CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, + cir::DynamicCastOp op) override; +}; + +cir::LoweringPrepareCXXABI *cir::LoweringPrepareCXXABI::createItaniumABI() { + return new LoweringPrepareItaniumCXXABI(); +} + +static void buildBadCastCall(cir::CIRBaseBuilderTy &builder, mlir::Location loc, + mlir::FlatSymbolRefAttr badCastFuncRef) { + builder.createCallOp(loc, badCastFuncRef, cir::VoidType(), + mlir::ValueRange{}); + // TODO(cir): Set the 'noreturn' attribute on the function.
+ assert(!cir::MissingFeatures::opFuncNoReturn()); + cir::UnreachableOp::create(builder, loc); + builder.clearInsertionPoint(); +} + +static mlir::Value +buildDynamicCastAfterNullCheck(cir::CIRBaseBuilderTy &builder, + cir::DynamicCastOp op) { + mlir::Location loc = op->getLoc(); + mlir::Value srcValue = op.getSrc(); + cir::DynamicCastInfoAttr castInfo = op.getInfo().value(); + + // TODO(cir): consider address space + assert(!cir::MissingFeatures::addressSpace()); + + mlir::Value srcPtr = builder.createBitcast(srcValue, builder.getVoidPtrTy()); + cir::ConstantOp srcRtti = builder.getConstant(loc, castInfo.getSrcRtti()); + cir::ConstantOp destRtti = builder.getConstant(loc, castInfo.getDestRtti()); + cir::ConstantOp offsetHint = + builder.getConstant(loc, castInfo.getOffsetHint()); + + mlir::FlatSymbolRefAttr dynCastFuncRef = castInfo.getRuntimeFunc(); + mlir::Value dynCastFuncArgs[4] = {srcPtr, srcRtti, destRtti, offsetHint}; + + mlir::Value castedPtr = + builder + .createCallOp(loc, dynCastFuncRef, builder.getVoidPtrTy(), + dynCastFuncArgs) + .getResult(); + + assert(mlir::isa<cir::PointerType>(castedPtr.getType()) && + "the return value of __dynamic_cast should be a ptr"); + + /// C++ [expr.dynamic.cast]p9: + /// A failed cast to reference type throws std::bad_cast + if (op.isRefCast()) { + // Emit a cir.if that checks the casted value. + mlir::Value castedValueIsNull = builder.createPtrIsNull(castedPtr); + builder.create<cir::IfOp>( + loc, castedValueIsNull, false, [&](mlir::OpBuilder &, mlir::Location) { + buildBadCastCall(builder, loc, castInfo.getBadCastFunc()); + }); + } + + // Note that castedPtr is a void*. Cast it to a pointer to the destination + // type before return. + return builder.createBitcast(castedPtr, op.getType()); +} + +static mlir::Value +buildDynamicCastToVoidAfterNullCheck(cir::CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, + cir::DynamicCastOp op) { + llvm_unreachable("dynamic cast to void is NYI"); +} + +mlir::Value +LoweringPrepareItaniumCXXABI::lowerDynamicCast(cir::CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, + cir::DynamicCastOp op) { + mlir::Location loc = op->getLoc(); + mlir::Value srcValue = op.getSrc(); + + assert(!cir::MissingFeatures::emitTypeCheck()); + + if (op.isRefCast()) + return buildDynamicCastAfterNullCheck(builder, op); + + mlir::Value srcValueIsNotNull = builder.createPtrToBoolCast(srcValue); + return builder + .create<cir::TernaryOp>( + loc, srcValueIsNotNull, + [&](mlir::OpBuilder &, mlir::Location) { + mlir::Value castedValue = + op.isCastToVoid() + ? 
buildDynamicCastToVoidAfterNullCheck(builder, astCtx, op) + : buildDynamicCastAfterNullCheck(builder, op); + builder.createYield(loc, castedValue); + }, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield( + loc, builder.getNullPtr(op.getType(), loc).getResult()); + }) + .getResult(); +} diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index a80a295..26e0ba9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1739,7 +1739,6 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( const mlir::LLVM::Linkage linkage = convertLinkage(op.getLinkage()); const StringRef symbol = op.getSymName(); SmallVector<mlir::NamedAttribute> attributes; - mlir::SymbolRefAttr comdatAttr = getComdatAttr(op, rewriter); if (init.has_value()) { if (mlir::isa<cir::FPAttr, cir::IntAttr, cir::BoolAttr>(init.value())) { @@ -1771,9 +1770,14 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( } // Rewrite op. - rewriter.replaceOpWithNewOp<mlir::LLVM::GlobalOp>( + mlir::SymbolRefAttr comdatAttr = getComdatAttr(op, rewriter); + auto newOp = rewriter.replaceOpWithNewOp<mlir::LLVM::GlobalOp>( op, llvmType, isConst, linkage, symbol, init.value_or(mlir::Attribute()), alignment, addrSpace, isDsoLocal, isThreadLocal, comdatAttr, attributes); + newOp.setVisibility_Attr(mlir::LLVM::VisibilityAttr::get( + getContext(), lowerCIRVisibilityToLLVMVisibility( + op.getGlobalVisibilityAttr().getValue()))); + return mlir::success(); } @@ -2594,6 +2598,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { return std::make_pair(ctorAttr.getName(), ctorAttr.getPriority()); }); + assert(!cir::MissingFeatures::opGlobalDtorList()); } mlir::LogicalResult CIRToLLVMBrOpLowering::matchAndRewrite( diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index c5371e4..df28641 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -2012,13 +2012,6 @@ static void getTrivialDefaultFunctionAttributes( FuncAttrs.addAttribute("no-infs-fp-math", "true"); if (LangOpts.NoHonorNaNs) FuncAttrs.addAttribute("no-nans-fp-math", "true"); - if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip && - LangOpts.NoSignedZero && LangOpts.ApproxFunc && - (LangOpts.getDefaultFPContractMode() == - LangOptions::FPModeKind::FPM_Fast || - LangOpts.getDefaultFPContractMode() == - LangOptions::FPModeKind::FPM_FastHonorPragmas)) - FuncAttrs.addAttribute("unsafe-fp-math", "true"); if (CodeGenOpts.SoftFloat) FuncAttrs.addAttribute("use-soft-float", "true"); FuncAttrs.addAttribute("stack-protector-buffer-size", diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h index 935b508..1ef8a3f 100644 --- a/clang/lib/CodeGen/CGCall.h +++ b/clang/lib/CodeGen/CGCall.h @@ -410,10 +410,10 @@ public: /// This is useful for adding attrs to bitcode modules that you want to link /// with but don't control, such as CUDA's libdevice. When linking with such /// a bitcode library, you might want to set e.g. its functions' -/// "unsafe-fp-math" attribute to match the attr of the functions you're +/// "denormal-fp-math" attribute to match the attr of the functions you're /// codegen'ing. 
Otherwise, LLVM will interpret the bitcode module's lack of -/// unsafe-fp-math attrs as tantamount to unsafe-fp-math=false, and then LLVM -/// will propagate unsafe-fp-math=false up to every transitive caller of a +/// denormal-fp-math attrs as tantamount to denormal-fp-math=ieee, and then LLVM +/// will propagate denormal-fp-math=ieee up to every transitive caller of a /// function in the bitcode library! /// /// With the exception of fast-math attrs, this will only make the attributes diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp index 6c0fc8d..4f2f5a76 100644 --- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp +++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp @@ -352,6 +352,19 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID, SmallVector<Value *> Args{OrderID, SpaceOp, RangeOp, IndexOp, Name}; return Builder.CreateIntrinsic(HandleTy, IntrinsicID, Args); } + case Builtin::BI__builtin_hlsl_resource_counterhandlefromimplicitbinding: { + Value *MainHandle = EmitScalarExpr(E->getArg(0)); + if (!CGM.getTriple().isSPIRV()) + return MainHandle; + + llvm::Type *HandleTy = CGM.getTypes().ConvertType(E->getType()); + Value *OrderID = EmitScalarExpr(E->getArg(1)); + Value *SpaceOp = EmitScalarExpr(E->getArg(2)); + llvm::Intrinsic::ID IntrinsicID = + llvm::Intrinsic::spv_resource_counterhandlefromimplicitbinding; + SmallVector<Value *> Args{MainHandle, OrderID, SpaceOp}; + return Builder.CreateIntrinsic(HandleTy, IntrinsicID, Args); + } case Builtin::BI__builtin_hlsl_resource_nonuniformindex: { Value *IndexOp = EmitScalarExpr(E->getArg(0)); llvm::Type *RetTy = ConvertType(E->getType()); diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp index ede1780..603cef9 100644 --- a/clang/lib/CodeGen/CGHLSLRuntime.cpp +++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp @@ -145,19 +145,29 @@ static CXXMethodDecl *lookupResourceInitMethodAndSetupArgs( // explicit binding auto *RegSlot = llvm::ConstantInt::get(CGM.IntTy, Binding.getSlot()); Args.add(RValue::get(RegSlot), AST.UnsignedIntTy); - CreateMethod = lookupMethod(ResourceDecl, "__createFromBinding", SC_Static); + const char *Name = Binding.hasCounterImplicitOrderID() + ? "__createFromBindingWithImplicitCounter" + : "__createFromBinding"; + CreateMethod = lookupMethod(ResourceDecl, Name, SC_Static); } else { // implicit binding auto *OrderID = llvm::ConstantInt::get(CGM.IntTy, Binding.getImplicitOrderID()); Args.add(RValue::get(OrderID), AST.UnsignedIntTy); - CreateMethod = - lookupMethod(ResourceDecl, "__createFromImplicitBinding", SC_Static); + const char *Name = Binding.hasCounterImplicitOrderID() + ? 
"__createFromImplicitBindingWithImplicitCounter" + : "__createFromImplicitBinding"; + CreateMethod = lookupMethod(ResourceDecl, Name, SC_Static); } Args.add(RValue::get(Space), AST.UnsignedIntTy); Args.add(RValue::get(Range), AST.IntTy); Args.add(RValue::get(Index), AST.UnsignedIntTy); Args.add(RValue::get(NameStr), AST.getPointerType(AST.CharTy.withConst())); + if (Binding.hasCounterImplicitOrderID()) { + uint32_t CounterBinding = Binding.getCounterImplicitOrderID(); + auto *CounterOrderID = llvm::ConstantInt::get(CGM.IntTy, CounterBinding); + Args.add(RValue::get(CounterOrderID), AST.UnsignedIntTy); + } return CreateMethod; } diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index acf8de4..8862853 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -183,11 +183,6 @@ void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) { mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs()); mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs()); mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero()); - mergeFnAttrValue( - "unsafe-fp-math", - FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() && - FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() && - FPFeatures.allowFPContractAcrossStatement()); } CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() { diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp index 6596ec0..5049a0a 100644 --- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp +++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp @@ -11,8 +11,11 @@ //===----------------------------------------------------------------------===// #include "CGBuiltin.h" +#include "CodeGenFunction.h" #include "clang/Basic/TargetBuiltins.h" +#include "clang/Frontend/FrontendDiagnostic.h" #include "llvm/Analysis/ValueTracking.h" +#include "llvm/CodeGen/MachineFunction.h" #include "llvm/IR/IntrinsicsAMDGPU.h" #include "llvm/IR/IntrinsicsR600.h" #include "llvm/IR/MemoryModelRelaxationAnnotations.h" @@ -181,6 +184,74 @@ static Value *EmitAMDGCNBallotForExec(CodeGenFunction &CGF, const CallExpr *E, return Call; } +static llvm::Value *loadTextureDescPtorAsVec8I32(CodeGenFunction &CGF, + llvm::Value *RsrcPtr) { + auto &B = CGF.Builder; + auto *VecTy = llvm::FixedVectorType::get(B.getInt32Ty(), 8); + + if (RsrcPtr->getType() == VecTy) + return RsrcPtr; + + if (RsrcPtr->getType()->isIntegerTy(32)) { + llvm::PointerType *VecPtrTy = + llvm::PointerType::get(CGF.getLLVMContext(), 8); + llvm::Value *Ptr = B.CreateIntToPtr(RsrcPtr, VecPtrTy, "tex.rsrc.from.int"); + return B.CreateAlignedLoad(VecTy, Ptr, llvm::Align(32), "tex.rsrc.val"); + } + + if (RsrcPtr->getType()->isPointerTy()) { + auto *VecPtrTy = llvm::PointerType::get( + CGF.getLLVMContext(), RsrcPtr->getType()->getPointerAddressSpace()); + llvm::Value *Typed = B.CreateBitCast(RsrcPtr, VecPtrTy, "tex.rsrc.typed"); + return B.CreateAlignedLoad(VecTy, Typed, llvm::Align(32), "tex.rsrc.val"); + } + + const auto &DL = CGF.CGM.getDataLayout(); + if (DL.getTypeSizeInBits(RsrcPtr->getType()) == 256) + return B.CreateBitCast(RsrcPtr, VecTy, "tex.rsrc.val"); + + llvm::report_fatal_error("Unexpected texture resource argument form"); +} + +llvm::CallInst * +emitAMDGCNImageOverloadedReturnType(clang::CodeGen::CodeGenFunction &CGF, + const clang::CallExpr *E, + unsigned IntrinsicID, bool IsImageStore) { + auto findTextureDescIndex = [&CGF](const CallExpr *E) -> 
unsigned { + QualType TexQT = CGF.getContext().AMDGPUTextureTy; + for (unsigned I = 0, N = E->getNumArgs(); I < N; ++I) { + QualType ArgTy = E->getArg(I)->getType(); + if (ArgTy == TexQT) { + return I; + } + + if (ArgTy.getCanonicalType() == TexQT.getCanonicalType()) { + return I; + } + } + + return ~0U; + }; + + clang::SmallVector<llvm::Value *, 10> Args; + unsigned RsrcIndex = findTextureDescIndex(E); + + if (RsrcIndex == ~0U) { + llvm::report_fatal_error("No texture resource argument for image builtin"); + } + + for (unsigned I = 0; I < E->getNumArgs(); ++I) { + llvm::Value *V = CGF.EmitScalarExpr(E->getArg(I)); + if (I == RsrcIndex) + V = loadTextureDescPtrAsVec8I32(CGF, V); + Args.push_back(V); + } + + llvm::Type *RetTy = IsImageStore ? CGF.VoidTy : CGF.ConvertType(E->getType()); + llvm::CallInst *Call = CGF.Builder.CreateIntrinsic(RetTy, IntrinsicID, Args); + return Call; +} + // Emit an intrinsic that has 1 float or double operand, and 1 integer. static Value *emitFPIntBuiltin(CodeGenFunction &CGF, const CallExpr *E, @@ -937,6 +1008,136 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, return Builder.CreateInsertElement(I0, A, 1); } + case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_1d, false); + case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_1darray, false); + case AMDGPU::BI__builtin_amdgcn_image_load_2d_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_2d, false); + case AMDGPU::BI__builtin_amdgcn_image_load_2darray_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_2darray, false); + case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_3d, false); + case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_cube, false); + case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_mip_1d, false); + case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_mip_1darray, false); + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_mip_2d, false); + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f32_i32: + case
AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_mip_2darray, false); + case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_mip_3d, false); + case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_load_mip_cube, false); + case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_1d, true); + case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_1darray, true); + case AMDGPU::BI__builtin_amdgcn_image_store_2d_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_2d, true); + case AMDGPU::BI__builtin_amdgcn_image_store_2darray_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_2darray, true); + case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_3d, true); + case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_cube, true); + case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_mip_1d, true); + case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_mip_1darray, true); + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_mip_2d, true); + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_mip_2darray, true); + case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f16_i32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_mip_3d, true); + case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f16_i32: + 
return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_store_mip_cube, true); + case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f16_f32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_sample_1d, false); + case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f16_f32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_sample_1darray, false); + case AMDGPU::BI__builtin_amdgcn_image_sample_2d_f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f16_f32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_sample_2d, false); + case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f16_f32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_sample_2darray, false); + case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f16_f32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_sample_3d, false); + case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f16_f32: + return emitAMDGCNImageOverloadedReturnType( + *this, E, Intrinsic::amdgcn_image_sample_cube, false); case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_16x16x128_f8f6f4: case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_32x32x64_f8f6f4: { llvm::FixedVectorType *VT = FixedVectorType::get(Builder.getInt32Ty(), 8); diff --git a/clang/lib/Driver/Distro.cpp b/clang/lib/Driver/Distro.cpp index 90e5a39..8a5a9fc 100644 --- a/clang/lib/Driver/Distro.cpp +++ b/clang/lib/Driver/Distro.cpp @@ -61,11 +61,6 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) { if (Version == Distro::UnknownDistro && Line.starts_with("DISTRIB_CODENAME=")) Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(17)) - .Case("hardy", Distro::UbuntuHardy) - .Case("intrepid", Distro::UbuntuIntrepid) - .Case("jaunty", Distro::UbuntuJaunty) - .Case("karmic", Distro::UbuntuKarmic) - .Case("lucid", Distro::UbuntuLucid) .Case("maverick", Distro::UbuntuMaverick) .Case("natty", Distro::UbuntuNatty) .Case("oneiric", Distro::UbuntuOneiric) diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp index 16e35b0..8eb4d34e 100644 --- a/clang/lib/Driver/ToolChains/Linux.cpp +++ b/clang/lib/Driver/ToolChains/Linux.cpp @@ -301,11 +301,10 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args) // .gnu.hash needs symbols to be grouped by hash code whereas the MIPS // ABI requires a mapping between the GOT and the symbol table. // Android loader does not support .gnu.hash until API 23. - // Hexagon linker/loader does not support .gnu.hash + // Hexagon linker/loader does not support .gnu.hash. + // SUSE SLES 11 will stop being supported Mar 2028. 
if (!IsMips && !IsHexagon) { - if (Distro.IsOpenSUSE() || Distro == Distro::UbuntuLucid || - Distro == Distro::UbuntuJaunty || Distro == Distro::UbuntuKarmic || - (IsAndroid && Triple.isAndroidVersionLT(23))) + if (Distro.IsOpenSUSE() || (IsAndroid && Triple.isAndroidVersionLT(23))) ExtraOpts.push_back("--hash-style=both"); else ExtraOpts.push_back("--hash-style=gnu"); diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp index d6cd7eb..e4b158e 100644 --- a/clang/lib/Parse/ParseDecl.cpp +++ b/clang/lib/Parse/ParseDecl.cpp @@ -1934,12 +1934,12 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration( bool RequireSemi, ForRangeInit *FRI, SourceLocation *DeclSpecStart) { // Need to retain these for diagnostics before we add them to the DeclSepc. ParsedAttributesView OriginalDeclSpecAttrs; - OriginalDeclSpecAttrs.addAll(DeclSpecAttrs.begin(), DeclSpecAttrs.end()); + OriginalDeclSpecAttrs.prepend(DeclSpecAttrs.begin(), DeclSpecAttrs.end()); OriginalDeclSpecAttrs.Range = DeclSpecAttrs.Range; // Parse the common declaration-specifiers piece. ParsingDeclSpec DS(*this); - DS.takeAttributesFrom(DeclSpecAttrs); + DS.takeAttributesAppendingFrom(DeclSpecAttrs); ParsedTemplateInfo TemplateInfo; DeclSpecContext DSContext = getDeclSpecContextFromDeclaratorContext(Context); @@ -2135,7 +2135,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS, // list. This ensures that we will not attempt to interpret them as statement // attributes higher up the callchain. ParsedAttributes LocalAttrs(AttrFactory); - LocalAttrs.takeAllFrom(Attrs); + LocalAttrs.takeAllPrependingFrom(Attrs); ParsingDeclarator D(*this, DS, LocalAttrs, Context); if (TemplateInfo.TemplateParams) D.setTemplateParameterLists(*TemplateInfo.TemplateParams); @@ -3462,7 +3462,7 @@ void Parser::ParseDeclarationSpecifiers( PA.setInvalid(); } - DS.takeAttributesFrom(attrs); + DS.takeAttributesAppendingFrom(attrs); } // If this is not a declaration specifier token, we're done reading decl @@ -3689,7 +3689,7 @@ void Parser::ParseDeclarationSpecifiers( if (ParseImplicitInt(DS, &SS, TemplateInfo, AS, DSContext, Attrs)) { if (!Attrs.empty()) { AttrsLastTime = true; - attrs.takeAllFrom(Attrs); + attrs.takeAllAppendingFrom(Attrs); } continue; } @@ -3854,7 +3854,7 @@ void Parser::ParseDeclarationSpecifiers( if (ParseImplicitInt(DS, nullptr, TemplateInfo, AS, DSContext, Attrs)) { if (!Attrs.empty()) { AttrsLastTime = true; - attrs.takeAllFrom(Attrs); + attrs.takeAllAppendingFrom(Attrs); } continue; } @@ -4463,7 +4463,7 @@ void Parser::ParseDeclarationSpecifiers( // take them over and handle them here. if (!Attributes.empty()) { AttrsLastTime = true; - attrs.takeAllFrom(Attributes); + attrs.takeAllAppendingFrom(Attributes); } continue; } @@ -4830,7 +4830,7 @@ void Parser::ParseLexedCAttribute(LateParsedAttribute &LA, bool EnterScope, ConsumeAnyToken(); if (OutAttrs) { - OutAttrs->takeAllFrom(Attrs); + OutAttrs->takeAllAppendingFrom(Attrs); } } @@ -6122,7 +6122,7 @@ void Parser::ParseTypeQualifierListOpt( isAllowedCXX11AttributeSpecifier()) { ParsedAttributes Attrs(AttrFactory); ParseCXX11Attributes(Attrs); - DS.takeAttributesFrom(Attrs); + DS.takeAttributesAppendingFrom(Attrs); } SourceLocation EndLoc; @@ -7483,7 +7483,7 @@ void Parser::ParseParameterDeclarationClause( // Take them so that we only apply the attributes to the first parameter. // We have already started parsing the decl-specifier sequence, so don't // parse any parameter-declaration pieces that precede it.
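A note on the takeAll renames running through the Parse changes above and below: the direction of transfer is now spelled out in the name, takeAllAppendingFrom placing incoming attributes after the ones already collected and takeAllPrependingFrom placing them before, which is how source order is preserved across the different parsing paths. A toy model of just that ordering contract (ToyAttrList is a stand-in for illustration, not the real ParsedAttributes API):

    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in attribute list; only the transfer ordering is modeled.
    struct ToyAttrList {
      std::vector<std::string> Attrs;

      // Incoming attributes land after the ones already held.
      void takeAllAppendingFrom(ToyAttrList &Other) {
        Attrs.insert(Attrs.end(), Other.Attrs.begin(), Other.Attrs.end());
        Other.Attrs.clear();
      }

      // Incoming attributes land before the ones already held.
      void takeAllPrependingFrom(ToyAttrList &Other) {
        Attrs.insert(Attrs.begin(), Other.Attrs.begin(), Other.Attrs.end());
        Other.Attrs.clear();
      }
    };

    int main() {
      ToyAttrList DeclSpec{{"aligned(4)"}}, Later{{"deprecated"}};
      DeclSpec.takeAllAppendingFrom(Later); // order: aligned(4), deprecated
      for (const std::string &A : DeclSpec.Attrs)
        std::cout << A << '\n';
    }

Keeping declaration order stable matters because it is visible in diagnostics and in the handful of attributes whose meaning is position-dependent.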
- ArgDeclSpecAttrs.takeAllFrom(FirstArgAttrs); + ArgDeclSpecAttrs.takeAllPrependingFrom(FirstArgAttrs); } else { // Parse any C++11 attributes. MaybeParseCXX11Attributes(ArgDeclAttrs); @@ -7505,7 +7505,7 @@ void Parser::ParseParameterDeclarationClause( DeclSpecContext::DSC_normal, /*LateAttrs=*/nullptr, AllowImplicitTypename); - DS.takeAttributesFrom(ArgDeclSpecAttrs); + DS.takeAttributesAppendingFrom(ArgDeclSpecAttrs); // Parse the declarator. This is "PrototypeContext" or // "LambdaExprParameterContext", because we must accept either diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp index 19f9412..b96968d 100644 --- a/clang/lib/Parse/ParseDeclCXX.cpp +++ b/clang/lib/Parse/ParseDeclCXX.cpp @@ -739,7 +739,7 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration( << FixItHint::CreateInsertionFromRange( Tok.getLocation(), CharSourceRange::getTokenRange(Range)) << FixItHint::CreateRemoval(Range); - Attrs.takeAllFrom(MisplacedAttrs); + Attrs.takeAllPrependingFrom(MisplacedAttrs); } // Maybe this is an alias-declaration. @@ -787,7 +787,7 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration( // Parse (optional) attributes. MaybeParseAttributes(PAKM_GNU | PAKM_CXX11, Attrs); DiagnoseCXX11AttributeExtension(Attrs); - Attrs.addAll(PrefixAttrs.begin(), PrefixAttrs.end()); + Attrs.prepend(PrefixAttrs.begin(), PrefixAttrs.end()); if (InvalidDeclarator) SkipUntil(tok::comma, tok::semi, StopBeforeMatch); @@ -1948,7 +1948,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, // Recover by adding misplaced attributes to the attribute list // of the class so they can be applied on the class later. - attrs.takeAllFrom(Attributes); + attrs.takeAllAppendingFrom(Attributes); } } @@ -2842,7 +2842,7 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclaration( // decl-specifier-seq: // Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this, TemplateDiags); - DS.takeAttributesFrom(DeclSpecAttrs); + DS.takeAttributesAppendingFrom(DeclSpecAttrs); if (MalformedTypeSpec) DS.SetTypeSpecError(); diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp index a2c6957..74f87a8 100644 --- a/clang/lib/Parse/ParseExprCXX.cpp +++ b/clang/lib/Parse/ParseExprCXX.cpp @@ -1244,7 +1244,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer( break; } - D.takeAttributes(Attributes); + D.takeAttributesAppending(Attributes); } MultiParseScope TemplateParamScope(*this); @@ -3200,6 +3200,8 @@ ExprResult Parser::ParseRequiresExpression() { BalancedDelimiterTracker ExprBraces(*this, tok::l_brace); ExprBraces.consumeOpen(); ExprResult Expression = ParseExpression(); + if (Expression.isUsable()) + Expression = Actions.CheckPlaceholderExpr(Expression.get()); if (!Expression.isUsable()) { ExprBraces.skipToEnd(); SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch); @@ -3369,6 +3371,8 @@ ExprResult Parser::ParseRequiresExpression() { // expression ';' SourceLocation StartLoc = Tok.getLocation(); ExprResult Expression = ParseExpression(); + if (Expression.isUsable()) + Expression = Actions.CheckPlaceholderExpr(Expression.get()); if (!Expression.isUsable()) { SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch); break; diff --git a/clang/lib/Parse/ParseObjc.cpp b/clang/lib/Parse/ParseObjc.cpp index a64fb02..0b9f113 100644 --- a/clang/lib/Parse/ParseObjc.cpp +++ b/clang/lib/Parse/ParseObjc.cpp @@ -43,7 +43,7 @@ void Parser::MaybeSkipAttributes(tok::ObjCKeywordKind Kind) { Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives(ParsedAttributes &DeclAttrs, ParsedAttributes &DeclSpecAttrs) { - DeclAttrs.takeAllFrom(DeclSpecAttrs); + DeclAttrs.takeAllPrependingFrom(DeclSpecAttrs); SourceLocation AtLoc = ConsumeToken(); // the "@" @@ -1065,8 +1065,8 @@ void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS, /// Take all the decl attributes out of the given list and add /// them to the given attribute set. -static void takeDeclAttributes(ParsedAttributesView &attrs, - ParsedAttributesView &from) { +static void takeDeclAttributesAppend(ParsedAttributesView &attrs, + ParsedAttributesView &from) { for (auto &AL : llvm::reverse(from)) { if (!AL.isUsedAsTypeAttr()) { from.remove(&AL); @@ -1088,10 +1088,10 @@ static void takeDeclAttributes(ParsedAttributes &attrs, attrs.getPool().takeAllFrom(D.getDeclSpec().getAttributePool()); // Now actually move the attributes over. - takeDeclAttributes(attrs, D.getMutableDeclSpec().getAttributes()); - takeDeclAttributes(attrs, D.getAttributes()); + takeDeclAttributesAppend(attrs, D.getMutableDeclSpec().getAttributes()); + takeDeclAttributesAppend(attrs, D.getAttributes()); for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) - takeDeclAttributes(attrs, D.getTypeObject(i).getAttrs()); + takeDeclAttributesAppend(attrs, D.getTypeObject(i).getAttrs()); } ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS, diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp index 2e7af12..9203898 100644 --- a/clang/lib/Parse/ParseStmt.cpp +++ b/clang/lib/Parse/ParseStmt.cpp @@ -718,7 +718,7 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributes &Attrs, // and followed by a semicolon, GCC will reject (it appears to parse the // attributes as part of a statement in that case). That looks like a bug.
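On the ParseRequiresExpression hunks above: calling CheckPlaceholderExpr before the usability test means a requirement whose expression has a placeholder type is resolved or diagnosed instead of slipping through. A hedged C++ sketch of the kind of expression involved (the names are invented, and the precise diagnostic comes from the change itself, not from this sketch):

    void g(int);
    void g(double);

    // In the first simple-requirement, 'g' names an unresolved overload set,
    // so the expression's type is a placeholder until something resolves it;
    // the added CheckPlaceholderExpr call forces that resolution.
    template <typename T>
    concept MentionsG = requires(T t) {
      g; // placeholder: an overload set that is never called
      t; // ordinary requirement with a real type
    };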
if (!getLangOpts().CPlusPlus || Tok.is(tok::semi)) - Attrs.takeAllFrom(TempAttrs); + Attrs.takeAllAppendingFrom(TempAttrs); else { StmtVector Stmts; ParsedAttributes EmptyCXX11Attrs(AttrFactory); @@ -2407,7 +2407,7 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts, Stmts, StmtCtx, TrailingElseLoc, Attrs, EmptyDeclSpecAttrs, PrecedingLabel); - Attrs.takeAllFrom(TempAttrs); + Attrs.takeAllPrependingFrom(TempAttrs); // Start of attribute range may already be set for some invalid input. // See PR46336. diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp index 74aff0b..dbc7cbc 100644 --- a/clang/lib/Parse/ParseTemplate.cpp +++ b/clang/lib/Parse/ParseTemplate.cpp @@ -196,7 +196,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclarationAfterTemplate( ParsingDeclSpec DS(*this, &DiagsFromTParams); DS.SetRangeStart(DeclSpecAttrs.Range.getBegin()); DS.SetRangeEnd(DeclSpecAttrs.Range.getEnd()); - DS.takeAttributesFrom(DeclSpecAttrs); + DS.takeAttributesAppendingFrom(DeclSpecAttrs); ParseDeclarationSpecifiers(DS, TemplateInfo, AS, getDeclSpecContextFromDeclaratorContext(Context)); diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp index a17398b..bbff627 100644 --- a/clang/lib/Parse/Parser.cpp +++ b/clang/lib/Parse/Parser.cpp @@ -1083,7 +1083,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclOrFunctionDefInternal( "expected uninitialised source range"); DS.SetRangeStart(DeclSpecAttrs.Range.getBegin()); DS.SetRangeEnd(DeclSpecAttrs.Range.getEnd()); - DS.takeAttributesFrom(DeclSpecAttrs); + DS.takeAttributesAppendingFrom(DeclSpecAttrs); ParsedTemplateInfo TemplateInfo; MaybeParseMicrosoftAttributes(DS.getAttributes()); @@ -1155,7 +1155,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclOrFunctionDefInternal( } DS.abort(); - DS.takeAttributesFrom(Attrs); + DS.takeAttributesAppendingFrom(Attrs); const char *PrevSpec = nullptr; unsigned DiagID; diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp index 8756ce5..184d31e 100644 --- a/clang/lib/Sema/DeclSpec.cpp +++ b/clang/lib/Sema/DeclSpec.cpp @@ -197,7 +197,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, [&](DeclSpec::TQ TypeQual, StringRef PrintName, SourceLocation SL) { I.Fun.MethodQualifiers->SetTypeQual(TypeQual, SL); }); - I.Fun.MethodQualifiers->getAttributes().takeAllFrom(attrs); + I.Fun.MethodQualifiers->getAttributes().takeAllPrependingFrom(attrs); I.Fun.MethodQualifiers->getAttributePool().takeAllFrom(attrs.getPool()); } diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp index 3c20ccd..40c318a 100644 --- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp +++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp @@ -144,6 +144,7 @@ private: _2, _3, _4, + _5, Handle = 128, CounterHandle, LastStmt @@ -190,6 +191,9 @@ public: template <typename T> BuiltinTypeMethodBuilder & accessCounterHandleFieldOnResource(T ResourceRecord); + template <typename ResourceT, typename ValueT> + BuiltinTypeMethodBuilder & + setCounterHandleFieldOnResource(ResourceT ResourceRecord, ValueT HandleValue); template <typename T> BuiltinTypeMethodBuilder &returnValue(T ReturnValue); BuiltinTypeMethodBuilder &returnThis(); BuiltinTypeDeclBuilder &finalize(); @@ -205,6 +209,11 @@ private: if (!Method) createDecl(); } + + template <typename ResourceT, typename ValueT> + BuiltinTypeMethodBuilder &setFieldOnResource(ResourceT ResourceRecord, + ValueT HandleValue, + FieldDecl *HandleField); };
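The BuiltinTypeMethodBuilder additions that follow lean on the PlaceHolder scheme: PH::_0 through PH::_5 stand for the method's parameters (the new _5 exists for the extra counterOrderId parameter) and PH::LastStmt stands for the value produced by the immediately preceding statement in the chain. A deliberately tiny model of that scheme, with invented names and strings standing in for AST nodes:

    #include <iostream>
    #include <string>
    #include <vector>

    struct ToyBuilder {
      std::vector<std::string> Params, Stmts;

      ToyBuilder &addParam(std::string N) {
        Params.push_back(std::move(N));
        return *this;
      }

      // -1 plays the role of PH::LastStmt; other values index parameters.
      std::string resolve(int P) const {
        return P < 0 ? "t" + std::to_string(Stmts.size() - 1) : Params[P];
      }

      ToyBuilder &callBuiltin(const std::string &Fn, std::vector<int> Args) {
        std::string S = "t" + std::to_string(Stmts.size()) + " = " + Fn + "(";
        for (unsigned I = 0; I < Args.size(); ++I)
          S += (I ? ", " : "") + resolve(Args[I]);
        Stmts.push_back(S + ");");
        return *this;
      }
    };

    int main() {
      ToyBuilder B;
      B.addParam("registerNo").addParam("spaceNo").addParam("counterOrderId")
          .callBuiltin("handlefrombinding", {0, 1})
          .callBuiltin("counterhandlefromimplicitbinding", {-1, 2});
      for (const std::string &S : B.Stmts)
        std::cout << S << '\n'; // the second call's first argument is t0
    }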
TemplateParameterListBuilder::~TemplateParameterListBuilder() { @@ -592,13 +601,27 @@ template <typename ResourceT, typename ValueT> BuiltinTypeMethodBuilder & BuiltinTypeMethodBuilder::setHandleFieldOnResource(ResourceT ResourceRecord, ValueT HandleValue) { + return setFieldOnResource(ResourceRecord, HandleValue, + DeclBuilder.getResourceHandleField()); +} + +template <typename ResourceT, typename ValueT> +BuiltinTypeMethodBuilder & +BuiltinTypeMethodBuilder::setCounterHandleFieldOnResource( + ResourceT ResourceRecord, ValueT HandleValue) { + return setFieldOnResource(ResourceRecord, HandleValue, + DeclBuilder.getResourceCounterHandleField()); +} + +template <typename ResourceT, typename ValueT> +BuiltinTypeMethodBuilder &BuiltinTypeMethodBuilder::setFieldOnResource( + ResourceT ResourceRecord, ValueT HandleValue, FieldDecl *HandleField) { ensureCompleteDecl(); Expr *ResourceExpr = convertPlaceholder(ResourceRecord); Expr *HandleValueExpr = convertPlaceholder(HandleValue); ASTContext &AST = DeclBuilder.SemaRef.getASTContext(); - FieldDecl *HandleField = DeclBuilder.getResourceHandleField(); MemberExpr *HandleMemberExpr = MemberExpr::CreateImplicit( AST, ResourceExpr, false, HandleField, HandleField->getType(), VK_LValue, OK_Ordinary); @@ -829,6 +852,18 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addDefaultHandleConstructor() { .finalize(); } +BuiltinTypeDeclBuilder & +BuiltinTypeDeclBuilder::addStaticInitializationFunctions(bool HasCounter) { + if (HasCounter) { + addCreateFromBindingWithImplicitCounter(); + addCreateFromImplicitBindingWithImplicitCounter(); + } else { + addCreateFromBinding(); + addCreateFromImplicitBinding(); + } + return *this; +} + // Adds static method that initializes resource from binding: // // static Resource<T> __createFromBinding(unsigned registerNo, @@ -903,6 +938,102 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCreateFromImplicitBinding() { .finalize(); } +// Adds static method that initializes resource from binding: +// +// static Resource<T> +// __createFromBindingWithImplicitCounter(unsigned registerNo, +// unsigned spaceNo, int range, +// unsigned index, const char *name, +// unsigned counterOrderId) { +// Resource<T> tmp; +// tmp.__handle = __builtin_hlsl_resource_handlefrombinding( +// tmp.__handle, registerNo, spaceNo, range, index, name); +// tmp.__counter_handle = +// __builtin_hlsl_resource_counterhandlefromimplicitbinding( +// tmp.__handle, counterOrderId, spaceNo); +// return tmp; +// } +BuiltinTypeDeclBuilder & +BuiltinTypeDeclBuilder::addCreateFromBindingWithImplicitCounter() { + assert(!Record->isCompleteDefinition() && "record is already complete"); + + using PH = BuiltinTypeMethodBuilder::PlaceHolder; + ASTContext &AST = SemaRef.getASTContext(); + QualType HandleType = getResourceHandleField()->getType(); + QualType RecordType = AST.getTypeDeclType(cast<TypeDecl>(Record)); + BuiltinTypeMethodBuilder::LocalVar TmpVar("tmp", RecordType); + + return BuiltinTypeMethodBuilder(*this, + "__createFromBindingWithImplicitCounter", + RecordType, false, false, SC_Static) + .addParam("registerNo", AST.UnsignedIntTy) + .addParam("spaceNo", AST.UnsignedIntTy) + .addParam("range", AST.IntTy) + .addParam("index", AST.UnsignedIntTy) + .addParam("name", AST.getPointerType(AST.CharTy.withConst())) + .addParam("counterOrderId", AST.UnsignedIntTy) + .declareLocalVar(TmpVar) + .accessHandleFieldOnResource(TmpVar) + .callBuiltin("__builtin_hlsl_resource_handlefrombinding", HandleType, + PH::LastStmt, PH::_0, PH::_1, PH::_2, PH::_3, PH::_4) + 
.setHandleFieldOnResource(TmpVar, PH::LastStmt) + .accessHandleFieldOnResource(TmpVar) + .callBuiltin("__builtin_hlsl_resource_counterhandlefromimplicitbinding", + HandleType, PH::LastStmt, PH::_5, PH::_1) + .setCounterHandleFieldOnResource(TmpVar, PH::LastStmt) + .returnValue(TmpVar) + .finalize(); +} + +// Adds static method that initializes resource from implicit binding: +// +// static Resource<T> +// __createFromImplicitBindingWithImplicitCounter(unsigned orderId, +// unsigned spaceNo, int range, +// unsigned index, +// const char *name, +// unsigned counterOrderId) { +// Resource<T> tmp; +// tmp.__handle = __builtin_hlsl_resource_handlefromimplicitbinding( +// tmp.__handle, orderId, spaceNo, range, index, name); +// tmp.__counter_handle = +// __builtin_hlsl_resource_counterhandlefromimplicitbinding( +// tmp.__handle, counterOrderId, spaceNo); +// return tmp; +// } +BuiltinTypeDeclBuilder & +BuiltinTypeDeclBuilder::addCreateFromImplicitBindingWithImplicitCounter() { + assert(!Record->isCompleteDefinition() && "record is already complete"); + + using PH = BuiltinTypeMethodBuilder::PlaceHolder; + ASTContext &AST = SemaRef.getASTContext(); + QualType HandleType = getResourceHandleField()->getType(); + QualType RecordType = AST.getTypeDeclType(cast<TypeDecl>(Record)); + BuiltinTypeMethodBuilder::LocalVar TmpVar("tmp", RecordType); + + return BuiltinTypeMethodBuilder( + *this, "__createFromImplicitBindingWithImplicitCounter", + RecordType, false, false, SC_Static) + .addParam("orderId", AST.UnsignedIntTy) + .addParam("spaceNo", AST.UnsignedIntTy) + .addParam("range", AST.IntTy) + .addParam("index", AST.UnsignedIntTy) + .addParam("name", AST.getPointerType(AST.CharTy.withConst())) + .addParam("counterOrderId", AST.UnsignedIntTy) + .declareLocalVar(TmpVar) + .accessHandleFieldOnResource(TmpVar) + .callBuiltin("__builtin_hlsl_resource_handlefromimplicitbinding", + HandleType, PH::LastStmt, PH::_0, PH::_1, PH::_2, PH::_3, + PH::_4) + .setHandleFieldOnResource(TmpVar, PH::LastStmt) + .accessHandleFieldOnResource(TmpVar) + .callBuiltin("__builtin_hlsl_resource_counterhandlefromimplicitbinding", + HandleType, PH::LastStmt, PH::_5, PH::_1) + .setCounterHandleFieldOnResource(TmpVar, PH::LastStmt) + .returnValue(TmpVar) + .finalize(); +} + BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCopyConstructor() { assert(!Record->isCompleteDefinition() && "record is already complete"); @@ -1048,7 +1179,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addIncrementCounterMethod() { return BuiltinTypeMethodBuilder(*this, "IncrementCounter", SemaRef.getASTContext().UnsignedIntTy) .callBuiltin("__builtin_hlsl_buffer_update_counter", QualType(), - PH::Handle, getConstantIntExpr(1)) + PH::CounterHandle, getConstantIntExpr(1)) .finalize(); } @@ -1057,7 +1188,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addDecrementCounterMethod() { return BuiltinTypeMethodBuilder(*this, "DecrementCounter", SemaRef.getASTContext().UnsignedIntTy) .callBuiltin("__builtin_hlsl_buffer_update_counter", QualType(), - PH::Handle, getConstantIntExpr(-1)) + PH::CounterHandle, getConstantIntExpr(-1)) .finalize(); } @@ -1102,7 +1233,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addAppendMethod() { return BuiltinTypeMethodBuilder(*this, "Append", AST.VoidTy) .addParam("value", ElemTy) .callBuiltin("__builtin_hlsl_buffer_update_counter", AST.UnsignedIntTy, - PH::Handle, getConstantIntExpr(1)) + PH::CounterHandle, getConstantIntExpr(1)) .callBuiltin("__builtin_hlsl_resource_getpointer", AST.getPointerType(AddrSpaceElemTy),
PH::Handle, PH::LastStmt) @@ -1119,7 +1250,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addConsumeMethod() { AST.getAddrSpaceQualType(ElemTy, LangAS::hlsl_device); return BuiltinTypeMethodBuilder(*this, "Consume", ElemTy) .callBuiltin("__builtin_hlsl_buffer_update_counter", AST.UnsignedIntTy, - PH::Handle, getConstantIntExpr(-1)) + PH::CounterHandle, getConstantIntExpr(-1)) .callBuiltin("__builtin_hlsl_resource_getpointer", AST.getPointerType(AddrSpaceElemTy), PH::Handle, PH::LastStmt) diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h index a981602..86cbd10 100644 --- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h +++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h @@ -83,8 +83,7 @@ public: BuiltinTypeDeclBuilder &addCopyAssignmentOperator(); // Static create methods - BuiltinTypeDeclBuilder &addCreateFromBinding(); - BuiltinTypeDeclBuilder &addCreateFromImplicitBinding(); + BuiltinTypeDeclBuilder &addStaticInitializationFunctions(bool HasCounter); // Builtin types methods BuiltinTypeDeclBuilder &addLoadMethods(); @@ -96,6 +95,10 @@ public: BuiltinTypeDeclBuilder &addConsumeMethod(); private: + BuiltinTypeDeclBuilder &addCreateFromBinding(); + BuiltinTypeDeclBuilder &addCreateFromImplicitBinding(); + BuiltinTypeDeclBuilder &addCreateFromBindingWithImplicitCounter(); + BuiltinTypeDeclBuilder &addCreateFromImplicitBindingWithImplicitCounter(); BuiltinTypeDeclBuilder &addResourceMember(StringRef MemberName, ResourceClass RC, bool IsROV, bool RawBuffer, bool IsCounter, diff --git a/clang/lib/Sema/HLSLExternalSemaSource.cpp b/clang/lib/Sema/HLSLExternalSemaSource.cpp index cc43e94..e118dda 100644 --- a/clang/lib/Sema/HLSLExternalSemaSource.cpp +++ b/clang/lib/Sema/HLSLExternalSemaSource.cpp @@ -236,8 +236,7 @@ static BuiltinTypeDeclBuilder setupBufferType(CXXRecordDecl *Decl, Sema &S, .addDefaultHandleConstructor() .addCopyConstructor() .addCopyAssignmentOperator() - .addCreateFromBinding() - .addCreateFromImplicitBinding(); + .addStaticInitializationFunctions(HasCounter); } // This function is responsible for constructing the constraint expression for diff --git a/clang/lib/Sema/ParsedAttr.cpp b/clang/lib/Sema/ParsedAttr.cpp index 294f88e..2b5ad33 100644 --- a/clang/lib/Sema/ParsedAttr.cpp +++ b/clang/lib/Sema/ParsedAttr.cpp @@ -304,7 +304,7 @@ bool ParsedAttr::checkAtMostNumArgs(Sema &S, unsigned Num) const { void clang::takeAndConcatenateAttrs(ParsedAttributes &First, ParsedAttributes &&Second) { - First.takeAllAtEndFrom(Second); + First.takeAllAppendingFrom(Second); if (!First.Range.getBegin().isValid()) First.Range.setBegin(Second.Range.getBegin()); diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp index 3a0c231..e32f437 100644 --- a/clang/lib/Sema/SemaAMDGPU.cpp +++ b/clang/lib/Sema/SemaAMDGPU.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "clang/Sema/SemaAMDGPU.h" +#include "clang/Basic/DiagnosticFrontend.h" #include "clang/Basic/DiagnosticSema.h" #include "clang/Basic/TargetBuiltins.h" #include "clang/Sema/Ownership.h" @@ -111,6 +112,108 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B: case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B: return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/true); + case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32: + case 
AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2d_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2darray_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f16_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f16_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2d_f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f16_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f16_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f16_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f32_f32: + case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f16_f32: { + StringRef FeatureList( + getASTContext().BuiltinInfo.getRequiredFeatures(BuiltinID)); + if (!Builtin::evaluateRequiredTargetFeatures(FeatureList, + CallerFeatureMap)) { + Diag(TheCall->getBeginLoc(), diag::err_builtin_needs_feature) + << FD->getDeclName() << FeatureList; + return false; + } + + unsigned ArgCount = TheCall->getNumArgs() - 1; + llvm::APSInt Result; + + return (SemaRef.BuiltinConstantArg(TheCall, 0, Result)) || + (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) || + (SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result)); + } + case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_2d_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f16_i32: + case 
AMDGPU::BI__builtin_amdgcn_image_store_2darray_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f16_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f32_i32: + case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f16_i32: { + StringRef FeatureList( + getASTContext().BuiltinInfo.getRequiredFeatures(BuiltinID)); + if (!Builtin::evaluateRequiredTargetFeatures(FeatureList, + CallerFeatureMap)) { + Diag(TheCall->getBeginLoc(), diag::err_builtin_needs_feature) + << FD->getDeclName() << FeatureList; + return false; + } + + unsigned ArgCount = TheCall->getNumArgs() - 1; + llvm::APSInt Result; + + return (SemaRef.BuiltinConstantArg(TheCall, 1, Result)) || + (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) || + (SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result)); + } default: return false; } diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index 6eaf7b9..0e83c20 100644 --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -14637,7 +14637,7 @@ StmtResult Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::ForInit); D.SetIdentifier(Ident, IdentLoc); - D.takeAttributes(Attrs); + D.takeAttributesAppending(Attrs); D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/ false), IdentLoc); diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index 4230ea7..01abc1f 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -166,6 +166,11 @@ static void diagnoseUseOfInternalDeclInInlineFunction(Sema &S, // This is disabled under C++; there are too many ways for this to fire in // contexts where the warning is a false positive, or where it is technically // correct but benign. + // + // WG14 N3622 removed the constraint entirely in C2y. The warning is left + // enabled in earlier language modes because this is a constraint in those + // language modes. But in C2y mode, we still want to issue the "incompatible + // with previous standards" diagnostic, too. if (S.getLangOpts().CPlusPlus) return; @@ -190,16 +195,17 @@ static void diagnoseUseOfInternalDeclInInlineFunction(Sema &S, // This last can give us false negatives, but it's better than warning on // wrappers for simple C library functions.
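For context on the SemaExpr.cpp change above: the pattern at issue only exists in C, where referencing an internal-linkage entity from an extern inline function was a constraint before C2y. A small C example of code that trips it (the file name and flags are the usual clang spellings, not taken from the commit's tests):

    /* extern-inline.c */
    static int counter; /* internal linkage */

    /* Before C2y, an inline definition of an external-linkage function may
       not reference an identifier with internal linkage (C11 6.7.4p3);
       WG14 N3622 lifts that constraint in C2y. */
    inline int next(void) { return ++counter; }

    /* External definition so this translation unit links on its own. */
    extern inline int next(void);

    int main(void) { return next(); }

With -std=c23 and earlier this draws one of the ext_internal_in_extern_inline extension warnings chosen above; with -std=c2y it instead gets the warn_c2y_compat_internal_in_extern_inline "incompatible with previous standards" warning.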
const FunctionDecl *UsedFn = dyn_cast<FunctionDecl>(D); - bool DowngradeWarning = S.getSourceManager().isInMainFile(Loc); - if (!DowngradeWarning && UsedFn) - DowngradeWarning = UsedFn->isInlined() || UsedFn->hasAttr<ConstAttr>(); - - S.Diag(Loc, DowngradeWarning ? diag::ext_internal_in_extern_inline_quiet - : diag::ext_internal_in_extern_inline) - << /*IsVar=*/!UsedFn << D; + unsigned DiagID; + if (S.getLangOpts().C2y) + DiagID = diag::warn_c2y_compat_internal_in_extern_inline; + else if ((UsedFn && (UsedFn->isInlined() || UsedFn->hasAttr<ConstAttr>())) || + S.getSourceManager().isInMainFile(Loc)) + DiagID = diag::ext_internal_in_extern_inline_quiet; + else + DiagID = diag::ext_internal_in_extern_inline; + S.Diag(Loc, DiagID) << /*IsVar=*/!UsedFn << D; S.MaybeSuggestAddingStaticToDecl(Current); - S.Diag(D->getCanonicalDecl()->getLocation(), diag::note_entity_declared_at) << D; } diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index 09e5d69..17cb1e4 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -1240,6 +1240,20 @@ static CXXMethodDecl *lookupMethod(Sema &S, CXXRecordDecl *RecordDecl, } // end anonymous namespace +static bool hasCounterHandle(const CXXRecordDecl *RD) { + if (!RD || RD->field_empty()) + return false; + auto It = std::next(RD->field_begin()); + if (It == RD->field_end()) + return false; + const FieldDecl *SecondField = *It; + if (const auto *ResTy = + SecondField->getType()->getAs<HLSLAttributedResourceType>()) { + return ResTy->getAttrs().IsCounter; + } + return false; +} + bool SemaHLSL::handleRootSignatureElements( ArrayRef<hlsl::RootSignatureElement> Elements) { // Define some common error handling functions @@ -2973,6 +2987,25 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { TheCall->setType(ResourceTy); break; } + case Builtin::BI__builtin_hlsl_resource_counterhandlefromimplicitbinding: { + ASTContext &AST = SemaRef.getASTContext(); + if (SemaRef.checkArgCount(TheCall, 3) || + CheckResourceHandle(&SemaRef, TheCall, 0) || + CheckArgTypeMatches(&SemaRef, TheCall->getArg(1), AST.UnsignedIntTy) || + CheckArgTypeMatches(&SemaRef, TheCall->getArg(2), AST.UnsignedIntTy)) + return true; + + QualType MainHandleTy = TheCall->getArg(0)->getType(); + auto *MainResType = MainHandleTy->getAs<HLSLAttributedResourceType>(); + auto MainAttrs = MainResType->getAttrs(); + assert(!MainAttrs.IsCounter && "cannot create a counter from a counter"); + MainAttrs.IsCounter = true; + QualType CounterHandleTy = AST.getHLSLAttributedResourceType( + MainResType->getWrappedType(), MainResType->getContainedType(), + MainAttrs); + TheCall->setType(CounterHandleTy); + break; + } case Builtin::BI__builtin_hlsl_and: case Builtin::BI__builtin_hlsl_or: { if (SemaRef.checkArgCount(TheCall, 2)) @@ -3780,10 +3813,24 @@ void SemaHLSL::ActOnVariableDeclarator(VarDecl *VD) { uint32_t OrderID = getNextImplicitBindingOrderID(); if (Binding.hasBinding()) Binding.setImplicitOrderID(OrderID); - else + else { addImplicitBindingAttrToDecl( SemaRef, VD, getRegisterType(getResourceArrayHandleType(VD)), OrderID); + // Re-create the binding object to pick up the new attribute. + Binding = ResourceBindingAttrs(VD); + } + } + + // Get to the base type of a potentially multi-dimensional array.
+ QualType Ty = getASTContext().getBaseElementType(VD->getType()); + + const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); + if (hasCounterHandle(RD)) { + if (!Binding.hasCounterImplicitOrderID()) { + uint32_t OrderID = getNextImplicitBindingOrderID(); + Binding.setCounterImplicitOrderID(OrderID); + } } } } @@ -3808,19 +3855,31 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) { CXXMethodDecl *CreateMethod = nullptr; llvm::SmallVector<Expr *> Args; + bool HasCounter = hasCounterHandle(ResourceDecl); + const char *CreateMethodName; + if (Binding.isExplicit()) + CreateMethodName = HasCounter ? "__createFromBindingWithImplicitCounter" + : "__createFromBinding"; + else + CreateMethodName = HasCounter + ? "__createFromImplicitBindingWithImplicitCounter" + : "__createFromImplicitBinding"; + + CreateMethod = + lookupMethod(SemaRef, ResourceDecl, CreateMethodName, VD->getLocation()); + + if (!CreateMethod) + // This can happen if someone creates a struct that looks like an HLSL + // resource record but does not have the required static create method. + // No binding will be generated for it. + return false; + if (Binding.isExplicit()) { - // The resource has explicit binding. - CreateMethod = lookupMethod(SemaRef, ResourceDecl, "__createFromBinding", - VD->getLocation()); IntegerLiteral *RegSlot = IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, Binding.getSlot()), AST.UnsignedIntTy, SourceLocation()); Args.push_back(RegSlot); } else { - // The resource has implicit binding. - CreateMethod = - lookupMethod(SemaRef, ResourceDecl, "__createFromImplicitBinding", - VD->getLocation()); uint32_t OrderID = (Binding.hasImplicitOrderID()) ? Binding.getImplicitOrderID() : getNextImplicitBindingOrderID(); @@ -3830,12 +3889,6 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) { Args.push_back(OrderId); } - if (!CreateMethod) - // This can happen if someone creates a struct that looks like an HLSL - // resource record but does not have the required static create method. - // No binding will be generated for it. - return false; - IntegerLiteral *Space = IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, Binding.getSpace()), AST.UnsignedIntTy, SourceLocation()); @@ -3859,6 +3912,15 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) { Name, nullptr, VK_PRValue, FPOptionsOverride()); Args.push_back(NameCast); + if (HasCounter) { + // Will this be in the correct order? + uint32_t CounterOrderID = getNextImplicitBindingOrderID(); + IntegerLiteral *CounterId = + IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, CounterOrderID), + AST.UnsignedIntTy, SourceLocation()); + Args.push_back(CounterId); + } + // Make sure the create method template is instantiated and emitted. if (!CreateMethod->isDefined() && CreateMethod->isTemplateInstantiation()) SemaRef.InstantiateFunctionDefinition(VD->getLocation(), CreateMethod, @@ -3899,20 +3961,24 @@ bool SemaHLSL::initGlobalResourceArrayDecl(VarDecl *VD) { ASTContext &AST = SemaRef.getASTContext(); QualType ResElementTy = AST.getBaseElementType(VD->getType()); CXXRecordDecl *ResourceDecl = ResElementTy->getAsCXXRecordDecl(); - - HLSLResourceBindingAttr *RBA = VD->getAttr<HLSLResourceBindingAttr>(); - HLSLVkBindingAttr *VkBinding = VD->getAttr<HLSLVkBindingAttr>(); CXXMethodDecl *CreateMethod = nullptr; - if (VkBinding || (RBA && RBA->hasRegisterSlot())) + bool HasCounter = hasCounterHandle(ResourceDecl); + ResourceBindingAttrs ResourceAttrs(VD); + if (ResourceAttrs.isExplicit()) // Resource has explicit binding. 
- CreateMethod = lookupMethod(SemaRef, ResourceDecl, "__createFromBinding", - VD->getLocation()); - else - // Resource has implicit binding. CreateMethod = - lookupMethod(SemaRef, ResourceDecl, "__createFromImplicitBinding", + lookupMethod(SemaRef, ResourceDecl, + HasCounter ? "__createFromBindingWithImplicitCounter" + : "__createFromBinding", VD->getLocation()); + else + // Resource has implicit binding. + CreateMethod = lookupMethod( + SemaRef, ResourceDecl, + HasCounter ? "__createFromImplicitBindingWithImplicitCounter" + : "__createFromImplicitBinding", + VD->getLocation()); if (!CreateMethod) return false; diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp index 8471f02..779b6e9 100644 --- a/clang/lib/Sema/SemaOpenACC.cpp +++ b/clang/lib/Sema/SemaOpenACC.cpp @@ -2759,7 +2759,7 @@ OpenACCPrivateRecipe SemaOpenACC::CreatePrivateInitRecipe(const Expr *VarExpr) { // Array sections are special, and we have to treat them that way. if (const auto *ASE = dyn_cast<ArraySectionExpr>(VarExpr->IgnoreParenImpCasts())) - VarTy = ArraySectionExpr::getBaseOriginalType(ASE); + VarTy = ASE->getElementType(); VarDecl *AllocaDecl = CreateAllocaDecl( getASTContext(), SemaRef.getCurContext(), VarExpr->getBeginLoc(), @@ -2795,7 +2795,7 @@ SemaOpenACC::CreateFirstPrivateInitRecipe(const Expr *VarExpr) { // Array sections are special, and we have to treat them that way. if (const auto *ASE = dyn_cast<ArraySectionExpr>(VarExpr->IgnoreParenImpCasts())) - VarTy = ArraySectionExpr::getBaseOriginalType(ASE); + VarTy = ASE->getElementType(); VarDecl *AllocaDecl = CreateAllocaDecl( getASTContext(), SemaRef.getCurContext(), VarExpr->getBeginLoc(), @@ -2896,7 +2896,16 @@ OpenACCReductionRecipe SemaOpenACC::CreateReductionInitRecipe( // Array sections are special, and we have to treat them that way. if (const auto *ASE = dyn_cast<ArraySectionExpr>(VarExpr->IgnoreParenImpCasts())) - VarTy = ArraySectionExpr::getBaseOriginalType(ASE); + VarTy = ASE->getElementType(); + + llvm::SmallVector<OpenACCReductionRecipe::CombinerRecipe, 1> CombinerRecipes; + + // We use whether the alloca-decl is set to determine whether the combiner + // is 'set', so we can skip creating it entirely if building any of the + // combiners is going to fail. + if (CreateReductionCombinerRecipe(VarExpr->getBeginLoc(), ReductionOperator, + VarTy, CombinerRecipes)) + return OpenACCReductionRecipe::Empty(); VarDecl *AllocaDecl = CreateAllocaDecl( getASTContext(), SemaRef.getCurContext(), VarExpr->getBeginLoc(), @@ -2946,5 +2955,163 @@ OpenACCReductionRecipe SemaOpenACC::CreateReductionInitRecipe( AllocaDecl->setInit(Init.get()); AllocaDecl->setInitStyle(VarDecl::CallInit); } - return OpenACCReductionRecipe(AllocaDecl); + + return OpenACCReductionRecipe(AllocaDecl, CombinerRecipes); +} + +bool SemaOpenACC::CreateReductionCombinerRecipe( + SourceLocation Loc, OpenACCReductionOperator ReductionOperator, + QualType VarTy, + llvm::SmallVectorImpl<OpenACCReductionRecipe::CombinerRecipe> + &CombinerRecipes) { + // Now we can try to generate the 'combiner' recipe. This is a little + // complicated: if 'VarTy' is an array type, we want to take its element + // type and generate the combiner for that. Additionally, if this is a + // struct, we have two options: if there are overloaded operators, we want + // to use those; otherwise we decompose into the individual elements.
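For context on the recipe generation here: for a clause such as reduction(+:x), the combiner ultimately records a compound assignment between two synthesized variables. A minimal sketch of the semantics being encoded, with hypothetical names (the real code builds the equivalent AST through BuildBinOp rather than writing source like this):

// Sketch only: the effect of the combiner recipe for reduction(+:x).
template <typename T>
void accReductionCombine(T &LHS, const T &RHS) {
  LHS += RHS; // BO_AddAssign, selected for OpenACCReductionOperator::Addition
}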
+ + BinaryOperatorKind BinOp; + switch (ReductionOperator) { + case OpenACCReductionOperator::Invalid: + // This can only happen when there is an error, and since these inits + // are used for code generation, we can just ignore/not bother doing any + // initialization here. + CombinerRecipes.push_back({nullptr, nullptr, nullptr}); + return false; + case OpenACCReductionOperator::Addition: + BinOp = BinaryOperatorKind::BO_AddAssign; + break; + case OpenACCReductionOperator::Multiplication: + BinOp = BinaryOperatorKind::BO_MulAssign; + break; + case OpenACCReductionOperator::BitwiseAnd: + BinOp = BinaryOperatorKind::BO_AndAssign; + break; + case OpenACCReductionOperator::BitwiseOr: + BinOp = BinaryOperatorKind::BO_OrAssign; + break; + case OpenACCReductionOperator::BitwiseXOr: + BinOp = BinaryOperatorKind::BO_XorAssign; + break; + + case OpenACCReductionOperator::Max: + case OpenACCReductionOperator::Min: + case OpenACCReductionOperator::And: + case OpenACCReductionOperator::Or: + // We just want a 'NYI' error in the backend, so leave an empty combiner + // recipe, and claim success. + CombinerRecipes.push_back({nullptr, nullptr, nullptr}); + return false; + } + + // If VarTy is an array type, at the top level only, we want to do our + // comparisons/decomposition/etc. at the element level. + if (auto *AT = getASTContext().getAsArrayType(VarTy)) + VarTy = AT->getElementType(); + + assert(!VarTy->isArrayType() && "Only 1 level of array allowed"); + + auto tryCombiner = [&, this](DeclRefExpr *LHSDRE, DeclRefExpr *RHSDRE, + bool IncludeTrap) { + // TODO: OpenACC: we have to figure out, based on the bin-op, how to handle + // the operations that cannot use compound assignment; it is not yet clear + // what we could do for &&, ||, max, and min. + if (IncludeTrap) { + // Trap all of the errors here; we'll emit our own at the end.
+ Sema::TentativeAnalysisScope Trap{SemaRef}; + + return SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE, + RHSDRE, + /*ForFoldExpr=*/false); + } else { + return SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE, + RHSDRE, + /*ForFoldExpr=*/false); + } + }; + + struct CombinerAttemptTy { + VarDecl *LHS; + DeclRefExpr *LHSDRE; + VarDecl *RHS; + DeclRefExpr *RHSDRE; + Expr *Op; + }; + + auto formCombiner = [&, this](QualType Ty) -> CombinerAttemptTy { + VarDecl *LHSDecl = CreateAllocaDecl( + getASTContext(), SemaRef.getCurContext(), Loc, + &getASTContext().Idents.get("openacc.reduction.combiner.lhs"), Ty); + auto *LHSDRE = DeclRefExpr::Create( + getASTContext(), NestedNameSpecifierLoc{}, SourceLocation{}, LHSDecl, + /*RefersToEnclosingVariableOrCapture=*/false, + DeclarationNameInfo{DeclarationName{LHSDecl->getDeclName()}, + LHSDecl->getBeginLoc()}, + Ty, clang::VK_LValue, LHSDecl, nullptr, NOUR_None); + VarDecl *RHSDecl = CreateAllocaDecl( + getASTContext(), SemaRef.getCurContext(), Loc, + &getASTContext().Idents.get("openacc.reduction.combiner.rhs"), Ty); + auto *RHSDRE = DeclRefExpr::Create( + getASTContext(), NestedNameSpecifierLoc{}, SourceLocation{}, RHSDecl, + /*RefersToEnclosingVariableOrCapture=*/false, + DeclarationNameInfo{DeclarationName{RHSDecl->getDeclName()}, + RHSDecl->getBeginLoc()}, + Ty, clang::VK_LValue, RHSDecl, nullptr, NOUR_None); + + ExprResult BinOpResult = tryCombiner(LHSDRE, RHSDRE, /*IncludeTrap=*/true); + + return {LHSDecl, LHSDRE, RHSDecl, RHSDRE, BinOpResult.get()}; + }; + + CombinerAttemptTy TopLevelCombinerInfo = formCombiner(VarTy); + + if (TopLevelCombinerInfo.Op) { + if (!TopLevelCombinerInfo.Op->containsErrors() && + TopLevelCombinerInfo.Op->isInstantiationDependent()) { + // If this is instantiation dependent, we're just going to 'give up' here + // and count on getting it right during instantiation. + CombinerRecipes.push_back({nullptr, nullptr, nullptr}); + return false; + } else if (!TopLevelCombinerInfo.Op->containsErrors()) { + // Else we succeeded, so we can just return this combiner. + CombinerRecipes.push_back({TopLevelCombinerInfo.LHS, + TopLevelCombinerInfo.RHS, + TopLevelCombinerInfo.Op}); + return false; + } + } + + // Since the 'root' level combiner didn't succeed, the only thing that could + // still succeed is a struct that we decompose into its individual fields. + + RecordDecl *RD = VarTy->getAsRecordDecl(); + if (!RD) { + Diag(Loc, diag::err_acc_reduction_recipe_no_op) << VarTy; + tryCombiner(TopLevelCombinerInfo.LHSDRE, TopLevelCombinerInfo.RHSDRE, + /*IncludeTrap=*/false); + return true; + } + + for (const FieldDecl *FD : RD->fields()) { + CombinerAttemptTy FieldCombinerInfo = formCombiner(FD->getType()); + + if (!FieldCombinerInfo.Op || FieldCombinerInfo.Op->containsErrors()) { + Diag(Loc, diag::err_acc_reduction_recipe_no_op) << FD->getType(); + Diag(FD->getBeginLoc(), diag::note_acc_reduction_recipe_noop_field) << RD; + tryCombiner(FieldCombinerInfo.LHSDRE, FieldCombinerInfo.RHSDRE, + /*IncludeTrap=*/false); + return true; + } + + if (FieldCombinerInfo.Op->isInstantiationDependent()) { + // If this is instantiation dependent, we're just going to 'give up' here + // and count on getting it right during instantiation.
+ CombinerRecipes.push_back({nullptr, nullptr, nullptr}); + } else { + CombinerRecipes.push_back( + {FieldCombinerInfo.LHS, FieldCombinerInfo.RHS, FieldCombinerInfo.Op}); + } + } + + return false; } diff --git a/clang/lib/Sema/SemaOpenACCClause.cpp b/clang/lib/Sema/SemaOpenACCClause.cpp index b086929..881e960 100644 --- a/clang/lib/Sema/SemaOpenACCClause.cpp +++ b/clang/lib/Sema/SemaOpenACCClause.cpp @@ -1915,51 +1915,34 @@ SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses, return Result; } -/// OpenACC 3.3 section 2.5.15: -/// At a mininmum, the supported data types include ... the numerical data types -/// in C, C++, and Fortran. -/// -/// If the reduction var is a composite variable, each -/// member of the composite variable must be a supported datatype for the -/// reduction operation. -ExprResult SemaOpenACC::CheckReductionVar(OpenACCDirectiveKind DirectiveKind, - OpenACCReductionOperator ReductionOp, - Expr *VarExpr) { - // For now, we only support 'scalar' types, or composites/arrays of scalar - // types. - VarExpr = VarExpr->IgnoreParenCasts(); +bool SemaOpenACC::CheckReductionVarType(Expr *VarExpr) { SourceLocation VarLoc = VarExpr->getBeginLoc(); SmallVector<PartialDiagnosticAt> Notes; - QualType CurType = VarExpr->getType(); - - // For array like things, the expression can either be an array element - // (subscript expr), array section, or array type. Peel those off, and add - // notes in case we find an illegal kind. We'll allow scalar or composite of - // scalars inside of this. - if (auto *ASE = dyn_cast<ArraySectionExpr>(VarExpr)) { - QualType BaseType = ArraySectionExpr::getBaseOriginalType(ASE); + // The standard isn't clear how many levels of 'array element' or 'subarray' + // are permitted, but we can handle as many as we need, so we'll strip them + // off here. This will result in CurType being the actual 'type' of the + // expression, which is what we are looking to check. + QualType CurType = isa<ArraySectionExpr>(VarExpr) + ? ArraySectionExpr::getBaseOriginalType(VarExpr) + : VarExpr->getType(); + + // This can happen when we have a dependent type in an array element that the + // above function has tried to 'unwrap'. Since this can only happen with + // dependence, just let it go. + if (CurType.isNull()) + return false; - PartialDiagnostic PD = PDiag(diag::note_acc_reduction_array) - << diag::OACCReductionArray::Section << BaseType; - Notes.push_back({ASE->getBeginLoc(), PD}); - - CurType = getASTContext().getBaseElementType(BaseType); - } else if (auto *SubExpr = dyn_cast<ArraySubscriptExpr>(VarExpr)) { - // Array subscript already results in the type of the thing as its type, so - // there is no type to change here. - PartialDiagnostic PD = - PDiag(diag::note_acc_reduction_array) - << diag::OACCReductionArray::Subscript - << SubExpr->getBase()->IgnoreParenImpCasts()->getType(); - Notes.push_back({SubExpr->getBeginLoc(), PD}); - } else if (auto *AT = getASTContext().getAsArrayType(CurType)) { + // If we are still an array type, we allow 1 level of 'unpeeling' of the + // array. The standard isn't clear here whether this is allowed, but + // array-of-valid-things makes sense. + if (auto *AT = getASTContext().getAsArrayType(CurType)) { // If we're already the array type, peel off the array and leave the element // type. 
- CurType = getASTContext().getBaseElementType(AT); PartialDiagnostic PD = PDiag(diag::note_acc_reduction_array) << diag::OACCReductionArray::ArrayTy << CurType; Notes.push_back({VarLoc, PD}); + CurType = AT->getElementType(); } auto IsValidMemberOfComposite = [](QualType Ty) { @@ -1974,31 +1957,26 @@ ExprResult SemaOpenACC::CheckReductionVar(OpenACCDirectiveKind DirectiveKind, for (auto [Loc, PD] : Notes) Diag(Loc, PD); - Diag(VarLoc, diag::note_acc_reduction_type_summary); + return Diag(VarLoc, diag::note_acc_reduction_type_summary); }; // If the type is already scalar, or is dependent, we're done checking. if (IsValidMemberOfComposite(CurType)) { // Nothing to do here, is valid. } else if (auto *RD = CurType->getAsRecordDecl()) { - if (!RD->isStruct() && !RD->isClass()) { - EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) - << RD << diag::OACCReductionTy::NotClassStruct); - return ExprError(); - } + if (!RD->isStruct() && !RD->isClass()) + return EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) + << RD + << diag::OACCReductionTy::NotClassStruct); - if (!RD->isCompleteDefinition()) { - EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) - << RD << diag::OACCReductionTy::NotComplete); - return ExprError(); - } + if (!RD->isCompleteDefinition()) + return EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) + << RD << diag::OACCReductionTy::NotComplete); if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD); - CXXRD && !CXXRD->isAggregate()) { - EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) - << CXXRD << diag::OACCReductionTy::NotAgg); - return ExprError(); - } + CXXRD && !CXXRD->isAggregate()) + return EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) + << CXXRD << diag::OACCReductionTy::NotAgg); for (FieldDecl *FD : RD->fields()) { if (!IsValidMemberOfComposite(FD->getType())) { @@ -2007,17 +1985,37 @@ ExprResult SemaOpenACC::CheckReductionVar(OpenACCDirectiveKind DirectiveKind, << FD->getName() << RD->getName(); Notes.push_back({FD->getBeginLoc(), PD}); // TODO: member here.note_acc_reduction_member_of_composite - EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) - << FD->getType() - << diag::OACCReductionTy::MemberNotScalar); - return ExprError(); + return EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) + << FD->getType() + << diag::OACCReductionTy::MemberNotScalar); } } } else { - EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) - << CurType << diag::OACCReductionTy::NotScalar); + return EmitDiags(VarLoc, PDiag(diag::err_acc_reduction_type) + << CurType + << diag::OACCReductionTy::NotScalar); } + return false; +} + +/// OpenACC 3.3 section 2.5.15: +/// At a minimum, the supported data types include ... the numerical data types +/// in C, C++, and Fortran. +/// +/// If the reduction var is a composite variable, each +/// member of the composite variable must be a supported datatype for the +/// reduction operation. +ExprResult SemaOpenACC::CheckReductionVar(OpenACCDirectiveKind DirectiveKind, +OpenACCReductionOperator ReductionOp, + Expr *VarExpr) { + // For now, we only support 'scalar' types, or composites/arrays of scalar + // types. + VarExpr = VarExpr->IgnoreParenCasts(); + + if (CheckReductionVarType(VarExpr)) + return ExprError(); + // OpenACC 3.3: 2.9.11: Reduction clauses on nested constructs for the same // reduction 'var' must have the same reduction operator.
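To make the restructured checks concrete, the following illustrative declarations (hypothetical, not taken from the patch's tests) show which diagnostic each would hit when used as a reduction variable type:

struct Ok { int A; double B; };        // accepted: every member passes IsValidMemberOfComposite
union U { int I; float F; };           // rejected: OACCReductionTy::NotClassStruct
struct Incomplete;                     // rejected: OACCReductionTy::NotComplete
struct NonAgg { NonAgg(int); int X; }; // rejected: OACCReductionTy::NotAgg (C++ non-aggregate)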
if (!VarExpr->isInstantiationDependent()) { diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp index 6acf79a..868f0cc 100644 --- a/clang/lib/Serialization/ASTReader.cpp +++ b/clang/lib/Serialization/ASTReader.cpp @@ -13009,9 +13009,22 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() { llvm::SmallVector<OpenACCReductionRecipe> RecipeList; for (unsigned I = 0; I < VarList.size(); ++I) { - static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *)); VarDecl *Recipe = readDeclAs<VarDecl>(); - RecipeList.push_back({Recipe}); + + static_assert(sizeof(OpenACCReductionRecipe::CombinerRecipe) == + 3 * sizeof(int *)); + + llvm::SmallVector<OpenACCReductionRecipe::CombinerRecipe> Combiners; + unsigned NumCombiners = readInt(); + for (unsigned I = 0; I < NumCombiners; ++I) { + VarDecl *LHS = readDeclAs<VarDecl>(); + VarDecl *RHS = readDeclAs<VarDecl>(); + Expr *Op = readExpr(); + + Combiners.push_back({LHS, RHS, Op}); + } + + RecipeList.push_back({Recipe, Combiners}); } return OpenACCReductionClause::Create(getContext(), BeginLoc, LParenLoc, Op, diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index 09b1e58..82ccde8 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -8925,8 +8925,17 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) { writeOpenACCVarList(RC); for (const OpenACCReductionRecipe &R : RC->getRecipes()) { - static_assert(sizeof(OpenACCReductionRecipe) == 1 * sizeof(int *)); AddDeclRef(R.AllocaDecl); + + static_assert(sizeof(OpenACCReductionRecipe::CombinerRecipe) == + 3 * sizeof(int *)); + writeUInt32(R.CombinerRecipes.size()); + + for (auto &CombinerRecipe : R.CombinerRecipes) { + AddDeclRef(CombinerRecipe.LHS); + AddDeclRef(CombinerRecipe.RHS); + AddStmt(CombinerRecipe.Op); + } } return; } diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp index 15a0c5a..ace639c 100644 --- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp @@ -232,7 +232,7 @@ public: bool ignoreARC = !PD->isReadOnly() && PD->getSetterKind() == ObjCPropertyDecl::Assign; auto IsUnsafePtr = isUnsafePtr(QT, ignoreARC); - return {IsUnsafePtr && *IsUnsafePtr, PropType}; + return {IsUnsafePtr && *IsUnsafePtr && !PD->isRetaining(), PropType}; } bool shouldSkipDecl(const RecordDecl *RD) const { diff --git a/clang/lib/StaticAnalyzer/Core/EntryPointStats.cpp b/clang/lib/StaticAnalyzer/Core/EntryPointStats.cpp index 62ae62f2f..abfb176 100644 --- a/clang/lib/StaticAnalyzer/Core/EntryPointStats.cpp +++ b/clang/lib/StaticAnalyzer/Core/EntryPointStats.cpp @@ -24,7 +24,6 @@ using namespace ento; namespace { struct Registry { - std::vector<BoolEPStat *> BoolStats; std::vector<CounterEPStat *> CounterStats; std::vector<UnsignedMaxEPStat *> UnsignedMaxStats; std::vector<UnsignedEPStat *> UnsignedStats; @@ -33,7 +32,6 @@ struct Registry { struct Snapshot { const Decl *EntryPoint; - std::vector<bool> BoolStatValues; std::vector<unsigned> UnsignedStatValues; void dumpAsCSV(llvm::raw_ostream &OS) const; @@ -48,7 +46,6 @@ static llvm::ManagedStatic<Registry> StatsRegistry; namespace { template <typename Callback> void enumerateStatVectors(const Callback &Fn) { - Fn(StatsRegistry->BoolStats); Fn(StatsRegistry->CounterStats); Fn(StatsRegistry->UnsignedMaxStats); Fn(StatsRegistry->UnsignedStats); @@ -94,12 
+91,6 @@ void EntryPointStat::lockRegistry(llvm::StringRef CPPFileName) { return Result; } -BoolEPStat::BoolEPStat(llvm::StringLiteral Name) : EntryPointStat(Name) { - assert(!StatsRegistry->IsLocked); - assert(!isRegistered(Name)); - StatsRegistry->BoolStats.push_back(this); -} - CounterEPStat::CounterEPStat(llvm::StringLiteral Name) : EntryPointStat(Name) { assert(!StatsRegistry->IsLocked); assert(!isRegistered(Name)); @@ -165,28 +156,14 @@ void Registry::Snapshot::dumpAsCSV(llvm::raw_ostream &OS) const { OS << StatsRegistry->EscapedCPPFileName << "\",\""; llvm::printEscapedString( clang::AnalysisDeclContext::getFunctionName(EntryPoint), OS); - OS << "\","; - auto PrintAsBool = [&OS](bool B) { OS << (B ? "true" : "false"); }; - llvm::interleave(BoolStatValues, OS, PrintAsBool, ","); - OS << ((BoolStatValues.empty() || UnsignedStatValues.empty()) ? "" : ","); + OS << "\""; + OS << (UnsignedStatValues.empty() ? "" : ","); llvm::interleave(UnsignedStatValues, OS, [&OS](unsigned U) { OS << U; }, ","); } -static std::vector<bool> consumeBoolStats() { - std::vector<bool> Result; - Result.reserve(StatsRegistry->BoolStats.size()); - for (auto *M : StatsRegistry->BoolStats) { - Result.push_back(M->value()); - M->reset(); - } - return Result; -} - void EntryPointStat::takeSnapshot(const Decl *EntryPoint) { - auto BoolValues = consumeBoolStats(); auto UnsignedValues = consumeUnsignedStats(); - StatsRegistry->Snapshots.push_back( - {EntryPoint, std::move(BoolValues), std::move(UnsignedValues)}); + StatsRegistry->Snapshots.push_back({EntryPoint, std::move(UnsignedValues)}); } void EntryPointStat::dumpStatsAsCSV(llvm::StringRef FileName) {
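With the bool statistics removed, a row written by dumpAsCSV now consists of the escaped file name, the entry-point name, and only the remaining unsigned stat values, with no separating comma when UnsignedStatValues is empty. A hypothetical row, for illustration: "test.cpp","void f(int)",3,17,0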