Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 68
-rw-r--r--  clang/lib/AST/StmtProfile.cpp | 11
-rw-r--r--  clang/lib/ASTMatchers/CMakeLists.txt | 1
-rw-r--r--  clang/lib/ASTMatchers/GtestMatchers.cpp | 228
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuilder.h | 14
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 59
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 32
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenClass.cpp | 51
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 8
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 46
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp | 80
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 193
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp | 23
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.h | 4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 35
-rw-r--r--  clang/lib/CIR/CodeGen/EHScopeStack.h | 3
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp | 103
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 7
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp | 3
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.cpp | 11
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp | 129
-rw-r--r--  clang/lib/CodeGen/CGExprCXX.cpp | 12
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp | 5
-rw-r--r--  clang/lib/CodeGen/CGHLSLBuiltins.cpp | 13
-rw-r--r--  clang/lib/CodeGen/CGHLSLRuntime.cpp | 16
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp | 7
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h | 7
-rw-r--r--  clang/lib/Driver/ToolChains/Darwin.cpp | 7
-rw-r--r--  clang/lib/Driver/ToolChains/HIPAMD.cpp | 9
-rw-r--r--  clang/lib/Driver/ToolChains/HIPSPV.cpp | 15
-rw-r--r--  clang/lib/Driver/ToolChains/HIPUtility.cpp | 11
-rw-r--r--  clang/lib/Driver/ToolChains/HIPUtility.h | 2
-rw-r--r--  clang/lib/Parse/ParseExprCXX.cpp | 4
-rw-r--r--  clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp | 141
-rw-r--r--  clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h | 7
-rw-r--r--  clang/lib/Sema/HLSLExternalSemaSource.cpp | 3
-rw-r--r--  clang/lib/Sema/SemaExpr.cpp | 63
-rw-r--r--  clang/lib/Sema/SemaHLSL.cpp | 112
-rw-r--r--  clang/lib/Sema/SemaOpenACC.cpp | 2
-rw-r--r--  clang/lib/Sema/SemaOverload.cpp | 32
-rw-r--r--  clang/lib/Serialization/ASTReader.cpp | 17
-rw-r--r--  clang/lib/Serialization/ASTWriter.cpp | 11
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/VAListChecker.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp | 2
47 files changed, 1163 insertions, 459 deletions
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 9125250..922d679 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -2549,7 +2549,7 @@ static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC,
return true;
}
-static bool interp__builtin_ia32_pmadd(
+static bool interp__builtin_ia32_pmul(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &,
const APSInt &)>
@@ -2587,54 +2587,6 @@ static bool interp__builtin_ia32_pmadd(
return true;
}
-static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC,
- const CallExpr *Call,
- unsigned BuiltinID) {
- assert(Call->getArg(0)->getType()->isVectorType() &&
- Call->getArg(1)->getType()->isVectorType());
- const Pointer &RHS = S.Stk.pop<Pointer>();
- const Pointer &LHS = S.Stk.pop<Pointer>();
- const Pointer &Dst = S.Stk.peek<Pointer>();
-
- const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
- PrimType ElemT = *S.getContext().classify(VT->getElementType());
- unsigned SourceLen = VT->getNumElements();
-
- PrimType DstElemT = *S.getContext().classify(
- Call->getType()->castAs<VectorType>()->getElementType());
- unsigned DstElem = 0;
- for (unsigned I = 0; I != SourceLen; I += 2) {
- APSInt Elem1;
- APSInt Elem2;
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- Elem1 = LHS.elem<T>(I).toAPSInt();
- Elem2 = RHS.elem<T>(I).toAPSInt();
- });
-
- APSInt Result;
- switch (BuiltinID) {
- case clang::X86::BI__builtin_ia32_pmuludq128:
- case clang::X86::BI__builtin_ia32_pmuludq256:
- case clang::X86::BI__builtin_ia32_pmuludq512:
- Result = APSInt(llvm::APIntOps::muluExtended(Elem1, Elem2),
- /*IsUnsigned=*/true);
- break;
- case clang::X86::BI__builtin_ia32_pmuldq128:
- case clang::X86::BI__builtin_ia32_pmuldq256:
- case clang::X86::BI__builtin_ia32_pmuldq512:
- Result = APSInt(llvm::APIntOps::mulsExtended(Elem1, Elem2),
- /*IsUnsigned=*/false);
- break;
- }
- INT_TYPE_SWITCH_NO_BOOL(DstElemT,
- { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
- ++DstElem;
- }
-
- Dst.initializeAllElements();
- return true;
-}
-
static bool interp__builtin_elementwise_triop_fp(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<APFloat(const APFloat &, const APFloat &,
@@ -3512,7 +3464,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case clang::X86::BI__builtin_ia32_pmaddubsw128:
case clang::X86::BI__builtin_ia32_pmaddubsw256:
case clang::X86::BI__builtin_ia32_pmaddubsw512:
- return interp__builtin_ia32_pmadd(
+ return interp__builtin_ia32_pmul(
S, OpPC, Call,
[](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
const APSInt &HiRHS) {
@@ -3524,7 +3476,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case clang::X86::BI__builtin_ia32_pmaddwd128:
case clang::X86::BI__builtin_ia32_pmaddwd256:
case clang::X86::BI__builtin_ia32_pmaddwd512:
- return interp__builtin_ia32_pmadd(
+ return interp__builtin_ia32_pmul(
S, OpPC, Call,
[](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
const APSInt &HiRHS) {
@@ -3677,10 +3629,22 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case clang::X86::BI__builtin_ia32_pmuldq128:
case clang::X86::BI__builtin_ia32_pmuldq256:
case clang::X86::BI__builtin_ia32_pmuldq512:
+ return interp__builtin_ia32_pmul(
+ S, OpPC, Call,
+ [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
+ const APSInt &HiRHS) {
+ return llvm::APIntOps::mulsExtended(LoLHS, LoRHS);
+ });
+
case clang::X86::BI__builtin_ia32_pmuludq128:
case clang::X86::BI__builtin_ia32_pmuludq256:
case clang::X86::BI__builtin_ia32_pmuludq512:
- return interp__builtin_ia32_pmul(S, OpPC, Call, BuiltinID);
+ return interp__builtin_ia32_pmul(
+ S, OpPC, Call,
+ [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
+ const APSInt &HiRHS) {
+ return llvm::APIntOps::muluExtended(LoLHS, LoRHS);
+ });
case Builtin::BI__builtin_elementwise_fma:
return interp__builtin_elementwise_triop_fp(
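To make the refactor above concrete, here is a minimal standalone sketch (editor's illustration, not part of the patch) of the lane arithmetic the new pmuludq/pmuldq lambdas perform with LLVM's APInt utilities; the helper name is invented:
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include <cassert>
// Fold one pmuludq lane: the even-indexed 32-bit elements of each source
// vector are zero-extended and multiplied into a 64-bit result.
static llvm::APSInt foldPmuludqLane(const llvm::APSInt &LoLHS,
                                    const llvm::APSInt &LoRHS) {
  return llvm::APSInt(llvm::APIntOps::muluExtended(LoLHS, LoRHS),
                      /*IsUnsigned=*/true);
}
int main() {
  llvm::APSInt A(llvm::APInt(32, 0xFFFFFFFFu), /*isUnsigned=*/true);
  llvm::APSInt B(llvm::APInt(32, 2), /*isUnsigned=*/true);
  // 0xFFFFFFFF * 2 does not fit in 32 bits but fits in the widened result.
  assert(foldPmuludqLane(A, B).getZExtValue() == 0x1FFFFFFFEull);
  return 0;
}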
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index f3b5478..3cd033e 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -2769,10 +2769,19 @@ void OpenACCClauseProfiler::VisitReductionClause(
for (auto &Recipe : Clause.getRecipes()) {
Profiler.VisitDecl(Recipe.AllocaDecl);
+
// TODO: OpenACC: Make sure we remember to update this when we figure out
// what we're adding for the operation recipe, in the meantime, a static
// assert will make sure we don't add something.
- static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *));
+ static_assert(sizeof(OpenACCReductionRecipe::CombinerRecipe) ==
+ 3 * sizeof(int *));
+ for (auto &CombinerRecipe : Recipe.CombinerRecipes) {
+ if (CombinerRecipe.Op) {
+ Profiler.VisitDecl(CombinerRecipe.LHS);
+ Profiler.VisitDecl(CombinerRecipe.RHS);
+ Profiler.VisitStmt(CombinerRecipe.Op);
+ }
+ }
}
}
diff --git a/clang/lib/ASTMatchers/CMakeLists.txt b/clang/lib/ASTMatchers/CMakeLists.txt
index 7769fd6..29ad27df 100644
--- a/clang/lib/ASTMatchers/CMakeLists.txt
+++ b/clang/lib/ASTMatchers/CMakeLists.txt
@@ -8,7 +8,6 @@ set(LLVM_LINK_COMPONENTS
add_clang_library(clangASTMatchers
ASTMatchFinder.cpp
ASTMatchersInternal.cpp
- GtestMatchers.cpp
LowLevelHelpers.cpp
LINK_LIBS
diff --git a/clang/lib/ASTMatchers/GtestMatchers.cpp b/clang/lib/ASTMatchers/GtestMatchers.cpp
deleted file mode 100644
index 7c135bb..0000000
--- a/clang/lib/ASTMatchers/GtestMatchers.cpp
+++ /dev/null
@@ -1,228 +0,0 @@
-//===- GtestMatchers.cpp - AST Matchers for Gtest ---------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements several matchers for popular gtest macros. In general,
-// AST matchers cannot match calls to macros. However, we can simulate such
-// matches if the macro definition has identifiable elements that themselves can
-// be matched. In that case, we can match on those elements and then check that
-// the match occurs within an expansion of the desired macro. The more uncommon
-// the identified elements, the more efficient this process will be.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/ASTMatchers/GtestMatchers.h"
-#include "llvm/ADT/StringRef.h"
-
-namespace clang {
-namespace ast_matchers {
-namespace {
-
-enum class MacroType {
- Expect,
- Assert,
- On,
-};
-
-} // namespace
-
-static DeclarationMatcher getComparisonDecl(GtestCmp Cmp) {
- switch (Cmp) {
- case GtestCmp::Eq:
- return cxxMethodDecl(hasName("Compare"),
- ofClass(cxxRecordDecl(isSameOrDerivedFrom(
- hasName("::testing::internal::EqHelper")))));
- case GtestCmp::Ne:
- return functionDecl(hasName("::testing::internal::CmpHelperNE"));
- case GtestCmp::Ge:
- return functionDecl(hasName("::testing::internal::CmpHelperGE"));
- case GtestCmp::Gt:
- return functionDecl(hasName("::testing::internal::CmpHelperGT"));
- case GtestCmp::Le:
- return functionDecl(hasName("::testing::internal::CmpHelperLE"));
- case GtestCmp::Lt:
- return functionDecl(hasName("::testing::internal::CmpHelperLT"));
- }
- llvm_unreachable("Unhandled GtestCmp enum");
-}
-
-static llvm::StringRef getMacroTypeName(MacroType Macro) {
- switch (Macro) {
- case MacroType::Expect:
- return "EXPECT";
- case MacroType::Assert:
- return "ASSERT";
- case MacroType::On:
- return "ON";
- }
- llvm_unreachable("Unhandled MacroType enum");
-}
-
-static llvm::StringRef getComparisonTypeName(GtestCmp Cmp) {
- switch (Cmp) {
- case GtestCmp::Eq:
- return "EQ";
- case GtestCmp::Ne:
- return "NE";
- case GtestCmp::Ge:
- return "GE";
- case GtestCmp::Gt:
- return "GT";
- case GtestCmp::Le:
- return "LE";
- case GtestCmp::Lt:
- return "LT";
- }
- llvm_unreachable("Unhandled GtestCmp enum");
-}
-
-static std::string getMacroName(MacroType Macro, GtestCmp Cmp) {
- return (getMacroTypeName(Macro) + "_" + getComparisonTypeName(Cmp)).str();
-}
-
-static std::string getMacroName(MacroType Macro, llvm::StringRef Operation) {
- return (getMacroTypeName(Macro) + "_" + Operation).str();
-}
-
-// Under the hood, ON_CALL is expanded to a call to `InternalDefaultActionSetAt`
-// to set a default action spec to the underlying function mocker, while
-// EXPECT_CALL is expanded to a call to `InternalExpectedAt` to set a new
-// expectation spec.
-static llvm::StringRef getSpecSetterName(MacroType Macro) {
- switch (Macro) {
- case MacroType::On:
- return "InternalDefaultActionSetAt";
- case MacroType::Expect:
- return "InternalExpectedAt";
- default:
- llvm_unreachable("Unhandled MacroType enum");
- }
- llvm_unreachable("Unhandled MacroType enum");
-}
-
-// In general, AST matchers cannot match calls to macros. However, we can
-// simulate such matches if the macro definition has identifiable elements that
-// themselves can be matched. In that case, we can match on those elements and
-// then check that the match occurs within an expansion of the desired
-// macro. The more uncommon the identified elements, the more efficient this
-// process will be.
-//
-// We use this approach to implement the derived matchers gtestAssert and
-// gtestExpect.
-static internal::BindableMatcher<Stmt>
-gtestComparisonInternal(MacroType Macro, GtestCmp Cmp, StatementMatcher Left,
- StatementMatcher Right) {
- return callExpr(isExpandedFromMacro(getMacroName(Macro, Cmp)),
- callee(getComparisonDecl(Cmp)), hasArgument(2, Left),
- hasArgument(3, Right));
-}
-
-static internal::BindableMatcher<Stmt>
-gtestThatInternal(MacroType Macro, StatementMatcher Actual,
- StatementMatcher Matcher) {
- return cxxOperatorCallExpr(
- isExpandedFromMacro(getMacroName(Macro, "THAT")),
- hasOverloadedOperatorName("()"), hasArgument(2, Actual),
- hasArgument(
- 0, expr(hasType(classTemplateSpecializationDecl(hasName(
- "::testing::internal::PredicateFormatterFromMatcher"))),
- ignoringImplicit(
- callExpr(callee(functionDecl(hasName(
- "::testing::internal::"
- "MakePredicateFormatterFromMatcher"))),
- hasArgument(0, ignoringImplicit(Matcher)))))));
-}
-
-static internal::BindableMatcher<Stmt>
-gtestCallInternal(MacroType Macro, StatementMatcher MockCall, MockArgs Args) {
- // A ON_CALL or EXPECT_CALL macro expands to different AST structures
- // depending on whether the mock method has arguments or not.
- switch (Args) {
- // For example,
- // `ON_CALL(mock, TwoParamMethod)` is expanded to
- // `mock.gmock_TwoArgsMethod(WithoutMatchers(),
- // nullptr).InternalDefaultActionSetAt(...)`.
- // EXPECT_CALL is the same except
- // that it calls `InternalExpectedAt` instead of `InternalDefaultActionSetAt`
- // in the end.
- case MockArgs::None:
- return cxxMemberCallExpr(
- isExpandedFromMacro(getMacroName(Macro, "CALL")),
- callee(functionDecl(hasName(getSpecSetterName(Macro)))),
- onImplicitObjectArgument(ignoringImplicit(MockCall)));
- // For example,
- // `ON_CALL(mock, TwoParamMethod(m1, m2))` is expanded to
- // `mock.gmock_TwoParamMethod(m1,m2)(WithoutMatchers(),
- // nullptr).InternalDefaultActionSetAt(...)`.
- // EXPECT_CALL is the same except that it calls `InternalExpectedAt` instead
- // of `InternalDefaultActionSetAt` in the end.
- case MockArgs::Some:
- return cxxMemberCallExpr(
- isExpandedFromMacro(getMacroName(Macro, "CALL")),
- callee(functionDecl(hasName(getSpecSetterName(Macro)))),
- onImplicitObjectArgument(ignoringImplicit(cxxOperatorCallExpr(
- hasOverloadedOperatorName("()"), argumentCountIs(3),
- hasArgument(0, ignoringImplicit(MockCall))))));
- }
- llvm_unreachable("Unhandled MockArgs enum");
-}
-
-static internal::BindableMatcher<Stmt>
-gtestCallInternal(MacroType Macro, StatementMatcher MockObject,
- llvm::StringRef MockMethodName, MockArgs Args) {
- return gtestCallInternal(
- Macro,
- cxxMemberCallExpr(
- onImplicitObjectArgument(MockObject),
- callee(functionDecl(hasName(("gmock_" + MockMethodName).str())))),
- Args);
-}
-
-internal::BindableMatcher<Stmt> gtestAssert(GtestCmp Cmp, StatementMatcher Left,
- StatementMatcher Right) {
- return gtestComparisonInternal(MacroType::Assert, Cmp, Left, Right);
-}
-
-internal::BindableMatcher<Stmt> gtestExpect(GtestCmp Cmp, StatementMatcher Left,
- StatementMatcher Right) {
- return gtestComparisonInternal(MacroType::Expect, Cmp, Left, Right);
-}
-
-internal::BindableMatcher<Stmt> gtestAssertThat(StatementMatcher Actual,
- StatementMatcher Matcher) {
- return gtestThatInternal(MacroType::Assert, Actual, Matcher);
-}
-
-internal::BindableMatcher<Stmt> gtestExpectThat(StatementMatcher Actual,
- StatementMatcher Matcher) {
- return gtestThatInternal(MacroType::Expect, Actual, Matcher);
-}
-
-internal::BindableMatcher<Stmt> gtestOnCall(StatementMatcher MockObject,
- llvm::StringRef MockMethodName,
- MockArgs Args) {
- return gtestCallInternal(MacroType::On, MockObject, MockMethodName, Args);
-}
-
-internal::BindableMatcher<Stmt> gtestOnCall(StatementMatcher MockCall,
- MockArgs Args) {
- return gtestCallInternal(MacroType::On, MockCall, Args);
-}
-
-internal::BindableMatcher<Stmt> gtestExpectCall(StatementMatcher MockObject,
- llvm::StringRef MockMethodName,
- MockArgs Args) {
- return gtestCallInternal(MacroType::Expect, MockObject, MockMethodName, Args);
-}
-
-internal::BindableMatcher<Stmt> gtestExpectCall(StatementMatcher MockCall,
- MockArgs Args) {
- return gtestCallInternal(MacroType::Expect, MockCall, Args);
-}
-
-} // end namespace ast_matchers
-} // end namespace clang
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 58345b4..25afe8b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -122,6 +122,11 @@ public:
return getPointerTo(cir::VPtrType::get(getContext()));
}
+ cir::FuncType getFuncType(llvm::ArrayRef<mlir::Type> params, mlir::Type retTy,
+ bool isVarArg = false) {
+ return cir::FuncType::get(params, retTy, isVarArg);
+ }
+
/// Get a CIR record kind from a AST declaration tag.
cir::RecordType::RecordKind getRecordKind(const clang::TagTypeKind kind) {
switch (kind) {
@@ -372,6 +377,15 @@ public:
return cir::BinOp::create(*this, loc, cir::BinOpKind::Div, lhs, rhs);
}
+ mlir::Value createDynCast(mlir::Location loc, mlir::Value src,
+ cir::PointerType destType, bool isRefCast,
+ cir::DynamicCastInfoAttr info) {
+ auto castKind =
+ isRefCast ? cir::DynamicCastKind::Ref : cir::DynamicCastKind::Ptr;
+ return cir::DynamicCastOp::create(*this, loc, destType, castKind, src, info,
+ /*relative_layout=*/false);
+ }
+
Address createBaseClassAddr(mlir::Location loc, Address addr,
mlir::Type destType, unsigned offset,
bool assumeNotNull) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
index d5b35c2..274d11b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CIRGenCXXABI.h"
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
@@ -95,7 +96,63 @@ static void emitDeclDestroy(CIRGenFunction &cgf, const VarDecl *vd,
return;
}
- cgf.cgm.errorNYI(vd->getSourceRange(), "global with destructor");
+ // If not constant storage we'll emit this regardless of NeedsDtor value.
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+
+ // Prepare the dtor region.
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ mlir::Block *block = builder.createBlock(&addr.getDtorRegion());
+ CIRGenFunction::LexicalScope lexScope{cgf, addr.getLoc(),
+ builder.getInsertionBlock()};
+ lexScope.setAsGlobalInit();
+ builder.setInsertionPointToStart(block);
+
+ CIRGenModule &cgm = cgf.cgm;
+ QualType type = vd->getType();
+
+ // Special-case non-array C++ destructors, if they have the right signature.
+ // Under some ABIs, destructors return this instead of void, and cannot be
+ // passed directly to __cxa_atexit if the target does not allow this
+ // mismatch.
+ const CXXRecordDecl *record = type->getAsCXXRecordDecl();
+ bool canRegisterDestructor =
+ record && (!cgm.getCXXABI().hasThisReturn(
+ GlobalDecl(record->getDestructor(), Dtor_Complete)) ||
+ cgm.getCXXABI().canCallMismatchedFunctionType());
+
+ // If __cxa_atexit is disabled via a flag, a different helper function is
+ // generated elsewhere which uses atexit instead, and it takes the destructor
+ // directly.
+ cir::FuncOp fnOp;
+ if (record && (canRegisterDestructor || cgm.getCodeGenOpts().CXAAtExit)) {
+ if (vd->getTLSKind())
+ cgm.errorNYI(vd->getSourceRange(), "TLS destructor");
+ assert(!record->hasTrivialDestructor());
+ assert(!cir::MissingFeatures::openCL());
+ CXXDestructorDecl *dtor = record->getDestructor();
+ // In LLVM OG codegen this is done in registerGlobalDtor, but CIRGen
+ // relies on LoweringPrepare for further decoupling, so build the
+ // call right here.
+ auto gd = GlobalDecl(dtor, Dtor_Complete);
+ fnOp = cgm.getAddrAndTypeOfCXXStructor(gd).second;
+ cgf.getBuilder().createCallOp(
+ cgf.getLoc(vd->getSourceRange()),
+ mlir::FlatSymbolRefAttr::get(fnOp.getSymNameAttr()),
+ mlir::ValueRange{cgm.getAddrOfGlobalVar(vd)});
+ } else {
+ cgm.errorNYI(vd->getSourceRange(), "array destructor");
+ }
+ assert(fnOp && "expected cir.func");
+ cgm.getCXXABI().registerGlobalDtor(vd, fnOp, nullptr);
+
+ builder.setInsertionPointToEnd(block);
+ if (block->empty()) {
+ block->erase();
+ // Don't confuse lexical cleanup.
+ builder.clearInsertionPoint();
+ } else {
+ builder.create<cir::YieldOp>(addr.getLoc());
+ }
}
cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl gd) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 2465a68..06f41cd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -54,6 +54,12 @@ public:
Address thisAddr, const CXXRecordDecl *classDecl,
const CXXRecordDecl *baseClassDecl) = 0;
+ virtual mlir::Value emitDynamicCast(CIRGenFunction &cgf, mlir::Location loc,
+ QualType srcRecordTy,
+ QualType destRecordTy,
+ cir::PointerType destCIRTy,
+ bool isRefCast, Address src) = 0;
+
public:
/// Similar to AddedStructorArgs, but only notes the number of additional
/// arguments.
@@ -149,6 +155,14 @@ public:
/// Loads the incoming C++ this pointer as it was passed by the caller.
mlir::Value loadIncomingCXXThis(CIRGenFunction &cgf);
+ /// Get the implicit (second) parameter that comes after the "this" pointer,
+ /// or nullptr if there isn't one.
+ virtual mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &cgf,
+ const CXXDestructorDecl *dd,
+ CXXDtorType type,
+ bool forVirtualBase,
+ bool delegating) = 0;
+
/// Emit constructor variants required by this ABI.
virtual void emitCXXConstructors(const clang::CXXConstructorDecl *d) = 0;
@@ -160,6 +174,14 @@ public:
bool forVirtualBase, bool delegating,
Address thisAddr, QualType thisTy) = 0;
+ /// Emit code to force the execution of a destructor during global
+ /// teardown. The default implementation of this uses atexit.
+ ///
+ /// \param dtor - a function taking a single pointer argument
+ /// \param addr - a pointer to pass to the destructor function.
+ virtual void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
+ mlir::Value addr) = 0;
+
/// Checks if ABI requires extra virtual offset for vtable field.
virtual bool
isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf,
@@ -233,6 +255,16 @@ public:
return false;
}
+ /// Returns true if the target allows calling a function through a pointer
+ /// with a different signature than the actual function (or equivalently,
+ /// bitcasting a function or function pointer to a different function type).
+ /// In principle in the most general case this could depend on the target, the
+ /// calling convention, and the actual types of the arguments and return
+ /// value. Here it just means whether the signature mismatch could *ever* be
+ /// allowed; in other words, does the target do strict checking of signatures
+ /// for all calls.
+ virtual bool canCallMismatchedFunctionType() const { return true; }
+
/// Gets the mangle context.
clang::MangleContext &getMangleContext() { return *mangleContext; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index d9ebf19..485b2c8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -126,6 +126,30 @@ static bool isInitializerOfDynamicClass(const CXXCtorInitializer *baseInit) {
}
namespace {
+/// Call the destructor for a direct base class.
+struct CallBaseDtor final : EHScopeStack::Cleanup {
+ const CXXRecordDecl *baseClass;
+ bool baseIsVirtual;
+ CallBaseDtor(const CXXRecordDecl *base, bool baseIsVirtual)
+ : baseClass(base), baseIsVirtual(baseIsVirtual) {}
+
+ void emit(CIRGenFunction &cgf) override {
+ const CXXRecordDecl *derivedClass =
+ cast<CXXMethodDecl>(cgf.curFuncDecl)->getParent();
+
+ const CXXDestructorDecl *d = baseClass->getDestructor();
+ // We are already inside a destructor, so presumably the object being
+ // destroyed should have the expected type.
+ QualType thisTy = d->getFunctionObjectParameterType();
+ assert(cgf.currSrcLoc && "expected source location");
+ Address addr = cgf.getAddressOfDirectBaseInCompleteClass(
+ *cgf.currSrcLoc, cgf.loadCXXThisAddress(), derivedClass, baseClass,
+ baseIsVirtual);
+ cgf.emitCXXDestructorCall(d, Dtor_Base, baseIsVirtual,
+ /*delegating=*/false, addr, thisTy);
+ }
+};
+
/// A visitor which checks whether an initializer uses 'this' in a
/// way which requires the vtable to be properly set.
struct DynamicThisUseChecker
@@ -891,12 +915,6 @@ public:
assert(!cir::MissingFeatures::ehCleanupFlags());
cgf.emitDestroy(lv.getAddress(), field->getType(), destroyer);
}
-
- // This is a placeholder until EHCleanupScope is implemented.
- size_t getSize() const override {
- assert(!cir::MissingFeatures::ehCleanupScope());
- return sizeof(DestroyField);
- }
};
} // namespace
@@ -928,8 +946,21 @@ void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd,
if (dtorType == Dtor_Complete) {
assert(!cir::MissingFeatures::sanitizers());
- if (classDecl->getNumVBases())
- cgm.errorNYI(dd->getSourceRange(), "virtual base destructor cleanups");
+ // We push them in the forward order so that they'll be popped in
+ // the reverse order.
+ for (const CXXBaseSpecifier &base : classDecl->vbases()) {
+ auto *baseClassDecl = base.getType()->castAsCXXRecordDecl();
+
+ if (baseClassDecl->hasTrivialDestructor()) {
+ // Under SanitizeMemoryUseAfterDtor, poison the trivial base class
+ // memory. For non-trival base classes the same is done in the class
+ // destructor.
+ assert(!cir::MissingFeatures::sanitizers());
+ } else {
+ ehStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, baseClassDecl,
+ /*baseIsVirtual=*/true);
+ }
+ }
return;
}
@@ -948,8 +979,8 @@ void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd,
if (baseClassDecl->hasTrivialDestructor())
assert(!cir::MissingFeatures::sanitizers());
else
- cgm.errorNYI(dd->getSourceRange(),
- "non-trivial base destructor cleanups");
+ ehStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, baseClassDecl,
+ /*baseIsVirtual=*/false);
}
assert(!cir::MissingFeatures::sanitizers());
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 563a753..039d290 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -695,12 +695,6 @@ struct DestroyObject final : EHScopeStack::Cleanup {
void emit(CIRGenFunction &cgf) override {
cgf.emitDestroy(addr, type, destroyer);
}
-
- // This is a placeholder until EHCleanupScope is implemented.
- size_t getSize() const override {
- assert(!cir::MissingFeatures::ehCleanupScope());
- return sizeof(DestroyObject);
- }
};
} // namespace
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index be94890..f416571 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1185,10 +1185,16 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
case CK_BuiltinFnToFnPtr:
llvm_unreachable("builtin functions are handled elsewhere");
+ case CK_Dynamic: {
+ LValue lv = emitLValue(e->getSubExpr());
+ Address v = lv.getAddress();
+ const auto *dce = cast<CXXDynamicCastExpr>(e);
+ return makeNaturalAlignAddrLValue(emitDynamicCast(v, dce), e->getType());
+ }
+
// These are never l-values; just use the aggregate emission code.
case CK_NonAtomicToAtomic:
case CK_AtomicToNonAtomic:
- case CK_Dynamic:
case CK_ToUnion:
case CK_BaseToDerived:
case CK_AddressSpaceConversion:
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 4eb8ca8..97c0944 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -463,12 +463,6 @@ struct CallObjectDelete final : EHScopeStack::Cleanup {
void emit(CIRGenFunction &cgf) override {
cgf.emitDeleteCall(operatorDelete, ptr, elementType);
}
-
- // This is a placeholder until EHCleanupScope is implemented.
- size_t getSize() const override {
- assert(!cir::MissingFeatures::ehCleanupScope());
- return sizeof(CallObjectDelete);
- }
};
} // namespace
@@ -728,3 +722,43 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,
// Emit the call to delete.
emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs);
}
+
+mlir::Value CIRGenFunction::emitDynamicCast(Address thisAddr,
+ const CXXDynamicCastExpr *dce) {
+ mlir::Location loc = getLoc(dce->getSourceRange());
+
+ cgm.emitExplicitCastExprType(dce, this);
+ QualType destTy = dce->getTypeAsWritten();
+ QualType srcTy = dce->getSubExpr()->getType();
+
+ // C++ [expr.dynamic.cast]p7:
+ // If T is "pointer to cv void," then the result is a pointer to the most
+ // derived object pointed to by v.
+ bool isDynCastToVoid = destTy->isVoidPointerType();
+ bool isRefCast = destTy->isReferenceType();
+
+ QualType srcRecordTy;
+ QualType destRecordTy;
+ if (isDynCastToVoid) {
+ srcRecordTy = srcTy->getPointeeType();
+ // No destRecordTy.
+ } else if (const PointerType *destPTy = destTy->getAs<PointerType>()) {
+ srcRecordTy = srcTy->castAs<PointerType>()->getPointeeType();
+ destRecordTy = destPTy->getPointeeType();
+ } else {
+ srcRecordTy = srcTy;
+ destRecordTy = destTy->castAs<ReferenceType>()->getPointeeType();
+ }
+
+ assert(srcRecordTy->isRecordType() && "source type must be a record type!");
+ assert(!cir::MissingFeatures::emitTypeCheck());
+
+ if (dce->isAlwaysNull()) {
+ cgm.errorNYI(dce->getSourceRange(), "emitDynamicCastToNull");
+ return {};
+ }
+
+ auto destCirTy = mlir::cast<cir::PointerType>(convertType(destTy));
+ return cgm.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy,
+ destCirTy, isRefCast, thisAddr);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 59aa257..89e9ec4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -500,6 +500,26 @@ private:
bool appendBitField(const FieldDecl *field, uint64_t fieldOffset,
cir::IntAttr ci, bool allowOverwrite = false);
+ /// Applies zero-initialization to padding bytes before and within a field.
+ /// \param layout The record layout containing field offset information.
+ /// \param fieldNo The field index in the record.
+ /// \param field The field declaration.
+ /// \param allowOverwrite Whether to allow overwriting existing values.
+ /// \param sizeSoFar The current size processed, updated by this function.
+ /// \param zeroFieldSize Set to true if the field has zero size.
+ /// \returns true on success, false if padding could not be applied.
+ bool applyZeroInitPadding(const ASTRecordLayout &layout, unsigned fieldNo,
+ const FieldDecl &field, bool allowOverwrite,
+ CharUnits &sizeSoFar, bool &zeroFieldSize);
+
+ /// Applies zero-initialization to trailing padding bytes in a record.
+ /// \param layout The record layout containing size information.
+ /// \param allowOverwrite Whether to allow overwriting existing values.
+ /// \param sizeSoFar The current size processed.
+ /// \returns true on success, false if padding could not be applied.
+ bool applyZeroInitPadding(const ASTRecordLayout &layout, bool allowOverwrite,
+ CharUnits &sizeSoFar);
+
bool build(InitListExpr *ile, bool allowOverwrite);
bool build(const APValue &val, const RecordDecl *rd, bool isPrimaryBase,
const CXXRecordDecl *vTableClass, CharUnits baseOffset);
@@ -548,6 +568,49 @@ bool ConstRecordBuilder::appendBitField(const FieldDecl *field,
allowOverwrite);
}
+bool ConstRecordBuilder::applyZeroInitPadding(
+ const ASTRecordLayout &layout, unsigned fieldNo, const FieldDecl &field,
+ bool allowOverwrite, CharUnits &sizeSoFar, bool &zeroFieldSize) {
+ uint64_t startBitOffset = layout.getFieldOffset(fieldNo);
+ CharUnits startOffset =
+ cgm.getASTContext().toCharUnitsFromBits(startBitOffset);
+ if (sizeSoFar < startOffset) {
+ if (!appendBytes(sizeSoFar, computePadding(cgm, startOffset - sizeSoFar),
+ allowOverwrite))
+ return false;
+ }
+
+ if (!field.isBitField()) {
+ CharUnits fieldSize =
+ cgm.getASTContext().getTypeSizeInChars(field.getType());
+ sizeSoFar = startOffset + fieldSize;
+ zeroFieldSize = fieldSize.isZero();
+ } else {
+ const CIRGenRecordLayout &rl =
+ cgm.getTypes().getCIRGenRecordLayout(field.getParent());
+ const CIRGenBitFieldInfo &info = rl.getBitFieldInfo(&field);
+ uint64_t endBitOffset = startBitOffset + info.size;
+ sizeSoFar = cgm.getASTContext().toCharUnitsFromBits(endBitOffset);
+ if (endBitOffset % cgm.getASTContext().getCharWidth() != 0)
+ sizeSoFar++;
+ zeroFieldSize = info.size == 0;
+ }
+ return true;
+}
+
+bool ConstRecordBuilder::applyZeroInitPadding(const ASTRecordLayout &layout,
+ bool allowOverwrite,
+ CharUnits &sizeSoFar) {
+ CharUnits totalSize = layout.getSize();
+ if (sizeSoFar < totalSize) {
+ if (!appendBytes(sizeSoFar, computePadding(cgm, totalSize - sizeSoFar),
+ allowOverwrite))
+ return false;
+ }
+ sizeSoFar = totalSize;
+ return true;
+}
+
bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
RecordDecl *rd = ile->getType()
->castAs<clang::RecordType>()
@@ -562,11 +625,9 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
if (cxxrd->getNumBases())
return false;
- if (cgm.shouldZeroInitPadding()) {
- assert(!cir::MissingFeatures::recordZeroInitPadding());
- cgm.errorNYI(rd->getSourceRange(), "zero init padding");
- return false;
- }
+ const bool zeroInitPadding = cgm.shouldZeroInitPadding();
+ bool zeroFieldSize = false;
+ CharUnits sizeSoFar = CharUnits::Zero();
unsigned elementNo = 0;
for (auto [index, field] : llvm::enumerate(rd->fields())) {
@@ -596,7 +657,10 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
continue;
}
- assert(!cir::MissingFeatures::recordZeroInitPadding());
+ if (zeroInitPadding &&
+ !applyZeroInitPadding(layout, index, *field, allowOverwrite, sizeSoFar,
+ zeroFieldSize))
+ return false;
// When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
// represents additional overwriting of our current constant value, and not
@@ -641,8 +705,8 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
}
}
- assert(!cir::MissingFeatures::recordZeroInitPadding());
- return true;
+ return !zeroInitPadding ||
+ applyZeroInitPadding(layout, allowOverwrite, sizeSoFar);
}
namespace {
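As a rough illustration of the kind of constant that previously hit the "zero init padding" NYI path, consider a record with interior and tail padding (hypothetical type, editor's sketch); the builder now emits explicit zero bytes for those gaps:
#include <cstdio>
struct Padded {
  char c;   // on common ABIs, followed by 3 bytes of interior padding
  int i;
  short s;  // followed by 2 bytes of tail padding
};
// A constant initializer like this is now built roughly as
//   { 'a', <3 zero bytes>, 1, 2, <2 zero bytes> }
// when zero-initialized padding is requested.
constexpr Padded p{'a', 1, 2};
int main() {
  std::printf("sizeof(Padded) = %zu, payload = %zu bytes\n", sizeof(Padded),
              sizeof(char) + sizeof(int) + sizeof(short));
  return 0;
}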
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 7edd83e..637f9ef 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1916,6 +1916,11 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
return builder.createIntToPtr(middleVal, destCIRTy);
}
+ case CK_Dynamic: {
+ Address v = cgf.emitPointerWithAlignment(subExpr);
+ const auto *dce = cast<CXXDynamicCastExpr>(ce);
+ return cgf.emitDynamicCast(v, dce);
+ }
case CK_ArrayToPointerDecay:
return cgf.emitArrayToPointerDecay(subExpr).getPointer();
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index db2adc2..7a606ee 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1312,6 +1312,8 @@ public:
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
+ mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
+
/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location. The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 0418174..9e490c6d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -59,7 +59,11 @@ public:
void addImplicitStructorParams(CIRGenFunction &cgf, QualType &resTy,
FunctionArgList &params) override;
-
+ mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &cgf,
+ const CXXDestructorDecl *dd,
+ CXXDtorType type,
+ bool forVirtualBase,
+ bool delegating) override;
void emitCXXConstructors(const clang::CXXConstructorDecl *d) override;
void emitCXXDestructors(const clang::CXXDestructorDecl *d) override;
void emitCXXStructor(clang::GlobalDecl gd) override;
@@ -68,6 +72,8 @@ public:
CXXDtorType type, bool forVirtualBase,
bool delegating, Address thisAddr,
QualType thisTy) override;
+ void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
+ mlir::Value addr) override;
void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override;
void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) override;
@@ -116,6 +122,16 @@ public:
Address thisAddr, const CXXRecordDecl *classDecl,
const CXXRecordDecl *baseClassDecl) override;
+ // The traditional clang CodeGen emits calls to `__dynamic_cast` directly into
+ // LLVM in the `emitDynamicCastCall` function. In CIR, `dynamic_cast`
+ // expressions are lowered to `cir.dyn_cast` ops instead of calls to runtime
+ // functions. So during CIRGen we don't need the `emitDynamicCastCall`
+ // function that clang CodeGen has.
+ mlir::Value emitDynamicCast(CIRGenFunction &cgf, mlir::Location loc,
+ QualType srcRecordTy, QualType destRecordTy,
+ cir::PointerType destCIRTy, bool isRefCast,
+ Address src) override;
+
/**************************** RTTI Uniqueness ******************************/
protected:
/// Returns true if the ABI requires RTTI type_info objects to be unique
@@ -1492,11 +1508,8 @@ void CIRGenItaniumCXXABI::emitDestructorCall(
CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type,
bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) {
GlobalDecl gd(dd, type);
- if (needsVTTParameter(gd)) {
- cgm.errorNYI(dd->getSourceRange(), "emitDestructorCall: VTT");
- }
-
- mlir::Value vtt = nullptr;
+ mlir::Value vtt =
+ getCXXDestructorImplicitParam(cgf, dd, type, forVirtualBase, delegating);
ASTContext &astContext = cgm.getASTContext();
QualType vttTy = astContext.getPointerType(astContext.VoidPtrTy);
assert(!cir::MissingFeatures::appleKext());
@@ -1507,6 +1520,34 @@ void CIRGenItaniumCXXABI::emitDestructorCall(
vttTy, nullptr);
}
+void CIRGenItaniumCXXABI::registerGlobalDtor(const VarDecl *vd,
+ cir::FuncOp dtor,
+ mlir::Value addr) {
+ if (vd->isNoDestroy(cgm.getASTContext()))
+ return;
+
+ if (vd->getTLSKind()) {
+ cgm.errorNYI(vd->getSourceRange(), "registerGlobalDtor: TLS");
+ return;
+ }
+
+ // HLSL doesn't support atexit.
+ if (cgm.getLangOpts().HLSL) {
+ cgm.errorNYI(vd->getSourceRange(), "registerGlobalDtor: HLSL");
+ return;
+ }
+
+ // The default behavior is to use atexit. This is handled in lowering
+ // prepare. Nothing to be done for CIR here.
+}
+
+mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam(
+ CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type,
+ bool forVirtualBase, bool delegating) {
+ GlobalDecl gd(dd, type);
+ return cgf.getVTTParameter(gd, forVirtualBase, delegating);
+}
+
// The idea here is creating a separate block for the throw with an
// `UnreachableOp` as the terminator. So, we branch from the current block
// to the throw block and create a block for the remaining operations.
@@ -1796,3 +1837,143 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset(
}
return vbaseOffset;
}
+
+static cir::FuncOp getBadCastFn(CIRGenFunction &cgf) {
+ // Prototype: void __cxa_bad_cast();
+
+ // TODO(cir): set the calling convention of the runtime function.
+ assert(!cir::MissingFeatures::opFuncCallingConv());
+
+ cir::FuncType fnTy =
+ cgf.getBuilder().getFuncType({}, cgf.getBuilder().getVoidTy());
+ return cgf.cgm.createRuntimeFunction(fnTy, "__cxa_bad_cast");
+}
+
+// TODO(cir): This could be shared with classic codegen.
+static CharUnits computeOffsetHint(ASTContext &astContext,
+ const CXXRecordDecl *src,
+ const CXXRecordDecl *dst) {
+ CXXBasePaths paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+
+ // If Dst is not derived from Src we can skip the whole computation below and
+ // return that Src is not a public base of Dst. Record all inheritance paths.
+ if (!dst->isDerivedFrom(src, paths))
+ return CharUnits::fromQuantity(-2ULL);
+
+ unsigned numPublicPaths = 0;
+ CharUnits offset;
+
+ // Now walk all possible inheritance paths.
+ for (const CXXBasePath &path : paths) {
+ if (path.Access != AS_public) // Ignore non-public inheritance.
+ continue;
+
+ ++numPublicPaths;
+
+ for (const CXXBasePathElement &pathElement : path) {
+ // If the path contains a virtual base class we can't give any hint.
+ // -1: no hint.
+ if (pathElement.Base->isVirtual())
+ return CharUnits::fromQuantity(-1ULL);
+
+ if (numPublicPaths > 1) // Won't use offsets, skip computation.
+ continue;
+
+ // Accumulate the base class offsets.
+ const ASTRecordLayout &L =
+ astContext.getASTRecordLayout(pathElement.Class);
+ offset += L.getBaseClassOffset(
+ pathElement.Base->getType()->getAsCXXRecordDecl());
+ }
+ }
+
+ // -2: Src is not a public base of Dst.
+ if (numPublicPaths == 0)
+ return CharUnits::fromQuantity(-2ULL);
+
+ // -3: Src is a multiple public base type but never a virtual base type.
+ if (numPublicPaths > 1)
+ return CharUnits::fromQuantity(-3ULL);
+
+ // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
+ // Return the offset of Src from the origin of Dst.
+ return offset;
+}
+
+static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &cgf) {
+ // Prototype:
+ // void *__dynamic_cast(const void *sub,
+ // global_as const abi::__class_type_info *src,
+ // global_as const abi::__class_type_info *dst,
+ // std::ptrdiff_t src2dst_offset);
+
+ mlir::Type voidPtrTy = cgf.getBuilder().getVoidPtrTy();
+ mlir::Type rttiPtrTy = cgf.getBuilder().getUInt8PtrTy();
+ mlir::Type ptrDiffTy = cgf.convertType(cgf.getContext().getPointerDiffType());
+
+ // TODO(cir): mark the function as nowind willreturn readonly.
+ assert(!cir::MissingFeatures::opFuncNoUnwind());
+ assert(!cir::MissingFeatures::opFuncWillReturn());
+ assert(!cir::MissingFeatures::opFuncReadOnly());
+
+ // TODO(cir): set the calling convention of the runtime function.
+ assert(!cir::MissingFeatures::opFuncCallingConv());
+
+ cir::FuncType FTy = cgf.getBuilder().getFuncType(
+ {voidPtrTy, rttiPtrTy, rttiPtrTy, ptrDiffTy}, voidPtrTy);
+ return cgf.cgm.createRuntimeFunction(FTy, "__dynamic_cast");
+}
+
+static cir::DynamicCastInfoAttr emitDynamicCastInfo(CIRGenFunction &cgf,
+ mlir::Location loc,
+ QualType srcRecordTy,
+ QualType destRecordTy) {
+ auto srcRtti = mlir::cast<cir::GlobalViewAttr>(
+ cgf.cgm.getAddrOfRTTIDescriptor(loc, srcRecordTy));
+ auto destRtti = mlir::cast<cir::GlobalViewAttr>(
+ cgf.cgm.getAddrOfRTTIDescriptor(loc, destRecordTy));
+
+ cir::FuncOp runtimeFuncOp = getItaniumDynamicCastFn(cgf);
+ cir::FuncOp badCastFuncOp = getBadCastFn(cgf);
+ auto runtimeFuncRef = mlir::FlatSymbolRefAttr::get(runtimeFuncOp);
+ auto badCastFuncRef = mlir::FlatSymbolRefAttr::get(badCastFuncOp);
+
+ const CXXRecordDecl *srcDecl = srcRecordTy->getAsCXXRecordDecl();
+ const CXXRecordDecl *destDecl = destRecordTy->getAsCXXRecordDecl();
+ CharUnits offsetHint = computeOffsetHint(cgf.getContext(), srcDecl, destDecl);
+
+ mlir::Type ptrdiffTy = cgf.convertType(cgf.getContext().getPointerDiffType());
+ auto offsetHintAttr = cir::IntAttr::get(ptrdiffTy, offsetHint.getQuantity());
+
+ return cir::DynamicCastInfoAttr::get(srcRtti, destRtti, runtimeFuncRef,
+ badCastFuncRef, offsetHintAttr);
+}
+
+mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
+ mlir::Location loc,
+ QualType srcRecordTy,
+ QualType destRecordTy,
+ cir::PointerType destCIRTy,
+ bool isRefCast, Address src) {
+ bool isCastToVoid = destRecordTy.isNull();
+ assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference");
+
+ if (isCastToVoid) {
+ cgm.errorNYI(loc, "emitDynamicCastToVoid");
+ return {};
+ }
+
+ // If the destination is effectively final, the cast succeeds if and only
+ // if the dynamic type of the pointer is exactly the destination type.
+ if (destRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
+ cgf.cgm.getCodeGenOpts().OptimizationLevel > 0) {
+ cgm.errorNYI(loc, "emitExactDynamicCast");
+ return {};
+ }
+
+ cir::DynamicCastInfoAttr castInfo =
+ emitDynamicCastInfo(cgf, loc, srcRecordTy, destRecordTy);
+ return cgf.getBuilder().createDynCast(loc, src.getPointer(), destCIRTy,
+ isRefCast, castInfo);
+}
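For reference, a small self-contained program (hypothetical types, editor's sketch) showing the two dynamic_cast forms this ABI hook now lowers: the pointer form yields a null pointer on failure, while the reference form reaches the registered __cxa_bad_cast handler (which throws std::bad_cast):
#include <cassert>
struct Base { virtual ~Base() = default; };
struct Derived : Base { int payload = 7; };
struct Unrelated : Base {};
int main() {
  Derived d;
  Base *b = &d;
  // Pointer cast: lowered to cir.dyn_cast carrying the offset hint computed
  // by computeOffsetHint; failure produces a null pointer.
  assert(dynamic_cast<Derived *>(b) != nullptr);
  assert(dynamic_cast<Unrelated *>(b) == nullptr);
  // Reference cast: a failure here would call __cxa_bad_cast instead.
  Derived &r = dynamic_cast<Derived &>(*b);
  assert(r.payload == 7);
  return 0;
}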
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 910c8a9..fe1ea56 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -2079,6 +2079,29 @@ CIRGenModule::createCIRBuiltinFunction(mlir::Location loc, StringRef name,
return fnOp;
}
+cir::FuncOp CIRGenModule::createRuntimeFunction(cir::FuncType ty,
+ StringRef name, mlir::ArrayAttr,
+ [[maybe_unused]] bool isLocal,
+ bool assumeConvergent) {
+ if (assumeConvergent)
+ errorNYI("createRuntimeFunction: assumeConvergent");
+ if (isLocal)
+ errorNYI("createRuntimeFunction: local");
+
+ cir::FuncOp entry = getOrCreateCIRFunction(name, ty, GlobalDecl(),
+ /*forVtable=*/false);
+
+ if (entry) {
+ // TODO(cir): set the attributes of the function.
+ assert(!cir::MissingFeatures::setLLVMFunctionFEnvAttributes());
+ assert(!cir::MissingFeatures::opFuncCallingConv());
+ assert(!cir::MissingFeatures::opGlobalDLLImportExport());
+ entry.setDSOLocal(true);
+ }
+
+ return entry;
+}
+
mlir::SymbolTable::Visibility
CIRGenModule::getMLIRVisibility(cir::GlobalOp op) {
// MLIR doesn't accept public symbols declarations (only
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index c6a6681..f627bae 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -480,6 +480,10 @@ public:
cir::FuncType ty,
const clang::FunctionDecl *fd);
+ cir::FuncOp createRuntimeFunction(cir::FuncType ty, llvm::StringRef name,
+ mlir::ArrayAttr = {}, bool isLocal = false,
+ bool assumeConvergent = false);
+
static constexpr const char *builtinCoroId = "__builtin_coro_id";
/// Given a builtin id for a function like "__builtin_fabsf", return a
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
index 94d856b..84f5977 100644
--- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
@@ -327,9 +327,40 @@ cir::GlobalLinkageKind CIRGenModule::getVTableLinkage(const CXXRecordDecl *rd) {
llvm_unreachable("Should not have been asked to emit this");
}
}
+ // -fapple-kext mode does not support weak linkage, so we must use
+ // internal linkage.
+ if (astContext.getLangOpts().AppleKext)
+ return cir::GlobalLinkageKind::InternalLinkage;
+
+ auto discardableODRLinkage = cir::GlobalLinkageKind::LinkOnceODRLinkage;
+ auto nonDiscardableODRLinkage = cir::GlobalLinkageKind::WeakODRLinkage;
+ if (rd->hasAttr<DLLExportAttr>()) {
+ // Cannot discard exported vtables.
+ discardableODRLinkage = nonDiscardableODRLinkage;
+ } else if (rd->hasAttr<DLLImportAttr>()) {
+ // Imported vtables are available externally.
+ discardableODRLinkage = cir::GlobalLinkageKind::AvailableExternallyLinkage;
+ nonDiscardableODRLinkage =
+ cir::GlobalLinkageKind::AvailableExternallyLinkage;
+ }
+
+ switch (rd->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ return discardableODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration: {
+ errorNYI(rd->getSourceRange(),
+ "getVTableLinkage: explicit instantiation declaration");
+ return cir::GlobalLinkageKind::ExternalLinkage;
+ }
+
+ case TSK_ExplicitInstantiationDefinition:
+ return nonDiscardableODRLinkage;
+ }
- errorNYI(rd->getSourceRange(), "getVTableLinkage: no key function");
- return cir::GlobalLinkageKind::ExternalLinkage;
+ llvm_unreachable("Invalid TemplateSpecializationKind!");
}
cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *rd) {
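A small example (hypothetical template, editor's sketch) of the template-specialization kinds the new switch distinguishes when a class has no key function:
// Holder has no key function, so its vtable linkage is chosen from the
// template specialization kind of each instantiation.
template <typename T>
struct Holder {
  virtual ~Holder() = default;
  T value{};
};
Holder<int> a;                 // implicit instantiation -> linkonce_odr vtable
template struct Holder<long>;  // explicit instantiation definition -> weak_odr vtable
// extern template struct Holder<short>;  // explicit instantiation declaration:
//                                         // still NYI in CIR (see errorNYI above)
int main() { return 0; }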
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 66c1f76..67a72f5 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -108,9 +108,6 @@ public:
///
// \param flags cleanup kind.
virtual void emit(CIRGenFunction &cgf) = 0;
-
- // This is a placeholder until EHScope is implemented.
- virtual size_t getSize() const = 0;
};
private:
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index bc917d0..706e54f 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -41,6 +41,16 @@ static SmallString<128> getTransformedFileName(mlir::ModuleOp mlirModule) {
return fileName;
}
+/// Return the FuncOp called by `callOp`.
+static cir::FuncOp getCalledFunction(cir::CallOp callOp) {
+ mlir::SymbolRefAttr sym = llvm::dyn_cast_if_present<mlir::SymbolRefAttr>(
+ callOp.getCallableForCallee());
+ if (!sym)
+ return nullptr;
+ return dyn_cast_or_null<cir::FuncOp>(
+ mlir::SymbolTable::lookupNearestSymbolFrom(callOp, sym));
+}
+
namespace {
struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
LoweringPreparePass() = default;
@@ -69,6 +79,12 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
cir::FuncType type,
cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage);
+ cir::GlobalOp buildRuntimeVariable(
+ mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+ mlir::Type type,
+ cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage,
+ cir::VisibilityKind visibility = cir::VisibilityKind::Default);
+
///
/// AST related
/// -----------
@@ -90,6 +106,25 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
} // namespace
+cir::GlobalOp LoweringPreparePass::buildRuntimeVariable(
+ mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+ mlir::Type type, cir::GlobalLinkageKind linkage,
+ cir::VisibilityKind visibility) {
+ cir::GlobalOp g = dyn_cast_or_null<cir::GlobalOp>(
+ mlir::SymbolTable::lookupNearestSymbolFrom(
+ mlirModule, mlir::StringAttr::get(mlirModule->getContext(), name)));
+ if (!g) {
+ g = cir::GlobalOp::create(builder, loc, name, type);
+ g.setLinkageAttr(
+ cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage));
+ mlir::SymbolTable::setSymbolVisibility(
+ g, mlir::SymbolTable::Visibility::Private);
+ g.setGlobalVisibilityAttr(
+ cir::VisibilityAttr::get(builder.getContext(), visibility));
+ }
+ return g;
+}
+
cir::FuncOp LoweringPreparePass::buildRuntimeFunction(
mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
cir::FuncType type, cir::GlobalLinkageKind linkage) {
@@ -640,7 +675,8 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) {
// Create a variable initialization function.
CIRBaseBuilderTy builder(getContext());
builder.setInsertionPointAfter(op);
- auto fnType = cir::FuncType::get({}, builder.getVoidTy());
+ cir::VoidType voidTy = builder.getVoidTy();
+ auto fnType = cir::FuncType::get({}, voidTy);
FuncOp f = buildRuntimeFunction(builder, fnName, op.getLoc(), fnType,
cir::GlobalLinkageKind::InternalLinkage);
@@ -655,8 +691,57 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) {
// Register the destructor call with __cxa_atexit
mlir::Region &dtorRegion = op.getDtorRegion();
if (!dtorRegion.empty()) {
- assert(!cir::MissingFeatures::opGlobalDtorLowering());
- llvm_unreachable("dtor region lowering is NYI");
+ assert(!cir::MissingFeatures::astVarDeclInterface());
+ assert(!cir::MissingFeatures::opGlobalThreadLocal());
+ // Create a variable that binds the atexit to this shared object.
+ builder.setInsertionPointToStart(&mlirModule.getBodyRegion().front());
+ cir::GlobalOp handle = buildRuntimeVariable(
+ builder, "__dso_handle", op.getLoc(), builder.getI8Type(),
+ cir::GlobalLinkageKind::ExternalLinkage, cir::VisibilityKind::Hidden);
+
+ // Look for the destructor call in dtorBlock
+ mlir::Block &dtorBlock = dtorRegion.front();
+ cir::CallOp dtorCall;
+ for (auto op : reverse(dtorBlock.getOps<cir::CallOp>())) {
+ dtorCall = op;
+ break;
+ }
+ assert(dtorCall && "Expected a dtor call");
+ cir::FuncOp dtorFunc = getCalledFunction(dtorCall);
+ assert(dtorFunc && "Expected a dtor call");
+
+ // Create a runtime helper function:
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ auto voidPtrTy = cir::PointerType::get(voidTy);
+ auto voidFnTy = cir::FuncType::get({voidPtrTy}, voidTy);
+ auto voidFnPtrTy = cir::PointerType::get(voidFnTy);
+ auto handlePtrTy = cir::PointerType::get(handle.getSymType());
+ auto fnAtExitType =
+ cir::FuncType::get({voidFnPtrTy, voidPtrTy, handlePtrTy}, voidTy);
+ const char *nameAtExit = "__cxa_atexit";
+ cir::FuncOp fnAtExit =
+ buildRuntimeFunction(builder, nameAtExit, op.getLoc(), fnAtExitType);
+
+ // Replace the dtor call with a call to __cxa_atexit(&dtor, &var,
+ // &__dso_handle)
+ builder.setInsertionPointAfter(dtorCall);
+ mlir::Value args[3];
+ auto dtorPtrTy = cir::PointerType::get(dtorFunc.getFunctionType());
+ // dtorPtrTy
+ args[0] = cir::GetGlobalOp::create(builder, dtorCall.getLoc(), dtorPtrTy,
+ dtorFunc.getSymName());
+ args[0] = cir::CastOp::create(builder, dtorCall.getLoc(), voidFnPtrTy,
+ cir::CastKind::bitcast, args[0]);
+ args[1] =
+ cir::CastOp::create(builder, dtorCall.getLoc(), voidPtrTy,
+ cir::CastKind::bitcast, dtorCall.getArgOperand(0));
+ args[2] = cir::GetGlobalOp::create(builder, handle.getLoc(), handlePtrTy,
+ handle.getSymName());
+ builder.createCallOp(dtorCall.getLoc(), fnAtExit, args);
+ dtorCall->erase();
+ entryBB->getOperations().splice(entryBB->end(), dtorBlock.getOperations(),
+ dtorBlock.begin(),
+ std::prev(dtorBlock.end()));
}
// Replace cir.yield with cir.return
@@ -666,11 +751,12 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) {
mlir::Block &block = op.getCtorRegion().front();
yieldOp = &block.getOperations().back();
} else {
- assert(!cir::MissingFeatures::opGlobalDtorLowering());
- llvm_unreachable("dtor region lowering is NYI");
+ assert(!dtorRegion.empty());
+ mlir::Block &block = dtorRegion.front();
+ yieldOp = &block.getOperations().back();
}
- assert(isa<YieldOp>(*yieldOp));
+ assert(isa<cir::YieldOp>(*yieldOp));
cir::ReturnOp::create(builder, yieldOp->getLoc());
return f;
}
@@ -715,7 +801,10 @@ void LoweringPreparePass::buildGlobalCtorDtorList() {
mlir::ArrayAttr::get(&getContext(), globalCtors));
}
- assert(!cir::MissingFeatures::opGlobalDtorLowering());
+ // We will eventually need to populate a global_dtor list, but that's not
+ // needed for globals with destructors. It will only be needed for functions
+ // that are marked as global destructors with an attribute.
+ assert(!cir::MissingFeatures::opGlobalDtorList());
}
void LoweringPreparePass::buildCXXGlobalInitFunc() {
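The rewrite above replaces the dtor region's destructor call with a __cxa_atexit registration. A conceptual source-level equivalent, assuming an Itanium C++ ABI platform such as Linux (hypothetical Widget type and helper names, editor's sketch):
#include <cstdio>
extern "C" int __cxa_atexit(void (*dtor)(void *), void *obj, void *dso);
extern "C" void *__dso_handle;
struct Widget { int id; };
static Widget w{42};
static void destroyWidget(void *p) {
  std::printf("destroying widget %d\n", static_cast<Widget *>(p)->id);
}
// Stand-in for the generated initializer: run the ctor region, then replace
// the direct destructor call with a __cxa_atexit registration against this
// object's __dso_handle, much as the pass now does with CIR ops.
static int registered = (__cxa_atexit(destroyWidget, &w, &__dso_handle), 0);
int main() { return 0; }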
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index a80a295..a1ecfc7 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1771,9 +1771,13 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite(
}
// Rewrite op.
- rewriter.replaceOpWithNewOp<mlir::LLVM::GlobalOp>(
+ auto newOp = rewriter.replaceOpWithNewOp<mlir::LLVM::GlobalOp>(
op, llvmType, isConst, linkage, symbol, init.value_or(mlir::Attribute()),
alignment, addrSpace, isDsoLocal, isThreadLocal, comdatAttr, attributes);
+ newOp.setVisibility_Attr(mlir::LLVM::VisibilityAttr::get(
+ getContext(), lowerCIRVisibilityToLLVMVisibility(
+ op.getGlobalVisibilityAttr().getValue())));
+
return mlir::success();
}
@@ -2594,6 +2598,7 @@ void ConvertCIRToLLVMPass::runOnOperation() {
return std::make_pair(ctorAttr.getName(),
ctorAttr.getPriority());
});
+ assert(!cir::MissingFeatures::opGlobalDtorList());
}
mlir::LogicalResult CIRToLLVMBrOpLowering::matchAndRewrite(
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index a931ce4..c5371e4 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -3018,8 +3018,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
ArgNo = 0;
if (AddedPotentialArgAccess && MemAttrForPtrArgs) {
- llvm::FunctionType *FunctionType = FunctionType =
- getTypes().GetFunctionType(FI);
+ llvm::FunctionType *FunctionType = getTypes().GetFunctionType(FI);
for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
E = FI.arg_end();
I != E; ++I, ++ArgNo) {
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index b91cb36..9fe9a13 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -900,10 +900,13 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
assert((BT->getKind() != BuiltinType::SveCount || Info.NumVectors == 1) &&
"Unsupported number of vectors for svcount_t");
- // Debuggers can't extract 1bit from a vector, so will display a
- // bitpattern for predicates instead.
unsigned NumElems = Info.EC.getKnownMinValue() * Info.NumVectors;
- if (Info.ElementType == CGM.getContext().BoolTy) {
+ llvm::Metadata *BitStride = nullptr;
+ if (BT->getKind() == BuiltinType::SveBool) {
+ Info.ElementType = CGM.getContext().UnsignedCharTy;
+ BitStride = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), 1));
+ } else if (BT->getKind() == BuiltinType::SveCount) {
NumElems /= 8;
Info.ElementType = CGM.getContext().UnsignedCharTy;
}
@@ -929,7 +932,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
getOrCreateType(Info.ElementType, TheCU->getFile());
auto Align = getTypeAlignIfRequired(BT, CGM.getContext());
return DBuilder.createVectorType(/*Size*/ 0, Align, ElemTy,
- SubscriptArray);
+ SubscriptArray, BitStride);
}
// It doesn't make sense to generate debug info for PowerPC MMA vector types.
// So we return a safe type here to avoid generating an error.
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 7dd6a83..e8255b0 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -30,6 +30,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
@@ -1353,6 +1354,115 @@ void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, QualType AllocType) {
CB->setMetadata(llvm::LLVMContext::MD_alloc_token, MDN);
}
+namespace {
+/// Infer type from a simple sizeof expression.
+QualType inferTypeFromSizeofExpr(const Expr *E) {
+ const Expr *Arg = E->IgnoreParenImpCasts();
+ if (const auto *UET = dyn_cast<UnaryExprOrTypeTraitExpr>(Arg)) {
+ if (UET->getKind() == UETT_SizeOf) {
+ if (UET->isArgumentType())
+ return UET->getArgumentTypeInfo()->getType();
+ else
+ return UET->getArgumentExpr()->getType();
+ }
+ }
+ return QualType();
+}
+
+/// Infer type from an arithmetic expression involving a sizeof. For example:
+///
+/// malloc(sizeof(MyType) + padding); // infers 'MyType'
+/// malloc(sizeof(MyType) * 32); // infers 'MyType'
+/// malloc(32 * sizeof(MyType)); // infers 'MyType'
+/// malloc(sizeof(MyType) << 1); // infers 'MyType'
+/// ...
+///
+/// More complex arithmetic expressions are supported, but inference is a heuristic, e.g.
+/// when considering allocations for structs with flexible array members:
+///
+/// malloc(sizeof(HasFlexArray) + sizeof(int) * 32); // infers 'HasFlexArray'
+///
+QualType inferPossibleTypeFromArithSizeofExpr(const Expr *E) {
+ const Expr *Arg = E->IgnoreParenImpCasts();
+ // The argument is a lone sizeof expression.
+ if (QualType T = inferTypeFromSizeofExpr(Arg); !T.isNull())
+ return T;
+ if (const auto *BO = dyn_cast<BinaryOperator>(Arg)) {
+ // Argument is an arithmetic expression. Cover common arithmetic patterns
+ // involving sizeof.
+ switch (BO->getOpcode()) {
+ case BO_Add:
+ case BO_Div:
+ case BO_Mul:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_Sub:
+ if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getLHS());
+ !T.isNull())
+ return T;
+ if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getRHS());
+ !T.isNull())
+ return T;
+ break;
+ default:
+ break;
+ }
+ }
+ return QualType();
+}
+
+/// If the expression E is a reference to a variable, infer the type from a
+/// variable's initializer if it contains a sizeof. Beware, this is a heuristic
+/// and does not account for the variable being reassigned later. For example:
+///
+/// size_t my_size = sizeof(MyType);
+/// void *x = malloc(my_size); // infers 'MyType'
+///
+QualType inferPossibleTypeFromVarInitSizeofExpr(const Expr *E) {
+ const Expr *Arg = E->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (const Expr *Init = VD->getInit())
+ return inferPossibleTypeFromArithSizeofExpr(Init);
+ }
+ }
+ return QualType();
+}
+
+/// Deduces the allocated type by checking if the allocation call's result
+/// is immediately used in a cast expression. For example:
+///
+/// MyType *x = (MyType *)malloc(4096); // infers 'MyType'
+///
+QualType inferPossibleTypeFromCastExpr(const CallExpr *CallE,
+ const CastExpr *CastE) {
+ if (!CastE)
+ return QualType();
+ QualType PtrType = CastE->getType();
+ if (PtrType->isPointerType())
+ return PtrType->getPointeeType();
+ return QualType();
+}
+} // end anonymous namespace
+
+void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, const CallExpr *E) {
+ QualType AllocType;
+ // First check arguments.
+ for (const Expr *Arg : E->arguments()) {
+ AllocType = inferPossibleTypeFromArithSizeofExpr(Arg);
+ if (AllocType.isNull())
+ AllocType = inferPossibleTypeFromVarInitSizeofExpr(Arg);
+ if (!AllocType.isNull())
+ break;
+ }
+ // Then check later casts.
+ if (AllocType.isNull())
+ AllocType = inferPossibleTypeFromCastExpr(E, CurCast);
+ // Emit if we were able to infer the type.
+ if (!AllocType.isNull())
+ EmitAllocToken(CB, AllocType);
+}
+
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
@@ -5723,6 +5833,9 @@ LValue CodeGenFunction::EmitConditionalOperatorLValue(
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+ auto RestoreCurCast =
+ llvm::make_scope_exit([this, Prev = CurCast] { CurCast = Prev; });
+ CurCast = E;
switch (E->getCastKind()) {
case CK_ToVoid:
case CK_BitCast:
@@ -6668,16 +6781,24 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType,
RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
E == MustTailCall, E->getExprLoc());
- // Generate function declaration DISuprogram in order to be used
- // in debug info about call sites.
- if (CGDebugInfo *DI = getDebugInfo()) {
- if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ // Generate function declaration DISubprogram in order to be used
+ // in debug info about call sites.
+ if (CGDebugInfo *DI = getDebugInfo()) {
FunctionArgList Args;
QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
DI->getFunctionType(CalleeDecl, ResTy, Args),
CalleeDecl);
}
+ if (CalleeDecl->hasAttr<RestrictAttr>() ||
+ CalleeDecl->hasAttr<AllocSizeAttr>()) {
+ // Function has 'malloc' (aka. 'restrict') or 'alloc_size' attribute.
+ if (SanOpts.has(SanitizerKind::AllocToken)) {
+ // Set !alloc_token metadata.
+ EmitAllocToken(LocalCallOrInvoke, E);
+ }
+ }
}
if (CallOrInvoke)
*CallOrInvoke = LocalCallOrInvoke;
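
The EmitAllocToken(CB, E) overload added above only fires when the AllocToken sanitizer is enabled and the callee carries the restrict/malloc or alloc_size attribute; the type it records comes from the three inference helpers in the anonymous namespace. A minimal caller-side sketch of the patterns those helpers are written to recognize (MyType and the local names are hypothetical, and the inference is explicitly best-effort):

    // Hypothetical allocation sites; each comment names the heuristic that applies.
    #include <cstdlib>

    struct MyType { int a, b; };

    void allocation_examples() {
      void *p1 = std::malloc(sizeof(MyType) * 32);  // arithmetic over sizeof -> 'MyType'
      std::size_t n = sizeof(MyType);
      void *p2 = std::malloc(n);                    // sizeof in the variable's initializer -> 'MyType'
      MyType *p3 = (MyType *)std::malloc(4096);     // cast applied to the call result -> 'MyType'
      std::free(p1);
      std::free(p2);
      std::free(p3);
    }

The initializer-based case is the weakest of the three: as the doc comment above notes, a later reassignment of n would not be observed.
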
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 290c2e0..31ac266 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -1371,8 +1371,16 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
if (auto *FD = dyn_cast<FunctionDecl>(Decl))
- if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
- return EmitNewDeleteCall(*this, FD, Type, Args);
+ if (Ctx.hasSameType(FD->getType(), QualType(Type, 0))) {
+ RValue RV = EmitNewDeleteCall(*this, FD, Type, Args);
+ if (auto *CB = dyn_cast_if_present<llvm::CallBase>(RV.getScalarVal())) {
+ if (SanOpts.has(SanitizerKind::AllocToken)) {
+ // Set !alloc_token metadata.
+ EmitAllocToken(CB, TheCall);
+ }
+ }
+ return RV;
+ }
llvm_unreachable("predeclared global operator new/delete is missing");
}
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 06d9d81..715160d 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -33,6 +33,7 @@
#include "clang/Basic/DiagnosticTrap.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
@@ -2434,6 +2435,10 @@ static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue SrcVal,
// have to handle a more broad range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
+ auto RestoreCurCast =
+ llvm::make_scope_exit([this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
+ CGF.CurCast = CE;
+
Expr *E = CE->getSubExpr();
QualType DestTy = CE->getType();
CastKind Kind = CE->getCastKind();
diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
index 6c0fc8d..4f2f5a76 100644
--- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp
+++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
@@ -352,6 +352,19 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
SmallVector<Value *> Args{OrderID, SpaceOp, RangeOp, IndexOp, Name};
return Builder.CreateIntrinsic(HandleTy, IntrinsicID, Args);
}
+ case Builtin::BI__builtin_hlsl_resource_counterhandlefromimplicitbinding: {
+ Value *MainHandle = EmitScalarExpr(E->getArg(0));
+ if (!CGM.getTriple().isSPIRV())
+ return MainHandle;
+
+ llvm::Type *HandleTy = CGM.getTypes().ConvertType(E->getType());
+ Value *OrderID = EmitScalarExpr(E->getArg(1));
+ Value *SpaceOp = EmitScalarExpr(E->getArg(2));
+ llvm::Intrinsic::ID IntrinsicID =
+ llvm::Intrinsic::spv_resource_counterhandlefromimplicitbinding;
+ SmallVector<Value *> Args{MainHandle, OrderID, SpaceOp};
+ return Builder.CreateIntrinsic(HandleTy, IntrinsicID, Args);
+ }
case Builtin::BI__builtin_hlsl_resource_nonuniformindex: {
Value *IndexOp = EmitScalarExpr(E->getArg(0));
llvm::Type *RetTy = ConvertType(E->getType());
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index ede1780..603cef9 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -145,19 +145,29 @@ static CXXMethodDecl *lookupResourceInitMethodAndSetupArgs(
// explicit binding
auto *RegSlot = llvm::ConstantInt::get(CGM.IntTy, Binding.getSlot());
Args.add(RValue::get(RegSlot), AST.UnsignedIntTy);
- CreateMethod = lookupMethod(ResourceDecl, "__createFromBinding", SC_Static);
+ const char *Name = Binding.hasCounterImplicitOrderID()
+ ? "__createFromBindingWithImplicitCounter"
+ : "__createFromBinding";
+ CreateMethod = lookupMethod(ResourceDecl, Name, SC_Static);
} else {
// implicit binding
auto *OrderID =
llvm::ConstantInt::get(CGM.IntTy, Binding.getImplicitOrderID());
Args.add(RValue::get(OrderID), AST.UnsignedIntTy);
- CreateMethod =
- lookupMethod(ResourceDecl, "__createFromImplicitBinding", SC_Static);
+ const char *Name = Binding.hasCounterImplicitOrderID()
+ ? "__createFromImplicitBindingWithImplicitCounter"
+ : "__createFromImplicitBinding";
+ CreateMethod = lookupMethod(ResourceDecl, Name, SC_Static);
}
Args.add(RValue::get(Space), AST.UnsignedIntTy);
Args.add(RValue::get(Range), AST.IntTy);
Args.add(RValue::get(Index), AST.UnsignedIntTy);
Args.add(RValue::get(NameStr), AST.getPointerType(AST.CharTy.withConst()));
+ if (Binding.hasCounterImplicitOrderID()) {
+ uint32_t CounterBinding = Binding.getCounterImplicitOrderID();
+ auto *CounterOrderID = llvm::ConstantInt::get(CGM.IntTy, CounterBinding);
+ Args.add(RValue::get(CounterOrderID), AST.UnsignedIntTy);
+ }
return CreateMethod;
}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 4272d8b..3613b6a 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -869,6 +869,8 @@ CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
CGM.getLangOpts().OpenMPOffloadMandatory,
/*HasRequiresReverseOffload*/ false, /*HasRequiresUnifiedAddress*/ false,
hasRequiresUnifiedSharedMemory(), /*HasRequiresDynamicAllocators*/ false);
+ Config.setDefaultTargetAS(
+ CGM.getContext().getTargetInfo().getTargetAddressSpace(LangAS::Default));
OMPBuilder.setConfig(Config);
if (!CGM.getLangOpts().OpenMPIsTargetDevice)
@@ -1243,7 +1245,10 @@ void CGOpenMPRuntimeGPU::emitParallelCall(
llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
if (WFn)
ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
- llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);
+ llvm::Type *FnPtrTy = llvm::PointerType::get(
+ CGF.getLLVMContext(), CGM.getDataLayout().getProgramAddressSpace());
+
+ llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, FnPtrTy);
// Create a private scope that will globalize the arguments
// passed from the outside of the target region.
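
The bitcast change above matters on targets whose data layout declares a non-zero program memory address space (the P<n> component of the layout string); the generic CGM.Int8PtrTy would yield a pointer in the default address space instead. A small standalone sketch, using a hypothetical layout string, of how such a function-pointer type is obtained through the LLVM C++ API:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("demo", Ctx);
      // Hypothetical layout: little endian, program (code) pointers in address space 1.
      M.setDataLayout("e-P1");
      unsigned ProgAS = M.getDataLayout().getProgramAddressSpace(); // 1 here
      llvm::PointerType *FnPtrTy = llvm::PointerType::get(Ctx, ProgAS);
      (void)FnPtrTy; // would serve as the destination type of the function bitcast
      return 0;
    }
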
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index e14e60c..1f0be2d 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -346,6 +346,10 @@ public:
QualType FnRetTy;
llvm::Function *CurFn = nullptr;
+ /// If a cast expression is being visited, this holds the current cast's
+ /// expression.
+ const CastExpr *CurCast = nullptr;
+
/// Save Parameter Decl for coroutine.
llvm::SmallVector<const ParmVarDecl *, 4> FnArgs;
@@ -3350,6 +3354,9 @@ public:
/// Emit additional metadata used by the AllocToken instrumentation.
void EmitAllocToken(llvm::CallBase *CB, QualType AllocType);
+ /// Emit additional metadata used by the AllocToken instrumentation,
+ /// inferring the type from an allocation call expression.
+ void EmitAllocToken(llvm::CallBase *CB, const CallExpr *E);
llvm::Value *GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD,
const FieldDecl *CountDecl);
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index 234683f..d2356eb 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -1609,7 +1609,12 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
if (Sanitize.needsFuzzer() && !Args.hasArg(options::OPT_dynamiclib)) {
AddLinkSanitizerLibArgs(Args, CmdArgs, "fuzzer", /*shared=*/false);
- // Libfuzzer is written in C++ and requires libcxx.
+ // Libfuzzer is written in C++ and requires libcxx.
+ // Since darwin::Linker::ConstructJob already adds -lc++ for clang++
+ // by default if ShouldLinkCXXStdlib(Args), we only add the C++ standard
+ // library here when !ShouldLinkCXXStdlib(Args). This avoids duplicate
+ // library errors on Darwin.
+ if (!ShouldLinkCXXStdlib(Args))
AddCXXStdlibLibArgs(Args, CmdArgs);
}
if (Sanitize.needsStatsRt()) {
diff --git a/clang/lib/Driver/ToolChains/HIPAMD.cpp b/clang/lib/Driver/ToolChains/HIPAMD.cpp
index 5f3fbea..c0c8afe 100644
--- a/clang/lib/Driver/ToolChains/HIPAMD.cpp
+++ b/clang/lib/Driver/ToolChains/HIPAMD.cpp
@@ -168,9 +168,12 @@ void AMDGCN::Linker::constructLinkAndEmitSpirvCommand(
const InputInfo &Output, const llvm::opt::ArgList &Args) const {
assert(!Inputs.empty() && "Must have at least one input.");
- constructLlvmLinkCommand(C, JA, Inputs, Output, Args);
+ std::string LinkedBCFilePrefix(
+ Twine(llvm::sys::path::stem(Output.getFilename()), "-linked").str());
+ const char *LinkedBCFilePath = HIP::getTempFile(C, LinkedBCFilePrefix, "bc");
+ InputInfo LinkedBCFile(&JA, LinkedBCFilePath, Output.getBaseInput());
- // Linked BC is now in Output
+ constructLlvmLinkCommand(C, JA, Inputs, LinkedBCFile, Args);
// Emit SPIR-V binary.
llvm::opt::ArgStringList TrArgs{
@@ -180,7 +183,7 @@ void AMDGCN::Linker::constructLinkAndEmitSpirvCommand(
"--spirv-lower-const-expr",
"--spirv-preserve-auxdata",
"--spirv-debug-info-version=nonsemantic-shader-200"};
- SPIRV::constructTranslateCommand(C, *this, JA, Output, Output, TrArgs);
+ SPIRV::constructTranslateCommand(C, *this, JA, Output, LinkedBCFile, TrArgs);
}
// For amdgcn the inputs of the linker job are device bitcode and output is
diff --git a/clang/lib/Driver/ToolChains/HIPSPV.cpp b/clang/lib/Driver/ToolChains/HIPSPV.cpp
index 62bca04..bce7f46 100644
--- a/clang/lib/Driver/ToolChains/HIPSPV.cpp
+++ b/clang/lib/Driver/ToolChains/HIPSPV.cpp
@@ -22,17 +22,6 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-// Convenience function for creating temporary file for both modes of
-// isSaveTempsEnabled().
-static const char *getTempFile(Compilation &C, StringRef Prefix,
- StringRef Extension) {
- if (C.getDriver().isSaveTempsEnabled()) {
- return C.getArgs().MakeArgString(Prefix + "." + Extension);
- }
- auto TmpFile = C.getDriver().GetTemporaryPath(Prefix, Extension);
- return C.addTempFile(C.getArgs().MakeArgString(TmpFile));
-}
-
// Locates HIP pass plugin.
static std::string findPassPlugin(const Driver &D,
const llvm::opt::ArgList &Args) {
@@ -65,7 +54,7 @@ void HIPSPV::Linker::constructLinkAndEmitSpirvCommand(
assert(!Inputs.empty() && "Must have at least one input.");
std::string Name = std::string(llvm::sys::path::stem(Output.getFilename()));
- const char *TempFile = getTempFile(C, Name + "-link", "bc");
+ const char *TempFile = HIP::getTempFile(C, Name + "-link", "bc");
// Link LLVM bitcode.
ArgStringList LinkArgs{};
@@ -93,7 +82,7 @@ void HIPSPV::Linker::constructLinkAndEmitSpirvCommand(
auto PassPluginPath = findPassPlugin(C.getDriver(), Args);
if (!PassPluginPath.empty()) {
const char *PassPathCStr = C.getArgs().MakeArgString(PassPluginPath);
- const char *OptOutput = getTempFile(C, Name + "-lower", "bc");
+ const char *OptOutput = HIP::getTempFile(C, Name + "-lower", "bc");
ArgStringList OptArgs{TempFile, "-load-pass-plugin",
PassPathCStr, "-passes=hip-post-link-passes",
"-o", OptOutput};
diff --git a/clang/lib/Driver/ToolChains/HIPUtility.cpp b/clang/lib/Driver/ToolChains/HIPUtility.cpp
index cb061ff..732403e 100644
--- a/clang/lib/Driver/ToolChains/HIPUtility.cpp
+++ b/clang/lib/Driver/ToolChains/HIPUtility.cpp
@@ -472,3 +472,14 @@ void HIP::constructGenerateObjFileFromHIPFatBinary(
D.getClangProgramPath(), ClangArgs,
Inputs, Output, D.getPrependArg()));
}
+
+// Convenience function for creating a temporary file for both modes of
+// isSaveTempsEnabled().
+const char *HIP::getTempFile(Compilation &C, StringRef Prefix,
+ StringRef Extension) {
+ if (C.getDriver().isSaveTempsEnabled()) {
+ return C.getArgs().MakeArgString(Prefix + "." + Extension);
+ }
+ auto TmpFile = C.getDriver().GetTemporaryPath(Prefix, Extension);
+ return C.addTempFile(C.getArgs().MakeArgString(TmpFile));
+}
diff --git a/clang/lib/Driver/ToolChains/HIPUtility.h b/clang/lib/Driver/ToolChains/HIPUtility.h
index 29e5a92..55c155e 100644
--- a/clang/lib/Driver/ToolChains/HIPUtility.h
+++ b/clang/lib/Driver/ToolChains/HIPUtility.h
@@ -16,6 +16,8 @@ namespace driver {
namespace tools {
namespace HIP {
+const char *getTempFile(Compilation &C, StringRef Prefix, StringRef Extension);
+
// Construct command for creating HIP fatbin.
void constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
StringRef OutputFileName,
diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp
index a2c6957..90191b0 100644
--- a/clang/lib/Parse/ParseExprCXX.cpp
+++ b/clang/lib/Parse/ParseExprCXX.cpp
@@ -3200,6 +3200,8 @@ ExprResult Parser::ParseRequiresExpression() {
BalancedDelimiterTracker ExprBraces(*this, tok::l_brace);
ExprBraces.consumeOpen();
ExprResult Expression = ParseExpression();
+ if (Expression.isUsable())
+ Expression = Actions.CheckPlaceholderExpr(Expression.get());
if (!Expression.isUsable()) {
ExprBraces.skipToEnd();
SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
@@ -3369,6 +3371,8 @@ ExprResult Parser::ParseRequiresExpression() {
// expression ';'
SourceLocation StartLoc = Tok.getLocation();
ExprResult Expression = ParseExpression();
+ if (Expression.isUsable())
+ Expression = Actions.CheckPlaceholderExpr(Expression.get());
if (!Expression.isUsable()) {
SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
break;
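
Both hunks above now run CheckPlaceholderExpr on the parsed requirement before testing isUsable(), so requirement expressions with placeholder types are resolved (or rejected) rather than silently passing through. For orientation only, a generic C++20 requires-expression whose simple- and compound-requirements are what ParseExpression produces at these two call sites (an illustration, not the specific case the change targets):

    #include <concepts>

    // Generic illustration of simple- and compound-requirements.
    template <typename T>
    concept Addable = requires(T a, T b) {
      a + b;                                // simple-requirement: the expression must be valid
      { a + b } -> std::convertible_to<T>;  // compound-requirement with a return-type-requirement
    };

    static_assert(Addable<int>);
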
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
index 3c20ccd..40c318a 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
@@ -144,6 +144,7 @@ private:
_2,
_3,
_4,
+ _5,
Handle = 128,
CounterHandle,
LastStmt
@@ -190,6 +191,9 @@ public:
template <typename T>
BuiltinTypeMethodBuilder &
accessCounterHandleFieldOnResource(T ResourceRecord);
+ template <typename ResourceT, typename ValueT>
+ BuiltinTypeMethodBuilder &
+ setCounterHandleFieldOnResource(ResourceT ResourceRecord, ValueT HandleValue);
template <typename T> BuiltinTypeMethodBuilder &returnValue(T ReturnValue);
BuiltinTypeMethodBuilder &returnThis();
BuiltinTypeDeclBuilder &finalize();
@@ -205,6 +209,11 @@ private:
if (!Method)
createDecl();
}
+
+ template <typename ResourceT, typename ValueT>
+ BuiltinTypeMethodBuilder &setFieldOnResource(ResourceT ResourceRecord,
+ ValueT HandleValue,
+ FieldDecl *HandleField);
};
TemplateParameterListBuilder::~TemplateParameterListBuilder() {
@@ -592,13 +601,27 @@ template <typename ResourceT, typename ValueT>
BuiltinTypeMethodBuilder &
BuiltinTypeMethodBuilder::setHandleFieldOnResource(ResourceT ResourceRecord,
ValueT HandleValue) {
+ return setFieldOnResource(ResourceRecord, HandleValue,
+ DeclBuilder.getResourceHandleField());
+}
+
+template <typename ResourceT, typename ValueT>
+BuiltinTypeMethodBuilder &
+BuiltinTypeMethodBuilder::setCounterHandleFieldOnResource(
+ ResourceT ResourceRecord, ValueT HandleValue) {
+ return setFieldOnResource(ResourceRecord, HandleValue,
+ DeclBuilder.getResourceCounterHandleField());
+}
+
+template <typename ResourceT, typename ValueT>
+BuiltinTypeMethodBuilder &BuiltinTypeMethodBuilder::setFieldOnResource(
+ ResourceT ResourceRecord, ValueT HandleValue, FieldDecl *HandleField) {
ensureCompleteDecl();
Expr *ResourceExpr = convertPlaceholder(ResourceRecord);
Expr *HandleValueExpr = convertPlaceholder(HandleValue);
ASTContext &AST = DeclBuilder.SemaRef.getASTContext();
- FieldDecl *HandleField = DeclBuilder.getResourceHandleField();
MemberExpr *HandleMemberExpr = MemberExpr::CreateImplicit(
AST, ResourceExpr, false, HandleField, HandleField->getType(), VK_LValue,
OK_Ordinary);
@@ -829,6 +852,18 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addDefaultHandleConstructor() {
.finalize();
}
+BuiltinTypeDeclBuilder &
+BuiltinTypeDeclBuilder::addStaticInitializationFunctions(bool HasCounter) {
+ if (HasCounter) {
+ addCreateFromBindingWithImplicitCounter();
+ addCreateFromImplicitBindingWithImplicitCounter();
+ } else {
+ addCreateFromBinding();
+ addCreateFromImplicitBinding();
+ }
+ return *this;
+}
+
// Adds static method that initializes resource from binding:
//
// static Resource<T> __createFromBinding(unsigned registerNo,
@@ -903,6 +938,102 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCreateFromImplicitBinding() {
.finalize();
}
+// Adds static method that initializes resource from binding:
+//
+// static Resource<T>
+// __createFromBindingWithImplicitCounter(unsigned registerNo,
+// unsigned spaceNo, int range,
+// unsigned index, const char *name,
+// unsigned counterOrderId) {
+// Resource<T> tmp;
+// tmp.__handle = __builtin_hlsl_resource_handlefrombinding(
+// tmp.__handle, registerNo, spaceNo, range, index, name);
+// tmp.__counter_handle =
+// __builtin_hlsl_resource_counterhandlefromimplicitbinding(
+// tmp.__handle, counterOrderId, spaceNo);
+// return tmp;
+// }
+BuiltinTypeDeclBuilder &
+BuiltinTypeDeclBuilder::addCreateFromBindingWithImplicitCounter() {
+ assert(!Record->isCompleteDefinition() && "record is already complete");
+
+ using PH = BuiltinTypeMethodBuilder::PlaceHolder;
+ ASTContext &AST = SemaRef.getASTContext();
+ QualType HandleType = getResourceHandleField()->getType();
+ QualType RecordType = AST.getTypeDeclType(cast<TypeDecl>(Record));
+ BuiltinTypeMethodBuilder::LocalVar TmpVar("tmp", RecordType);
+
+ return BuiltinTypeMethodBuilder(*this,
+ "__createFromBindingWithImplicitCounter",
+ RecordType, false, false, SC_Static)
+ .addParam("registerNo", AST.UnsignedIntTy)
+ .addParam("spaceNo", AST.UnsignedIntTy)
+ .addParam("range", AST.IntTy)
+ .addParam("index", AST.UnsignedIntTy)
+ .addParam("name", AST.getPointerType(AST.CharTy.withConst()))
+ .addParam("counterOrderId", AST.UnsignedIntTy)
+ .declareLocalVar(TmpVar)
+ .accessHandleFieldOnResource(TmpVar)
+ .callBuiltin("__builtin_hlsl_resource_handlefrombinding", HandleType,
+ PH::LastStmt, PH::_0, PH::_1, PH::_2, PH::_3, PH::_4)
+ .setHandleFieldOnResource(TmpVar, PH::LastStmt)
+ .accessHandleFieldOnResource(TmpVar)
+ .callBuiltin("__builtin_hlsl_resource_counterhandlefromimplicitbinding",
+ HandleType, PH::LastStmt, PH::_5, PH::_1)
+ .setCounterHandleFieldOnResource(TmpVar, PH::LastStmt)
+ .returnValue(TmpVar)
+ .finalize();
+}
+
+// Adds static method that initializes resource from implicit binding:
+//
+// static Resource<T>
+// __createFromImplicitBindingWithImplicitCounter(unsigned orderId,
+// unsigned spaceNo, int range,
+// unsigned index,
+// const char *name,
+// unsigned counterOrderId) {
+// Resource<T> tmp;
+// tmp.__handle = __builtin_hlsl_resource_handlefromimplicitbinding(
+// tmp.__handle, orderId, spaceNo, range, index, name);
+// tmp.__counter_handle =
+// __builtin_hlsl_resource_counterhandlefromimplicitbinding(
+// tmp.__handle, counterOrderId, spaceNo);
+// return tmp;
+// }
+BuiltinTypeDeclBuilder &
+BuiltinTypeDeclBuilder::addCreateFromImplicitBindingWithImplicitCounter() {
+ assert(!Record->isCompleteDefinition() && "record is already complete");
+
+ using PH = BuiltinTypeMethodBuilder::PlaceHolder;
+ ASTContext &AST = SemaRef.getASTContext();
+ QualType HandleType = getResourceHandleField()->getType();
+ QualType RecordType = AST.getTypeDeclType(cast<TypeDecl>(Record));
+ BuiltinTypeMethodBuilder::LocalVar TmpVar("tmp", RecordType);
+
+ return BuiltinTypeMethodBuilder(
+ *this, "__createFromImplicitBindingWithImplicitCounter",
+ RecordType, false, false, SC_Static)
+ .addParam("orderId", AST.UnsignedIntTy)
+ .addParam("spaceNo", AST.UnsignedIntTy)
+ .addParam("range", AST.IntTy)
+ .addParam("index", AST.UnsignedIntTy)
+ .addParam("name", AST.getPointerType(AST.CharTy.withConst()))
+ .addParam("counterOrderId", AST.UnsignedIntTy)
+ .declareLocalVar(TmpVar)
+ .accessHandleFieldOnResource(TmpVar)
+ .callBuiltin("__builtin_hlsl_resource_handlefromimplicitbinding",
+ HandleType, PH::LastStmt, PH::_0, PH::_1, PH::_2, PH::_3,
+ PH::_4)
+ .setHandleFieldOnResource(TmpVar, PH::LastStmt)
+ .accessHandleFieldOnResource(TmpVar)
+ .callBuiltin("__builtin_hlsl_resource_counterhandlefromimplicitbinding",
+ HandleType, PH::LastStmt, PH::_5, PH::_1)
+ .setCounterHandleFieldOnResource(TmpVar, PH::LastStmt)
+ .returnValue(TmpVar)
+ .finalize();
+}
+
BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCopyConstructor() {
assert(!Record->isCompleteDefinition() && "record is already complete");
@@ -1048,7 +1179,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addIncrementCounterMethod() {
return BuiltinTypeMethodBuilder(*this, "IncrementCounter",
SemaRef.getASTContext().UnsignedIntTy)
.callBuiltin("__builtin_hlsl_buffer_update_counter", QualType(),
- PH::Handle, getConstantIntExpr(1))
+ PH::CounterHandle, getConstantIntExpr(1))
.finalize();
}
@@ -1057,7 +1188,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addDecrementCounterMethod() {
return BuiltinTypeMethodBuilder(*this, "DecrementCounter",
SemaRef.getASTContext().UnsignedIntTy)
.callBuiltin("__builtin_hlsl_buffer_update_counter", QualType(),
- PH::Handle, getConstantIntExpr(-1))
+ PH::CounterHandle, getConstantIntExpr(-1))
.finalize();
}
@@ -1102,7 +1233,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addAppendMethod() {
return BuiltinTypeMethodBuilder(*this, "Append", AST.VoidTy)
.addParam("value", ElemTy)
.callBuiltin("__builtin_hlsl_buffer_update_counter", AST.UnsignedIntTy,
- PH::Handle, getConstantIntExpr(1))
+ PH::CounterHandle, getConstantIntExpr(1))
.callBuiltin("__builtin_hlsl_resource_getpointer",
AST.getPointerType(AddrSpaceElemTy), PH::Handle,
PH::LastStmt)
@@ -1119,7 +1250,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addConsumeMethod() {
AST.getAddrSpaceQualType(ElemTy, LangAS::hlsl_device);
return BuiltinTypeMethodBuilder(*this, "Consume", ElemTy)
.callBuiltin("__builtin_hlsl_buffer_update_counter", AST.UnsignedIntTy,
- PH::Handle, getConstantIntExpr(-1))
+ PH::CounterHandle, getConstantIntExpr(-1))
.callBuiltin("__builtin_hlsl_resource_getpointer",
AST.getPointerType(AddrSpaceElemTy), PH::Handle,
PH::LastStmt)
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
index a981602..86cbd10 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
@@ -83,8 +83,7 @@ public:
BuiltinTypeDeclBuilder &addCopyAssignmentOperator();
// Static create methods
- BuiltinTypeDeclBuilder &addCreateFromBinding();
- BuiltinTypeDeclBuilder &addCreateFromImplicitBinding();
+ BuiltinTypeDeclBuilder &addStaticInitializationFunctions(bool HasCounter);
// Builtin types methods
BuiltinTypeDeclBuilder &addLoadMethods();
@@ -96,6 +95,10 @@ public:
BuiltinTypeDeclBuilder &addConsumeMethod();
private:
+ BuiltinTypeDeclBuilder &addCreateFromBinding();
+ BuiltinTypeDeclBuilder &addCreateFromImplicitBinding();
+ BuiltinTypeDeclBuilder &addCreateFromBindingWithImplicitCounter();
+ BuiltinTypeDeclBuilder &addCreateFromImplicitBindingWithImplicitCounter();
BuiltinTypeDeclBuilder &addResourceMember(StringRef MemberName,
ResourceClass RC, bool IsROV,
bool RawBuffer, bool IsCounter,
diff --git a/clang/lib/Sema/HLSLExternalSemaSource.cpp b/clang/lib/Sema/HLSLExternalSemaSource.cpp
index cc43e94..e118dda 100644
--- a/clang/lib/Sema/HLSLExternalSemaSource.cpp
+++ b/clang/lib/Sema/HLSLExternalSemaSource.cpp
@@ -236,8 +236,7 @@ static BuiltinTypeDeclBuilder setupBufferType(CXXRecordDecl *Decl, Sema &S,
.addDefaultHandleConstructor()
.addCopyConstructor()
.addCopyAssignmentOperator()
- .addCreateFromBinding()
- .addCreateFromImplicitBinding();
+ .addStaticInitializationFunctions(HasCounter);
}
// This function is responsible for constructing the constraint expression for
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 4d3c7d6..4230ea7 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -9014,24 +9014,6 @@ bool Sema::IsInvalidSMECallConversion(QualType FromType, QualType ToType) {
return FromAttributes != ToAttributes;
}
-// Check if we have a conversion between incompatible cmse function pointer
-// types, that is, a conversion between a function pointer with the
-// cmse_nonsecure_call attribute and one without.
-static bool IsInvalidCmseNSCallConversion(Sema &S, QualType FromType,
- QualType ToType) {
- if (const auto *ToFn =
- dyn_cast<FunctionType>(S.Context.getCanonicalType(ToType))) {
- if (const auto *FromFn =
- dyn_cast<FunctionType>(S.Context.getCanonicalType(FromType))) {
- FunctionType::ExtInfo ToEInfo = ToFn->getExtInfo();
- FunctionType::ExtInfo FromEInfo = FromFn->getExtInfo();
-
- return ToEInfo.getCmseNSCall() != FromEInfo.getCmseNSCall();
- }
- }
- return false;
-}
-
// checkPointerTypesForAssignment - This is a very tricky routine (despite
// being closely modeled after the C99 spec:-). The odd characteristic of this
// routine is it effectively ignores the qualifiers on the top level pointee.
@@ -9187,18 +9169,43 @@ static AssignConvertType checkPointerTypesForAssignment(Sema &S,
return AssignConvertType::IncompatibleFunctionPointer;
return AssignConvertType::IncompatiblePointer;
}
- bool DiscardingCFIUncheckedCallee, AddingCFIUncheckedCallee;
- if (!S.getLangOpts().CPlusPlus &&
- S.IsFunctionConversion(ltrans, rtrans, &DiscardingCFIUncheckedCallee,
- &AddingCFIUncheckedCallee)) {
- // Allow conversions between CFIUncheckedCallee-ness.
- if (!DiscardingCFIUncheckedCallee && !AddingCFIUncheckedCallee)
+ // Note: in C++, typesAreCompatible(ltrans, rtrans) will have guaranteed
+ // hasSameType, so we can skip further checks.
+ const auto *LFT = ltrans->getAs<FunctionType>();
+ const auto *RFT = rtrans->getAs<FunctionType>();
+ if (!S.getLangOpts().CPlusPlus && LFT && RFT) {
+ // The invocation of IsFunctionConversion below will try to transform rtrans
+ // to obtain an exact match for ltrans. This should not fail because of
+ // mismatches in result type and parameter types; they were already checked
+ // by typesAreCompatible above. So we will recreate rtrans (or where
+ // appropriate ltrans) using the result type and parameter types from ltrans
+ // (respectively rtrans), but keeping its ExtInfo/ExtProtoInfo.
+ const auto *LFPT = dyn_cast<FunctionProtoType>(LFT);
+ const auto *RFPT = dyn_cast<FunctionProtoType>(RFT);
+ if (LFPT && RFPT) {
+ rtrans = S.Context.getFunctionType(LFPT->getReturnType(),
+ LFPT->getParamTypes(),
+ RFPT->getExtProtoInfo());
+ } else if (LFPT) {
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = RFT->getExtInfo();
+ rtrans = S.Context.getFunctionType(LFPT->getReturnType(),
+ LFPT->getParamTypes(), EPI);
+ } else if (RFPT) {
+ // In this case, we want to retain rtrans as a FunctionProtoType, to keep
+ // all of its ExtProtoInfo. Transform ltrans instead.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = LFT->getExtInfo();
+ ltrans = S.Context.getFunctionType(RFPT->getReturnType(),
+ RFPT->getParamTypes(), EPI);
+ } else {
+ rtrans = S.Context.getFunctionNoProtoType(LFT->getReturnType(),
+ RFT->getExtInfo());
+ }
+ if (!S.Context.hasSameUnqualifiedType(rtrans, ltrans) &&
+ !S.IsFunctionConversion(rtrans, ltrans))
return AssignConvertType::IncompatibleFunctionPointer;
}
- if (IsInvalidCmseNSCallConversion(S, ltrans, rtrans))
- return AssignConvertType::IncompatibleFunctionPointer;
- if (S.IsInvalidSMECallConversion(rtrans, ltrans))
- return AssignConvertType::IncompatibleFunctionPointer;
return ConvTy;
}
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 09e5d69..17cb1e4 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -1240,6 +1240,20 @@ static CXXMethodDecl *lookupMethod(Sema &S, CXXRecordDecl *RecordDecl,
} // end anonymous namespace
+static bool hasCounterHandle(const CXXRecordDecl *RD) {
+ if (RD->field_empty())
+ return false;
+ auto It = std::next(RD->field_begin());
+ if (It == RD->field_end())
+ return false;
+ const FieldDecl *SecondField = *It;
+ if (const auto *ResTy =
+ SecondField->getType()->getAs<HLSLAttributedResourceType>()) {
+ return ResTy->getAttrs().IsCounter;
+ }
+ return false;
+}
+
bool SemaHLSL::handleRootSignatureElements(
ArrayRef<hlsl::RootSignatureElement> Elements) {
// Define some common error handling functions
@@ -2973,6 +2987,25 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
TheCall->setType(ResourceTy);
break;
}
+ case Builtin::BI__builtin_hlsl_resource_counterhandlefromimplicitbinding: {
+ ASTContext &AST = SemaRef.getASTContext();
+ if (SemaRef.checkArgCount(TheCall, 3) ||
+ CheckResourceHandle(&SemaRef, TheCall, 0) ||
+ CheckArgTypeMatches(&SemaRef, TheCall->getArg(1), AST.UnsignedIntTy) ||
+ CheckArgTypeMatches(&SemaRef, TheCall->getArg(2), AST.UnsignedIntTy))
+ return true;
+
+ QualType MainHandleTy = TheCall->getArg(0)->getType();
+ auto *MainResType = MainHandleTy->getAs<HLSLAttributedResourceType>();
+ auto MainAttrs = MainResType->getAttrs();
+ assert(!MainAttrs.IsCounter && "cannot create a counter from a counter");
+ MainAttrs.IsCounter = true;
+ QualType CounterHandleTy = AST.getHLSLAttributedResourceType(
+ MainResType->getWrappedType(), MainResType->getContainedType(),
+ MainAttrs);
+ TheCall->setType(CounterHandleTy);
+ break;
+ }
case Builtin::BI__builtin_hlsl_and:
case Builtin::BI__builtin_hlsl_or: {
if (SemaRef.checkArgCount(TheCall, 2))
@@ -3780,10 +3813,24 @@ void SemaHLSL::ActOnVariableDeclarator(VarDecl *VD) {
uint32_t OrderID = getNextImplicitBindingOrderID();
if (Binding.hasBinding())
Binding.setImplicitOrderID(OrderID);
- else
+ else {
addImplicitBindingAttrToDecl(
SemaRef, VD, getRegisterType(getResourceArrayHandleType(VD)),
OrderID);
+ // Re-create the binding object to pick up the new attribute.
+ Binding = ResourceBindingAttrs(VD);
+ }
+ }
+
+ // Get to the base type of a potentially multi-dimensional array.
+ QualType Ty = getASTContext().getBaseElementType(VD->getType());
+
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (hasCounterHandle(RD)) {
+ if (!Binding.hasCounterImplicitOrderID()) {
+ uint32_t OrderID = getNextImplicitBindingOrderID();
+ Binding.setCounterImplicitOrderID(OrderID);
+ }
}
}
}
@@ -3808,19 +3855,31 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) {
CXXMethodDecl *CreateMethod = nullptr;
llvm::SmallVector<Expr *> Args;
+ bool HasCounter = hasCounterHandle(ResourceDecl);
+ const char *CreateMethodName;
+ if (Binding.isExplicit())
+ CreateMethodName = HasCounter ? "__createFromBindingWithImplicitCounter"
+ : "__createFromBinding";
+ else
+ CreateMethodName = HasCounter
+ ? "__createFromImplicitBindingWithImplicitCounter"
+ : "__createFromImplicitBinding";
+
+ CreateMethod =
+ lookupMethod(SemaRef, ResourceDecl, CreateMethodName, VD->getLocation());
+
+ if (!CreateMethod)
+ // This can happen if someone creates a struct that looks like an HLSL
+ // resource record but does not have the required static create method.
+ // No binding will be generated for it.
+ return false;
+
if (Binding.isExplicit()) {
- // The resource has explicit binding.
- CreateMethod = lookupMethod(SemaRef, ResourceDecl, "__createFromBinding",
- VD->getLocation());
IntegerLiteral *RegSlot =
IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, Binding.getSlot()),
AST.UnsignedIntTy, SourceLocation());
Args.push_back(RegSlot);
} else {
- // The resource has implicit binding.
- CreateMethod =
- lookupMethod(SemaRef, ResourceDecl, "__createFromImplicitBinding",
- VD->getLocation());
uint32_t OrderID = (Binding.hasImplicitOrderID())
? Binding.getImplicitOrderID()
: getNextImplicitBindingOrderID();
@@ -3830,12 +3889,6 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) {
Args.push_back(OrderId);
}
- if (!CreateMethod)
- // This can happen if someone creates a struct that looks like an HLSL
- // resource record but does not have the required static create method.
- // No binding will be generated for it.
- return false;
-
IntegerLiteral *Space =
IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, Binding.getSpace()),
AST.UnsignedIntTy, SourceLocation());
@@ -3859,6 +3912,15 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) {
Name, nullptr, VK_PRValue, FPOptionsOverride());
Args.push_back(NameCast);
+ if (HasCounter) {
+ // Will this be in the correct order?
+ uint32_t CounterOrderID = getNextImplicitBindingOrderID();
+ IntegerLiteral *CounterId =
+ IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, CounterOrderID),
+ AST.UnsignedIntTy, SourceLocation());
+ Args.push_back(CounterId);
+ }
+
// Make sure the create method template is instantiated and emitted.
if (!CreateMethod->isDefined() && CreateMethod->isTemplateInstantiation())
SemaRef.InstantiateFunctionDefinition(VD->getLocation(), CreateMethod,
@@ -3899,20 +3961,24 @@ bool SemaHLSL::initGlobalResourceArrayDecl(VarDecl *VD) {
ASTContext &AST = SemaRef.getASTContext();
QualType ResElementTy = AST.getBaseElementType(VD->getType());
CXXRecordDecl *ResourceDecl = ResElementTy->getAsCXXRecordDecl();
-
- HLSLResourceBindingAttr *RBA = VD->getAttr<HLSLResourceBindingAttr>();
- HLSLVkBindingAttr *VkBinding = VD->getAttr<HLSLVkBindingAttr>();
CXXMethodDecl *CreateMethod = nullptr;
- if (VkBinding || (RBA && RBA->hasRegisterSlot()))
+ bool HasCounter = hasCounterHandle(ResourceDecl);
+ ResourceBindingAttrs ResourceAttrs(VD);
+ if (ResourceAttrs.isExplicit())
// Resource has explicit binding.
- CreateMethod = lookupMethod(SemaRef, ResourceDecl, "__createFromBinding",
- VD->getLocation());
- else
- // Resource has implicit binding.
CreateMethod =
- lookupMethod(SemaRef, ResourceDecl, "__createFromImplicitBinding",
+ lookupMethod(SemaRef, ResourceDecl,
+ HasCounter ? "__createFromBindingWithImplicitCounter"
+ : "__createFromBinding",
VD->getLocation());
+ else
+ // Resource has implicit binding.
+ CreateMethod = lookupMethod(
+ SemaRef, ResourceDecl,
+ HasCounter ? "__createFromImplicitBindingWithImplicitCounter"
+ : "__createFromImplicitBinding",
+ VD->getLocation());
if (!CreateMethod)
return false;
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index 8471f02..4824b5a 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -2946,5 +2946,5 @@ OpenACCReductionRecipe SemaOpenACC::CreateReductionInitRecipe(
AllocaDecl->setInit(Init.get());
AllocaDecl->setInitStyle(VarDecl::CallInit);
}
- return OpenACCReductionRecipe(AllocaDecl);
+ return OpenACCReductionRecipe(AllocaDecl, {});
}
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 8d32ef6..8339bb1 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -1892,14 +1892,7 @@ bool Sema::TryFunctionConversion(QualType FromType, QualType ToType,
return Changed;
}
-bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
- bool *DiscardingCFIUncheckedCallee,
- bool *AddingCFIUncheckedCallee) const {
- if (DiscardingCFIUncheckedCallee)
- *DiscardingCFIUncheckedCallee = false;
- if (AddingCFIUncheckedCallee)
- *AddingCFIUncheckedCallee = false;
-
+bool Sema::IsFunctionConversion(QualType FromType, QualType ToType) const {
if (Context.hasSameUnqualifiedType(FromType, ToType))
return false;
@@ -1958,25 +1951,14 @@ bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
const auto *ToFPT = dyn_cast<FunctionProtoType>(ToFn);
if (FromFPT && ToFPT) {
- if (FromFPT->hasCFIUncheckedCallee() && !ToFPT->hasCFIUncheckedCallee()) {
- QualType NewTy = Context.getFunctionType(
- FromFPT->getReturnType(), FromFPT->getParamTypes(),
- FromFPT->getExtProtoInfo().withCFIUncheckedCallee(false));
- FromFPT = cast<FunctionProtoType>(NewTy.getTypePtr());
- FromFn = FromFPT;
- Changed = true;
- if (DiscardingCFIUncheckedCallee)
- *DiscardingCFIUncheckedCallee = true;
- } else if (!FromFPT->hasCFIUncheckedCallee() &&
- ToFPT->hasCFIUncheckedCallee()) {
+ if (FromFPT->hasCFIUncheckedCallee() != ToFPT->hasCFIUncheckedCallee()) {
QualType NewTy = Context.getFunctionType(
FromFPT->getReturnType(), FromFPT->getParamTypes(),
- FromFPT->getExtProtoInfo().withCFIUncheckedCallee(true));
+ FromFPT->getExtProtoInfo().withCFIUncheckedCallee(
+ ToFPT->hasCFIUncheckedCallee()));
FromFPT = cast<FunctionProtoType>(NewTy.getTypePtr());
FromFn = FromFPT;
Changed = true;
- if (AddingCFIUncheckedCallee)
- *AddingCFIUncheckedCallee = true;
}
}
@@ -2007,11 +1989,7 @@ bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
Changed = true;
}
- // For C, when called from checkPointerTypesForAssignment,
- // we need to not alter FromFn, or else even an innocuous cast
- // like dropping effects will fail. In C++ however we do want to
- // alter FromFn (because of the way PerformImplicitConversion works).
- if (Context.hasAnyFunctionEffects() && getLangOpts().CPlusPlus) {
+ if (Context.hasAnyFunctionEffects()) {
FromFPT = cast<FunctionProtoType>(FromFn); // in case FromFn changed above
// Transparently add/drop effects; here we are concerned with
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index 6acf79a..868f0cc 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -13009,9 +13009,22 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() {
llvm::SmallVector<OpenACCReductionRecipe> RecipeList;
for (unsigned I = 0; I < VarList.size(); ++I) {
- static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *));
VarDecl *Recipe = readDeclAs<VarDecl>();
- RecipeList.push_back({Recipe});
+
+ static_assert(sizeof(OpenACCReductionRecipe::CombinerRecipe) ==
+ 3 * sizeof(int *));
+
+ llvm::SmallVector<OpenACCReductionRecipe::CombinerRecipe> Combiners;
+ unsigned NumCombiners = readInt();
+ for (unsigned I = 0; I < NumCombiners; ++I) {
+ VarDecl *LHS = readDeclAs<VarDecl>();
+ VarDecl *RHS = readDeclAs<VarDecl>();
+ Expr *Op = readExpr();
+
+ Combiners.push_back({LHS, RHS, Op});
+ }
+
+ RecipeList.push_back({Recipe, Combiners});
}
return OpenACCReductionClause::Create(getContext(), BeginLoc, LParenLoc, Op,
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 09b1e58..82ccde8 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -8925,8 +8925,17 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
writeOpenACCVarList(RC);
for (const OpenACCReductionRecipe &R : RC->getRecipes()) {
- static_assert(sizeof(OpenACCReductionRecipe) == 1 * sizeof(int *));
AddDeclRef(R.AllocaDecl);
+
+ static_assert(sizeof(OpenACCReductionRecipe::CombinerRecipe) ==
+ 3 * sizeof(int *));
+ writeUInt32(R.CombinerRecipes.size());
+
+ for (auto &CombinerRecipe : R.CombinerRecipes) {
+ AddDeclRef(CombinerRecipe.LHS);
+ AddDeclRef(CombinerRecipe.RHS);
+ AddStmt(CombinerRecipe.Op);
+ }
}
return;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/VAListChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VAListChecker.cpp
index 79fd0bd..503fa5d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/VAListChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/VAListChecker.cpp
@@ -149,7 +149,7 @@ void VAListChecker::checkPreCall(const CallEvent &Call,
else if (VaEnd.matches(Call))
checkVAListEndCall(Call, C);
else {
- for (auto FuncInfo : VAListAccepters) {
+ for (const auto &FuncInfo : VAListAccepters) {
if (!FuncInfo.Func.matches(Call))
continue;
const MemRegion *VAList =
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp
index 15a0c5a..ace639c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp
@@ -232,7 +232,7 @@ public:
bool ignoreARC =
!PD->isReadOnly() && PD->getSetterKind() == ObjCPropertyDecl::Assign;
auto IsUnsafePtr = isUnsafePtr(QT, ignoreARC);
- return {IsUnsafePtr && *IsUnsafePtr, PropType};
+ return {IsUnsafePtr && *IsUnsafePtr && !PD->isRetaining(), PropType};
}
bool shouldSkipDecl(const RecordDecl *RD) const {