Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/AST/ExprConstant.cpp                         |   6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenClass.cpp                  | 124
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.cpp                |  69
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp                   |  97
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp                   |  59
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp            | 156
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp             |  40
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp               | 165
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h                 | 111
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp          |   2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp                   |   3
-rw-r--r--  clang/lib/CIR/CodeGen/CMakeLists.txt                   |   1
-rw-r--r--  clang/lib/CIR/CodeGen/EHScopeStack.h                   |  99
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp                |  98
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp   | 186
-rw-r--r--  clang/lib/CIR/Lowering/CIRPasses.cpp                   |   2
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp                           |  36
-rw-r--r--  clang/lib/CodeGen/CGExprCXX.cpp                        |  27
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp                  | 157
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp            |  37
-rw-r--r--  clang/lib/Driver/Driver.cpp                            |  24
-rw-r--r--  clang/lib/Driver/ToolChain.cpp                         |   2
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/Sparc.cpp             |  13
-rw-r--r--  clang/lib/Driver/ToolChains/MSVC.cpp                   |   4
-rw-r--r--  clang/lib/Driver/ToolChains/MinGW.cpp                  |   2
-rw-r--r--  clang/lib/Driver/ToolChains/Solaris.cpp                |   6
-rw-r--r--  clang/lib/Driver/ToolChains/UEFI.cpp                   |   4
-rw-r--r--  clang/lib/Format/ContinuationIndenter.cpp              |   3
-rw-r--r--  clang/lib/Format/Format.cpp                            |   6
-rw-r--r--  clang/lib/Format/TokenAnnotator.cpp                    |   3
-rw-r--r--  clang/lib/Parse/ParseDecl.cpp                          |   3
-rw-r--r--  clang/lib/Parse/ParseDeclCXX.cpp                       |   7
-rw-r--r--  clang/lib/Parse/ParseHLSLRootSignature.cpp             |   7
-rw-r--r--  clang/lib/Sema/SemaAvailability.cpp                    |   6
-rw-r--r--  clang/lib/Sema/SemaConcept.cpp                         |   3
-rw-r--r--  clang/lib/Sema/SemaDecl.cpp                            |  11
-rw-r--r--  clang/lib/Sema/SemaDeclAttr.cpp                        |  10
-rw-r--r--  clang/lib/Sema/SemaOpenACCAtomic.cpp                   |  16
-rw-r--r--  clang/lib/Sema/SemaOpenMP.cpp                          |  37
-rw-r--r--  clang/lib/Sema/SemaTemplateDeduction.cpp               |  21
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp    |  14
41 files changed, 1461 insertions(+), 216 deletions(-)
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 0d12161..9808298 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -14636,7 +14636,9 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && IsRelational) {
bool WasArrayIndex;
unsigned Mismatch = FindDesignatorMismatch(
- getType(LHSValue.Base), LHSDesignator, RHSDesignator, WasArrayIndex);
+ LHSValue.Base.isNull() ? QualType()
+ : getType(LHSValue.Base).getNonReferenceType(),
+ LHSDesignator, RHSDesignator, WasArrayIndex);
// At the point where the designators diverge, the comparison has a
// specified value if:
// - we are comparing array indices
@@ -14680,7 +14682,7 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
// compare pointers within the object in question; otherwise, the result
// depends on where the object is located in memory.
if (!LHSValue.Base.isNull() && IsRelational) {
- QualType BaseTy = getType(LHSValue.Base);
+ QualType BaseTy = getType(LHSValue.Base).getNonReferenceType();
if (BaseTy->isIncompleteType())
return Error(E);
CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy);
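
A plausible constexpr reproducer for the fix above (illustrative only; it
assumes the lvalue base is a reference declaration, so getType() on the base
used to yield a reference type rather than the referenced object type):

    // Relational comparison of pointers into an object named through a
    // reference. Stripping the reference type keeps isIncompleteType() and
    // getTypeSizeInChars() operating on `S`, not on `const S &`.
    struct S { int arr[2]; };
    constexpr S g{};
    constexpr const S &r = g;
    static_assert(&r.arr[0] < &r.arr[1], "");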
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index fbf53db..50cca0e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -12,6 +12,7 @@
#include "CIRGenCXXABI.h"
#include "CIRGenFunction.h"
+#include "CIRGenValue.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
@@ -311,6 +312,116 @@ void CIRGenFunction::emitInitializerForField(FieldDecl *field, LValue lhs,
assert(!cir::MissingFeatures::requiresCleanups());
}
+/// Emit a loop to call a particular constructor for each of several members
+/// of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param arrayType the type of the array to initialize
+/// \param arrayBegin an arrayType*
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void CIRGenFunction::emitCXXAggrConstructorCall(
+ const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType,
+ Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked,
+ bool zeroInitialize) {
+ QualType elementType;
+ mlir::Value numElements = emitArrayLength(arrayType, elementType, arrayBegin);
+ emitCXXAggrConstructorCall(ctor, numElements, arrayBegin, e,
+ newPointerIsChecked, zeroInitialize);
+}
+
+/// Emit a loop to call a particular constructor for each of several members
+/// of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param numElements the number of elements in the array;
+/// may be zero
+/// \param arrayBase a T*, where T is the type constructed by ctor
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void CIRGenFunction::emitCXXAggrConstructorCall(
+ const CXXConstructorDecl *ctor, mlir::Value numElements, Address arrayBase,
+ const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize) {
+ // It's legal for numElements to be zero. This can happen both
+ // dynamically, because x can be zero in 'new A[x]', and statically,
+ // because of GCC extensions that permit zero-length arrays. There
+ // are probably legitimate places where we could assume that this
+ // doesn't happen, but it's not clear that it's worth it.
+
+ // Optimize for a constant count.
+ auto constantCount = dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
+ if (constantCount) {
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constantCount.getValue());
+ // Just skip out if the constant count is zero.
+ if (constIntAttr && constIntAttr.getUInt() == 0)
+ return;
+ } else {
+ // Otherwise, emit the check.
+ cgm.errorNYI(e->getSourceRange(), "dynamic-length array expression");
+ }
+
+ auto arrayTy = mlir::cast<cir::ArrayType>(arrayBase.getElementType());
+ mlir::Type elementType = arrayTy.getElementType();
+ cir::PointerType ptrToElmType = builder.getPointerTo(elementType);
+
+  // Traditional LLVM codegen emits a loop here. CIR lowers to a loop as part of
+ // LoweringPrepare.
+
+ // The alignment of the base, adjusted by the size of a single element,
+ // provides a conservative estimate of the alignment of every element.
+ // (This assumes we never start tracking offsetted alignments.)
+ //
+ // Note that these are complete objects and so we don't need to
+ // use the non-virtual size or alignment.
+ QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CharUnits eltAlignment = arrayBase.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(type));
+
+ // Zero initialize the storage, if requested.
+ if (zeroInitialize)
+ emitNullInitialization(*currSrcLoc, arrayBase, type);
+
+ // C++ [class.temporary]p4:
+ // There are two contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. The first context is when a
+ // default constructor is called to initialize an element of an array.
+ // If the constructor has one or more default arguments, the destruction of
+ // every temporary created in a default argument expression is sequenced
+ // before the construction of the next array element, if any.
+ {
+ assert(!cir::MissingFeatures::runCleanupsScope());
+
+ // Evaluate the constructor and its arguments in a regular
+ // partial-destroy cleanup.
+ if (getLangOpts().Exceptions &&
+ !ctor->getParent()->hasTrivialDestructor()) {
+ cgm.errorNYI(e->getSourceRange(), "partial array cleanups");
+ }
+
+ // Emit the constructor call that will execute for every array element.
+ mlir::Value arrayOp =
+ builder.createPtrBitcast(arrayBase.getPointer(), arrayTy);
+ builder.create<cir::ArrayCtor>(
+ *currSrcLoc, arrayOp, [&](mlir::OpBuilder &b, mlir::Location loc) {
+ mlir::BlockArgument arg =
+ b.getInsertionBlock()->addArgument(ptrToElmType, loc);
+ Address curAddr = Address(arg, elementType, eltAlignment);
+ assert(!cir::MissingFeatures::sanitizers());
+ auto currAVS = AggValueSlot::forAddr(
+ curAddr, type.getQualifiers(), AggValueSlot::IsDestructed,
+ AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap,
+ AggValueSlot::IsNotZeroed);
+ emitCXXConstructorCall(ctor, Ctor_Complete,
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/false, currAVS, e);
+ builder.create<cir::YieldOp>(loc);
+ });
+ }
+
+ if (constantCount.use_empty())
+ constantCount.erase();
+}
+
void CIRGenFunction::emitDelegateCXXConstructorCall(
const CXXConstructorDecl *ctor, CXXCtorType ctorType,
const FunctionArgList &args, SourceLocation loc) {
@@ -369,6 +480,19 @@ void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &args) {
s->getStmtClassName());
}
+void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
+ QualType type) {
+ const RecordType *rtype = type->castAs<RecordType>();
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXDestructorDecl *dtor = record->getDestructor();
+ // TODO(cir): Unlike traditional codegen, CIRGen should actually emit trivial
+ // dtors which shall be removed on later CIR passes. However, only remove this
+ // assertion after we have a test case to exercise this path.
+ assert(!dtor->isTrivial());
+ cgf.emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase*/ false,
+ /*delegating=*/false, addr, type);
+}
+
void CIRGenFunction::emitDelegatingCXXConstructorCall(
const CXXConstructorDecl *ctor, const FunctionArgList &args) {
assert(ctor->isDelegatingConstructor());
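
A minimal C++ input that exercises the new emitCXXAggrConstructorCall path
(illustrative; the names are made up):

    struct NeedsCtor {
      NeedsCtor();  // non-trivial default constructor
    };

    void f() {
      // Constant element count: CIRGen emits one cir.array_ctor region
      // containing a single constructor call; LoweringPrepare later expands
      // it into an explicit loop (see LoweringPrepare.cpp below).
      NeedsCtor arr[4];
    }

A non-constant element count reaching this function would instead hit the
"dynamic-length array expression" errorNYI branch.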
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
new file mode 100644
index 0000000..be21ce9
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -0,0 +1,69 @@
+//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+
+#include "clang/CIR/MissingFeatures.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+//===----------------------------------------------------------------------===//
+// CIRGenFunction cleanup related
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// EHScopeStack
+//===----------------------------------------------------------------------===//
+
+void EHScopeStack::Cleanup::anchor() {}
+
+static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ mlir::Block *cleanup =
+ cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+ return cleanup;
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CIRGenFunction::popCleanupBlock() {
+ assert(!ehStack.cleanupStack.empty() && "cleanup stack is empty!");
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ std::unique_ptr<EHScopeStack::Cleanup> cleanup =
+ ehStack.cleanupStack.pop_back_val();
+
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ mlir::Block *cleanupEntry = getCurCleanupBlock(*this);
+ builder.setInsertionPointToEnd(cleanupEntry);
+ cleanup->emit(*this);
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CIRGenFunction::popCleanupBlocks(size_t oldCleanupStackDepth) {
+ assert(!cir::MissingFeatures::ehstackBranches());
+
+ assert(ehStack.getStackDepth() >= oldCleanupStackDepth);
+
+ // Pop cleanup blocks until we reach the base stack depth for the
+ // current scope.
+ while (ehStack.getStackDepth() > oldCleanupStackDepth) {
+ popCleanupBlock();
+ }
+}
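
A sketch of how this pairs with RunCleanupsScope (declared in CIRGenFunction.h
below); assumed usage, not a call site from this patch:

    {
      CIRGenFunction::RunCleanupsScope scope(cgf); // records ehStack depth
      // ... emit statements; cleanups pushed here accumulate on cgf.ehStack
    } // ~RunCleanupsScope -> forceCleanup() -> popCleanupBlocks(savedDepth),
      // which pops each cleanup and emits it into the scope's cleanup block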
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index afbe92a..a28ac3c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -183,8 +183,8 @@ void CIRGenFunction::emitAutoVarCleanups(
const VarDecl &d = *emission.Variable;
// Check the type for a cleanup.
- if (d.needsDestruction(getContext()))
- cgm.errorNYI(d.getSourceRange(), "emitAutoVarCleanups: type cleanup");
+ if (QualType::DestructionKind dtorKind = d.needsDestruction(getContext()))
+ emitAutoVarTypeCleanup(emission, dtorKind);
assert(!cir::MissingFeatures::opAllocaPreciseLifetime());
@@ -648,3 +648,96 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
assert(!cir::MissingFeatures::sanitizers());
}
+
+/// Immediately perform the destruction of the given object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+/// objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+/// elements
+void CIRGenFunction::emitDestroy(Address addr, QualType type,
+ Destroyer *destroyer) {
+ if (getContext().getAsArrayType(type))
+ cgm.errorNYI("emitDestroy: array type");
+
+ return destroyer(*this, addr, type);
+}
+
+CIRGenFunction::Destroyer *
+CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ llvm_unreachable("no destroyer for trivial dtor");
+ case QualType::DK_cxx_destructor:
+ return destroyCXXObject;
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
+ cgm.errorNYI("getDestroyer: other destruction kind");
+ return nullptr;
+ }
+ llvm_unreachable("Unknown DestructionKind");
+}
+
+namespace {
+struct DestroyObject final : EHScopeStack::Cleanup {
+ DestroyObject(Address addr, QualType type,
+ CIRGenFunction::Destroyer *destroyer)
+ : addr(addr), type(type), destroyer(destroyer) {}
+
+ Address addr;
+ QualType type;
+ CIRGenFunction::Destroyer *destroyer;
+
+ void emit(CIRGenFunction &cgf) override {
+ cgf.emitDestroy(addr, type, destroyer);
+ }
+};
+} // namespace
+
+/// Enter a destroy cleanup for the given local variable.
+void CIRGenFunction::emitAutoVarTypeCleanup(
+ const CIRGenFunction::AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind) {
+ assert(dtorKind != QualType::DK_none);
+
+ // Note that for __block variables, we want to destroy the
+ // original stack object, not the possibly forwarded object.
+ Address addr = emission.getObjectAddress(*this);
+
+ const VarDecl *var = emission.Variable;
+ QualType type = var->getType();
+
+ CleanupKind cleanupKind = NormalAndEHCleanup;
+ CIRGenFunction::Destroyer *destroyer = nullptr;
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ llvm_unreachable("no cleanup for trivially-destructible variable");
+
+ case QualType::DK_cxx_destructor:
+ // If there's an NRVO flag on the emission, we need a different
+ // cleanup.
+ if (emission.NRVOFlag) {
+ cgm.errorNYI(var->getSourceRange(), "emitAutoVarTypeCleanup: NRVO");
+ return;
+ }
+ // Otherwise, this is handled below.
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
+ cgm.errorNYI(var->getSourceRange(),
+ "emitAutoVarTypeCleanup: other dtor kind");
+ return;
+ }
+
+ // If we haven't chosen a more specific destroyer, use the default.
+ if (!destroyer)
+ destroyer = getDestroyer(dtorKind);
+
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ ehStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
+}
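
C++ source that reaches emitAutoVarTypeCleanup (illustrative):

    struct Resource {
      ~Resource();  // non-trivial: needsDestruction() == DK_cxx_destructor
    };

    void use() {
      Resource r;  // a DestroyObject cleanup is pushed for `r`
    }              // scope exit pops the cleanup; destroyCXXObject emits the
                   // ~Resource() call via emitCXXDestructorCall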
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 1f64801..7ff5f26 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1657,37 +1657,38 @@ void CIRGenFunction::emitCXXConstructExpr(const CXXConstructExpr *e,
return;
}
- if (getContext().getAsArrayType(e->getType())) {
- cgm.errorNYI(e->getSourceRange(), "emitCXXConstructExpr: array type");
- return;
- }
+ if (const ArrayType *arrayType = getContext().getAsArrayType(e->getType())) {
+ assert(!cir::MissingFeatures::sanitizers());
+ emitCXXAggrConstructorCall(cd, arrayType, dest.getAddress(), e, false);
+ } else {
- clang::CXXCtorType type = Ctor_Complete;
- bool forVirtualBase = false;
- bool delegating = false;
-
- switch (e->getConstructionKind()) {
- case CXXConstructionKind::Complete:
- type = Ctor_Complete;
- break;
- case CXXConstructionKind::Delegating:
- // We should be emitting a constructor; GlobalDecl will assert this
- type = curGD.getCtorType();
- delegating = true;
- break;
- case CXXConstructionKind::VirtualBase:
- // This should just set 'forVirtualBase' to true and fall through, but
- // virtual base class support is otherwise missing, so this needs to wait
- // until it can be tested.
- cgm.errorNYI(e->getSourceRange(),
- "emitCXXConstructExpr: virtual base constructor");
- return;
- case CXXConstructionKind::NonVirtualBase:
- type = Ctor_Base;
- break;
- }
+ clang::CXXCtorType type = Ctor_Complete;
+ bool forVirtualBase = false;
+ bool delegating = false;
- emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
+ switch (e->getConstructionKind()) {
+ case CXXConstructionKind::Complete:
+ type = Ctor_Complete;
+ break;
+ case CXXConstructionKind::Delegating:
+ // We should be emitting a constructor; GlobalDecl will assert this
+ type = curGD.getCtorType();
+ delegating = true;
+ break;
+ case CXXConstructionKind::VirtualBase:
+ // This should just set 'forVirtualBase' to true and fall through, but
+ // virtual base class support is otherwise missing, so this needs to wait
+ // until it can be tested.
+ cgm.errorNYI(e->getSourceRange(),
+ "emitCXXConstructExpr: virtual base constructor");
+ return;
+ case CXXConstructionKind::NonVirtualBase:
+ type = Ctor_Base;
+ break;
+ }
+
+ emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
+ }
}
RValue CIRGenFunction::emitReferenceBindingToExpr(const Expr *e) {
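
Summary of the dispatch above (illustrative, reusing the hypothetical
NeedsCtor type from earlier):

    NeedsCtor arr[4];  // array destination: emitCXXAggrConstructorCall(...)
    NeedsCtor one;     // scalar destination: emitCXXConstructorCall(cd,
                       //     Ctor_Complete, /*forVirtualBase=*/false, ...)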
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 6756a7c..7f2e2ce 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -34,11 +34,20 @@ public:
}
mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc);
+
/// Store the specified real/imag parts into the
/// specified value pointer.
void emitStoreOfComplex(mlir::Location loc, mlir::Value val, LValue lv,
bool isInit);
+ /// Emit a cast from complex value Val to DestType.
+ mlir::Value emitComplexToComplexCast(mlir::Value value, QualType srcType,
+ QualType destType, SourceLocation loc);
+
+ /// Emit a cast from scalar value Val to DestType.
+ mlir::Value emitScalarToComplexCast(mlir::Value value, QualType srcType,
+ QualType destType, SourceLocation loc);
+
mlir::Value
VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
mlir::Value VisitArraySubscriptExpr(Expr *e);
@@ -164,14 +173,110 @@ LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
QualType destTy) {
switch (ck) {
+ case CK_Dependent:
+ llvm_unreachable("dependent type must be resolved before the CIR codegen");
+
case CK_NoOp:
case CK_LValueToRValue:
return Visit(op);
- default:
- break;
+
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_UserDefinedConversion: {
+ cgf.cgm.errorNYI(
+        "ComplexExprEmitter::emitCast Atomic & UserDefinedConversion");
+ return {};
}
- cgf.cgm.errorNYI("ComplexType Cast");
- return {};
+
+ case CK_LValueBitCast: {
+ cgf.cgm.errorNYI("ComplexExprEmitter::emitCast CK_LValueBitCast");
+ return {};
+ }
+
+ case CK_LValueToRValueBitCast: {
+ LValue sourceLVal = cgf.emitLValue(op);
+ Address addr = sourceLVal.getAddress().withElementType(
+ builder, cgf.convertTypeForMem(destTy));
+ LValue destLV = cgf.makeAddrLValue(addr, destTy);
+ assert(!cir::MissingFeatures::opTBAA());
+ return emitLoadOfLValue(destLV, op->getExprLoc());
+ }
+
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_BooleanToSignedIntegral:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
+ case CK_ZeroToOCLOpaqueType:
+ case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
+ case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
+ case CK_HLSLArrayRValue:
+ case CK_HLSLElementwiseCast:
+ case CK_HLSLAggregateSplatCast:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_FloatingRealToComplex:
+ case CK_IntegralRealToComplex: {
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ return emitScalarToComplexCast(cgf.emitScalarExpr(op), op->getType(),
+ destTy, op->getExprLoc());
+ }
+
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex: {
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ return emitComplexToComplexCast(Visit(op), op->getType(), destTy,
+ op->getExprLoc());
+ }
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
}
mlir::Value ComplexExprEmitter::emitConstant(
@@ -207,6 +312,49 @@ void ComplexExprEmitter::emitStoreOfComplex(mlir::Location loc, mlir::Value val,
builder.createStore(loc, val, destAddr);
}
+mlir::Value ComplexExprEmitter::emitComplexToComplexCast(mlir::Value val,
+ QualType srcType,
+ QualType destType,
+ SourceLocation loc) {
+ if (srcType == destType)
+ return val;
+
+ // Get the src/dest element type.
+ QualType srcElemTy = srcType->castAs<ComplexType>()->getElementType();
+ QualType destElemTy = destType->castAs<ComplexType>()->getElementType();
+
+ cir::CastKind castOpKind;
+ if (srcElemTy->isFloatingType() && destElemTy->isFloatingType())
+ castOpKind = cir::CastKind::float_complex;
+ else if (srcElemTy->isFloatingType() && destElemTy->isIntegerType())
+ castOpKind = cir::CastKind::float_complex_to_int_complex;
+ else if (srcElemTy->isIntegerType() && destElemTy->isFloatingType())
+ castOpKind = cir::CastKind::int_complex_to_float_complex;
+ else if (srcElemTy->isIntegerType() && destElemTy->isIntegerType())
+ castOpKind = cir::CastKind::int_complex;
+ else
+ llvm_unreachable("unexpected src type or dest type");
+
+ return builder.createCast(cgf.getLoc(loc), castOpKind, val,
+ cgf.convertType(destType));
+}
+
+mlir::Value ComplexExprEmitter::emitScalarToComplexCast(mlir::Value val,
+ QualType srcType,
+ QualType destType,
+ SourceLocation loc) {
+ cir::CastKind castOpKind;
+ if (srcType->isFloatingType())
+ castOpKind = cir::CastKind::float_to_complex;
+ else if (srcType->isIntegerType())
+ castOpKind = cir::CastKind::int_to_complex;
+ else
+ llvm_unreachable("unexpected src type");
+
+ return builder.createCast(cgf.getLoc(loc), castOpKind, val,
+ cgf.convertType(destType));
+}
+
mlir::Value ComplexExprEmitter::VisitAbstractConditionalOperator(
const AbstractConditionalOperator *e) {
mlir::Value condValue = Visit(e->getCond());
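
C-level casts that map onto the new helpers (illustrative; the cast-kind
mappings are taken from the switch above):

    double d = 2.0;
    double _Complex dc = d;  // CK_FloatingRealToComplex  -> float_to_complex
    int _Complex ic = 3;     // CK_IntegralRealToComplex  -> int_to_complex
    float _Complex fc = dc;  // CK_FloatingComplexCast    -> float_complex
    int _Complex i2 = dc;    // CK_FloatingComplexToIntegralComplex
                             //   -> float_complex_to_int_complex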
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index eba6bff..2523b0f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -88,6 +88,10 @@ public:
// Utilities
//===--------------------------------------------------------------------===//
+ mlir::Value emitComplexToScalarConversion(mlir::Location loc,
+ mlir::Value value, CastKind kind,
+ QualType destTy);
+
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
return builder.createFloatingCast(result, cgf.convertType(promotionType));
}
@@ -1125,7 +1129,7 @@ LValue ScalarExprEmitter::emitCompoundAssignLValue(
// 'An assignment expression has the value of the left operand after the
// assignment...'.
if (lhsLV.isBitField())
- cgf.cgm.errorNYI(e->getSourceRange(), "store through bitfield lvalue");
+ cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
else
cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
@@ -1135,6 +1139,31 @@ LValue ScalarExprEmitter::emitCompoundAssignLValue(
return lhsLV;
}
+mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
+ mlir::Value value,
+ CastKind kind,
+ QualType destTy) {
+ cir::CastKind castOpKind;
+ switch (kind) {
+ case CK_FloatingComplexToReal:
+ castOpKind = cir::CastKind::float_complex_to_real;
+ break;
+ case CK_IntegralComplexToReal:
+ castOpKind = cir::CastKind::int_complex_to_real;
+ break;
+ case CK_FloatingComplexToBoolean:
+ castOpKind = cir::CastKind::float_complex_to_bool;
+ break;
+ case CK_IntegralComplexToBoolean:
+ castOpKind = cir::CastKind::int_complex_to_bool;
+ break;
+ default:
+ llvm_unreachable("invalid complex-to-scalar cast kind");
+ }
+
+  return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
+}
+
mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
QualType promotionType) {
e = e->IgnoreParens();
@@ -1758,6 +1787,15 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
ce->getExprLoc(), opts);
}
+ case CK_FloatingComplexToReal:
+ case CK_IntegralComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ mlir::Value value = cgf.emitComplexExpr(subExpr);
+ return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
+ kind, destTy);
+ }
+
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
case CK_IntegralRealToComplex:
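
Complex-to-scalar conversions now routed through VisitCastExpr (illustrative
C, GNU _Complex extensions assumed):

    double _Complex z;
    double re = z;        // CK_FloatingComplexToReal -> float_complex_to_real
    if (z) { /* ... */ }  // CK_FloatingComplexToBoolean -> float_complex_to_bool
                          //   (true if either real or imag part is nonzero)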
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 3e69e56..b4b95d6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -26,7 +26,11 @@ namespace clang::CIRGen {
CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
bool suppressNewContext)
- : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {}
+ : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
+ ehStack.setCGF(this);
+ currentCleanupStackDepth = 0;
+ assert(ehStack.getStackDepth() == 0);
+}
CIRGenFunction::~CIRGenFunction() {}
@@ -227,6 +231,14 @@ void CIRGenFunction::LexicalScope::cleanup() {
CIRGenBuilderTy &builder = cgf.builder;
LexicalScope *localScope = cgf.curLexScope;
+ auto applyCleanup = [&]() {
+ if (performCleanup) {
+ // ApplyDebugLocation
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ forceCleanup();
+ }
+ };
+
if (returnBlock != nullptr) {
// Write out the return block, which loads the value from `__retval` and
// issues the `cir.return`.
@@ -235,32 +247,42 @@ void CIRGenFunction::LexicalScope::cleanup() {
(void)emitReturn(*returnLoc);
}
- mlir::Block *curBlock = builder.getBlock();
- if (isGlobalInit() && !curBlock)
- return;
- if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
- return;
-
- // Get rid of any empty block at the end of the scope.
- bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
- if (!entryBlock && curBlock->empty()) {
- curBlock->erase();
- if (returnBlock != nullptr && returnBlock->getUses().empty())
- returnBlock->erase();
- return;
- }
-
- // Reached the end of the scope.
- {
+ auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
mlir::OpBuilder::InsertionGuard guard(builder);
- builder.setInsertionPointToEnd(curBlock);
+ builder.setInsertionPointToEnd(insPt);
+
+ // If we still don't have a cleanup block, it means that `applyCleanup`
+ // below might be able to get us one.
+ mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
+
+    // Leverage and defer to RunCleanupsScope's dtor and scope handling.
+ applyCleanup();
+
+ // If we now have one after `applyCleanup`, hook it up properly.
+ if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
+ cleanupBlock = localScope->getCleanupBlock(builder);
+ builder.create<cir::BrOp>(insPt->back().getLoc(), cleanupBlock);
+ if (!cleanupBlock->mightHaveTerminator()) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToEnd(cleanupBlock);
+ builder.create<cir::YieldOp>(localScope->endLoc);
+ }
+ }
if (localScope->depth == 0) {
// Reached the end of the function.
if (returnBlock != nullptr) {
- if (returnBlock->getUses().empty())
+ if (returnBlock->getUses().empty()) {
returnBlock->erase();
- else {
+ } else {
+ // Thread return block via cleanup block.
+ if (cleanupBlock) {
+ for (mlir::BlockOperand &blockUse : returnBlock->getUses()) {
+ cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
+ brOp.setSuccessor(cleanupBlock);
+ }
+ }
+
builder.create<cir::BrOp>(*returnLoc, returnBlock);
return;
}
@@ -268,13 +290,50 @@ void CIRGenFunction::LexicalScope::cleanup() {
emitImplicitReturn();
return;
}
- // Reached the end of a non-function scope. Some scopes, such as those
- // used with the ?: operator, can return a value.
- if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
+
+    // End of any local scope != function.
+    // Ternary ops have to deal with matching arms for yielding types, and
+    // since they return a value they must do their own cir.yield insertion.
+ if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
!retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
: builder.create<cir::YieldOp>(localScope->endLoc, retVal);
}
+ };
+
+ // If a cleanup block has been created at some point, branch to it
+ // and set the insertion point to continue at the cleanup block.
+ // Terminators are then inserted either in the cleanup block or
+ // inline in this current block.
+ mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
+ if (cleanupBlock)
+ insertCleanupAndLeave(cleanupBlock);
+
+ // Now deal with any pending block wrap up like implicit end of
+ // scope.
+
+ mlir::Block *curBlock = builder.getBlock();
+ if (isGlobalInit() && !curBlock)
+ return;
+ if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
+ return;
+
+ // Get rid of any empty block at the end of the scope.
+ bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
+ if (!entryBlock && curBlock->empty()) {
+ curBlock->erase();
+ if (returnBlock != nullptr && returnBlock->getUses().empty())
+ returnBlock->erase();
+ return;
}
+
+ // If there's a cleanup block, branch to it, nothing else to do.
+ if (cleanupBlock) {
+ builder.create<cir::BrOp>(curBlock->back().getLoc(), cleanupBlock);
+ return;
+ }
+
+ // No pre-existent cleanup block, emit cleanup code and yield/return.
+ insertCleanupAndLeave(curBlock);
}
cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
@@ -408,7 +467,19 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
}
}
-void CIRGenFunction::finishFunction(SourceLocation endLoc) {}
+void CIRGenFunction::finishFunction(SourceLocation endLoc) {
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block or return
+ // edges will be *really* confused.
+ // TODO(cir): Use prologueCleanupDepth here.
+ bool hasCleanups = ehStack.getStackDepth() != currentCleanupStackDepth;
+ if (hasCleanups) {
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ // FIXME(cir): should we clearInsertionPoint? breaks many testcases
+ popCleanupBlocks(currentCleanupStackDepth);
+ }
+}
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
auto result = mlir::LogicalResult::success();
@@ -808,4 +879,48 @@ bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
return true;
}
+/// Computes the length of an array in elements, as well as the base
+/// element type and a properly-typed first element pointer.
+mlir::Value
+CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
+ QualType &baseType, Address &addr) {
+ const clang::ArrayType *arrayType = origArrayType;
+
+ // If it's a VLA, we have to load the stored size. Note that
+ // this is the size of the VLA in bytes, not its size in elements.
+ if (isa<VariableArrayType>(arrayType)) {
+ assert(cir::MissingFeatures::vlas());
+ cgm.errorNYI(*currSrcLoc, "VLAs");
+ return builder.getConstInt(*currSrcLoc, SizeTy, 0);
+ }
+
+ uint64_t countFromCLAs = 1;
+ QualType eltType;
+
+ auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
+
+ while (cirArrayType) {
+ assert(isa<ConstantArrayType>(arrayType));
+ countFromCLAs *= cirArrayType.getSize();
+ eltType = arrayType->getElementType();
+
+ cirArrayType =
+ mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
+
+ arrayType = getContext().getAsArrayType(arrayType->getElementType());
+ assert((!cirArrayType || arrayType) &&
+ "CIR and Clang types are out-of-sync");
+ }
+
+ if (arrayType) {
+ // From this point onwards, the Clang array type has been emitted
+ // as some other type (probably a packed struct). Compute the array
+ // size, and just emit the 'begin' expression as a bitcast.
+ cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
+ }
+
+ baseType = eltType;
+ return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
+}
+
} // namespace clang::CIRGen
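
Worked example for emitArrayLength with nested constant arrays (the VLA path
above is still NYI):

    int m[2][3];
    // addr.getElementType() is !cir.array<!cir.array<!s32i x 3> x 2>;
    // the loop multiplies countFromCLAs = 2 * 3 = 6, sets baseType to int,
    // and returns a constant 6 of type SizeTy.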
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 2aceeef..4891c74 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -18,6 +18,7 @@
#include "CIRGenModule.h"
#include "CIRGenTypeCache.h"
#include "CIRGenValue.h"
+#include "EHScopeStack.h"
#include "Address.h"
@@ -61,6 +62,9 @@ public:
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
+ /// Tracks function scope overall cleanup handling.
+ EHScopeStack ehStack;
+
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
ImplicitParamDecl *cxxabiThisDecl = nullptr;
@@ -595,14 +599,65 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// Takes the old cleanup stack size and emits the cleanup blocks
+ /// that have been added.
+ void popCleanupBlocks(size_t oldCleanupStackDepth);
+ void popCleanupBlock();
+
+ /// Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
+ size_t cleanupStackDepth, oldCleanupStackDepth;
+
+ protected:
+ bool performCleanup;
+
+ private:
+ RunCleanupsScope(const RunCleanupsScope &) = delete;
+ void operator=(const RunCleanupsScope &) = delete;
+
+ protected:
+ CIRGenFunction &cgf;
+
+ /// Enter a new cleanup scope.
+ explicit RunCleanupsScope(CIRGenFunction &cgf)
+ : performCleanup(true), cgf(cgf) {
+ cleanupStackDepth = cgf.ehStack.getStackDepth();
+ oldCleanupStackDepth = cgf.currentCleanupStackDepth;
+ cgf.currentCleanupStackDepth = cleanupStackDepth;
+ }
+
+ /// Exit this cleanup scope, emitting any accumulated cleanups.
+ ~RunCleanupsScope() {
+ if (performCleanup)
+ forceCleanup();
+ }
+
+ /// Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void forceCleanup() {
+ assert(performCleanup && "Already forced cleanup");
+ {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ cgf.popCleanupBlocks(cleanupStackDepth);
+ performCleanup = false;
+ cgf.currentCleanupStackDepth = oldCleanupStackDepth;
+ }
+ }
+ };
+
+ // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
+ size_t currentCleanupStackDepth;
+
+public:
/// Represents a scope, including function bodies, compound statements, and
/// the substatements of if/while/do/for/switch/try statements. This class
/// handles any automatic cleanup, along with the return value.
- struct LexicalScope {
+ struct LexicalScope : public RunCleanupsScope {
private:
- // TODO(CIR): This will live in the base class RunCleanupScope once that
- // class is upstreamed.
- CIRGenFunction &cgf;
+ // Block containing cleanup code for things initialized in this
+ // lexical context (scope).
+ mlir::Block *cleanupBlock = nullptr;
// Points to the scope entry block. This is useful, for instance, for
// helping to insert allocas before finalizing any recursive CodeGen from
@@ -632,8 +687,8 @@ public:
unsigned depth = 0;
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
- : cgf(cgf), entryBlock(eb), parentScope(cgf.curLexScope), beginLoc(loc),
- endLoc(loc) {
+ : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
+ beginLoc(loc), endLoc(loc) {
assert(entryBlock && "LexicalScope requires an entry block");
cgf.curLexScope = this;
@@ -671,6 +726,27 @@ public:
void setAsSwitch() { scopeKind = Kind::Switch; }
void setAsTernary() { scopeKind = Kind::Ternary; }
+ // Lazy create cleanup block or return what's available.
+ mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
+ if (cleanupBlock)
+ return cleanupBlock;
+ cleanupBlock = createCleanupBlock(builder);
+ return cleanupBlock;
+ }
+
+ mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
+ return cleanupBlock;
+ }
+
+ mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
+    // Create the cleanup block but don't hook it up just yet.
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
+ : &cgf.curFn->getRegion(0);
+ cleanupBlock = builder.createBlock(r);
+ return cleanupBlock;
+ }
+
// ---
// Return handling.
// ---
@@ -721,6 +797,12 @@ public:
LexicalScope *curLexScope = nullptr;
+ typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
+
+ static Destroyer destroyCXXObject;
+
+ Destroyer *getDestroyer(clang::QualType::DestructionKind kind);
+
/// ----------------------
/// CIR emit functions
/// ----------------------
@@ -766,6 +848,8 @@ public:
/// even if no aggregate location is provided.
RValue emitAnyExprToTemp(const clang::Expr *e);
+ mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
+ QualType &baseType, Address &addr);
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
Address emitArrayToPointerDecay(const Expr *array);
@@ -779,6 +863,8 @@ public:
void emitAutoVarCleanups(const AutoVarEmission &emission);
void emitAutoVarInit(const AutoVarEmission &emission);
+ void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+ clang::QualType::DestructionKind dtorKind);
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
CXXCtorInitializer *baseInit);
@@ -836,6 +922,9 @@ public:
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e);
void emitConstructorBody(FunctionArgList &args);
+
+ void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
+
void emitDestructorBody(FunctionArgList &args);
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
@@ -843,6 +932,16 @@ public:
void emitCXXConstructExpr(const clang::CXXConstructExpr *e,
AggValueSlot dest);
+ void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ const clang::ArrayType *arrayType,
+ Address arrayBegin, const CXXConstructExpr *e,
+ bool newPointerIsChecked,
+ bool zeroInitialize = false);
+ void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ mlir::Value numElements, Address arrayBase,
+ const CXXConstructExpr *e,
+ bool newPointerIsChecked,
+ bool zeroInitialize);
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
clang::CXXCtorType type, bool forVirtualBase,
bool delegating, AggValueSlot thisAVS,
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 6577f5f..e5e4c68 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -113,8 +113,6 @@ static StructorCIRGen getCIRGenToUse(CIRGenModule &cgm,
GlobalDecl aliasDecl;
if (const auto *dd = dyn_cast<CXXDestructorDecl>(md)) {
- // The assignment is correct here, but other support for this is NYI.
- cgm.errorNYI(md->getSourceRange(), "getCIRGenToUse: dtor");
aliasDecl = GlobalDecl(dd, Dtor_Complete);
} else {
const auto *cd = cast<CXXConstructorDecl>(md);
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 9193f6f..21bee33 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -409,7 +409,10 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
}
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
+ // This should emit a branch through the cleanup block if one exists.
builder.create<cir::BrOp>(loc, retBlock);
+ if (ehStack.getStackDepth() != currentCleanupStackDepth)
+ cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
builder.createBlock(builder.getBlock()->getParent());
return mlir::success();
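
An input that currently trips the new errorNYI guard above (illustrative):

    struct D { ~D(); };
    int f() {
      D d;
      return 0;  // d's cleanup is still on ehStack at the return point, so
                 // CIRGen reports "return with cleanup stack"
    }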
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index 03ea60c..ca3a329 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -11,6 +11,7 @@ add_clang_library(clangCIR
CIRGenBuilder.cpp
CIRGenCall.cpp
CIRGenClass.cpp
+ CIRGenCleanup.cpp
CIRGenCXX.cpp
CIRGenCXXABI.cpp
CIRGenCXXExpr.cpp
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
new file mode 100644
index 0000000..22750ac
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -0,0 +1,99 @@
+//===-- EHScopeStack.h - Stack for cleanup CIR generation -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes should be the minimum interface required for other parts of
+// CIR CodeGen to emit cleanups. The implementation is in CIRGenCleanup.cpp and
+// other implementation details that are not widely needed are in
+// CIRGenCleanup.h.
+//
+// TODO(cir): this header should be shared between LLVM and CIR codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang::CIRGen {
+
+class CIRGenFunction;
+
+enum CleanupKind : unsigned {
+ /// Denotes a cleanup that should run when a scope is exited using exceptional
+  /// control flow (a throw statement leading to stack unwinding, etc.).
+ EHCleanup = 0x1,
+
+ /// Denotes a cleanup that should run when a scope is exited using normal
+ /// control flow (falling off the end of the scope, return, goto, ...).
+ NormalCleanup = 0x2,
+
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ LifetimeMarker = 0x8,
+ NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
+
+ public:
+ Cleanup(const Cleanup &) = default;
+ Cleanup(Cleanup &&) {}
+ Cleanup() = default;
+
+ virtual ~Cleanup() = default;
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+    // TODO(cir): pass the cleanup flags, as classic codegen's Emit() does.
+ virtual void emit(CIRGenFunction &cgf) = 0;
+ };
+
+ // Classic codegen has a finely tuned custom allocator and a complex stack
+ // management scheme. We'll probably eventually want to find a way to share
+ // that implementation. For now, we will use a very simplified implementation
+ // to get cleanups working.
+ llvm::SmallVector<std::unique_ptr<Cleanup>, 8> cleanupStack;
+
+private:
+  /// The CGF this stack belongs to.
+ CIRGenFunction *cgf = nullptr;
+
+public:
+ EHScopeStack() = default;
+ ~EHScopeStack() = default;
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
+ cleanupStack.push_back(std::make_unique<T>(a...));
+ }
+
+ void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
+
+ size_t getStackDepth() const { return cleanupStack.size(); }
+};
+
+} // namespace clang::CIRGen
+
+#endif // CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
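
Usage sketch for pushCleanup, mirroring the DestroyObject cleanup added in
CIRGenDecl.cpp above (T must derive from EHScopeStack::Cleanup; constructor
arguments are forwarded):

    ehStack.pushCleanup<DestroyObject>(NormalAndEHCleanup, addr, type,
                                       destroyer);

Note that this simplified stack does not yet consult the CleanupKind; classic
codegen uses it to build separate normal and EH cleanup paths.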
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index f0416b6..cd77166 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -489,6 +489,104 @@ LogicalResult cir::CastOp::verify() {
return emitOpError() << "requires two types differ in addrspace only";
return success();
}
+ case cir::CastKind::float_to_complex: {
+ if (!mlir::isa<cir::FPTypeInterface>(srcType))
+ return emitOpError() << "requires !cir.float type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy)
+ return emitOpError() << "requires !cir.complex type for result";
+ if (srcType != resComplexTy.getElementType())
+ return emitOpError() << "requires source type match result element type";
+ return success();
+ }
+ case cir::CastKind::int_to_complex: {
+ if (!mlir::isa<cir::IntType>(srcType))
+ return emitOpError() << "requires !cir.int type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy)
+ return emitOpError() << "requires !cir.complex type for result";
+ if (srcType != resComplexTy.getElementType())
+ return emitOpError() << "requires source type match result element type";
+ return success();
+ }
+ case cir::CastKind::float_complex_to_real: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy)
+ return emitOpError() << "requires !cir.complex type for source";
+ if (!mlir::isa<cir::FPTypeInterface>(resType))
+ return emitOpError() << "requires !cir.float type for result";
+ if (srcComplexTy.getElementType() != resType)
+ return emitOpError() << "requires source element type match result type";
+ return success();
+ }
+ case cir::CastKind::int_complex_to_real: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy)
+ return emitOpError() << "requires !cir.complex type for source";
+ if (!mlir::isa<cir::IntType>(resType))
+ return emitOpError() << "requires !cir.int type for result";
+ if (srcComplexTy.getElementType() != resType)
+ return emitOpError() << "requires source element type match result type";
+ return success();
+ }
+ case cir::CastKind::float_complex_to_bool: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ if (!mlir::isa<cir::BoolType>(resType))
+ return emitOpError() << "requires !cir.bool type for result";
+ return success();
+ }
+ case cir::CastKind::int_complex_to_bool: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isIntegerComplex())
+      return emitOpError()
+             << "requires integer !cir.complex type for source";
+ if (!mlir::isa<cir::BoolType>(resType))
+ return emitOpError() << "requires !cir.bool type for result";
+ return success();
+ }
+ case cir::CastKind::float_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for result";
+ return success();
+ }
+ case cir::CastKind::float_complex_to_int_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for result";
+ return success();
+ }
+ case cir::CastKind::int_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for result";
+ return success();
+ }
+ case cir::CastKind::int_complex_to_float_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for result";
+ return success();
+ }
default:
llvm_unreachable("Unknown CastOp kind?");
}
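
Example of a cast accepted by the new float_to_complex verifier case (the CIR
assembly below is schematic and may not match the exact printed form):

    %1 = cir.cast(float_to_complex, %0 : !cir.float), !cir.complex<!cir.float>

The element types must line up: casting !cir.float to !cir.complex<!cir.double>
is rejected with "requires source type match result element type".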
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index 8f848c7..cef83ea 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -8,11 +8,14 @@
#include "PassDetail.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
#include "clang/CIR/Dialect/Passes.h"
+#include "clang/CIR/MissingFeatures.h"
+#include <iostream>
#include <memory>
using namespace mlir;
@@ -24,11 +27,106 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
void runOnOperation() override;
void runOnOp(mlir::Operation *op);
+ void lowerCastOp(cir::CastOp op);
void lowerUnaryOp(cir::UnaryOp op);
+ void lowerArrayCtor(ArrayCtor op);
+
+ ///
+ /// AST related
+ /// -----------
+
+ clang::ASTContext *astCtx;
+
+ void setASTContext(clang::ASTContext *c) { astCtx = c; }
};
} // namespace
+static mlir::Value lowerScalarToComplexCast(mlir::MLIRContext &ctx,
+ cir::CastOp op) {
+ cir::CIRBaseBuilderTy builder(ctx);
+ builder.setInsertionPoint(op);
+
+ mlir::Value src = op.getSrc();
+ mlir::Value imag = builder.getNullValue(src.getType(), op.getLoc());
+ return builder.createComplexCreate(op.getLoc(), src, imag);
+}
+
+static mlir::Value lowerComplexToScalarCast(mlir::MLIRContext &ctx,
+ cir::CastOp op,
+ cir::CastKind elemToBoolKind) {
+ cir::CIRBaseBuilderTy builder(ctx);
+ builder.setInsertionPoint(op);
+
+ mlir::Value src = op.getSrc();
+ if (!mlir::isa<cir::BoolType>(op.getType()))
+ return builder.createComplexReal(op.getLoc(), src);
+
+ // Complex cast to bool: (bool)(a+bi) => (bool)a || (bool)b
+ mlir::Value srcReal = builder.createComplexReal(op.getLoc(), src);
+ mlir::Value srcImag = builder.createComplexImag(op.getLoc(), src);
+
+ cir::BoolType boolTy = builder.getBoolTy();
+ mlir::Value srcRealToBool =
+ builder.createCast(op.getLoc(), elemToBoolKind, srcReal, boolTy);
+ mlir::Value srcImagToBool =
+ builder.createCast(op.getLoc(), elemToBoolKind, srcImag, boolTy);
+ return builder.createLogicalOr(op.getLoc(), srcRealToBool, srcImagToBool);
+}
+
+static mlir::Value lowerComplexToComplexCast(mlir::MLIRContext &ctx,
+ cir::CastOp op,
+ cir::CastKind scalarCastKind) {
+ CIRBaseBuilderTy builder(ctx);
+ builder.setInsertionPoint(op);
+
+ mlir::Value src = op.getSrc();
+ auto dstComplexElemTy =
+ mlir::cast<cir::ComplexType>(op.getType()).getElementType();
+
+ mlir::Value srcReal = builder.createComplexReal(op.getLoc(), src);
+ mlir::Value srcImag = builder.createComplexImag(op.getLoc(), src);
+
+ mlir::Value dstReal = builder.createCast(op.getLoc(), scalarCastKind, srcReal,
+ dstComplexElemTy);
+ mlir::Value dstImag = builder.createCast(op.getLoc(), scalarCastKind, srcImag,
+ dstComplexElemTy);
+ return builder.createComplexCreate(op.getLoc(), dstReal, dstImag);
+}
+
+void LoweringPreparePass::lowerCastOp(cir::CastOp op) {
+ mlir::MLIRContext &ctx = getContext();
+ mlir::Value loweredValue = [&]() -> mlir::Value {
+ switch (op.getKind()) {
+ case cir::CastKind::float_to_complex:
+ case cir::CastKind::int_to_complex:
+ return lowerScalarToComplexCast(ctx, op);
+ case cir::CastKind::float_complex_to_real:
+ case cir::CastKind::int_complex_to_real:
+ return lowerComplexToScalarCast(ctx, op, op.getKind());
+ case cir::CastKind::float_complex_to_bool:
+ return lowerComplexToScalarCast(ctx, op, cir::CastKind::float_to_bool);
+ case cir::CastKind::int_complex_to_bool:
+ return lowerComplexToScalarCast(ctx, op, cir::CastKind::int_to_bool);
+ case cir::CastKind::float_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::floating);
+ case cir::CastKind::float_complex_to_int_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::float_to_int);
+ case cir::CastKind::int_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::integral);
+ case cir::CastKind::int_complex_to_float_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::int_to_float);
+ default:
+ return nullptr;
+ }
+ }();
+
+ if (loweredValue) {
+ op.replaceAllUsesWith(loweredValue);
+ op.erase();
+ }
+}
+
void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
mlir::Type ty = op.getType();
if (!mlir::isa<cir::ComplexType>(ty))
@@ -71,8 +169,85 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
op.erase();
}
+static void lowerArrayDtorCtorIntoLoop(cir::CIRBaseBuilderTy &builder,
+ clang::ASTContext *astCtx,
+ mlir::Operation *op, mlir::Type eltTy,
+ mlir::Value arrayAddr,
+ uint64_t arrayLen) {
+ // Generate loop to call into ctor/dtor for every element.
+ mlir::Location loc = op->getLoc();
+
+ // TODO: instead of fixed integer size, create alias for PtrDiffTy and unify
+ // with CIRGen stuff.
+ const unsigned sizeTypeSize =
+ astCtx->getTypeSize(astCtx->getSignedSizeType());
+ auto ptrDiffTy =
+ cir::IntType::get(builder.getContext(), sizeTypeSize, /*isSigned=*/false);
+ mlir::Value numArrayElementsConst = builder.getUnsignedInt(loc, arrayLen, 64);
+
+ auto begin = builder.create<cir::CastOp>(
+ loc, eltTy, cir::CastKind::array_to_ptrdecay, arrayAddr);
+ mlir::Value end = builder.create<cir::PtrStrideOp>(loc, eltTy, begin,
+ numArrayElementsConst);
+
+ mlir::Value tmpAddr = builder.createAlloca(
+ loc, /*addr type*/ builder.getPointerTo(eltTy),
+ /*var type*/ eltTy, "__array_idx", builder.getAlignmentAttr(1));
+ builder.createStore(loc, begin, tmpAddr);
+
+ cir::DoWhileOp loop = builder.createDoWhile(
+ loc,
+ /*condBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto currentElement = b.create<cir::LoadOp>(loc, eltTy, tmpAddr);
+ mlir::Type boolTy = cir::BoolType::get(b.getContext());
+ auto cmp = builder.create<cir::CmpOp>(loc, boolTy, cir::CmpOpKind::ne,
+ currentElement, end);
+ builder.createCondition(cmp);
+ },
+ /*bodyBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto currentElement = b.create<cir::LoadOp>(loc, eltTy, tmpAddr);
+
+ cir::CallOp ctorCall;
+ op->walk([&](cir::CallOp c) { ctorCall = c; });
+ assert(ctorCall && "expected ctor call");
+
+ auto one = builder.create<cir::ConstantOp>(
+ loc, ptrDiffTy, cir::IntAttr::get(ptrDiffTy, 1));
+
+ ctorCall->moveAfter(one);
+ ctorCall->setOperand(0, currentElement);
+
+      // Advance the pointer and store it back to the temporary variable.
+ auto nextElement =
+ builder.create<cir::PtrStrideOp>(loc, eltTy, currentElement, one);
+ builder.createStore(loc, nextElement, tmpAddr);
+ builder.createYield(loc);
+ });
+
+ op->replaceAllUsesWith(loop);
+ op->erase();
+}
+
+void LoweringPreparePass::lowerArrayCtor(cir::ArrayCtor op) {
+ cir::CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op.getOperation());
+
+ mlir::Type eltTy = op->getRegion(0).getArgument(0).getType();
+ assert(!cir::MissingFeatures::vlas());
+ auto arrayLen =
+ mlir::cast<cir::ArrayType>(op.getAddr().getType().getPointee()).getSize();
+ lowerArrayDtorCtorIntoLoop(builder, astCtx, op, eltTy, op.getAddr(),
+ arrayLen);
+}
+
void LoweringPreparePass::runOnOp(mlir::Operation *op) {
- if (auto unary = dyn_cast<cir::UnaryOp>(op))
+  if (auto arrayCtor = mlir::dyn_cast<cir::ArrayCtor>(op))
+ lowerArrayCtor(arrayCtor);
+ else if (auto cast = mlir::dyn_cast<cir::CastOp>(op))
+ lowerCastOp(cast);
+ else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op))
lowerUnaryOp(unary);
}
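For reference, the loop lowerArrayDtorCtorIntoLoop materializes corresponds
roughly to the following C++ (a sketch only: Elem and construct are
hypothetical stand-ins for the element type and the rewritten ctor call, and
the do-while assumes a nonzero array length):

    struct Elem {};
    void construct(Elem *) { /* the ctor call moved into the loop body */ }

    void arrayCtorLoop(Elem *arrayAddr, unsigned long n) {
      Elem *cur = arrayAddr;      // cir.cast(array_to_ptrdecay)
      Elem *end = arrayAddr + n;  // cir.ptr_stride by the element count
      do {
        construct(cur);           // call rewritten to take the current element
        ++cur;                    // cir.ptr_stride by 1, stored to __array_idx
      } while (cur != end);       // cir.cmp(ne) + cir.condition
    }
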
@@ -82,7 +257,7 @@ void LoweringPreparePass::runOnOperation() {
llvm::SmallVector<mlir::Operation *> opsToTransform;
op->walk([&](mlir::Operation *op) {
- if (mlir::isa<cir::UnaryOp>(op))
+ if (mlir::isa<cir::ArrayCtor, cir::CastOp, cir::UnaryOp>(op))
opsToTransform.push_back(op);
});
@@ -93,3 +268,10 @@ void LoweringPreparePass::runOnOperation() {
std::unique_ptr<Pass> mlir::createLoweringPreparePass() {
return std::make_unique<LoweringPreparePass>();
}
+
+std::unique_ptr<Pass>
+mlir::createLoweringPreparePass(clang::ASTContext *astCtx) {
+ auto pass = std::make_unique<LoweringPreparePass>();
+ pass->setASTContext(astCtx);
+ return std::move(pass);
+}
diff --git a/clang/lib/CIR/Lowering/CIRPasses.cpp b/clang/lib/CIR/Lowering/CIRPasses.cpp
index 5607abc..bb9781b 100644
--- a/clang/lib/CIR/Lowering/CIRPasses.cpp
+++ b/clang/lib/CIR/Lowering/CIRPasses.cpp
@@ -31,7 +31,7 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule,
if (enableCIRSimplify)
pm.addPass(mlir::createCIRSimplifyPass());
- pm.addPass(mlir::createLoweringPreparePass());
+ pm.addPass(mlir::createLoweringPreparePass(&astContext));
pm.enableVerifier(enableVerifier);
(void)mlir::applyPassManagerCLOptions(pm);
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 0bceece..d9bd443 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -2852,20 +2852,28 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
- // Depending on the ABI, this may be either a byval or a dead_on_return
- // argument.
- if (AI.getIndirectByVal()) {
- Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
- } else {
- // Add dead_on_return when the object's lifetime ends in the callee.
- // This includes trivially-destructible objects, as well as objects
- // whose destruction / clean-up is carried out within the callee (e.g.,
- // Obj-C ARC-managed structs, MSVC callee-destroyed objects).
- if (!ParamType.isDestructedType() || !ParamType->isRecordType() ||
- ParamType->castAs<RecordType>()
- ->getDecl()
- ->isParamDestroyedInCallee())
- Attrs.addAttribute(llvm::Attribute::DeadOnReturn);
+ // HLSL out and inout parameters must not be marked with ByVal or
+ // DeadOnReturn attributes because stores to these parameters by the
+ // callee are visible to the caller.
+ if (auto ParamABI = FI.getExtParameterInfo(ArgNo).getABI();
+ ParamABI != ParameterABI::HLSLOut &&
+ ParamABI != ParameterABI::HLSLInOut) {
+
+ // Depending on the ABI, this may be either a byval or a dead_on_return
+ // argument.
+ if (AI.getIndirectByVal()) {
+ Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
+ } else {
+ // Add dead_on_return when the object's lifetime ends in the callee.
+ // This includes trivially-destructible objects, as well as objects
+ // whose destruction / clean-up is carried out within the callee
+ // (e.g., Obj-C ARC-managed structs, MSVC callee-destroyed objects).
+ if (!ParamType.isDestructedType() || !ParamType->isRecordType() ||
+ ParamType->castAs<RecordType>()
+ ->getDecl()
+ ->isParamDestroyedInCallee())
+ Attrs.addAttribute(llvm::Attribute::DeadOnReturn);
+ }
}
auto *Decl = ParamType->getAsRecordDecl();
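Why the exclusion matters, sketched in C++ (hypothetical names; HLSL out and
inout parameters lower to an indirect temporary whose final value the caller
copies back):

    // If the indirect argument were marked dead_on_return, the callee's
    // final store could be dropped, breaking the caller's copy-back.
    void calleeInout(int *tmp) { *tmp = 42; }  // store must stay visible

    void caller() {
      int x = 0;
      int tmp = x;        // copy-in
      calleeInout(&tmp);  // callee writes through the indirect argument
      x = tmp;            // copy-back observes the store
    }
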
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 359e30c..b8238a4 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -2146,30 +2146,9 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
return;
}
- // We might be deleting a pointer to array. If so, GEP down to the
- // first non-array element.
- // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
- if (DeleteTy->isConstantArrayType()) {
- llvm::Value *Zero = Builder.getInt32(0);
- SmallVector<llvm::Value*,8> GEP;
-
- GEP.push_back(Zero); // point at the outermost array
-
- // For each layer of array type we're pointing at:
- while (const ConstantArrayType *Arr
- = getContext().getAsConstantArrayType(DeleteTy)) {
- // 1. Unpeel the array type.
- DeleteTy = Arr->getElementType();
-
- // 2. GEP to the first element of the array.
- GEP.push_back(Zero);
- }
-
- Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, ConvertTypeForMem(DeleteTy),
- Ptr.getAlignment(), "del.first");
- }
-
- assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
+ // We might be deleting a pointer to array.
+ DeleteTy = getContext().getBaseElementType(DeleteTy);
+ Ptr = Ptr.withElementType(ConvertTypeForMem(DeleteTy));
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
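The replacement leans on ASTContext::getBaseElementType, which peels every
constant-array layer in one step; its effect mirrors std::remove_all_extents,
as this compile-time sketch shows:

    #include <type_traits>

    // A(*)[3][7] points at int[3][7]; the base element type is int, so no
    // per-layer GEP walk is needed anymore.
    static_assert(std::is_same_v<std::remove_all_extents_t<int[3][7]>, int>);
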
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index ce2dd4d..91237cf 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -7080,6 +7080,110 @@ private:
return ConstLength.getSExtValue() != 1;
}
+ /// A helper class to copy structures with overlapped elements, i.e. those
+ /// which have mappings of both "s" and "s.mem". Consecutive elements that
+ /// are not explicitly copied have mapping nodes synthesized for them,
+ /// taking care to avoid generating zero-sized copies.
+ class CopyOverlappedEntryGaps {
+ CodeGenFunction &CGF;
+ MapCombinedInfoTy &CombinedInfo;
+ OpenMPOffloadMappingFlags Flags = OpenMPOffloadMappingFlags::OMP_MAP_NONE;
+ const ValueDecl *MapDecl = nullptr;
+ const Expr *MapExpr = nullptr;
+ Address BP = Address::invalid();
+ bool IsNonContiguous = false;
+ uint64_t DimSize = 0;
+    // These members track the position as the struct's fields are iterated
+    // over (in order of increasing element address).
+ const RecordDecl *LastParent = nullptr;
+ uint64_t Cursor = 0;
+ unsigned LastIndex = -1u;
+ Address LB = Address::invalid();
+
+ public:
+ CopyOverlappedEntryGaps(CodeGenFunction &CGF,
+ MapCombinedInfoTy &CombinedInfo,
+ OpenMPOffloadMappingFlags Flags,
+ const ValueDecl *MapDecl, const Expr *MapExpr,
+ Address BP, Address LB, bool IsNonContiguous,
+ uint64_t DimSize)
+ : CGF(CGF), CombinedInfo(CombinedInfo), Flags(Flags), MapDecl(MapDecl),
+ MapExpr(MapExpr), BP(BP), IsNonContiguous(IsNonContiguous),
+ DimSize(DimSize), LB(LB) {}
+
+ void processField(
+ const OMPClauseMappableExprCommon::MappableComponent &MC,
+ const FieldDecl *FD,
+ llvm::function_ref<LValue(CodeGenFunction &, const MemberExpr *)>
+ EmitMemberExprBase) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+ uint64_t FieldOffset = RL.getFieldOffset(FD->getFieldIndex());
+ uint64_t FieldSize =
+ CGF.getContext().getTypeSize(FD->getType().getCanonicalType());
+ Address ComponentLB = Address::invalid();
+
+ if (FD->getType()->isLValueReferenceType()) {
+ const auto *ME = cast<MemberExpr>(MC.getAssociatedExpression());
+ LValue BaseLVal = EmitMemberExprBase(CGF, ME);
+ ComponentLB =
+ CGF.EmitLValueForFieldInitialization(BaseLVal, FD).getAddress();
+ } else {
+ ComponentLB =
+ CGF.EmitOMPSharedLValue(MC.getAssociatedExpression()).getAddress();
+ }
+
+ if (!LastParent)
+ LastParent = RD;
+ if (FD->getParent() == LastParent) {
+ if (FD->getFieldIndex() != LastIndex + 1)
+ copyUntilField(FD, ComponentLB);
+ } else {
+ LastParent = FD->getParent();
+ if (((int64_t)FieldOffset - (int64_t)Cursor) > 0)
+ copyUntilField(FD, ComponentLB);
+ }
+ Cursor = FieldOffset + FieldSize;
+ LastIndex = FD->getFieldIndex();
+ LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
+ }
+
+ void copyUntilField(const FieldDecl *FD, Address ComponentLB) {
+ llvm::Value *ComponentLBPtr = ComponentLB.emitRawPointer(CGF);
+ llvm::Value *LBPtr = LB.emitRawPointer(CGF);
+ llvm::Value *Size =
+ CGF.Builder.CreatePtrDiff(CGF.Int8Ty, ComponentLBPtr, LBPtr);
+ copySizedChunk(LBPtr, Size);
+ }
+
+ void copyUntilEnd(Address HB) {
+ if (LastParent) {
+ const ASTRecordLayout &RL =
+ CGF.getContext().getASTRecordLayout(LastParent);
+ if ((uint64_t)CGF.getContext().toBits(RL.getSize()) <= Cursor)
+ return;
+ }
+ llvm::Value *LBPtr = LB.emitRawPointer(CGF);
+ llvm::Value *Size = CGF.Builder.CreatePtrDiff(
+ CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).emitRawPointer(CGF),
+ LBPtr);
+ copySizedChunk(LBPtr, Size);
+ }
+
+ void copySizedChunk(llvm::Value *Base, llvm::Value *Size) {
+ CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
+ CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
+ CombinedInfo.Pointers.push_back(Base);
+ CombinedInfo.Sizes.push_back(
+ CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
+ CombinedInfo.Types.push_back(Flags);
+ CombinedInfo.Mappers.push_back(nullptr);
+ CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize : 1);
+ }
+ };
+
/// Generate the base pointers, section pointers, sizes, map type bits, and
/// user-defined mappers (all included in \a CombinedInfo) for the provided
/// map type, map or motion modifiers, and expression components.
@@ -7570,63 +7674,22 @@ private:
getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
/*AddPtrFlag=*/false,
/*AddIsTargetParamFlag=*/false, IsNonContiguous);
- llvm::Value *Size = nullptr;
+ CopyOverlappedEntryGaps CopyGaps(CGF, CombinedInfo, Flags, MapDecl,
+ MapExpr, BP, LB, IsNonContiguous,
+ DimSize);
// Do bitcopy of all non-overlapped structure elements.
for (OMPClauseMappableExprCommon::MappableExprComponentListRef
Component : OverlappedElements) {
- Address ComponentLB = Address::invalid();
for (const OMPClauseMappableExprCommon::MappableComponent &MC :
Component) {
if (const ValueDecl *VD = MC.getAssociatedDeclaration()) {
- const auto *FD = dyn_cast<FieldDecl>(VD);
- if (FD && FD->getType()->isLValueReferenceType()) {
- const auto *ME =
- cast<MemberExpr>(MC.getAssociatedExpression());
- LValue BaseLVal = EmitMemberExprBase(CGF, ME);
- ComponentLB =
- CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
- .getAddress();
- } else {
- ComponentLB =
- CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
- .getAddress();
+ if (const auto *FD = dyn_cast<FieldDecl>(VD)) {
+ CopyGaps.processField(MC, FD, EmitMemberExprBase);
}
- llvm::Value *ComponentLBPtr = ComponentLB.emitRawPointer(CGF);
- llvm::Value *LBPtr = LB.emitRawPointer(CGF);
- Size = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, ComponentLBPtr,
- LBPtr);
- break;
}
}
- assert(Size && "Failed to determine structure size");
- CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
- CombinedInfo.DevicePtrDecls.push_back(nullptr);
- CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
- CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
- Size, CGF.Int64Ty, /*isSigned=*/true));
- CombinedInfo.Types.push_back(Flags);
- CombinedInfo.Mappers.push_back(nullptr);
- CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
- : 1);
- LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
}
- CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
- CombinedInfo.DevicePtrDecls.push_back(nullptr);
- CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
- llvm::Value *LBPtr = LB.emitRawPointer(CGF);
- Size = CGF.Builder.CreatePtrDiff(
- CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).emitRawPointer(CGF),
- LBPtr);
- CombinedInfo.Sizes.push_back(
- CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
- CombinedInfo.Types.push_back(Flags);
- CombinedInfo.Mappers.push_back(nullptr);
- CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
- : 1);
+ CopyGaps.copyUntilEnd(HB);
break;
}
llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
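The intent of CopyOverlappedEntryGaps, reduced to a C++ sketch (hypothetical
struct; the real code synthesizes runtime map entries rather than calling
memcpy): given map(s) overlapped by map(s.b), the struct is copied in chunks
around b, and zero-sized chunks are skipped.

    #include <cstddef>
    #include <cstring>

    struct S { int a; int b; int c; };

    void copyAroundB(const S &src, S &dst) {
      // Chunk 1: from the start of S up to the overlapped member b.
      std::memcpy(&dst, &src, offsetof(S, b));
      // Chunk 2: from just past b to the end of S; skipped if empty.
      std::size_t after = offsetof(S, b) + sizeof src.b;
      if (sizeof(S) > after)
        std::memcpy(reinterpret_cast<char *>(&dst) + after,
                    reinterpret_cast<const char *>(&src) + after,
                    sizeof(S) - after);
    }
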
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index 7dccf82..70f510a 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -274,7 +274,7 @@ llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
const CallExpr *E) {
- constexpr const char *Tag = "amdgpu-as";
+ constexpr const char *Tag = "amdgpu-synchronize-as";
LLVMContext &Ctx = Inst->getContext();
SmallVector<MMRAMetadata::TagT, 3> MMRAs;
@@ -633,6 +633,41 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
return Builder.CreateCall(F, {Addr});
}
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128: {
+
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
+ IID = Intrinsic::amdgcn_global_load_monitor_b32;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
+ IID = Intrinsic::amdgcn_global_load_monitor_b64;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
+ IID = Intrinsic::amdgcn_global_load_monitor_b128;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
+ IID = Intrinsic::amdgcn_flat_load_monitor_b32;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
+ IID = Intrinsic::amdgcn_flat_load_monitor_b64;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
+ IID = Intrinsic::amdgcn_flat_load_monitor_b128;
+ break;
+ }
+
+ llvm::Type *LoadTy = ConvertType(E->getType());
+ llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+ llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
+ return Builder.CreateCall(F, {Addr, Val});
+ }
case AMDGPU::BI__builtin_amdgcn_load_to_lds: {
// Should this have asan instrumentation?
return emitBuiltinWithOneOverloadedType<5>(*this, E,
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index e3d220d..853f694 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -209,8 +209,8 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
CCLogDiagnostics(false), CCGenDiagnostics(false),
CCPrintProcessStats(false), CCPrintInternalStats(false),
TargetTriple(TargetTriple), Saver(Alloc), PrependArg(nullptr),
- CheckInputsExist(true), ProbePrecompiled(true),
- SuppressMissingInputWarning(false) {
+ PreferredLinker(CLANG_DEFAULT_LINKER), CheckInputsExist(true),
+ ProbePrecompiled(true), SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
this->VFS = llvm::vfs::getRealFileSystem();
@@ -908,7 +908,7 @@ getSystemOffloadArchs(Compilation &C, Action::OffloadKind Kind) {
StringRef Program = C.getArgs().getLastArgValue(
options::OPT_offload_arch_tool_EQ, "offload-arch");
- SmallVector<std::string, 1> GPUArchs;
+ SmallVector<std::string> GPUArchs;
if (llvm::ErrorOr<std::string> Executable =
llvm::sys::findProgramByName(Program)) {
llvm::SmallVector<StringRef> Args{*Executable};
@@ -4886,7 +4886,13 @@ Action *Driver::BuildOffloadingActions(Compilation &C,
// Compiling HIP in device-only non-RDC mode requires linking each action
// individually.
for (Action *&A : DeviceActions) {
- if ((A->getType() != types::TY_Object &&
+      // Special handling for the HIP SPIR-V toolchain: it does not use the
+      // SPIR-V backend yet, so its output is not reported as an object.
+ bool IsAMDGCNSPIRV = A->getOffloadingToolChain() &&
+ A->getOffloadingToolChain()->getTriple().getOS() ==
+ llvm::Triple::OSType::AMDHSA &&
+ A->getOffloadingToolChain()->getTriple().isSPIRV();
+ if ((A->getType() != types::TY_Object && !IsAMDGCNSPIRV &&
A->getType() != types::TY_LTO_BC) ||
!HIPNoRDC || !offloadDeviceOnly())
continue;
@@ -4942,8 +4948,9 @@ Action *Driver::BuildOffloadingActions(Compilation &C,
// fatbinary for each translation unit, linking each input individually.
Action *FatbinAction =
C.MakeAction<LinkJobAction>(OffloadActions, types::TY_HIP_FATBIN);
- DDep.add(*FatbinAction, *C.getSingleOffloadToolChain<Action::OFK_HIP>(),
- nullptr, Action::OFK_HIP);
+ DDep.add(*FatbinAction,
+ *C.getOffloadToolChains<Action::OFK_HIP>().first->second, nullptr,
+ Action::OFK_HIP);
} else {
// Package all the offloading actions into a single output that can be
// embedded in the host and linked.
@@ -5098,7 +5105,10 @@ Action *Driver::ConstructPhaseAction(
false) ||
(Args.hasFlag(options::OPT_offload_new_driver,
options::OPT_no_offload_new_driver, false) &&
- !offloadDeviceOnly())) ||
+ (!offloadDeviceOnly() ||
+ (Input->getOffloadingToolChain() &&
+ TargetDeviceOffloadKind == Action::OFK_HIP &&
+ Input->getOffloadingToolChain()->getTriple().isSPIRV())))) ||
TargetDeviceOffloadKind == Action::OFK_OpenMP))) {
types::ID Output =
Args.hasArg(options::OPT_S) &&
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 47f93fa1..1d7dad0 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -1087,7 +1087,7 @@ std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD) const {
// Get -fuse-ld= first to prevent -Wunused-command-line-argument. -fuse-ld= is
// considered as the linker flavor, e.g. "bfd", "gold", or "lld".
const Arg* A = Args.getLastArg(options::OPT_fuse_ld_EQ);
- StringRef UseLinker = A ? A->getValue() : CLANG_DEFAULT_LINKER;
+ StringRef UseLinker = A ? A->getValue() : getDriver().getPreferredLinker();
// --ld-path= takes precedence over -fuse-ld= and specifies the executable
// name. -B, COMPILER_PATH and PATH and consulted if the value does not
diff --git a/clang/lib/Driver/ToolChains/Arch/Sparc.cpp b/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
index 3333135..94a94f1 100644
--- a/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
@@ -37,6 +37,13 @@ const char *sparc::getSparcAsmModeForCPU(StringRef Name,
.Case("niagara4", "-Av9d")
.Default(DefV9CPU);
} else {
+    // Solaris/SPARC defaults to V8+; everything else stays on plain V8.
+    const char *DefV8CPU = Triple.isOSSolaris() ? "-Av8plus" : "-Av8";
+
return llvm::StringSwitch<const char *>(Name)
.Case("v8", "-Av8")
.Case("supersparc", "-Av8")
@@ -72,7 +79,7 @@ const char *sparc::getSparcAsmModeForCPU(StringRef Name,
.Case("gr712rc", "-Aleon")
.Case("leon4", "-Aleon")
.Case("gr740", "-Aleon")
- .Default("-Av8");
+ .Default(DefV8CPU);
}
}
@@ -160,6 +167,8 @@ void sparc::getSparcTargetFeatures(const Driver &D, const llvm::Triple &Triple,
(Triple.getArch() == llvm::Triple::sparcv9) &&
(Triple.isOSLinux() || Triple.isOSFreeBSD() || Triple.isOSOpenBSD());
bool IsSparcV9BTarget = Triple.isOSSolaris();
+ bool IsSparcV8PlusTarget =
+ Triple.getArch() == llvm::Triple::sparc && Triple.isOSSolaris();
if (Arg *A = Args.getLastArg(options::OPT_mvis, options::OPT_mno_vis)) {
if (A->getOption().matches(options::OPT_mvis))
Features.push_back("+vis");
@@ -196,6 +205,8 @@ void sparc::getSparcTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (Arg *A = Args.getLastArg(options::OPT_mv8plus, options::OPT_mno_v8plus)) {
if (A->getOption().matches(options::OPT_mv8plus))
Features.push_back("+v8plus");
+ } else if (IsSparcV8PlusTarget) {
+ Features.push_back("+v8plus");
}
if (Args.hasArg(options::OPT_ffixed_g1))
diff --git a/clang/lib/Driver/ToolChains/MSVC.cpp b/clang/lib/Driver/ToolChains/MSVC.cpp
index 7d31eea..bb469ff 100644
--- a/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -279,8 +279,8 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddRunTimeLibs(TC, TC.getDriver(), CmdArgs, Args);
}
- StringRef Linker =
- Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER);
+ StringRef Linker = Args.getLastArgValue(options::OPT_fuse_ld_EQ,
+ TC.getDriver().getPreferredLinker());
if (Linker.empty())
Linker = "link";
// We need to translate 'lld' into 'lld-link'.
diff --git a/clang/lib/Driver/ToolChains/MinGW.cpp b/clang/lib/Driver/ToolChains/MinGW.cpp
index b2e36ae..6abd0c0 100644
--- a/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -548,7 +548,7 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(Base + "lib");
NativeLLVMSupport =
- Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER)
+ Args.getLastArgValue(options::OPT_fuse_ld_EQ, D.getPreferredLinker())
.equals_insensitive("lld");
}
diff --git a/clang/lib/Driver/ToolChains/Solaris.cpp b/clang/lib/Driver/ToolChains/Solaris.cpp
index a3574e1..02aa598 100644
--- a/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -39,7 +39,7 @@ void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
bool solaris::isLinkerGnuLd(const ToolChain &TC, const ArgList &Args) {
   // Only used if targeting Solaris.
const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ);
- StringRef UseLinker = A ? A->getValue() : CLANG_DEFAULT_LINKER;
+ StringRef UseLinker = A ? A->getValue() : TC.getDriver().getPreferredLinker();
return UseLinker == "bfd" || UseLinker == "gld";
}
@@ -52,7 +52,7 @@ static bool getPIE(const ArgList &Args, const ToolChain &TC) {
TC.isPIEDefault(Args));
}
-// FIXME: Need to handle CLANG_DEFAULT_LINKER here?
+// FIXME: Need to handle PreferredLinker here?
std::string solaris::Linker::getLinkerPath(const ArgList &Args) const {
const ToolChain &ToolChain = getToolChain();
if (const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
@@ -345,7 +345,7 @@ SanitizerMask Solaris::getSupportedSanitizers() const {
const char *Solaris::getDefaultLinker() const {
// FIXME: Only handle Solaris ld and GNU ld here.
- return llvm::StringSwitch<const char *>(CLANG_DEFAULT_LINKER)
+ return llvm::StringSwitch<const char *>(getDriver().getPreferredLinker())
.Cases("bfd", "gld", "/usr/gnu/bin/ld")
.Default("/usr/bin/ld");
}
diff --git a/clang/lib/Driver/ToolChains/UEFI.cpp b/clang/lib/Driver/ToolChains/UEFI.cpp
index ac6668e..2b41173 100644
--- a/clang/lib/Driver/ToolChains/UEFI.cpp
+++ b/clang/lib/Driver/ToolChains/UEFI.cpp
@@ -83,8 +83,8 @@ void tools::uefi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// This should ideally be handled by ToolChain::GetLinkerPath but we need
// to special case some linker paths. In the case of lld, we need to
// translate 'lld' into 'lld-link'.
- StringRef Linker =
- Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER);
+ StringRef Linker = Args.getLastArgValue(options::OPT_fuse_ld_EQ,
+ TC.getDriver().getPreferredLinker());
if (Linker.empty() || Linker == "lld")
Linker = "lld-link";
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index bf67f9e..9a10403 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -1725,7 +1725,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
}
if (Previous && (Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) ||
(Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) &&
- !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)))) {
+ !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr,
+ TT_CtorInitializerColon)))) {
CurrentState.NestedBlockInlined =
!Newline && hasNestedBlockInlined(Previous, Current, Style);
}
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 62feb3d..1cfa3d1 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -731,6 +731,7 @@ template <> struct MappingTraits<FormatStyle::SpaceBeforeParensCustom> {
IO.mapOptional("AfterFunctionDeclarationName",
Spacing.AfterFunctionDeclarationName);
IO.mapOptional("AfterIfMacros", Spacing.AfterIfMacros);
+ IO.mapOptional("AfterNot", Spacing.AfterNot);
IO.mapOptional("AfterOverloadedOperator", Spacing.AfterOverloadedOperator);
IO.mapOptional("AfterPlacementOperator", Spacing.AfterPlacementOperator);
IO.mapOptional("AfterRequiresInClause", Spacing.AfterRequiresInClause);
@@ -2643,13 +2644,14 @@ private:
for (FormatToken *Tok = Line->First; Tok && Tok->Next; Tok = Tok->Next) {
if (Tok->isNot(TT_PointerOrReference))
continue;
- // Don't treat space in `void foo() &&` as evidence.
+ // Don't treat space in `void foo() &&` or `void() &&` as evidence.
if (const auto *Prev = Tok->getPreviousNonComment()) {
if (Prev->is(tok::r_paren) && Prev->MatchingParen) {
if (const auto *Func =
Prev->MatchingParen->getPreviousNonComment()) {
if (Func->isOneOf(TT_FunctionDeclarationName, TT_StartOfName,
- TT_OverloadedOperator)) {
+ TT_OverloadedOperator) ||
+ Func->isTypeName(LangOpts)) {
continue;
}
}
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 581bfba..d28d2fd 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -5478,7 +5478,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.TokenText == "!")
return Style.SpaceAfterLogicalNot;
assert(Left.TokenText == "not");
- return Right.isOneOf(tok::coloncolon, TT_UnaryOperator);
+ return Right.isOneOf(tok::coloncolon, TT_UnaryOperator) ||
+ (Right.is(tok::l_paren) && Style.SpaceBeforeParensOptions.AfterNot);
}
// If the next token is a binary operator or a selector name, we have
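Input illustrating the new AfterNot sub-option (a sketch; with
SpaceBeforeParensOptions.AfterNot set, the alternative operator `not` keeps a
space before a following parenthesis):

    bool f(bool a, bool b) {
      return not (a and b);  // AfterNot: true  -> "not ("
      // With AfterNot: false the same input formats as "not(a and b)".
    }
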
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index 893ef02..e47caeb 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -5695,11 +5695,10 @@ Parser::DeclGroupPtrTy Parser::ParseTopLevelStmtDecl() {
Scope::CompoundStmtScope);
TopLevelStmtDecl *TLSD = Actions.ActOnStartTopLevelStmtDecl(getCurScope());
StmtResult R = ParseStatementOrDeclaration(Stmts, SubStmtCtx);
+ Actions.ActOnFinishTopLevelStmtDecl(TLSD, R.get());
if (!R.isUsable())
R = Actions.ActOnNullStmt(Tok.getLocation());
- Actions.ActOnFinishTopLevelStmtDecl(TLSD, R.get());
-
if (Tok.is(tok::annot_repl_input_end) &&
Tok.getAnnotationValue() != nullptr) {
ConsumeAnnotationToken();
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index 31392d1d..bc8841c 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -4940,9 +4940,8 @@ void Parser::ParseHLSLRootSignatureAttributeArgs(ParsedAttributes &Attrs) {
// signature string and construct the in-memory elements
if (!Found) {
// Invoke the root signature parser to construct the in-memory constructs
- SmallVector<hlsl::RootSignatureElement> RootElements;
- hlsl::RootSignatureParser Parser(getLangOpts().HLSLRootSigVer, RootElements,
- Signature, PP);
+ hlsl::RootSignatureParser Parser(getLangOpts().HLSLRootSigVer, Signature,
+ PP);
if (Parser.parse()) {
T.consumeClose();
return;
@@ -4950,7 +4949,7 @@ void Parser::ParseHLSLRootSignatureAttributeArgs(ParsedAttributes &Attrs) {
// Construct the declaration.
Actions.HLSL().ActOnFinishRootSignatureDecl(RootSignatureLoc, DeclIdent,
- RootElements);
+ Parser.getElements());
}
// Create the arg for the ParsedAttr
diff --git a/clang/lib/Parse/ParseHLSLRootSignature.cpp b/clang/lib/Parse/ParseHLSLRootSignature.cpp
index db9ed83..98dc458 100644
--- a/clang/lib/Parse/ParseHLSLRootSignature.cpp
+++ b/clang/lib/Parse/ParseHLSLRootSignature.cpp
@@ -27,11 +27,10 @@ static const TokenKind RootElementKeywords[] = {
};
RootSignatureParser::RootSignatureParser(
- llvm::dxbc::RootSignatureVersion Version,
- SmallVector<RootSignatureElement> &Elements, StringLiteral *Signature,
+ llvm::dxbc::RootSignatureVersion Version, StringLiteral *Signature,
Preprocessor &PP)
- : Version(Version), Elements(Elements), Signature(Signature),
- Lexer(Signature->getString()), PP(PP), CurToken(0) {}
+ : Version(Version), Signature(Signature), Lexer(Signature->getString()),
+ PP(PP), CurToken(0) {}
bool RootSignatureParser::parse() {
// Iterate as many RootSignatureElements as possible, until we hit the
diff --git a/clang/lib/Sema/SemaAvailability.cpp b/clang/lib/Sema/SemaAvailability.cpp
index 8c6a173..68a698f 100644
--- a/clang/lib/Sema/SemaAvailability.cpp
+++ b/clang/lib/Sema/SemaAvailability.cpp
@@ -547,6 +547,12 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
return;
}
case AR_Deprecated:
+    // Suppress -Wdeprecated-declarations in implicit functions.
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(S.getCurFunctionDecl());
+ FD && FD->isImplicit())
+ return;
+
if (ObjCPropertyAccess)
diag = diag::warn_property_method_deprecated;
else if (S.currentEvaluationContext().IsCaseExpr)
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 5205ca0b..044cf5c 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -588,6 +588,9 @@ static bool CheckConstraintSatisfaction(
return true;
for (const AssociatedConstraint &AC : AssociatedConstraints) {
+ if (AC.isNull())
+ return true;
+
Sema::ArgPackSubstIndexRAII _(S, AC.ArgPackSubstIndex);
ExprResult Res = calculateConstraintSatisfaction(
S, Template, TemplateIDRange.getBegin(), TemplateArgsLists,
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index fd22e01..d7420bd 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -3267,6 +3267,14 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
if (isa<UsedAttr>(I) || isa<RetainAttr>(I))
continue;
+ if (isa<InferredNoReturnAttr>(I)) {
+ if (auto *FD = dyn_cast<FunctionDecl>(New)) {
+ if (FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ continue; // Don't propagate inferred noreturn attributes to explicit
+ // specializations.
+ }
+ }
+
if (mergeDeclAttribute(*this, New, I, LocalAMK))
foundAny = true;
}
@@ -20573,7 +20581,8 @@ TopLevelStmtDecl *Sema::ActOnStartTopLevelStmtDecl(Scope *S) {
}
void Sema::ActOnFinishTopLevelStmtDecl(TopLevelStmtDecl *D, Stmt *Statement) {
- D->setStmt(Statement);
+ if (Statement)
+ D->setStmt(Statement);
PopCompoundScope();
PopFunctionScopeInfo();
PopDeclContext();
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 9a2950c..a4e8de4 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -1970,6 +1970,13 @@ void clang::inferNoReturnAttr(Sema &S, const Decl *D) {
if (!FD)
return;
+  // Skip explicit specializations here: they have user-provided definitions
+  // that may deliberately differ from the primary template. If an explicit
+  // specialization truly never returns, the user should mark it with
+  // [[noreturn]] explicitly.
+ if (FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return;
+
auto *NonConstFD = const_cast<FunctionDecl *>(FD);
DiagnosticsEngine &Diags = S.getDiagnostics();
if (Diags.isIgnored(diag::warn_falloff_nonvoid, FD->getLocation()) &&
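The scenario both of these guards address, as a sketch: the primary template
never returns, so an InferredNoReturnAttr may be attached to it, while the
explicit specialization returns normally and must not inherit or re-derive
that inference.

    template <class T> void alwaysThrows() { throw T{}; }  // inferred noreturn

    // Deliberately different user-provided definition; propagating the
    // inferred attribute here would be wrong.
    template <> void alwaysThrows<int>() { /* returns normally */ }
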
@@ -2034,7 +2041,8 @@ bool Sema::CheckAttrTarget(const ParsedAttr &AL) {
// Check whether the attribute is valid on the current target.
if (!AL.existsInTarget(Context.getTargetInfo())) {
if (AL.isRegularKeywordAttribute())
- Diag(AL.getLoc(), diag::err_keyword_not_supported_on_target);
+ Diag(AL.getLoc(), diag::err_keyword_not_supported_on_target)
+ << AL << AL.getRange();
else
DiagnoseUnknownAttribute(AL);
AL.setInvalid();
diff --git a/clang/lib/Sema/SemaOpenACCAtomic.cpp b/clang/lib/Sema/SemaOpenACCAtomic.cpp
index 9c8c8d1..a9319dc 100644
--- a/clang/lib/Sema/SemaOpenACCAtomic.cpp
+++ b/clang/lib/Sema/SemaOpenACCAtomic.cpp
@@ -576,6 +576,11 @@ class AtomicOperandChecker {
return AssocStmt;
}
+ const Expr *IgnoreBeforeCompare(const Expr *E) {
+ return E->IgnoreParenImpCasts()->IgnoreParenNoopCasts(
+ SemaRef.getASTContext());
+ }
+
bool CheckVarRefsSame(IDACInfo::ExprKindTy FirstKind, const Expr *FirstX,
IDACInfo::ExprKindTy SecondKind, const Expr *SecondX) {
llvm::FoldingSetNodeID First_ID, Second_ID;
@@ -648,8 +653,10 @@ class AtomicOperandChecker {
if (CheckOperandVariable(AssignRes->RHS, PD))
return getRecoveryExpr();
- if (CheckVarRefsSame(FirstExprResults.ExprKind, FirstExprResults.X_Var,
- IDACInfo::SimpleAssign, AssignRes->RHS))
+ if (CheckVarRefsSame(FirstExprResults.ExprKind,
+ IgnoreBeforeCompare(FirstExprResults.X_Var),
+ IDACInfo::SimpleAssign,
+ IgnoreBeforeCompare(AssignRes->RHS)))
return getRecoveryExpr();
break;
}
@@ -660,9 +667,10 @@ class AtomicOperandChecker {
if (SecondExprResults.Failed)
return getRecoveryExpr();
- if (CheckVarRefsSame(FirstExprResults.ExprKind, FirstExprResults.X_Var,
+ if (CheckVarRefsSame(FirstExprResults.ExprKind,
+ IgnoreBeforeCompare(FirstExprResults.X_Var),
SecondExprResults.ExprKind,
- SecondExprResults.X_Var))
+ IgnoreBeforeCompare(SecondExprResults.X_Var)))
return getRecoveryExpr();
break;
}
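The kind of code the stripping fixes, sketched below (OpenACC atomic capture
form; previously the parenthesized x failed the same-variable check):

    void capture(int &x, int &v) {
    #pragma acc atomic capture
      {
        v = (x);   // now compares equal to the unparenthesized x below
        x = x + 1;
      }
    }
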
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 4ecc9b0..2c5d97c 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -2829,7 +2829,7 @@ static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
continue;
}
for (Expr *Ref : RC->varlist()) {
- assert(Ref && "NULL expr in OpenMP nontemporal clause.");
+ assert(Ref && "NULL expr in OpenMP reduction clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = Ref;
@@ -7612,6 +7612,23 @@ void SemaOpenMP::ActOnOpenMPDeclareVariantDirective(
return;
}
+  // OpenMP 6.0 [9.6.2, adjust_args clause, Restrictions (page 332, lines
+  // 31-33)]: If the `need_device_addr` adjust-op modifier is present, each
+  // list item that appears in the clause must refer to an argument in the
+  // declaration of the function variant that has a reference type.
+ if (getLangOpts().OpenMP >= 60) {
+ for (Expr *E : AdjustArgsNeedDeviceAddr) {
+ E = E->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (!VD->getType()->isReferenceType())
+ Diag(E->getExprLoc(),
+ diag::err_omp_non_by_ref_need_device_addr_modifier_argument);
+ }
+ }
+ }
+ }
+
auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit(
getASTContext(), VariantRef, &TI,
const_cast<Expr **>(AdjustArgsNothing.data()), AdjustArgsNothing.size(),
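A sketch of what the new restriction diagnoses (OpenMP 6.0 declare variant
syntax assumed; names are illustrative): list items under need_device_addr
must name reference-typed parameters.

    void variantFn(int &byRef, int byVal);

    // OK: byRef has reference type. Listing byVal instead would emit
    // err_omp_non_by_ref_need_device_addr_modifier_argument.
    #pragma omp declare variant(variantFn) match(construct = {dispatch}) \
        adjust_args(need_device_addr : byRef)
    void baseFn(int &byRef, int byVal);
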
@@ -18344,7 +18361,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP lastprivate clause.");
+ assert(RefExpr && "NULL expr in OpenMP shared clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -19991,7 +20008,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPAlignedClause(
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP linear clause.");
+ assert(RefExpr && "NULL expr in OpenMP aligned clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -20167,7 +20184,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SmallVector<Expr *, 8> DstExprs;
SmallVector<Expr *, 8> AssignmentOps;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP linear clause.");
+ assert(RefExpr && "NULL expr in OpenMP copyprivate clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -20526,7 +20543,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPDependClause(
TotalDepCount = VarOffset.TotalDepCount;
} else {
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ assert(RefExpr && "NULL expr in OpenMP depend clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -23737,7 +23754,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPAllocateClause(
// Analyze and build list of variables.
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP private clause.");
+ assert(RefExpr && "NULL expr in OpenMP allocate clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -23829,7 +23846,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
+ assert(RefExpr && "NULL expr in OpenMP inclusive clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -23870,7 +23887,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
+ assert(RefExpr && "NULL expr in OpenMP exclusive clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -24063,7 +24080,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPAffinityClause(
SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : Locators) {
- assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ assert(RefExpr && "NULL expr in OpenMP affinity clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr) || RefExpr->isTypeDependent()) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -24375,7 +24392,7 @@ ExprResult SemaOpenMP::ActOnOMPArraySectionExpr(
return ExprError();
}
}
- } else if (ColonLocFirst.isValid() &&
+ } else if (SemaRef.getLangOpts().OpenMP < 60 && ColonLocFirst.isValid() &&
(OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() &&
!OriginalTy->isVariableArrayType()))) {
// OpenMP 5.0, [2.1.5 Array Sections]
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index e1a975b..9e56e697 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -5523,6 +5523,15 @@ static TemplateDeductionResult CheckDeductionConsistency(
// FIXME: A substitution can be incomplete on a non-structural part of the
// type. Use the canonical type for now, until the TemplateInstantiator can
// deal with that.
+
+ // Workaround: Implicit deduction guides use InjectedClassNameTypes, whereas
+ // the explicit guides don't. The substitution doesn't transform these types,
+ // so let it transform their specializations instead.
+ bool IsDeductionGuide = isa<CXXDeductionGuideDecl>(FTD->getTemplatedDecl());
+ if (IsDeductionGuide) {
+ if (auto *Injected = P->getAs<InjectedClassNameType>())
+ P = Injected->getInjectedSpecializationType();
+ }
QualType InstP = S.SubstType(P.getCanonicalType(), MLTAL, FTD->getLocation(),
FTD->getDeclName(), &IsIncompleteSubstitution);
if (InstP.isNull() && !IsIncompleteSubstitution)
@@ -5537,9 +5546,15 @@ static TemplateDeductionResult CheckDeductionConsistency(
if (auto *PA = dyn_cast<PackExpansionType>(A);
PA && !isa<PackExpansionType>(InstP))
A = PA->getPattern();
- if (!S.Context.hasSameType(
- S.Context.getUnqualifiedArrayType(InstP.getNonReferenceType()),
- S.Context.getUnqualifiedArrayType(A.getNonReferenceType())))
+ auto T1 = S.Context.getUnqualifiedArrayType(InstP.getNonReferenceType());
+ auto T2 = S.Context.getUnqualifiedArrayType(A.getNonReferenceType());
+ if (IsDeductionGuide) {
+ if (auto *Injected = T1->getAs<InjectedClassNameType>())
+ T1 = Injected->getInjectedSpecializationType();
+ if (auto *Injected = T2->getAs<InjectedClassNameType>())
+ T2 = Injected->getInjectedSpecializationType();
+ }
+ if (!S.Context.hasSameType(T1, T2))
return TemplateDeductionResult::NonDeducedMismatch;
return TemplateDeductionResult::Success;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 68efdba..a7704da 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -3730,13 +3730,15 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N,
return nullptr;
}
- // Save the first destructor/function as release point.
- assert(!ReleaseFunctionLC && "There should be only one release point");
+ // Record the stack frame that is _responsible_ for this memory release
+ // event. This will be used by the false positive suppression heuristics
+ // that recognize the release points of reference-counted objects.
+ //
+ // Usually (e.g. in C) we say that the _responsible_ stack frame is the
+ // current innermost stack frame:
ReleaseFunctionLC = CurrentLC->getStackFrame();
-
- // See if we're releasing memory while inlining a destructor that
- // decrement reference counters (or one of its callees).
- // This turns on various common false positive suppressions.
+ // ...but if the stack contains a destructor call, then we say that the
+ // outermost destructor stack frame is the _responsible_ one:
for (const LocationContext *LC = CurrentLC; LC; LC = LC->getParent()) {
if (const auto *DD = dyn_cast<CXXDestructorDecl>(LC->getDecl())) {
if (isReferenceCountingPointerDestructor(DD)) {