Diffstat (limited to 'clang/lib/CIR')
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuilder.h                  14
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXX.cpp                    59
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h                   32
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenClass.cpp                 134
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp                    6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp                    8
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp                46
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp           80
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp             27
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp               14
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h                 29
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp         193
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp                 23
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.h                    4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp                 5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenVTables.cpp                35
-rw-r--r--  clang/lib/CIR/CodeGen/EHScopeStack.h                    3
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp  139
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp    82
19 files changed, 887 insertions, 46 deletions
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 58345b4..25afe8b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -122,6 +122,11 @@ public:
return getPointerTo(cir::VPtrType::get(getContext()));
}
+ cir::FuncType getFuncType(llvm::ArrayRef<mlir::Type> params, mlir::Type retTy,
+ bool isVarArg = false) {
+ return cir::FuncType::get(params, retTy, isVarArg);
+ }
+
/// Get a CIR record kind from a AST declaration tag.
cir::RecordType::RecordKind getRecordKind(const clang::TagTypeKind kind) {
switch (kind) {
@@ -372,6 +377,15 @@ public:
return cir::BinOp::create(*this, loc, cir::BinOpKind::Div, lhs, rhs);
}
+ mlir::Value createDynCast(mlir::Location loc, mlir::Value src,
+ cir::PointerType destType, bool isRefCast,
+ cir::DynamicCastInfoAttr info) {
+ auto castKind =
+ isRefCast ? cir::DynamicCastKind::Ref : cir::DynamicCastKind::Ptr;
+ return cir::DynamicCastOp::create(*this, loc, destType, castKind, src, info,
+ /*relative_layout=*/false);
+ }
+
Address createBaseClassAddr(mlir::Location loc, Address addr,
mlir::Type destType, unsigned offset,
bool assumeNotNull) {
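
Both helpers above are thin conveniences over cir::FuncType::get and cir::DynamicCastOp::create. A minimal sketch of how CIRGen code would call them (the enclosing function name and in-scope values are hypothetical):

    // Hypothetical caller; 'CIRGenBuilderTy' is the builder class this header
    // defines, the other values are assumed to be in scope.
    void useNewHelpers(CIRGenBuilderTy &builder, mlir::Location loc,
                       mlir::Value src, cir::PointerType destTy,
                       cir::DynamicCastInfoAttr info) {
      // A void(void*) prototype; non-variadic by default.
      cir::FuncType fnTy =
          builder.getFuncType({builder.getVoidPtrTy()}, builder.getVoidTy());
      // Pointer-flavored cast; isRefCast=true would select DynamicCastKind::Ref.
      mlir::Value casted =
          builder.createDynCast(loc, src, destTy, /*isRefCast=*/false, info);
      (void)fnTy;
      (void)casted;
    }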
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
index d5b35c2..274d11b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CIRGenCXXABI.h"
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
@@ -95,7 +96,63 @@ static void emitDeclDestroy(CIRGenFunction &cgf, const VarDecl *vd,
return;
}
- cgf.cgm.errorNYI(vd->getSourceRange(), "global with destructor");
+ // If the storage is not constant, we'll emit this regardless of the
+ // NeedsDtor value.
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+
+ // Prepare the dtor region.
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ mlir::Block *block = builder.createBlock(&addr.getDtorRegion());
+ CIRGenFunction::LexicalScope lexScope{cgf, addr.getLoc(),
+ builder.getInsertionBlock()};
+ lexScope.setAsGlobalInit();
+ builder.setInsertionPointToStart(block);
+
+ CIRGenModule &cgm = cgf.cgm;
+ QualType type = vd->getType();
+
+ // Special-case non-array C++ destructors, if they have the right signature.
+ // Under some ABIs, destructors return this instead of void, and cannot be
+ // passed directly to __cxa_atexit if the target does not allow this
+ // mismatch.
+ const CXXRecordDecl *record = type->getAsCXXRecordDecl();
+ bool canRegisterDestructor =
+ record && (!cgm.getCXXABI().hasThisReturn(
+ GlobalDecl(record->getDestructor(), Dtor_Complete)) ||
+ cgm.getCXXABI().canCallMismatchedFunctionType());
+
+ // If __cxa_atexit is disabled via a flag, a different helper function is
+ // generated elsewhere which uses atexit instead, and it takes the destructor
+ // directly.
+ cir::FuncOp fnOp;
+ if (record && (canRegisterDestructor || cgm.getCodeGenOpts().CXAAtExit)) {
+ if (vd->getTLSKind())
+ cgm.errorNYI(vd->getSourceRange(), "TLS destructor");
+ assert(!record->hasTrivialDestructor());
+ assert(!cir::MissingFeatures::openCL());
+ CXXDestructorDecl *dtor = record->getDestructor();
+ // In LLVM OG codegen this is done in registerGlobalDtor, but CIRGen
+ // relies on LoweringPrepare for further decoupling, so build the
+ // call right here.
+ auto gd = GlobalDecl(dtor, Dtor_Complete);
+ fnOp = cgm.getAddrAndTypeOfCXXStructor(gd).second;
+ cgf.getBuilder().createCallOp(
+ cgf.getLoc(vd->getSourceRange()),
+ mlir::FlatSymbolRefAttr::get(fnOp.getSymNameAttr()),
+ mlir::ValueRange{cgm.getAddrOfGlobalVar(vd)});
+ } else {
+ cgm.errorNYI(vd->getSourceRange(), "array destructor");
+ }
+ assert(fnOp && "expected cir.func");
+ cgm.getCXXABI().registerGlobalDtor(vd, fnOp, nullptr);
+
+ builder.setInsertionPointToEnd(block);
+ if (block->empty()) {
+ block->erase();
+ // Don't confuse lexical cleanup.
+ builder.clearInsertionPoint();
+ } else {
+ builder.create<cir::YieldOp>(addr.getLoc());
+ }
}
cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl gd) {
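
The path this replaces was an unconditional errorNYI; the C++ input it now handles is an ordinary global whose type has a non-trivial destructor, e.g.:

    struct Guard {
      ~Guard(); // non-trivial, so 'g' needs teardown at program exit
    };

    // CIRGen now builds a dtor region on the cir.global for 'g' containing a
    // direct call to ~Guard(); LoweringPrepare later rewrites that call into
    // a __cxa_atexit registration (see the LoweringPrepare.cpp hunks below).
    Guard g;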
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 2465a68..06f41cd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -54,6 +54,12 @@ public:
Address thisAddr, const CXXRecordDecl *classDecl,
const CXXRecordDecl *baseClassDecl) = 0;
+ virtual mlir::Value emitDynamicCast(CIRGenFunction &cgf, mlir::Location loc,
+ QualType srcRecordTy,
+ QualType destRecordTy,
+ cir::PointerType destCIRTy,
+ bool isRefCast, Address src) = 0;
+
public:
/// Similar to AddedStructorArgs, but only notes the number of additional
/// arguments.
@@ -149,6 +155,14 @@ public:
/// Loads the incoming C++ this pointer as it was passed by the caller.
mlir::Value loadIncomingCXXThis(CIRGenFunction &cgf);
+ /// Get the implicit (second) parameter that comes after the "this" pointer,
+ /// or nullptr if there isn't one.
+ virtual mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &cgf,
+ const CXXDestructorDecl *dd,
+ CXXDtorType type,
+ bool forVirtualBase,
+ bool delegating) = 0;
+
/// Emit constructor variants required by this ABI.
virtual void emitCXXConstructors(const clang::CXXConstructorDecl *d) = 0;
@@ -160,6 +174,14 @@ public:
bool forVirtualBase, bool delegating,
Address thisAddr, QualType thisTy) = 0;
+ /// Emit code to force the execution of a destructor during global
+ /// teardown. The default implementation of this uses atexit.
+ ///
+ /// \param dtor - a function taking a single pointer argument
+ /// \param addr - a pointer to pass to the destructor function.
+ virtual void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
+ mlir::Value addr) = 0;
+
/// Checks if ABI requires extra virtual offset for vtable field.
virtual bool
isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf,
@@ -233,6 +255,16 @@ public:
return false;
}
+ /// Returns true if the target allows calling a function through a pointer
+ /// with a different signature than the actual function (or equivalently,
+ /// bitcasting a function or function pointer to a different function type).
+ /// In principle in the most general case this could depend on the target, the
+ /// calling convention, and the actual types of the arguments and return
+ /// value. Here it just means whether the signature mismatch could *ever* be
+ /// allowed; in other words, does the target do strict checking of signatures
+ /// for all calls.
+ virtual bool canCallMismatchedFunctionType() const { return true; }
+
/// Gets the mangle context.
clang::MangleContext &getMangleContext() { return *mangleContext; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 8f4377b..485b2c8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -126,6 +126,30 @@ static bool isInitializerOfDynamicClass(const CXXCtorInitializer *baseInit) {
}
namespace {
+/// Call the destructor for a direct base class.
+struct CallBaseDtor final : EHScopeStack::Cleanup {
+ const CXXRecordDecl *baseClass;
+ bool baseIsVirtual;
+ CallBaseDtor(const CXXRecordDecl *base, bool baseIsVirtual)
+ : baseClass(base), baseIsVirtual(baseIsVirtual) {}
+
+ void emit(CIRGenFunction &cgf) override {
+ const CXXRecordDecl *derivedClass =
+ cast<CXXMethodDecl>(cgf.curFuncDecl)->getParent();
+
+ const CXXDestructorDecl *d = baseClass->getDestructor();
+ // We are already inside a destructor, so presumably the object being
+ // destroyed should have the expected type.
+ QualType thisTy = d->getFunctionObjectParameterType();
+ assert(cgf.currSrcLoc && "expected source location");
+ Address addr = cgf.getAddressOfDirectBaseInCompleteClass(
+ *cgf.currSrcLoc, cgf.loadCXXThisAddress(), derivedClass, baseClass,
+ baseIsVirtual);
+ cgf.emitCXXDestructorCall(d, Dtor_Base, baseIsVirtual,
+ /*delegating=*/false, addr, thisTy);
+ }
+};
+
/// A visitor which checks whether an initializer uses 'this' in a
/// way which requires the vtable to be properly set.
struct DynamicThisUseChecker
@@ -870,6 +894,116 @@ void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
/*delegating=*/false, addr, type);
}
+namespace {
+class DestroyField final : public EHScopeStack::Cleanup {
+ const FieldDecl *field;
+ CIRGenFunction::Destroyer *destroyer;
+
+public:
+ DestroyField(const FieldDecl *field, CIRGenFunction::Destroyer *destroyer)
+ : field(field), destroyer(destroyer) {}
+
+ void emit(CIRGenFunction &cgf) override {
+ // Find the address of the field.
+ Address thisValue = cgf.loadCXXThisAddress();
+ CanQualType recordTy =
+ cgf.getContext().getCanonicalTagType(field->getParent());
+ LValue thisLV = cgf.makeAddrLValue(thisValue, recordTy);
+ LValue lv = cgf.emitLValueForField(thisLV, field);
+ assert(lv.isSimple());
+
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ cgf.emitDestroy(lv.getAddress(), field->getType(), destroyer);
+ }
+};
+} // namespace
+
+/// Emit all code that comes at the end of a class's destructor. This is to call
+/// destructors on members and base classes in reverse order of their
+/// construction.
+///
+/// For a deleting destructor, this also handles the case where a destroying
+/// operator delete completely overrides the definition.
+void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd,
+ CXXDtorType dtorType) {
+ assert((!dd->isTrivial() || dd->hasAttr<DLLExportAttr>()) &&
+ "Should not emit dtor epilogue for non-exported trivial dtor!");
+
+ // The deleting-destructor phase just needs to call the appropriate
+ // operator delete that Sema picked up.
+ if (dtorType == Dtor_Deleting) {
+ cgm.errorNYI(dd->getSourceRange(), "deleting destructor cleanups");
+ return;
+ }
+
+ const CXXRecordDecl *classDecl = dd->getParent();
+
+ // Unions have no bases and do not call field destructors.
+ if (classDecl->isUnion())
+ return;
+
+ // The complete-destructor phase just destructs all the virtual bases.
+ if (dtorType == Dtor_Complete) {
+ assert(!cir::MissingFeatures::sanitizers());
+
+ // We push them in the forward order so that they'll be popped in
+ // the reverse order.
+ for (const CXXBaseSpecifier &base : classDecl->vbases()) {
+ auto *baseClassDecl = base.getType()->castAsCXXRecordDecl();
+
+ if (baseClassDecl->hasTrivialDestructor()) {
+ // Under SanitizeMemoryUseAfterDtor, poison the trivial base class
+ // memory. For non-trivial base classes the same is done in the class
+ // destructor.
+ assert(!cir::MissingFeatures::sanitizers());
+ } else {
+ ehStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, baseClassDecl,
+ /*baseIsVirtual=*/true);
+ }
+ }
+
+ return;
+ }
+
+ assert(dtorType == Dtor_Base);
+ assert(!cir::MissingFeatures::sanitizers());
+
+ // Destroy non-virtual bases.
+ for (const CXXBaseSpecifier &base : classDecl->bases()) {
+ // Ignore virtual bases.
+ if (base.isVirtual())
+ continue;
+
+ CXXRecordDecl *baseClassDecl = base.getType()->getAsCXXRecordDecl();
+
+ if (baseClassDecl->hasTrivialDestructor())
+ assert(!cir::MissingFeatures::sanitizers());
+ else
+ ehStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, baseClassDecl,
+ /*baseIsVirtual=*/false);
+ }
+
+ assert(!cir::MissingFeatures::sanitizers());
+
+ // Destroy direct fields.
+ for (const FieldDecl *field : classDecl->fields()) {
+ QualType type = field->getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind)
+ continue;
+
+ // Anonymous union members do not have their destructors called.
+ const RecordType *rt = type->getAsUnionType();
+ if (rt && rt->getOriginalDecl()->isAnonymousStructOrUnion())
+ continue;
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ ehStack.pushCleanup<DestroyField>(cleanupKind, field,
+ getDestroyer(dtorKind));
+ }
+}
+
void CIRGenFunction::emitDelegatingCXXConstructorCall(
const CXXConstructorDecl *ctor, const FunctionArgList &args) {
assert(ctor->isDelegatingConstructor());
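
The cleanups pushed by enterDtorCleanups pop in reverse, which reproduces the standard C++ destruction order: members in reverse declaration order, then non-virtual bases, then (from the complete-object variant only) virtual bases. For example:

    struct A { ~A(); };
    struct B { ~B(); };
    struct V { ~V(); };

    struct D : virtual V, A {
      B b1, b2;
      // After the body of ~D() runs: ~B() for b2, ~B() for b1, then ~A().
      // The complete-object variant (Dtor_Complete) additionally runs ~V()
      // last; the base variant (Dtor_Base) leaves V alone.
      ~D();
    };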
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 563a753..039d290 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -695,12 +695,6 @@ struct DestroyObject final : EHScopeStack::Cleanup {
void emit(CIRGenFunction &cgf) override {
cgf.emitDestroy(addr, type, destroyer);
}
-
- // This is a placeholder until EHCleanupScope is implemented.
- size_t getSize() const override {
- assert(!cir::MissingFeatures::ehCleanupScope());
- return sizeof(DestroyObject);
- }
};
} // namespace
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index be94890..f416571 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1185,10 +1185,16 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
case CK_BuiltinFnToFnPtr:
llvm_unreachable("builtin functions are handled elsewhere");
+ case CK_Dynamic: {
+ LValue lv = emitLValue(e->getSubExpr());
+ Address v = lv.getAddress();
+ const auto *dce = cast<CXXDynamicCastExpr>(e);
+ return makeNaturalAlignAddrLValue(emitDynamicCast(v, dce), e->getType());
+ }
+
// These are never l-values; just use the aggregate emission code.
case CK_NonAtomicToAtomic:
case CK_AtomicToNonAtomic:
- case CK_Dynamic:
case CK_ToUnion:
case CK_BaseToDerived:
case CK_AddressSpaceConversion:
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 4eb8ca8..97c0944 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -463,12 +463,6 @@ struct CallObjectDelete final : EHScopeStack::Cleanup {
void emit(CIRGenFunction &cgf) override {
cgf.emitDeleteCall(operatorDelete, ptr, elementType);
}
-
- // This is a placeholder until EHCleanupScope is implemented.
- size_t getSize() const override {
- assert(!cir::MissingFeatures::ehCleanupScope());
- return sizeof(CallObjectDelete);
- }
};
} // namespace
@@ -728,3 +722,43 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,
// Emit the call to delete.
emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs);
}
+
+mlir::Value CIRGenFunction::emitDynamicCast(Address thisAddr,
+ const CXXDynamicCastExpr *dce) {
+ mlir::Location loc = getLoc(dce->getSourceRange());
+
+ cgm.emitExplicitCastExprType(dce, this);
+ QualType destTy = dce->getTypeAsWritten();
+ QualType srcTy = dce->getSubExpr()->getType();
+
+ // C++ [expr.dynamic.cast]p7:
+ // If T is "pointer to cv void," then the result is a pointer to the most
+ // derived object pointed to by v.
+ bool isDynCastToVoid = destTy->isVoidPointerType();
+ bool isRefCast = destTy->isReferenceType();
+
+ QualType srcRecordTy;
+ QualType destRecordTy;
+ if (isDynCastToVoid) {
+ srcRecordTy = srcTy->getPointeeType();
+ // No destRecordTy.
+ } else if (const PointerType *destPTy = destTy->getAs<PointerType>()) {
+ srcRecordTy = srcTy->castAs<PointerType>()->getPointeeType();
+ destRecordTy = destPTy->getPointeeType();
+ } else {
+ srcRecordTy = srcTy;
+ destRecordTy = destTy->castAs<ReferenceType>()->getPointeeType();
+ }
+
+ assert(srcRecordTy->isRecordType() && "source type must be a record type!");
+ assert(!cir::MissingFeatures::emitTypeCheck());
+
+ if (dce->isAlwaysNull()) {
+ cgm.errorNYI(dce->getSourceRange(), "emitDynamicCastToNull");
+ return {};
+ }
+
+ auto destCirTy = mlir::cast<cir::PointerType>(convertType(destTy));
+ return cgm.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy,
+ destCirTy, isRefCast, thisAddr);
+}
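
The srcRecordTy/destRecordTy classification above distinguishes the three source-level forms of dynamic_cast:

    struct Base { virtual ~Base(); };
    struct Derived : Base {};

    void forms(Base *bp, Base &br) {
      Derived *dp = dynamic_cast<Derived *>(bp); // pointer cast: null on failure
      Derived &dr = dynamic_cast<Derived &>(br); // reference cast (isRefCast):
                                                 // throws std::bad_cast on failure
      void *vp = dynamic_cast<void *>(bp);       // cast to the most derived
                                                 // object (isDynCastToVoid)
      (void)dp; (void)dr; (void)vp;
    }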
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 59aa257..89e9ec4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -500,6 +500,26 @@ private:
bool appendBitField(const FieldDecl *field, uint64_t fieldOffset,
cir::IntAttr ci, bool allowOverwrite = false);
+ /// Applies zero-initialization to padding bytes before and within a field.
+ /// \param layout The record layout containing field offset information.
+ /// \param fieldNo The field index in the record.
+ /// \param field The field declaration.
+ /// \param allowOverwrite Whether to allow overwriting existing values.
+ /// \param sizeSoFar The current size processed, updated by this function.
+ /// \param zeroFieldSize Set to true if the field has zero size.
+ /// \returns true on success, false if padding could not be applied.
+ bool applyZeroInitPadding(const ASTRecordLayout &layout, unsigned fieldNo,
+ const FieldDecl &field, bool allowOverwrite,
+ CharUnits &sizeSoFar, bool &zeroFieldSize);
+
+ /// Applies zero-initialization to trailing padding bytes in a record.
+ /// \param layout The record layout containing size information.
+ /// \param allowOverwrite Whether to allow overwriting existing values.
+ /// \param sizeSoFar The current size processed.
+ /// \returns true on success, false if padding could not be applied.
+ bool applyZeroInitPadding(const ASTRecordLayout &layout, bool allowOverwrite,
+ CharUnits &sizeSoFar);
+
bool build(InitListExpr *ile, bool allowOverwrite);
bool build(const APValue &val, const RecordDecl *rd, bool isPrimaryBase,
const CXXRecordDecl *vTableClass, CharUnits baseOffset);
@@ -548,6 +568,49 @@ bool ConstRecordBuilder::appendBitField(const FieldDecl *field,
allowOverwrite);
}
+bool ConstRecordBuilder::applyZeroInitPadding(
+ const ASTRecordLayout &layout, unsigned fieldNo, const FieldDecl &field,
+ bool allowOverwrite, CharUnits &sizeSoFar, bool &zeroFieldSize) {
+ uint64_t startBitOffset = layout.getFieldOffset(fieldNo);
+ CharUnits startOffset =
+ cgm.getASTContext().toCharUnitsFromBits(startBitOffset);
+ if (sizeSoFar < startOffset) {
+ if (!appendBytes(sizeSoFar, computePadding(cgm, startOffset - sizeSoFar),
+ allowOverwrite))
+ return false;
+ }
+
+ if (!field.isBitField()) {
+ CharUnits fieldSize =
+ cgm.getASTContext().getTypeSizeInChars(field.getType());
+ sizeSoFar = startOffset + fieldSize;
+ zeroFieldSize = fieldSize.isZero();
+ } else {
+ const CIRGenRecordLayout &rl =
+ cgm.getTypes().getCIRGenRecordLayout(field.getParent());
+ const CIRGenBitFieldInfo &info = rl.getBitFieldInfo(&field);
+ uint64_t endBitOffset = startBitOffset + info.size;
+ sizeSoFar = cgm.getASTContext().toCharUnitsFromBits(endBitOffset);
+ if (endBitOffset % cgm.getASTContext().getCharWidth() != 0)
+ sizeSoFar++;
+ zeroFieldSize = info.size == 0;
+ }
+ return true;
+}
+
+bool ConstRecordBuilder::applyZeroInitPadding(const ASTRecordLayout &layout,
+ bool allowOverwrite,
+ CharUnits &sizeSoFar) {
+ CharUnits totalSize = layout.getSize();
+ if (sizeSoFar < totalSize) {
+ if (!appendBytes(sizeSoFar, computePadding(cgm, totalSize - sizeSoFar),
+ allowOverwrite))
+ return false;
+ }
+ sizeSoFar = totalSize;
+ return true;
+}
+
bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
RecordDecl *rd = ile->getType()
->castAs<clang::RecordType>()
@@ -562,11 +625,9 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
if (cxxrd->getNumBases())
return false;
- if (cgm.shouldZeroInitPadding()) {
- assert(!cir::MissingFeatures::recordZeroInitPadding());
- cgm.errorNYI(rd->getSourceRange(), "zero init padding");
- return false;
- }
+ const bool zeroInitPadding = cgm.shouldZeroInitPadding();
+ bool zeroFieldSize = false;
+ CharUnits sizeSoFar = CharUnits::Zero();
unsigned elementNo = 0;
for (auto [index, field] : llvm::enumerate(rd->fields())) {
@@ -596,7 +657,10 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
continue;
}
- assert(!cir::MissingFeatures::recordZeroInitPadding());
+ if (zeroInitPadding &&
+ !applyZeroInitPadding(layout, index, *field, allowOverwrite, sizeSoFar,
+ zeroFieldSize))
+ return false;
// When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
// represents additional overwriting of our current constant value, and not
@@ -641,8 +705,8 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
}
}
- assert(!cir::MissingFeatures::recordZeroInitPadding());
- return true;
+ return !zeroInitPadding ||
+ applyZeroInitPadding(layout, allowOverwrite, sizeSoFar);
}
namespace {
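
As a concrete picture of what the two applyZeroInitPadding overloads cover, take a record with both interior and trailing padding (offsets assume a typical target with 4-byte int alignment):

    struct Padded {
      char c; // offset 0; bytes [1,4) are interior padding
      int i;  // offset 4
      char t; // offset 8; bytes [9,12) are trailing padding, sizeof == 12
    };

    // With shouldZeroInitPadding() true, the emitted constant for this
    // initializer must spell out zeros for [1,4) (per-field overload) and
    // [9,12) (trailing overload).
    constexpr Padded p{'a', 1, 'b'};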
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 5d3496a..637f9ef 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1893,7 +1893,34 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
}
return v;
}
+ case CK_IntegralToPointer: {
+ mlir::Type destCIRTy = cgf.convertType(destTy);
+ mlir::Value src = Visit(const_cast<Expr *>(subExpr));
+
+ // Properly resize by casting to an int of the same size as the pointer.
+ // Clang's IntegralToPointer includes 'bool' as the source, but in CIR
+ // 'bool' is not an integral type. So check the source type to get the
+ // correct CIR conversion.
+ mlir::Type middleTy = cgf.cgm.getDataLayout().getIntPtrType(destCIRTy);
+ mlir::Value middleVal = builder.createCast(
+ subExpr->getType()->isBooleanType() ? cir::CastKind::bool_to_int
+ : cir::CastKind::integral,
+ src, middleTy);
+
+ if (cgf.cgm.getCodeGenOpts().StrictVTablePointers) {
+ cgf.cgm.errorNYI(subExpr->getSourceRange(),
+ "IntegralToPointer: strict vtable pointers");
+ return {};
+ }
+ return builder.createIntToPtr(middleVal, destCIRTy);
+ }
+
+ case CK_Dynamic: {
+ Address v = cgf.emitPointerWithAlignment(subExpr);
+ const auto *dce = cast<CXXDynamicCastExpr>(ce);
+ return cgf.emitDynamicCast(v, dce);
+ }
case CK_ArrayToPointerDecay:
return cgf.emitArrayToPointerDecay(subExpr).getPointer();
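
The bool special case in CK_IntegralToPointer shows up for code like the following, where the AST treats the source as integral but CIR models bool as a distinct, non-integral type:

    void casts(unsigned long n, bool b) {
      // Integral source: resized with an integral cast to pointer width,
      // then converted with an int-to-pointer cast.
      int *p = reinterpret_cast<int *>(n);
      // Bool source: needs the bool_to_int cast first, as handled above.
      int *q = reinterpret_cast<int *>(b);
      (void)p;
      (void)q;
    }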
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 52fb0d7..7a774e0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -689,7 +689,9 @@ void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
assert(!cir::MissingFeatures::sanitizers());
- assert(!cir::MissingFeatures::dtorCleanups());
+
+ // Enter the epilogue cleanups.
+ RunCleanupsScope dtorEpilogue(*this);
// If this is the complete variant, just invoke the base variant;
// the epilogue will destruct the virtual bases. But we can't do
@@ -708,7 +710,8 @@ void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
assert((body || getTarget().getCXXABI().isMicrosoft()) &&
"can't emit a dtor without a body for non-Microsoft ABIs");
- assert(!cir::MissingFeatures::dtorCleanups());
+ // Enter the cleanup scopes for virtual bases.
+ enterDtorCleanups(dtor, Dtor_Complete);
if (!isTryBody) {
QualType thisTy = dtor->getFunctionObjectParameterType();
@@ -723,7 +726,9 @@ void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
case Dtor_Base:
assert(body);
- assert(!cir::MissingFeatures::dtorCleanups());
+ // Enter the cleanup scopes for fields and non-virtual bases.
+ enterDtorCleanups(dtor, Dtor_Base);
+
assert(!cir::MissingFeatures::vtableInitialization());
if (isTryBody) {
@@ -741,7 +746,8 @@ void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
break;
}
- assert(!cir::MissingFeatures::dtorCleanups());
+ // Jump out through the epilogue cleanups.
+ dtorEpilogue.forceCleanup();
// Exit the try if applicable.
if (isTryBody)
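
The complete-to-base forwarding and the epilogue cleanups matter most for classes with virtual bases, where the Itanium ABI splits the destructor in two:

    struct VB { ~VB(); };
    struct D : virtual VB { ~D(); };

    // For ~D() the Itanium ABI emits:
    //   base variant (D2):     body plus member/non-virtual-base cleanups;
    //   complete variant (D1): invokes D2, then destroys the virtual base VB.
    // The enterDtorCleanups(dtor, Dtor_Complete) call above is what queues
    // the ~VB() call for the complete variant.
    void use() { D d; } // a complete object: D1 runs at scope exit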
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index a60efe1..7a606ee 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -556,6 +556,33 @@ public:
cir::GlobalOp gv,
cir::GetGlobalOp gvAddr);
+ /// Enter the cleanups necessary to complete the given phase of destruction
+ /// for a destructor. The end result should call destructors on members and
+ /// base classes in reverse order of their construction.
+ void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type);
+
+ /// Determines whether an EH cleanup is required to destroy a type
+ /// with the given destruction kind.
+ /// TODO(cir): could be shared with Clang LLVM codegen
+ bool needsEHCleanup(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ return false;
+ case QualType::DK_cxx_destructor:
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
+ return getLangOpts().Exceptions;
+ case QualType::DK_objc_strong_lifetime:
+ return getLangOpts().Exceptions &&
+ cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
+ }
+ llvm_unreachable("bad destruction kind");
+ }
+
+ CleanupKind getCleanupKind(QualType::DestructionKind kind) {
+ return needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup;
+ }
+
/// Set the address of a local variable.
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
@@ -1285,6 +1312,8 @@ public:
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
+ mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
+
/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location. The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 0418174..9e490c6d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -59,7 +59,11 @@ public:
void addImplicitStructorParams(CIRGenFunction &cgf, QualType &resTy,
FunctionArgList &params) override;
-
+ mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &cgf,
+ const CXXDestructorDecl *dd,
+ CXXDtorType type,
+ bool forVirtualBase,
+ bool delegating) override;
void emitCXXConstructors(const clang::CXXConstructorDecl *d) override;
void emitCXXDestructors(const clang::CXXDestructorDecl *d) override;
void emitCXXStructor(clang::GlobalDecl gd) override;
@@ -68,6 +72,8 @@ public:
CXXDtorType type, bool forVirtualBase,
bool delegating, Address thisAddr,
QualType thisTy) override;
+ void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
+ mlir::Value addr) override;
void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override;
void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) override;
@@ -116,6 +122,16 @@ public:
Address thisAddr, const CXXRecordDecl *classDecl,
const CXXRecordDecl *baseClassDecl) override;
+ // The traditional clang CodeGen emits calls to `__dynamic_cast` directly into
+ // LLVM in the `emitDynamicCastCall` function. In CIR, `dynamic_cast`
+ // expressions are lowered to `cir.dyn_cast` ops instead of calls to runtime
+ // functions. So during CIRGen we don't need the `emitDynamicCastCall`
+ // function that clang CodeGen has.
+ mlir::Value emitDynamicCast(CIRGenFunction &cgf, mlir::Location loc,
+ QualType srcRecordTy, QualType destRecordTy,
+ cir::PointerType destCIRTy, bool isRefCast,
+ Address src) override;
+
/**************************** RTTI Uniqueness ******************************/
protected:
/// Returns true if the ABI requires RTTI type_info objects to be unique
@@ -1492,11 +1508,8 @@ void CIRGenItaniumCXXABI::emitDestructorCall(
CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type,
bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) {
GlobalDecl gd(dd, type);
- if (needsVTTParameter(gd)) {
- cgm.errorNYI(dd->getSourceRange(), "emitDestructorCall: VTT");
- }
-
- mlir::Value vtt = nullptr;
+ mlir::Value vtt =
+ getCXXDestructorImplicitParam(cgf, dd, type, forVirtualBase, delegating);
ASTContext &astContext = cgm.getASTContext();
QualType vttTy = astContext.getPointerType(astContext.VoidPtrTy);
assert(!cir::MissingFeatures::appleKext());
@@ -1507,6 +1520,34 @@ void CIRGenItaniumCXXABI::emitDestructorCall(
vttTy, nullptr);
}
+void CIRGenItaniumCXXABI::registerGlobalDtor(const VarDecl *vd,
+ cir::FuncOp dtor,
+ mlir::Value addr) {
+ if (vd->isNoDestroy(cgm.getASTContext()))
+ return;
+
+ if (vd->getTLSKind()) {
+ cgm.errorNYI(vd->getSourceRange(), "registerGlobalDtor: TLS");
+ return;
+ }
+
+ // HLSL doesn't support atexit.
+ if (cgm.getLangOpts().HLSL) {
+ cgm.errorNYI(vd->getSourceRange(), "registerGlobalDtor: HLSL");
+ return;
+ }
+
+ // The default behavior is to use atexit. This is handled in lowering
+ // prepare. Nothing to be done for CIR here.
+}
+
+mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam(
+ CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type,
+ bool forVirtualBase, bool delegating) {
+ GlobalDecl gd(dd, type);
+ return cgf.getVTTParameter(gd, forVirtualBase, delegating);
+}
+
// The idea here is creating a separate block for the throw with an
// `UnreachableOp` as the terminator. So, we branch from the current block
// to the throw block and create a block for the remaining operations.
@@ -1796,3 +1837,143 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset(
}
return vbaseOffset;
}
+
+static cir::FuncOp getBadCastFn(CIRGenFunction &cgf) {
+ // Prototype: void __cxa_bad_cast();
+
+ // TODO(cir): set the calling convention of the runtime function.
+ assert(!cir::MissingFeatures::opFuncCallingConv());
+
+ cir::FuncType fnTy =
+ cgf.getBuilder().getFuncType({}, cgf.getBuilder().getVoidTy());
+ return cgf.cgm.createRuntimeFunction(fnTy, "__cxa_bad_cast");
+}
+
+// TODO(cir): This could be shared with classic codegen.
+static CharUnits computeOffsetHint(ASTContext &astContext,
+ const CXXRecordDecl *src,
+ const CXXRecordDecl *dst) {
+ CXXBasePaths paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+
+ // If Dst is not derived from Src we can skip the whole computation below and
+ // return that Src is not a public base of Dst. Record all inheritance paths.
+ if (!dst->isDerivedFrom(src, paths))
+ return CharUnits::fromQuantity(-2ULL);
+
+ unsigned numPublicPaths = 0;
+ CharUnits offset;
+
+ // Now walk all possible inheritance paths.
+ for (const CXXBasePath &path : paths) {
+ if (path.Access != AS_public) // Ignore non-public inheritance.
+ continue;
+
+ ++numPublicPaths;
+
+ for (const CXXBasePathElement &pathElement : path) {
+ // If the path contains a virtual base class we can't give any hint.
+ // -1: no hint.
+ if (pathElement.Base->isVirtual())
+ return CharUnits::fromQuantity(-1ULL);
+
+ if (numPublicPaths > 1) // Won't use offsets, skip computation.
+ continue;
+
+ // Accumulate the base class offsets.
+ const ASTRecordLayout &L =
+ astContext.getASTRecordLayout(pathElement.Class);
+ offset += L.getBaseClassOffset(
+ pathElement.Base->getType()->getAsCXXRecordDecl());
+ }
+ }
+
+ // -2: Src is not a public base of Dst.
+ if (numPublicPaths == 0)
+ return CharUnits::fromQuantity(-2ULL);
+
+ // -3: Src is a multiple public base type but never a virtual base type.
+ if (numPublicPaths > 1)
+ return CharUnits::fromQuantity(-3ULL);
+
+ // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
+ // Return the offset of Src from the origin of Dst.
+ return offset;
+}
+
+static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &cgf) {
+ // Prototype:
+ // void *__dynamic_cast(const void *sub,
+ // global_as const abi::__class_type_info *src,
+ // global_as const abi::__class_type_info *dst,
+ // std::ptrdiff_t src2dst_offset);
+
+ mlir::Type voidPtrTy = cgf.getBuilder().getVoidPtrTy();
+ mlir::Type rttiPtrTy = cgf.getBuilder().getUInt8PtrTy();
+ mlir::Type ptrDiffTy = cgf.convertType(cgf.getContext().getPointerDiffType());
+
+ // TODO(cir): mark the function as nowind willreturn readonly.
+ assert(!cir::MissingFeatures::opFuncNoUnwind());
+ assert(!cir::MissingFeatures::opFuncWillReturn());
+ assert(!cir::MissingFeatures::opFuncReadOnly());
+
+ // TODO(cir): set the calling convention of the runtime function.
+ assert(!cir::MissingFeatures::opFuncCallingConv());
+
+ cir::FuncType FTy = cgf.getBuilder().getFuncType(
+ {voidPtrTy, rttiPtrTy, rttiPtrTy, ptrDiffTy}, voidPtrTy);
+ return cgf.cgm.createRuntimeFunction(FTy, "__dynamic_cast");
+}
+
+static cir::DynamicCastInfoAttr emitDynamicCastInfo(CIRGenFunction &cgf,
+ mlir::Location loc,
+ QualType srcRecordTy,
+ QualType destRecordTy) {
+ auto srcRtti = mlir::cast<cir::GlobalViewAttr>(
+ cgf.cgm.getAddrOfRTTIDescriptor(loc, srcRecordTy));
+ auto destRtti = mlir::cast<cir::GlobalViewAttr>(
+ cgf.cgm.getAddrOfRTTIDescriptor(loc, destRecordTy));
+
+ cir::FuncOp runtimeFuncOp = getItaniumDynamicCastFn(cgf);
+ cir::FuncOp badCastFuncOp = getBadCastFn(cgf);
+ auto runtimeFuncRef = mlir::FlatSymbolRefAttr::get(runtimeFuncOp);
+ auto badCastFuncRef = mlir::FlatSymbolRefAttr::get(badCastFuncOp);
+
+ const CXXRecordDecl *srcDecl = srcRecordTy->getAsCXXRecordDecl();
+ const CXXRecordDecl *destDecl = destRecordTy->getAsCXXRecordDecl();
+ CharUnits offsetHint = computeOffsetHint(cgf.getContext(), srcDecl, destDecl);
+
+ mlir::Type ptrdiffTy = cgf.convertType(cgf.getContext().getPointerDiffType());
+ auto offsetHintAttr = cir::IntAttr::get(ptrdiffTy, offsetHint.getQuantity());
+
+ return cir::DynamicCastInfoAttr::get(srcRtti, destRtti, runtimeFuncRef,
+ badCastFuncRef, offsetHintAttr);
+}
+
+mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
+ mlir::Location loc,
+ QualType srcRecordTy,
+ QualType destRecordTy,
+ cir::PointerType destCIRTy,
+ bool isRefCast, Address src) {
+ bool isCastToVoid = destRecordTy.isNull();
+ assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference");
+
+ if (isCastToVoid) {
+ cgm.errorNYI(loc, "emitDynamicCastToVoid");
+ return {};
+ }
+
+ // If the destination is effectively final, the cast succeeds if and only
+ // if the dynamic type of the pointer is exactly the destination type.
+ if (destRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
+ cgf.cgm.getCodeGenOpts().OptimizationLevel > 0) {
+ cgm.errorNYI(loc, "emitExactDynamicCast");
+ return {};
+ }
+
+ cir::DynamicCastInfoAttr castInfo =
+ emitDynamicCastInfo(cgf, loc, srcRecordTy, destRecordTy);
+ return cgf.getBuilder().createDynCast(loc, src.getPointer(), destCIRTy,
+ isRefCast, castInfo);
+}
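
The hint computed by computeOffsetHint becomes the src2dst_offset argument of __dynamic_cast; sketching its cases on small hierarchies:

    struct S { virtual ~S(); };
    struct Pad { virtual ~Pad(); };

    struct A : S {};            // unique public non-virtual base: hint is the
                                // static offset of S in A (here 0)
    struct B : Pad, S {};       // still unique and non-virtual: nonzero offset
    struct C : virtual S {};    // virtual base on the path: hint -1 (no hint)
    struct D : private S {};    // S is not a public base: hint -2
    struct S1 : S {};
    struct S2 : S {};
    struct E : S1, S2 {};       // multiple public paths, none virtual: hint -3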
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 910c8a9..fe1ea56 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -2079,6 +2079,29 @@ CIRGenModule::createCIRBuiltinFunction(mlir::Location loc, StringRef name,
return fnOp;
}
+cir::FuncOp CIRGenModule::createRuntimeFunction(cir::FuncType ty,
+ StringRef name, mlir::ArrayAttr,
+ [[maybe_unused]] bool isLocal,
+ bool assumeConvergent) {
+ if (assumeConvergent)
+ errorNYI("createRuntimeFunction: assumeConvergent");
+ if (isLocal)
+ errorNYI("createRuntimeFunction: local");
+
+ cir::FuncOp entry = getOrCreateCIRFunction(name, ty, GlobalDecl(),
+ /*forVtable=*/false);
+
+ if (entry) {
+ // TODO(cir): set the attributes of the function.
+ assert(!cir::MissingFeatures::setLLVMFunctionFEnvAttributes());
+ assert(!cir::MissingFeatures::opFuncCallingConv());
+ assert(!cir::MissingFeatures::opGlobalDLLImportExport());
+ entry.setDSOLocal(true);
+ }
+
+ return entry;
+}
+
mlir::SymbolTable::Visibility
CIRGenModule::getMLIRVisibility(cir::GlobalOp op) {
// MLIR doesn't accept public symbols declarations (only
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index c6a6681..f627bae 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -480,6 +480,10 @@ public:
cir::FuncType ty,
const clang::FunctionDecl *fd);
+ cir::FuncOp createRuntimeFunction(cir::FuncType ty, llvm::StringRef name,
+ mlir::ArrayAttr = {}, bool isLocal = false,
+ bool assumeConvergent = false);
+
static constexpr const char *builtinCoroId = "__builtin_coro_id";
/// Given a builtin id for a function like "__builtin_fabsf", return a
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
index a9af753..4cf2237 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
@@ -87,7 +87,10 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) {
if (const auto *section = dyn_cast<ArraySectionExpr>(curVarExpr)) {
QualType baseTy = ArraySectionExpr::getBaseOriginalType(
section->getBase()->IgnoreParenImpCasts());
- boundTypes.push_back(QualType(baseTy->getPointeeOrArrayElementType(), 0));
+ if (auto *at = getContext().getAsArrayType(baseTy))
+ boundTypes.push_back(at->getElementType());
+ else
+ boundTypes.push_back(baseTy->getPointeeType());
} else {
boundTypes.push_back(curVarExpr->getType());
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
index 94d856b..84f5977 100644
--- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
@@ -327,9 +327,40 @@ cir::GlobalLinkageKind CIRGenModule::getVTableLinkage(const CXXRecordDecl *rd) {
llvm_unreachable("Should not have been asked to emit this");
}
}
+ // -fapple-kext mode does not support weak linkage, so we must use
+ // internal linkage.
+ if (astContext.getLangOpts().AppleKext)
+ return cir::GlobalLinkageKind::InternalLinkage;
+
+ auto discardableODRLinkage = cir::GlobalLinkageKind::LinkOnceODRLinkage;
+ auto nonDiscardableODRLinkage = cir::GlobalLinkageKind::WeakODRLinkage;
+ if (rd->hasAttr<DLLExportAttr>()) {
+ // Cannot discard exported vtables.
+ discardableODRLinkage = nonDiscardableODRLinkage;
+ } else if (rd->hasAttr<DLLImportAttr>()) {
+ // Imported vtables are available externally.
+ discardableODRLinkage = cir::GlobalLinkageKind::AvailableExternallyLinkage;
+ nonDiscardableODRLinkage =
+ cir::GlobalLinkageKind::AvailableExternallyLinkage;
+ }
+
+ switch (rd->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ return discardableODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration: {
+ errorNYI(rd->getSourceRange(),
+ "getVTableLinkage: explicit instantiation declaration");
+ return cir::GlobalLinkageKind::ExternalLinkage;
+ }
+
+ case TSK_ExplicitInstantiationDefinition:
+ return nonDiscardableODRLinkage;
+ }
- errorNYI(rd->getSourceRange(), "getVTableLinkage: no key function");
- return cir::GlobalLinkageKind::ExternalLinkage;
+ llvm_unreachable("Invalid TemplateSpecializationKind!");
}
cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *rd) {
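
The template-specialization switch added above applies when the dynamic class has no key function; the three interesting source forms look like this:

    template <typename T>
    struct Node {
      virtual void visit() {} // all virtuals inline: no key function
    };

    Node<int> n;                       // TSK_ImplicitInstantiation:
                                       //   linkonce_odr (discardable)
    template struct Node<float>;       // TSK_ExplicitInstantiationDefinition:
                                       //   weak_odr (non-discardable)
    extern template struct Node<char>; // TSK_ExplicitInstantiationDeclaration:
                                       //   still errorNYI in this patch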
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 66c1f76..67a72f5 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -108,9 +108,6 @@ public:
///
// \param flags cleanup kind.
virtual void emit(CIRGenFunction &cgf) = 0;
-
- // This is a placeholder until EHScope is implemented.
- virtual size_t getSize() const = 0;
};
private:
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index 2eeef81..706e54f 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -41,6 +41,16 @@ static SmallString<128> getTransformedFileName(mlir::ModuleOp mlirModule) {
return fileName;
}
+/// Return the FuncOp called by `callOp`.
+static cir::FuncOp getCalledFunction(cir::CallOp callOp) {
+ mlir::SymbolRefAttr sym = llvm::dyn_cast_if_present<mlir::SymbolRefAttr>(
+ callOp.getCallableForCallee());
+ if (!sym)
+ return nullptr;
+ return dyn_cast_or_null<cir::FuncOp>(
+ mlir::SymbolTable::lookupNearestSymbolFrom(callOp, sym));
+}
+
namespace {
struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
LoweringPreparePass() = default;
@@ -61,11 +71,20 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
/// Build a module init function that calls all the dynamic initializers.
void buildCXXGlobalInitFunc();
+ /// Materialize global ctor/dtor list
+ void buildGlobalCtorDtorList();
+
cir::FuncOp buildRuntimeFunction(
mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
cir::FuncType type,
cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage);
+ cir::GlobalOp buildRuntimeVariable(
+ mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+ mlir::Type type,
+ cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage,
+ cir::VisibilityKind visibility = cir::VisibilityKind::Default);
+
///
/// AST related
/// -----------
@@ -79,11 +98,33 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
llvm::StringMap<uint32_t> dynamicInitializerNames;
llvm::SmallVector<cir::FuncOp> dynamicInitializers;
+ /// List of ctors and their priorities to be called before main()
+ llvm::SmallVector<std::pair<std::string, uint32_t>, 4> globalCtorList;
+
void setASTContext(clang::ASTContext *c) { astCtx = c; }
};
} // namespace
+cir::GlobalOp LoweringPreparePass::buildRuntimeVariable(
+ mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+ mlir::Type type, cir::GlobalLinkageKind linkage,
+ cir::VisibilityKind visibility) {
+ cir::GlobalOp g = dyn_cast_or_null<cir::GlobalOp>(
+ mlir::SymbolTable::lookupNearestSymbolFrom(
+ mlirModule, mlir::StringAttr::get(mlirModule->getContext(), name)));
+ if (!g) {
+ g = cir::GlobalOp::create(builder, loc, name, type);
+ g.setLinkageAttr(
+ cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage));
+ mlir::SymbolTable::setSymbolVisibility(
+ g, mlir::SymbolTable::Visibility::Private);
+ g.setGlobalVisibilityAttr(
+ cir::VisibilityAttr::get(builder.getContext(), visibility));
+ }
+ return g;
+}
+
cir::FuncOp LoweringPreparePass::buildRuntimeFunction(
mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
cir::FuncType type, cir::GlobalLinkageKind linkage) {
@@ -634,7 +675,8 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) {
// Create a variable initialization function.
CIRBaseBuilderTy builder(getContext());
builder.setInsertionPointAfter(op);
- auto fnType = cir::FuncType::get({}, builder.getVoidTy());
+ cir::VoidType voidTy = builder.getVoidTy();
+ auto fnType = cir::FuncType::get({}, voidTy);
FuncOp f = buildRuntimeFunction(builder, fnName, op.getLoc(), fnType,
cir::GlobalLinkageKind::InternalLinkage);
@@ -649,8 +691,57 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) {
// Register the destructor call with __cxa_atexit
mlir::Region &dtorRegion = op.getDtorRegion();
if (!dtorRegion.empty()) {
- assert(!cir::MissingFeatures::opGlobalDtorLowering());
- llvm_unreachable("dtor region lowering is NYI");
+ assert(!cir::MissingFeatures::astVarDeclInterface());
+ assert(!cir::MissingFeatures::opGlobalThreadLocal());
+ // Create a variable that binds the atexit to this shared object.
+ builder.setInsertionPointToStart(&mlirModule.getBodyRegion().front());
+ cir::GlobalOp handle = buildRuntimeVariable(
+ builder, "__dso_handle", op.getLoc(), builder.getI8Type(),
+ cir::GlobalLinkageKind::ExternalLinkage, cir::VisibilityKind::Hidden);
+
+ // Look for the destructor call in dtorBlock
+ mlir::Block &dtorBlock = dtorRegion.front();
+ cir::CallOp dtorCall;
+ for (auto op : reverse(dtorBlock.getOps<cir::CallOp>())) {
+ dtorCall = op;
+ break;
+ }
+ assert(dtorCall && "Expected a dtor call");
+ cir::FuncOp dtorFunc = getCalledFunction(dtorCall);
+ assert(dtorFunc && "Expected a dtor call");
+
+ // Create a runtime helper function:
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ auto voidPtrTy = cir::PointerType::get(voidTy);
+ auto voidFnTy = cir::FuncType::get({voidPtrTy}, voidTy);
+ auto voidFnPtrTy = cir::PointerType::get(voidFnTy);
+ auto handlePtrTy = cir::PointerType::get(handle.getSymType());
+ auto fnAtExitType =
+ cir::FuncType::get({voidFnPtrTy, voidPtrTy, handlePtrTy}, voidTy);
+ const char *nameAtExit = "__cxa_atexit";
+ cir::FuncOp fnAtExit =
+ buildRuntimeFunction(builder, nameAtExit, op.getLoc(), fnAtExitType);
+
+ // Replace the dtor call with a call to __cxa_atexit(&dtor, &var,
+ // &__dso_handle)
+ builder.setInsertionPointAfter(dtorCall);
+ mlir::Value args[3];
+ auto dtorPtrTy = cir::PointerType::get(dtorFunc.getFunctionType());
+ // dtorPtrTy
+ args[0] = cir::GetGlobalOp::create(builder, dtorCall.getLoc(), dtorPtrTy,
+ dtorFunc.getSymName());
+ args[0] = cir::CastOp::create(builder, dtorCall.getLoc(), voidFnPtrTy,
+ cir::CastKind::bitcast, args[0]);
+ args[1] =
+ cir::CastOp::create(builder, dtorCall.getLoc(), voidPtrTy,
+ cir::CastKind::bitcast, dtorCall.getArgOperand(0));
+ args[2] = cir::GetGlobalOp::create(builder, handle.getLoc(), handlePtrTy,
+ handle.getSymName());
+ builder.createCallOp(dtorCall.getLoc(), fnAtExit, args);
+ dtorCall->erase();
+ entryBB->getOperations().splice(entryBB->end(), dtorBlock.getOperations(),
+ dtorBlock.begin(),
+ std::prev(dtorBlock.end()));
}
// Replace cir.yield with cir.return
@@ -660,11 +751,12 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) {
mlir::Block &block = op.getCtorRegion().front();
yieldOp = &block.getOperations().back();
} else {
- assert(!cir::MissingFeatures::opGlobalDtorLowering());
- llvm_unreachable("dtor region lowering is NYI");
+ assert(!dtorRegion.empty());
+ mlir::Block &block = dtorRegion.front();
+ yieldOp = &block.getOperations().back();
}
- assert(isa<YieldOp>(*yieldOp));
+ assert(isa<cir::YieldOp>(*yieldOp));
cir::ReturnOp::create(builder, yieldOp->getLoc());
return f;
}
@@ -689,11 +781,39 @@ void LoweringPreparePass::lowerGlobalOp(GlobalOp op) {
assert(!cir::MissingFeatures::opGlobalAnnotations());
}
+template <typename AttributeTy>
+static llvm::SmallVector<mlir::Attribute>
+prepareCtorDtorAttrList(mlir::MLIRContext *context,
+ llvm::ArrayRef<std::pair<std::string, uint32_t>> list) {
+ llvm::SmallVector<mlir::Attribute> attrs;
+ for (const auto &[name, priority] : list)
+ attrs.push_back(AttributeTy::get(context, name, priority));
+ return attrs;
+}
+
+void LoweringPreparePass::buildGlobalCtorDtorList() {
+ if (!globalCtorList.empty()) {
+ llvm::SmallVector<mlir::Attribute> globalCtors =
+ prepareCtorDtorAttrList<cir::GlobalCtorAttr>(&getContext(),
+ globalCtorList);
+
+ mlirModule->setAttr(cir::CIRDialect::getGlobalCtorsAttrName(),
+ mlir::ArrayAttr::get(&getContext(), globalCtors));
+ }
+
+ // We will eventually need to populate a global_dtor list, but that's not
+ // needed for globals with destructors. It will only be needed for functions
+ // that are marked as global destructors with an attribute.
+ assert(!cir::MissingFeatures::opGlobalDtorList());
+}
+
void LoweringPreparePass::buildCXXGlobalInitFunc() {
if (dynamicInitializers.empty())
return;
- assert(!cir::MissingFeatures::opGlobalCtorList());
+ // TODO: handle globals with a user-specified initialization priority.
+ // TODO: handle default priority more nicely.
+ assert(!cir::MissingFeatures::opGlobalCtorPriority());
SmallString<256> fnName;
// Include the filename in the symbol name. Including "sub_" matches gcc
@@ -722,6 +842,10 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() {
builder.setInsertionPointToStart(f.addEntryBlock());
for (cir::FuncOp &f : dynamicInitializers)
builder.createCallOp(f.getLoc(), f, {});
+ // Add the global init function (not the individual ctor functions) to the
+ // global ctor list.
+ globalCtorList.emplace_back(fnName,
+ cir::GlobalCtorAttr::getDefaultPriority());
cir::ReturnOp::create(builder, f.getLoc());
}
@@ -852,6 +976,7 @@ void LoweringPreparePass::runOnOperation() {
runOnOp(o);
buildCXXGlobalInitFunc();
+ buildGlobalCtorDtorList();
}
std::unique_ptr<Pass> mlir::createLoweringPreparePass() {
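
The dtor-region rewrite above is the CIR-level version of the registration a frontend classically performs. Expressed as portable C++ (the destroy_g/init_g helpers are hypothetical; the compiler itself passes the destructor directly when the ABI tolerates the signature mismatch):

    // Runtime interface, as in the comment in the pass above.
    extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
    extern "C" void *__dso_handle;

    struct Guard { ~Guard(); };
    Guard g;

    // Illustrative thunk; in the generated code the destructor is bitcast
    // to void(void*) instead.
    static void destroy_g(void *p) { static_cast<Guard *>(p)->~Guard(); }

    // What the rewritten initializer effectively does: instead of calling
    // ~Guard() inline, it registers the call to run at exit, keyed to this
    // shared object via &__dso_handle.
    static void init_g() { __cxa_atexit(&destroy_g, &g, &__dso_handle); }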
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index e9649af..a1ecfc7 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1771,9 +1771,13 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite(
}
// Rewrite op.
- rewriter.replaceOpWithNewOp<mlir::LLVM::GlobalOp>(
+ auto newOp = rewriter.replaceOpWithNewOp<mlir::LLVM::GlobalOp>(
op, llvmType, isConst, linkage, symbol, init.value_or(mlir::Attribute()),
alignment, addrSpace, isDsoLocal, isThreadLocal, comdatAttr, attributes);
+ newOp.setVisibility_Attr(mlir::LLVM::VisibilityAttr::get(
+ getContext(), lowerCIRVisibilityToLLVMVisibility(
+ op.getGlobalVisibilityAttr().getValue())));
+
return mlir::success();
}
@@ -2413,6 +2417,73 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter,
});
}
+static void buildCtorDtorList(
+ mlir::ModuleOp module, StringRef globalXtorName, StringRef llvmXtorName,
+ llvm::function_ref<std::pair<StringRef, int>(mlir::Attribute)> createXtor) {
+ llvm::SmallVector<std::pair<StringRef, int>> globalXtors;
+ for (const mlir::NamedAttribute namedAttr : module->getAttrs()) {
+ if (namedAttr.getName() == globalXtorName) {
+ for (auto attr : mlir::cast<mlir::ArrayAttr>(namedAttr.getValue()))
+ globalXtors.emplace_back(createXtor(attr));
+ break;
+ }
+ }
+
+ if (globalXtors.empty())
+ return;
+
+ mlir::OpBuilder builder(module.getContext());
+ builder.setInsertionPointToEnd(&module.getBodyRegion().back());
+
+ // Create a global array llvm.global_ctors with element type of
+ // struct { i32, ptr, ptr }
+ auto ctorPFTy = mlir::LLVM::LLVMPointerType::get(builder.getContext());
+ llvm::SmallVector<mlir::Type> ctorStructFields;
+ ctorStructFields.push_back(builder.getI32Type());
+ ctorStructFields.push_back(ctorPFTy);
+ ctorStructFields.push_back(ctorPFTy);
+
+ auto ctorStructTy = mlir::LLVM::LLVMStructType::getLiteral(
+ builder.getContext(), ctorStructFields);
+ auto ctorStructArrayTy =
+ mlir::LLVM::LLVMArrayType::get(ctorStructTy, globalXtors.size());
+
+ mlir::Location loc = module.getLoc();
+ auto newGlobalOp = mlir::LLVM::GlobalOp::create(
+ builder, loc, ctorStructArrayTy, /*constant=*/false,
+ mlir::LLVM::Linkage::Appending, llvmXtorName, mlir::Attribute());
+
+ builder.createBlock(&newGlobalOp.getRegion());
+ builder.setInsertionPointToEnd(newGlobalOp.getInitializerBlock());
+
+ mlir::Value result =
+ mlir::LLVM::UndefOp::create(builder, loc, ctorStructArrayTy);
+
+ for (auto [index, fn] : llvm::enumerate(globalXtors)) {
+ mlir::Value structInit =
+ mlir::LLVM::UndefOp::create(builder, loc, ctorStructTy);
+ mlir::Value initPriority = mlir::LLVM::ConstantOp::create(
+ builder, loc, ctorStructFields[0], fn.second);
+ mlir::Value initFuncAddr = mlir::LLVM::AddressOfOp::create(
+ builder, loc, ctorStructFields[1], fn.first);
+ mlir::Value initAssociate =
+ mlir::LLVM::ZeroOp::create(builder, loc, ctorStructFields[2]);
+ // Literal zero makes the InsertValueOp::create ambiguous.
+ llvm::SmallVector<int64_t> zero{0};
+ structInit = mlir::LLVM::InsertValueOp::create(builder, loc, structInit,
+ initPriority, zero);
+ structInit = mlir::LLVM::InsertValueOp::create(builder, loc, structInit,
+ initFuncAddr, 1);
+ // TODO: handle associated data for initializers.
+ structInit = mlir::LLVM::InsertValueOp::create(builder, loc, structInit,
+ initAssociate, 2);
+ result = mlir::LLVM::InsertValueOp::create(builder, loc, result, structInit,
+ index);
+ }
+
+ builder.create<mlir::LLVM::ReturnOp>(loc, result);
+}
+
// The applyPartialConversion function traverses blocks in the dominance order,
// so it does not lower any operations that are not reachable from the
// operations passed in as arguments. Since we do need to lower such code in
@@ -2519,6 +2590,15 @@ void ConvertCIRToLLVMPass::runOnOperation() {
if (failed(applyPartialConversion(ops, target, std::move(patterns))))
signalPassFailure();
+
+ // Emit the llvm.global_ctors array.
+ buildCtorDtorList(module, cir::CIRDialect::getGlobalCtorsAttrName(),
+ "llvm.global_ctors", [](mlir::Attribute attr) {
+ auto ctorAttr = mlir::cast<cir::GlobalCtorAttr>(attr);
+ return std::make_pair(ctorAttr.getName(),
+ ctorAttr.getPriority());
+ });
+ assert(!cir::MissingFeatures::opGlobalDtorList());
}
mlir::LogicalResult CIRToLLVMBrOpLowering::matchAndRewrite(