Diffstat (limited to 'clang/lib/CIR')
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuilder.h                              |  18
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp                            |   4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXX.cpp                                |   3
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenClass.cpp                              |   5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp                               |  98
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp                               |   2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp                       |  12
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp                         |   6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp                           |  58
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h                             |  40
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp                      |  43
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp                             |  89
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.h                               |   4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp                      |   4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenTypeCache.h                            |   3
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenTypes.cpp                              |  15
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp                            | 158
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp  |  48
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp                |  69
19 files changed, 603 insertions(+), 76 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index a6f10e6..50d585d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -380,6 +380,16 @@ public:
/*relative_layout=*/false);
}
+ mlir::Value createDynCastToVoid(mlir::Location loc, mlir::Value src,
+ bool vtableUseRelativeLayout) {
+ // TODO(cir): consider address space here.
+ assert(!cir::MissingFeatures::addressSpace());
+ cir::PointerType destTy = getVoidPtrTy();
+ return cir::DynamicCastOp::create(
+ *this, loc, destTy, cir::DynamicCastKind::Ptr, src,
+ cir::DynamicCastInfoAttr{}, vtableUseRelativeLayout);
+ }
+
Address createBaseClassAddr(mlir::Location loc, Address addr,
mlir::Type destType, unsigned offset,
bool assumeNotNull) {
@@ -519,6 +529,14 @@ public:
return createGlobal(module, loc, uniqueName, type, isConstant, linkage);
}
+ cir::StackSaveOp createStackSave(mlir::Location loc, mlir::Type ty) {
+ return cir::StackSaveOp::create(*this, loc, ty);
+ }
+
+ cir::StackRestoreOp createStackRestore(mlir::Location loc, mlir::Value v) {
+ return cir::StackRestoreOp::create(*this, loc, v);
+ }
+
mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
Address dstAddr, mlir::Type storageType,
mlir::Value src, const CIRGenBitFieldInfo &info,
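
For illustration, a minimal sketch of how a caller might use the new stack save/restore helpers (hypothetical caller; the real call site is in CIRGenDecl.cpp below — builder, loc, and allocaInt8PtrTy are assumed to be in scope):

    // Sketch only: bracket a dynamically sized alloca with stack save/restore.
    mlir::Value saved = builder.createStackSave(loc, allocaInt8PtrTy);
    // ... emit the dynamically sized cir.alloca for the VLA here ...
    builder.createStackRestore(loc, saved);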
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 4cfa91e..ea31871 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -463,7 +463,9 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return emitLibraryCall(*this, fd, e,
cgm.getBuiltinLibFunction(fd, builtinID));
- cgm.errorNYI(e->getSourceRange(), "unimplemented builtin call");
+ cgm.errorNYI(e->getSourceRange(),
+ std::string("unimplemented builtin call: ") +
+ getContext().BuiltinInfo.getName(builtinID));
return getUndefRValue(e->getType());
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
index 274d11b..171ce1c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
@@ -171,7 +171,8 @@ cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl gd) {
curCGF = nullptr;
setNonAliasAttributes(gd, fn);
- assert(!cir::MissingFeatures::opFuncAttributesForDefinition());
+ setCIRFunctionAttributesForDefinition(mlir::cast<FunctionDecl>(gd.getDecl()),
+ fn);
return fn;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index dd357ce..89f4926 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -478,8 +478,7 @@ void CIRGenFunction::getVTablePointers(BaseSubobject base,
for (const auto &nextBase : rd->bases()) {
const auto *baseDecl =
- cast<CXXRecordDecl>(
- nextBase.getType()->castAs<RecordType>()->getOriginalDecl())
+ cast<CXXRecordDecl>(nextBase.getType()->castAs<RecordType>()->getDecl())
->getDefinitionOrSelf();
// Ignore classes without a vtable.
@@ -1025,7 +1024,7 @@ void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd,
// Anonymous union members do not have their destructors called.
const RecordType *rt = type->getAsUnionType();
- if (rt && rt->getOriginalDecl()->isAnonymousStructOrUnion())
+ if (rt && rt->getDecl()->isAnonymousStructOrUnion())
continue;
CleanupKind cleanupKind = getCleanupKind(dtorKind);
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 039d290..4a19d91 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -44,38 +44,70 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d,
// If the type is variably-modified, emit all the VLA sizes for it.
if (ty->isVariablyModifiedType())
- cgm.errorNYI(d.getSourceRange(), "emitAutoVarDecl: variably modified type");
+ emitVariablyModifiedType(ty);
assert(!cir::MissingFeatures::openMP());
Address address = Address::invalid();
- if (!ty->isConstantSizeType())
- cgm.errorNYI(d.getSourceRange(), "emitAutoVarDecl: non-constant size type");
-
- // A normal fixed sized variable becomes an alloca in the entry block,
- // unless:
- // - it's an NRVO variable.
- // - we are compiling OpenMP and it's an OpenMP local variable.
- if (nrvo) {
- // The named return value optimization: allocate this variable in the
- // return slot, so that we can elide the copy when returning this
- // variable (C++0x [class.copy]p34).
- address = returnValue;
-
- if (const RecordDecl *rd = ty->getAsRecordDecl()) {
- if (const auto *cxxrd = dyn_cast<CXXRecordDecl>(rd);
- (cxxrd && !cxxrd->hasTrivialDestructor()) ||
- rd->isNonTrivialToPrimitiveDestroy())
- cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: set NRVO flag");
+ if (ty->isConstantSizeType()) {
+ // A normal fixed sized variable becomes an alloca in the entry block,
+ // unless:
+ // - it's an NRVO variable.
+ // - we are compiling OpenMP and it's an OpenMP local variable.
+ if (nrvo) {
+ // The named return value optimization: allocate this variable in the
+ // return slot, so that we can elide the copy when returning this
+ // variable (C++0x [class.copy]p34).
+ address = returnValue;
+
+ if (const RecordDecl *rd = ty->getAsRecordDecl()) {
+ if (const auto *cxxrd = dyn_cast<CXXRecordDecl>(rd);
+ (cxxrd && !cxxrd->hasTrivialDestructor()) ||
+ rd->isNonTrivialToPrimitiveDestroy())
+ cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: set NRVO flag");
+ }
+ } else {
+ // A normal fixed sized variable becomes an alloca in the entry block,
+ mlir::Type allocaTy = convertTypeForMem(ty);
+ // Create the temp alloca and declare variable using it.
+ address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
+ /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
+ declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()),
+ alignment);
}
} else {
- // A normal fixed sized variable becomes an alloca in the entry block,
- mlir::Type allocaTy = convertTypeForMem(ty);
- // Create the temp alloca and declare variable using it.
- address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
- /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
- declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()),
- alignment);
+ // Non-constant size type
+ assert(!cir::MissingFeatures::openMP());
+ if (!didCallStackSave) {
+ // Save the stack.
+ cir::PointerType defaultTy = AllocaInt8PtrTy;
+ CharUnits align = CharUnits::fromQuantity(
+ cgm.getDataLayout().getAlignment(defaultTy, false));
+ Address stack = createTempAlloca(defaultTy, align, loc, "saved_stack");
+
+ mlir::Value v = builder.createStackSave(loc, defaultTy);
+ assert(v.getType() == AllocaInt8PtrTy);
+ builder.createStore(loc, v, stack);
+
+ didCallStackSave = true;
+
+ // Push a cleanup block and restore the stack there.
+ // FIXME: in general circumstances, this should be an EH cleanup.
+ pushStackRestore(NormalCleanup, stack);
+ }
+
+ VlaSizePair vlaSize = getVLASize(ty);
+ mlir::Type memTy = convertTypeForMem(vlaSize.type);
+
+ // Allocate memory for the array.
+ address =
+ createTempAlloca(memTy, alignment, loc, d.getName(), vlaSize.numElts,
+ /*alloca=*/nullptr, builder.saveInsertionPoint());
+
+ // If we have debug info enabled, properly describe the VLA dimensions for
+ // this type by registering the vla size expression for each of the
+ // dimensions.
+ assert(!cir::MissingFeatures::generateDebugInfo());
}
emission.addr = address;
@@ -696,6 +728,16 @@ struct DestroyObject final : EHScopeStack::Cleanup {
cgf.emitDestroy(addr, type, destroyer);
}
};
+
+struct CallStackRestore final : EHScopeStack::Cleanup {
+ Address stack;
+ CallStackRestore(Address stack) : stack(stack) {}
+ void emit(CIRGenFunction &cgf) override {
+ mlir::Location loc = stack.getPointer().getLoc();
+ mlir::Value v = cgf.getBuilder().createLoad(loc, stack);
+ cgf.getBuilder().createStackRestore(loc, v);
+ }
+};
} // namespace
void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
@@ -805,6 +847,10 @@ CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
llvm_unreachable("Unknown DestructionKind");
}
+void CIRGenFunction::pushStackRestore(CleanupKind kind, Address spMem) {
+ ehStack.pushCleanup<CallStackRestore>(kind, spMem);
+}
+
/// Enter a destroy cleanup for the given local variable.
void CIRGenFunction::emitAutoVarTypeCleanup(
const CIRGenFunction::AutoVarEmission &emission,
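
As a concrete input, one VLA local is enough to exercise the new non-constant-size path above (minimal sketch; VLAs are standard C and a Clang extension in C++):

    void useVLA(unsigned n) {
      int vla[n];  // non-constant size: cir.stacksave + dynamically sized alloca
      vla[0] = 42;
    }              // scope exit runs the CallStackRestore cleanup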
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index f416571..4897c29 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -2068,7 +2068,7 @@ mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
mlir::OpBuilder::InsertionGuard guard(builder);
builder.restoreInsertionPoint(ip);
addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
- /*var type*/ ty, name, alignIntAttr);
+ /*var type*/ ty, name, alignIntAttr, arraySize);
assert(!cir::MissingFeatures::astVarDeclInterface());
}
return addr;
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 89e9ec4..81e5fe2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -614,7 +614,7 @@ bool ConstRecordBuilder::applyZeroInitPadding(const ASTRecordLayout &layout,
bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
RecordDecl *rd = ile->getType()
->castAs<clang::RecordType>()
- ->getOriginalDecl()
+ ->getDecl()
->getDefinitionOrSelf();
const ASTRecordLayout &layout = cgm.getASTContext().getASTRecordLayout(rd);
@@ -817,9 +817,8 @@ bool ConstRecordBuilder::build(const APValue &val, const RecordDecl *rd,
mlir::Attribute ConstRecordBuilder::finalize(QualType type) {
type = type.getNonReferenceType();
- RecordDecl *rd = type->castAs<clang::RecordType>()
- ->getOriginalDecl()
- ->getDefinitionOrSelf();
+ RecordDecl *rd =
+ type->castAs<clang::RecordType>()->getDecl()->getDefinitionOrSelf();
mlir::Type valTy = cgm.convertType(type);
return builder.build(valTy, rd->hasFlexibleArrayMember());
}
@@ -842,9 +841,8 @@ mlir::Attribute ConstRecordBuilder::buildRecord(ConstantEmitter &emitter,
ConstantAggregateBuilder constant(emitter.cgm);
ConstRecordBuilder builder(emitter, constant, CharUnits::Zero());
- const RecordDecl *rd = valTy->castAs<clang::RecordType>()
- ->getOriginalDecl()
- ->getDefinitionOrSelf();
+ const RecordDecl *rd =
+ valTy->castAs<clang::RecordType>()->getDecl()->getDefinitionOrSelf();
const CXXRecordDecl *cd = dyn_cast<CXXRecordDecl>(rd);
if (!builder.build(val, rd, false, cd, CharUnits::Zero()))
return nullptr;
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 637f9ef..138082b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1734,9 +1734,9 @@ mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
// LLVM we shall take VLA's, division by element size, etc.
//
// See more in `EmitSub` in CGExprScalar.cpp.
- assert(!cir::MissingFeatures::ptrDiffOp());
- cgf.cgm.errorNYI("ptrdiff");
- return {};
+ assert(!cir::MissingFeatures::llvmLoweringPtrDiffConsidersPointee());
+ return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.PtrDiffTy,
+ ops.lhs, ops.rhs);
}
mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
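
For reference, the kind of expression that now emits a cir::PtrDiffOp instead of an NYI error (minimal sketch):

    long elementCount(const int *first, const int *last) {
      return last - first;  // pointer difference: cir::PtrDiffOp with PtrDiffTy result
    }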
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 01a43a99..ba36cbe 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -410,6 +410,8 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
curFn = fn;
const Decl *d = gd.getDecl();
+
+ didCallStackSave = false;
curCodeDecl = d;
const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
curFuncDecl = d->getNonClosureContext();
@@ -1006,6 +1008,41 @@ mlir::Value CIRGenFunction::emitAlignmentAssumption(
offsetValue);
}
+CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) {
+ const VariableArrayType *vla =
+ cgm.getASTContext().getAsVariableArrayType(type);
+ assert(vla && "type was not a variable array type!");
+ return getVLASize(vla);
+}
+
+CIRGenFunction::VlaSizePair
+CIRGenFunction::getVLASize(const VariableArrayType *type) {
+ // The number of elements so far; always size_t.
+ mlir::Value numElements;
+
+ QualType elementType;
+ do {
+ elementType = type->getElementType();
+ mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
+ assert(vlaSize && "no size for VLA!");
+ assert(vlaSize.getType() == SizeTy);
+
+ if (!numElements) {
+ numElements = vlaSize;
+ } else {
+ // It's undefined behavior if this wraps around, so mark it that way.
+ // FIXME: Teach -fsanitize=undefined to trap this.
+
+ numElements =
+ builder.createMul(numElements.getLoc(), numElements, vlaSize,
+ cir::OverflowBehavior::NoUnsignedWrap);
+ }
+ } while ((type = getContext().getAsVariableArrayType(elementType)));
+
+ assert(numElements && "Undefined elements number");
+ return {numElements, elementType};
+}
+
// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
void CIRGenFunction::emitVariablyModifiedType(QualType type) {
@@ -1086,7 +1123,26 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
break;
case Type::VariableArray: {
- cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
+ // Losing element qualification here is fine.
+ const VariableArrayType *vat = cast<clang::VariableArrayType>(ty);
+
+ // Unknown size indication requires no size computation.
+ // Otherwise, evaluate and record it.
+ if (const Expr *sizeExpr = vat->getSizeExpr()) {
+ // It's possible that we might have emitted this already,
+ // e.g. with a typedef and a pointer to it.
+ mlir::Value &entry = vlaSizeMap[sizeExpr];
+ if (!entry) {
+ mlir::Value size = emitScalarExpr(sizeExpr);
+ assert(!cir::MissingFeatures::sanitizers());
+
+ // Always zexting here would be wrong if it weren't
+ // undefined behavior to have a negative bound.
+ // FIXME: What about when size's type is larger than size_t?
+ entry = builder.createIntCast(size, SizeTy);
+ }
+ }
+ type = vat->getElementType();
break;
}
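
A multidimensional VLA illustrates why getVLASize folds the per-dimension counts into one value with no-unsigned-wrap multiplication (sketch):

    void useMatrix(unsigned n, unsigned m) {
      float grid[n][m];  // getVLASize returns {numElts = n * m (nuw), float}
      grid[0][0] = 1.0f;
    }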
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index d71de2f..3c36f5c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -149,6 +149,10 @@ public:
using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
SymTableTy symbolTable;
+ /// Whether a cir.stacksave operation has been added. Used to avoid
+ /// inserting cir.stacksave for multiple VLAs in the same scope.
+ bool didCallStackSave = false;
+
/// Whether or not a Microsoft-style asm block has been processed within
/// this function. These can potentially set the return value.
bool sawAsmBlock = false;
@@ -188,6 +192,14 @@ public:
llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
+ // This keeps track of the associated size for each VLA type.
+ // We track this by the size expression rather than the type itself because
+ // in certain situations, like a const qualifier applied to an VLA typedef,
+ // multiple VLA types can share the same size expression.
+ // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+ // enter/leave scopes.
+ llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
+
public:
/// A non-RAII class containing all the information about a bound
/// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
@@ -436,6 +448,20 @@ public:
}
};
+ struct VlaSizePair {
+ mlir::Value numElts;
+ QualType type;
+
+ VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
+ };
+
+ /// Returns an MLIR::Value+QualType pair that corresponds to the size,
+ /// in non-variably-sized elements, of a variable length array type,
+ /// plus that largest non-variably-sized element type. Assumes that
+ /// the type has already been emitted with emitVariablyModifiedType.
+ VlaSizePair getVLASize(const VariableArrayType *type);
+ VlaSizePair getVLASize(QualType type);
+
void finishFunction(SourceLocation endLoc);
/// Determine whether the given initializer is trivial in the sense
@@ -583,6 +609,8 @@ public:
return needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup;
}
+ void pushStackRestore(CleanupKind kind, Address spMem);
+
/// Set the address of a local variable.
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
@@ -854,6 +882,7 @@ public:
protected:
bool performCleanup;
+ bool oldDidCallStackSave;
private:
RunCleanupsScope(const RunCleanupsScope &) = delete;
@@ -867,6 +896,8 @@ public:
explicit RunCleanupsScope(CIRGenFunction &cgf)
: performCleanup(true), cgf(cgf) {
cleanupStackDepth = cgf.ehStack.stable_begin();
+ oldDidCallStackSave = cgf.didCallStackSave;
+ cgf.didCallStackSave = false;
oldCleanupStackDepth = cgf.currentCleanupStackDepth;
cgf.currentCleanupStackDepth = cleanupStackDepth;
}
@@ -883,6 +914,7 @@ public:
assert(performCleanup && "Already forced cleanup");
{
mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ cgf.didCallStackSave = oldDidCallStackSave;
cgf.popCleanupBlocks(cleanupStackDepth);
performCleanup = false;
cgf.currentCleanupStackDepth = oldCleanupStackDepth;
@@ -1281,10 +1313,10 @@ public:
mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
- void emitNewArrayInitializer(const CXXNewExpr *E, QualType ElementType,
- mlir::Type ElementTy, Address BeginPtr,
- mlir::Value NumElements,
- mlir::Value AllocSizeWithoutCookie);
+ void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
+ mlir::Type elementTy, Address beginPtr,
+ mlir::Value numElements,
+ mlir::Value allocSizeWithoutCookie);
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
const CXXMethodDecl *md,
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index d30c975..d54d2e9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -744,8 +744,8 @@ static bool shouldUseExternalRttiDescriptor(CIRGenModule &cgm, QualType ty) {
return false;
if (const auto *recordTy = dyn_cast<RecordType>(ty)) {
- const CXXRecordDecl *rd =
- cast<CXXRecordDecl>(recordTy->getOriginalDecl())->getDefinitionOrSelf();
+ const auto *rd =
+ cast<CXXRecordDecl>(recordTy->getDecl())->getDefinitionOrSelf();
if (!rd->hasDefinition())
return false;
@@ -859,9 +859,7 @@ static bool canUseSingleInheritance(const CXXRecordDecl *rd) {
/// IsIncompleteClassType - Returns whether the given record type is incomplete.
static bool isIncompleteClassType(const RecordType *recordTy) {
- return !recordTy->getOriginalDecl()
- ->getDefinitionOrSelf()
- ->isCompleteDefinition();
+ return !recordTy->getDecl()->getDefinitionOrSelf()->isCompleteDefinition();
}
/// Returns whether the given type contains an
@@ -939,8 +937,7 @@ const char *vTableClassNameForType(const CIRGenModule &cgm, const Type *ty) {
case Type::Atomic:
// FIXME: GCC treats block pointers as fundamental types?!
case Type::BlockPointer:
- cgm.errorNYI("VTableClassNameForType: __fundamental_type_info");
- break;
+ return "_ZTVN10__cxxabiv123__fundamental_type_infoE";
case Type::ConstantArray:
case Type::IncompleteArray:
case Type::VariableArray:
@@ -957,9 +954,8 @@ const char *vTableClassNameForType(const CIRGenModule &cgm, const Type *ty) {
break;
case Type::Record: {
- const CXXRecordDecl *rd =
- cast<CXXRecordDecl>(cast<RecordType>(ty)->getOriginalDecl())
- ->getDefinitionOrSelf();
+ const auto *rd = cast<CXXRecordDecl>(cast<RecordType>(ty)->getDecl())
+ ->getDefinitionOrSelf();
if (!rd->hasDefinition() || !rd->getNumBases()) {
return classTypeInfo;
@@ -1031,8 +1027,8 @@ static cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &cgm,
return cir::GlobalLinkageKind::LinkOnceODRLinkage;
if (const RecordType *record = dyn_cast<RecordType>(ty)) {
- const CXXRecordDecl *rd =
- cast<CXXRecordDecl>(record->getOriginalDecl())->getDefinitionOrSelf();
+ const auto *rd =
+ cast<CXXRecordDecl>(record->getDecl())->getDefinitionOrSelf();
if (rd->hasAttr<WeakAttr>())
return cir::GlobalLinkageKind::WeakODRLinkage;
@@ -1382,9 +1378,8 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo(
break;
case Type::Record: {
- const auto *rd =
- cast<CXXRecordDecl>(cast<RecordType>(ty)->getOriginalDecl())
- ->getDefinitionOrSelf();
+ const auto *rd = cast<CXXRecordDecl>(cast<RecordType>(ty)->getDecl())
+ ->getDefinitionOrSelf();
if (!rd->hasDefinition() || !rd->getNumBases()) {
// We don't need to emit any fields.
break;
@@ -1651,8 +1646,7 @@ void CIRGenItaniumCXXABI::emitThrow(CIRGenFunction &cgf,
// Lowering pass to skip passing the trivial function.
//
if (const RecordType *recordTy = clangThrowType->getAs<RecordType>()) {
- CXXRecordDecl *rec =
- cast<CXXRecordDecl>(recordTy->getOriginalDecl()->getDefinition());
+ auto *rec = cast<CXXRecordDecl>(recordTy->getDecl()->getDefinition());
assert(!cir::MissingFeatures::isTrivialCtorOrDtor());
if (!rec->hasTrivialDestructor()) {
cgm.errorNYI("emitThrow: non-trivial destructor");
@@ -1951,6 +1945,15 @@ static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &cgf) {
return cgf.cgm.createRuntimeFunction(FTy, "__dynamic_cast");
}
+static Address emitDynamicCastToVoid(CIRGenFunction &cgf, mlir::Location loc,
+ QualType srcRecordTy, Address src) {
+ bool vtableUsesRelativeLayout =
+ cgf.cgm.getItaniumVTableContext().isRelativeLayout();
+ mlir::Value ptr = cgf.getBuilder().createDynCastToVoid(
+ loc, src.getPointer(), vtableUsesRelativeLayout);
+ return Address{ptr, src.getAlignment()};
+}
+
static cir::DynamicCastInfoAttr emitDynamicCastInfo(CIRGenFunction &cgf,
mlir::Location loc,
QualType srcRecordTy,
@@ -1985,10 +1988,8 @@ mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
bool isCastToVoid = destRecordTy.isNull();
assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference");
- if (isCastToVoid) {
- cgm.errorNYI(loc, "emitDynamicCastToVoid");
- return {};
- }
+ if (isCastToVoid)
+ return emitDynamicCastToVoid(cgf, loc, srcRecordTy, src).getPointer();
// If the destination is effectively final, the cast succeeds if and only
// if the dynamic type of the pointer is exactly the destination type.
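
The newly handled form is the cast-to-void flavor of dynamic_cast, which returns a pointer to the most-derived object and needs no runtime type check (sketch):

    struct Base { virtual ~Base(); };
    void *mostDerived(Base *b) {
      return dynamic_cast<void *>(b);  // handled by emitDynamicCastToVoid above
    }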
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 82b1051..127f763 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -88,6 +88,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
FP80Ty = cir::FP80Type::get(&getMLIRContext());
FP128Ty = cir::FP128Type::get(&getMLIRContext());
+ AllocaInt8PtrTy = cir::PointerType::get(UInt8Ty, cirAllocaAddressSpace);
+
PointerAlignInBytes =
astContext
.toCharUnitsFromBits(
@@ -449,7 +451,7 @@ void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd,
curCGF = nullptr;
setNonAliasAttributes(gd, funcOp);
- assert(!cir::MissingFeatures::opFuncAttributesForDefinition());
+ setCIRFunctionAttributesForDefinition(funcDecl, funcOp);
auto getPriority = [this](const auto *attr) -> int {
Expr *e = attr->getPriority();
@@ -1917,6 +1919,91 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl,
}
}
+void CIRGenModule::setCIRFunctionAttributesForDefinition(
+ const clang::FunctionDecl *decl, cir::FuncOp f) {
+ assert(!cir::MissingFeatures::opFuncUnwindTablesAttr());
+ assert(!cir::MissingFeatures::stackProtector());
+
+ std::optional<cir::InlineKind> existingInlineKind = f.getInlineKind();
+ bool isNoInline =
+ existingInlineKind && *existingInlineKind == cir::InlineKind::NoInline;
+ bool isAlwaysInline = existingInlineKind &&
+ *existingInlineKind == cir::InlineKind::AlwaysInline;
+
+ if (!decl) {
+ assert(!cir::MissingFeatures::hlsl());
+
+ if (!isAlwaysInline &&
+ codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
+ // If inlining is disabled and we don't have a declaration to control
+ // inlining, mark the function as 'noinline' unless it is explicitly
+ // marked as 'alwaysinline'.
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+ }
+
+ return;
+ }
+
+ assert(!cir::MissingFeatures::opFuncArmStreamingAttr());
+ assert(!cir::MissingFeatures::opFuncArmNewAttr());
+ assert(!cir::MissingFeatures::opFuncOptNoneAttr());
+ assert(!cir::MissingFeatures::opFuncMinSizeAttr());
+ assert(!cir::MissingFeatures::opFuncNakedAttr());
+ assert(!cir::MissingFeatures::opFuncNoDuplicateAttr());
+ assert(!cir::MissingFeatures::hlsl());
+
+ // Handle inline attributes
+ if (decl->hasAttr<NoInlineAttr>() && !isAlwaysInline) {
+ // Add noinline if the function isn't always_inline.
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+ } else if (decl->hasAttr<AlwaysInlineAttr>() && !isNoInline) {
+ // Don't override AlwaysInline with NoInline, or vice versa, since we can't
+ // specify both in IR.
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::AlwaysInline));
+ } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
+ // If inlining is disabled, force everything that isn't always_inline
+ // to carry an explicit noinline attribute.
+ if (!isAlwaysInline) {
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+ }
+ } else {
+ // Otherwise, propagate the inline hint attribute and potentially use its
+ // absence to mark things as noinline.
+ // Search function and template pattern redeclarations for inline.
+ if (auto *fd = dyn_cast<FunctionDecl>(decl)) {
+ // TODO: Share this checkForInline implementation with classic codegen.
+ // This logic is likely to change over time, so sharing would help ensure
+ // consistency.
+ auto checkForInline = [](const FunctionDecl *decl) {
+ auto checkRedeclForInline = [](const FunctionDecl *redecl) {
+ return redecl->isInlineSpecified();
+ };
+ if (any_of(decl->redecls(), checkRedeclForInline))
+ return true;
+ const FunctionDecl *pattern = decl->getTemplateInstantiationPattern();
+ if (!pattern)
+ return false;
+ return any_of(pattern->redecls(), checkRedeclForInline);
+ };
+ if (checkForInline(fd)) {
+ f.setInlineKindAttr(cir::InlineAttr::get(&getMLIRContext(),
+ cir::InlineKind::InlineHint));
+ } else if (codeGenOpts.getInlining() ==
+ CodeGenOptions::OnlyHintInlining &&
+ !fd->isInlined() && !isAlwaysInline) {
+ f.setInlineKindAttr(
+ cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+ }
+ }
+ }
+
+ assert(!cir::MissingFeatures::opFuncColdHotAttr());
+}
+
cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
StringRef mangledName, mlir::Type funcType, GlobalDecl gd, bool forVTable,
bool dontDefer, bool isThunk, ForDefinition_t isForDefinition,
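
These declarations exercise the new inline-kind selection (sketch; the modes named in the comments refer to CodeGenOptions::getInlining(), e.g. -fno-inline maps to OnlyAlwaysInlining):

    __attribute__((noinline))      void f();  // InlineKind::NoInline
    __attribute__((always_inline)) void g();  // InlineKind::AlwaysInline
    inline void h() {}                        // InlineKind::InlineHint under normal inlining
    void plain();                             // NoInline when OnlyAlwaysInlining is in effect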
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 690f0ed..1fc116d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -429,6 +429,10 @@ public:
void setFunctionAttributes(GlobalDecl gd, cir::FuncOp f,
bool isIncompleteFunction, bool isThunk);
+ /// Set extra attributes (inline, etc.) for a function.
+ void setCIRFunctionAttributesForDefinition(const clang::FunctionDecl *fd,
+ cir::FuncOp f);
+
void emitGlobalDefinition(clang::GlobalDecl gd,
mlir::Operation *op = nullptr);
void emitGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op);
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index ce14aa8..f638d39 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -398,6 +398,7 @@ void OpenACCRecipeBuilderBase::createRecipeDestroySection(
emitDestroy(block->getArgument(1), elementTy);
}
+ ls.forceCleanup();
mlir::acc::YieldOp::create(builder, locEnd);
}
void OpenACCRecipeBuilderBase::makeBoundsInit(
@@ -480,6 +481,7 @@ void OpenACCRecipeBuilderBase::createInitRecipe(
/*isInitSection=*/true);
}
+ ls.forceCleanup();
mlir::acc::YieldOp::create(builder, locEnd);
}
@@ -518,6 +520,7 @@ void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy(
cgf.emitAutoVarInit(tempDeclEmission);
builder.setInsertionPointToEnd(&copyRegion.back());
+ ls.forceCleanup();
mlir::acc::YieldOp::create(builder, locEnd);
}
@@ -662,6 +665,7 @@ void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
}
builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
+ ls.forceCleanup();
mlir::acc::YieldOp::create(builder, locEnd, block->getArgument(0));
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index 273ec7f..b5612d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -65,6 +65,9 @@ struct CIRGenTypeCache {
cir::PointerType VoidPtrTy;
cir::PointerType UInt8PtrTy;
+ /// void* in alloca address space
+ cir::PointerType AllocaInt8PtrTy;
+
/// The size and alignment of a pointer into the generic address space.
union {
unsigned char PointerAlignInBytes;
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index 2ab1ea0c..d1b91d0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -159,7 +159,7 @@ isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
for (const clang::CXXBaseSpecifier &i : crd->bases())
if (!isSafeToConvert(i.getType()
->castAs<RecordType>()
- ->getOriginalDecl()
+ ->getDecl()
->getDefinitionOrSelf(),
cgt, alreadyChecked))
return false;
@@ -279,8 +279,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
// Process record types before the type cache lookup.
if (const auto *recordType = dyn_cast<RecordType>(type))
- return convertRecordDeclType(
- recordType->getOriginalDecl()->getDefinitionOrSelf());
+ return convertRecordDeclType(recordType->getDecl()->getDefinitionOrSelf());
// Has the type already been processed?
TypeCacheTy::iterator tci = typeCache.find(ty);
@@ -421,6 +420,16 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
break;
}
+ case Type::VariableArray: {
+ const VariableArrayType *a = cast<VariableArrayType>(ty);
+ if (a->getIndexTypeCVRQualifiers() != 0)
+ cgm.errorNYI(SourceLocation(), "non trivial array types", type);
+ // VLAs resolve to the innermost element type; this matches
+ // the return of alloca, and there isn't any obviously better choice.
+ resultType = convertTypeForMem(a->getElementType());
+ break;
+ }
+
case Type::IncompleteArray: {
const IncompleteArrayType *arrTy = cast<IncompleteArrayType>(ty);
if (arrTy->getIndexTypeCVRQualifiers() != 0)
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 7af3dc1..f03b891 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1758,6 +1758,36 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) {
}).failed())
return failure();
+ // Parse optional inline kind: inline(never|always|hint)
+ if (parser.parseOptionalKeyword("inline").succeeded()) {
+ if (parser.parseLParen().failed())
+ return failure();
+
+ llvm::StringRef inlineKindStr;
+ const std::array<llvm::StringRef, cir::getMaxEnumValForInlineKind()>
+ allowedInlineKindStrs{
+ cir::stringifyInlineKind(cir::InlineKind::NoInline),
+ cir::stringifyInlineKind(cir::InlineKind::AlwaysInline),
+ cir::stringifyInlineKind(cir::InlineKind::InlineHint),
+ };
+ if (parser.parseOptionalKeyword(&inlineKindStr, allowedInlineKindStrs)
+ .failed())
+ return parser.emitError(parser.getCurrentLocation(),
+ "expected 'never', 'always', or 'hint'");
+
+ std::optional<InlineKind> inlineKind =
+ cir::symbolizeInlineKind(inlineKindStr);
+ if (!inlineKind)
+ return parser.emitError(parser.getCurrentLocation(),
+ "invalid inline kind");
+
+ state.addAttribute(getInlineKindAttrName(state.name),
+ cir::InlineAttr::get(builder.getContext(), *inlineKind));
+
+ if (parser.parseRParen().failed())
+ return failure();
+ }
+
// Parse the optional function body.
auto *body = state.addRegion();
OptionalParseResult parseResult = parser.parseOptionalRegion(
@@ -1851,6 +1881,10 @@ void cir::FuncOp::print(OpAsmPrinter &p) {
p << "(" << globalDtorPriority.value() << ")";
}
+ if (cir::InlineAttr inlineAttr = getInlineKindAttr()) {
+ p << " inline(" << cir::stringifyInlineKind(inlineAttr.getValue()) << ")";
+ }
+
// Print the body if this is not an external function.
Region &body = getOperation()->getRegion(0);
if (!body.empty()) {
@@ -2915,6 +2949,130 @@ LogicalResult cir::TypeInfoAttr::verify(
}
//===----------------------------------------------------------------------===//
+// TryOp
+//===----------------------------------------------------------------------===//
+
+void cir::TryOp::getSuccessorRegions(
+ mlir::RegionBranchPoint point,
+ llvm::SmallVectorImpl<mlir::RegionSuccessor> &regions) {
+ // The `try` and the `catchers` region branch back to the parent operation.
+ if (!point.isParent()) {
+ regions.push_back(mlir::RegionSuccessor());
+ return;
+ }
+
+ regions.push_back(mlir::RegionSuccessor(&getTryRegion()));
+
+ // TODO(CIR): If we know a target function never throws a specific type, we
+ // can remove the catch handler.
+ for (mlir::Region &handlerRegion : this->getHandlerRegions())
+ regions.push_back(mlir::RegionSuccessor(&handlerRegion));
+}
+
+static void
+printTryHandlerRegions(mlir::OpAsmPrinter &printer, cir::TryOp op,
+ mlir::MutableArrayRef<mlir::Region> handlerRegions,
+ mlir::ArrayAttr handlerTypes) {
+ if (!handlerTypes)
+ return;
+
+ for (const auto [typeIdx, typeAttr] : llvm::enumerate(handlerTypes)) {
+ if (typeIdx)
+ printer << " ";
+
+ if (mlir::isa<cir::CatchAllAttr>(typeAttr)) {
+ printer << "catch all ";
+ } else if (mlir::isa<cir::UnwindAttr>(typeAttr)) {
+ printer << "unwind ";
+ } else {
+ printer << "catch [type ";
+ printer.printAttribute(typeAttr);
+ printer << "] ";
+ }
+
+ printer.printRegion(handlerRegions[typeIdx],
+                        /*printEntryBlockArgs=*/false,
+ /*printBlockTerminators=*/true);
+ }
+}
+
+static mlir::ParseResult parseTryHandlerRegions(
+ mlir::OpAsmParser &parser,
+ llvm::SmallVectorImpl<std::unique_ptr<mlir::Region>> &handlerRegions,
+ mlir::ArrayAttr &handlerTypes) {
+
+ auto parseCheckedCatcherRegion = [&]() -> mlir::ParseResult {
+ handlerRegions.emplace_back(new mlir::Region);
+
+ mlir::Region &currRegion = *handlerRegions.back();
+ mlir::SMLoc regionLoc = parser.getCurrentLocation();
+ if (parser.parseRegion(currRegion)) {
+ handlerRegions.clear();
+ return failure();
+ }
+
+ if (!currRegion.empty() && !(currRegion.back().mightHaveTerminator() &&
+ currRegion.back().getTerminator()))
+ return parser.emitError(
+ regionLoc, "blocks are expected to be explicitly terminated");
+
+ return success();
+ };
+
+ bool hasCatchAll = false;
+ llvm::SmallVector<mlir::Attribute, 4> catcherAttrs;
+ while (parser.parseOptionalKeyword("catch").succeeded()) {
+ bool hasLSquare = parser.parseOptionalLSquare().succeeded();
+
+ llvm::StringRef attrStr;
+ if (parser.parseOptionalKeyword(&attrStr, {"all", "type"}).failed())
+ return parser.emitError(parser.getCurrentLocation(),
+ "expected 'all' or 'type' keyword");
+
+ bool isCatchAll = attrStr == "all";
+ if (isCatchAll) {
+ if (hasCatchAll)
+ return parser.emitError(parser.getCurrentLocation(),
+ "can't have more than one catch all");
+ hasCatchAll = true;
+ }
+
+ mlir::Attribute exceptionRTTIAttr;
+ if (!isCatchAll && parser.parseAttribute(exceptionRTTIAttr).failed())
+ return parser.emitError(parser.getCurrentLocation(),
+ "expected valid RTTI info attribute");
+
+ catcherAttrs.push_back(isCatchAll
+ ? cir::CatchAllAttr::get(parser.getContext())
+ : exceptionRTTIAttr);
+
+ if (hasLSquare && isCatchAll)
+ return parser.emitError(parser.getCurrentLocation(),
+ "catch all dosen't need RTTI info attribute");
+
+ if (hasLSquare && parser.parseRSquare().failed())
+ return parser.emitError(parser.getCurrentLocation(),
+ "expected `]` after RTTI info attribute");
+
+ if (parseCheckedCatcherRegion().failed())
+ return mlir::failure();
+ }
+
+ if (parser.parseOptionalKeyword("unwind").succeeded()) {
+ if (hasCatchAll)
+ return parser.emitError(parser.getCurrentLocation(),
+ "unwind can't be used with catch all");
+
+ catcherAttrs.push_back(cir::UnwindAttr::get(parser.getContext()));
+ if (parseCheckedCatcherRegion().failed())
+ return mlir::failure();
+ }
+
+ handlerTypes = parser.getBuilder().getArrayAttr(catcherAttrs);
+ return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//
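
At the source level, the handler kinds accepted by parseTryHandlerRegions line up with C++ catch clauses (sketch; the printed forms follow printTryHandlerRegions above):

    #include <exception>
    void mayThrow();
    void guarded() {
      try {
        mayThrow();
      } catch (const std::exception &) {  // printed as: catch [type <RTTI attr>] {...}
      } catch (...) {                     // printed as: catch all {...}
      }
    }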
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp
index 7d3c711..11ce2a8 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp
@@ -92,7 +92,53 @@ static mlir::Value
buildDynamicCastToVoidAfterNullCheck(cir::CIRBaseBuilderTy &builder,
clang::ASTContext &astCtx,
cir::DynamicCastOp op) {
- llvm_unreachable("dynamic cast to void is NYI");
+ mlir::Location loc = op.getLoc();
+ bool vtableUsesRelativeLayout = op.getRelativeLayout();
+
+ // TODO(cir): consider address space in this function.
+ assert(!cir::MissingFeatures::addressSpace());
+
+ mlir::Type vtableElemTy;
+ uint64_t vtableElemAlign;
+ if (vtableUsesRelativeLayout) {
+ vtableElemTy = builder.getSIntNTy(32);
+ vtableElemAlign = 4;
+ } else {
+ const auto &targetInfo = astCtx.getTargetInfo();
+ auto ptrdiffTy = targetInfo.getPtrDiffType(clang::LangAS::Default);
+ bool ptrdiffTyIsSigned = clang::TargetInfo::isTypeSigned(ptrdiffTy);
+ uint64_t ptrdiffTyWidth = targetInfo.getTypeWidth(ptrdiffTy);
+
+ vtableElemTy = cir::IntType::get(builder.getContext(), ptrdiffTyWidth,
+ ptrdiffTyIsSigned);
+ vtableElemAlign =
+ llvm::divideCeil(targetInfo.getPointerAlign(clang::LangAS::Default), 8);
+ }
+
+ // Access vtable to get the offset from the given object to its containing
+ // complete object.
+ // TODO: Add a specialized operation to get the object offset?
+ auto vptrTy = cir::VPtrType::get(builder.getContext());
+ cir::PointerType vptrPtrTy = builder.getPointerTo(vptrTy);
+ auto vptrPtr =
+ cir::VTableGetVPtrOp::create(builder, loc, vptrPtrTy, op.getSrc());
+ mlir::Value vptr = builder.createLoad(loc, vptrPtr);
+ mlir::Value elementPtr =
+ builder.createBitcast(vptr, builder.getPointerTo(vtableElemTy));
+ mlir::Value minusTwo = builder.getSignedInt(loc, -2, 64);
+ auto offsetToTopSlotPtr = cir::PtrStrideOp::create(
+ builder, loc, builder.getPointerTo(vtableElemTy), elementPtr, minusTwo);
+ mlir::Value offsetToTop =
+ builder.createAlignedLoad(loc, offsetToTopSlotPtr, vtableElemAlign);
+
+ // Add the offset to the given pointer to get the cast result.
+ // Cast the input pointer to a uint8_t* to allow pointer arithmetic.
+ cir::PointerType u8PtrTy = builder.getPointerTo(builder.getUIntNTy(8));
+ mlir::Value srcBytePtr = builder.createBitcast(op.getSrc(), u8PtrTy);
+ auto dstBytePtr =
+ cir::PtrStrideOp::create(builder, loc, u8PtrTy, srcBytePtr, offsetToTop);
+ // Cast the result to a void*.
+ return builder.createBitcast(dstBytePtr, builder.getVoidPtrTy());
}
mlir::Value
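
A model of what the emitted sequence computes, under the assumed non-relative Itanium vtable layout (offset-to-top at ptrdiff_t slot -2, RTTI pointer at slot -1, vptr pointing at the first virtual-function slot):

    #include <cstdint>
    // Hypothetical model of the lowering above, not the lowering itself.
    void *dynCastToVoidModel(void *obj) {
      auto *vtable = *static_cast<std::intptr_t **>(obj);  // load the vptr
      std::intptr_t offsetToTop = vtable[-2];              // offset-to-top slot
      return static_cast<char *>(obj) + offsetToTop;       // adjust to most-derived
    }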
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 3abba3d..0243bf1 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1499,6 +1499,54 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite(
return mlir::success();
}
+static uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) {
+ mlir::DataLayout layout(op.getParentOfType<mlir::ModuleOp>());
+ // For LLVM purposes we treat void as u8.
+ if (isa<cir::VoidType>(type))
+ type = cir::IntType::get(type.getContext(), 8, /*isSigned=*/false);
+ return llvm::divideCeil(layout.getTypeSizeInBits(type), 8);
+}
+
+mlir::LogicalResult CIRToLLVMPtrDiffOpLowering::matchAndRewrite(
+ cir::PtrDiffOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto dstTy = mlir::cast<cir::IntType>(op.getType());
+ mlir::Type llvmDstTy = getTypeConverter()->convertType(dstTy);
+
+ auto lhs = rewriter.create<mlir::LLVM::PtrToIntOp>(op.getLoc(), llvmDstTy,
+ adaptor.getLhs());
+ auto rhs = rewriter.create<mlir::LLVM::PtrToIntOp>(op.getLoc(), llvmDstTy,
+ adaptor.getRhs());
+
+ auto diff =
+ rewriter.create<mlir::LLVM::SubOp>(op.getLoc(), llvmDstTy, lhs, rhs);
+
+ cir::PointerType ptrTy = op.getLhs().getType();
+ assert(!cir::MissingFeatures::llvmLoweringPtrDiffConsidersPointee());
+ uint64_t typeSize = getTypeSize(ptrTy.getPointee(), *op);
+
+ // Avoid silly division by 1.
+ mlir::Value resultVal = diff.getResult();
+ if (typeSize != 1) {
+ auto typeSizeVal = rewriter.create<mlir::LLVM::ConstantOp>(
+ op.getLoc(), llvmDstTy, typeSize);
+
+ if (dstTy.isUnsigned()) {
+ auto uDiv =
+ rewriter.create<mlir::LLVM::UDivOp>(op.getLoc(), diff, typeSizeVal);
+ uDiv.setIsExact(true);
+ resultVal = uDiv.getResult();
+ } else {
+ auto sDiv =
+ rewriter.create<mlir::LLVM::SDivOp>(op.getLoc(), diff, typeSizeVal);
+ sDiv.setIsExact(true);
+ resultVal = sDiv.getResult();
+ }
+ }
+ rewriter.replaceOp(op, resultVal);
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMExpectOpLowering::matchAndRewrite(
cir::ExpectOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
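
The LLVM sequence produced by the pattern above is equivalent to this model (sketch; signed case, 4-byte pointee, LP64 assumed):

    // ptrtoint lhs, ptrtoint rhs, sub, then an exact sdiv by the pointee size.
    long ptrDiffModel(const int *lhs, const int *rhs) {
      long raw = (long)lhs - (long)rhs;  // PtrToIntOp + PtrToIntOp + SubOp
      return raw / (long)sizeof(int);    // SDivOp with the exact flag set
    }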
@@ -1539,6 +1587,7 @@ void CIRToLLVMFuncOpLowering::lowerFuncAttributes(
attr.getName() == getLinkageAttrNameString() ||
attr.getName() == func.getGlobalVisibilityAttrName() ||
attr.getName() == func.getDsoLocalAttrName() ||
+ attr.getName() == func.getInlineKindAttrName() ||
(filterArgAndResAttrs &&
(attr.getName() == func.getArgAttrsAttrName() ||
attr.getName() == func.getResAttrsAttrName())))
@@ -1623,6 +1672,12 @@ mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite(
assert(!cir::MissingFeatures::opFuncMultipleReturnVals());
+ if (auto inlineKind = op.getInlineKind()) {
+ fn.setNoInline(inlineKind == cir::InlineKind::NoInline);
+ fn.setInlineHint(inlineKind == cir::InlineKind::InlineHint);
+ fn.setAlwaysInline(inlineKind == cir::InlineKind::AlwaysInline);
+ }
+
fn.setVisibility_Attr(mlir::LLVM::VisibilityAttr::get(
getContext(), lowerCIRVisibilityToLLVMVisibility(
op.getGlobalVisibilityAttr().getValue())));
@@ -1793,12 +1848,20 @@ CIRToLLVMGlobalOpLowering::getComdatAttr(cir::GlobalOp &op,
if (!comdatOp) {
builder.setInsertionPointToStart(module.getBody());
comdatOp =
- builder.create<mlir::LLVM::ComdatOp>(module.getLoc(), comdatName);
+ mlir::LLVM::ComdatOp::create(builder, module.getLoc(), comdatName);
+ }
+
+ if (auto comdatSelector = comdatOp.lookupSymbol<mlir::LLVM::ComdatSelectorOp>(
+ op.getSymName())) {
+ return mlir::SymbolRefAttr::get(
+ builder.getContext(), comdatName,
+ mlir::FlatSymbolRefAttr::get(comdatSelector.getSymNameAttr()));
}
builder.setInsertionPointToStart(&comdatOp.getBody().back());
- auto selectorOp = builder.create<mlir::LLVM::ComdatSelectorOp>(
- comdatOp.getLoc(), op.getSymName(), mlir::LLVM::comdat::Comdat::Any);
+ auto selectorOp = mlir::LLVM::ComdatSelectorOp::create(
+ builder, comdatOp.getLoc(), op.getSymName(),
+ mlir::LLVM::comdat::Comdat::Any);
return mlir::SymbolRefAttr::get(
builder.getContext(), comdatName,
mlir::FlatSymbolRefAttr::get(selectorOp.getSymNameAttr()));