Diffstat (limited to 'clang/lib/CIR')
29 files changed, 1006 insertions, 159 deletions
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index fb74aa0..a67cbad 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -17,6 +17,7 @@
 #include "mlir/IR/Value.h"
 #include "clang/AST/CharUnits.h"
 #include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/MissingFeatures.h"
 #include "llvm/ADT/PointerIntPair.h"

 namespace clang::CIRGen {
@@ -90,6 +91,13 @@ public:
     return getPointer();
   }

+  /// Return the pointer contained in this class after authenticating it and
+  /// adding offset to it if necessary.
+  mlir::Value emitRawPointer() const {
+    assert(!cir::MissingFeatures::addressPointerAuthInfo());
+    return getBasePointer();
+  }
+
   mlir::Type getType() const {
     assert(mlir::cast<cir::PointerType>(
                pointerAndKnownNonNull.getPointer().getType())
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 0f4d6d2..a9983f8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -255,7 +255,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
   mlir::Value expected = builder.createLoad(loc, val1);
   mlir::Value desired = builder.createLoad(loc, val2);

-  auto cmpxchg = cir::AtomicCmpXchg::create(
+  auto cmpxchg = cir::AtomicCmpXchgOp::create(
       builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
       expected, desired,
       cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
@@ -404,7 +404,7 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   case AtomicExpr::AO__c11_atomic_exchange:
   case AtomicExpr::AO__atomic_exchange_n:
   case AtomicExpr::AO__atomic_exchange:
-    opName = cir::AtomicXchg::getOperationName();
+    opName = cir::AtomicXchgOp::getOperationName();
     break;

   case AtomicExpr::AO__opencl_atomic_init:
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index a6f10e6..50d585d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -380,6 +380,16 @@ public:
                                       /*relative_layout=*/false);
   }

+  mlir::Value createDynCastToVoid(mlir::Location loc, mlir::Value src,
+                                  bool vtableUseRelativeLayout) {
+    // TODO(cir): consider address space here.
+    assert(!cir::MissingFeatures::addressSpace());
+    cir::PointerType destTy = getVoidPtrTy();
+    return cir::DynamicCastOp::create(
+        *this, loc, destTy, cir::DynamicCastKind::Ptr, src,
+        cir::DynamicCastInfoAttr{}, vtableUseRelativeLayout);
+  }
+
   Address createBaseClassAddr(mlir::Location loc, Address addr,
                               mlir::Type destType, unsigned offset,
                               bool assumeNotNull) {
@@ -519,6 +529,14 @@ public:
     return createGlobal(module, loc, uniqueName, type, isConstant, linkage);
   }

+  cir::StackSaveOp createStackSave(mlir::Location loc, mlir::Type ty) {
+    return cir::StackSaveOp::create(*this, loc, ty);
+  }
+
+  cir::StackRestoreOp createStackRestore(mlir::Location loc, mlir::Value v) {
+    return cir::StackRestoreOp::create(*this, loc, v);
+  }
+
   mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
                                 Address dstAddr, mlir::Type storageType,
                                 mlir::Value src, const CIRGenBitFieldInfo &info,
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 4cfa91e..ea31871 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -463,7 +463,9 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
     return emitLibraryCall(*this, fd, e,
                            cgm.getBuiltinLibFunction(fd, builtinID));

-  cgm.errorNYI(e->getSourceRange(), "unimplemented builtin call");
+  cgm.errorNYI(e->getSourceRange(),
+               std::string("unimplemented builtin call: ") +
+                   getContext().BuiltinInfo.getName(builtinID));
   return getUndefRValue(e->getType());
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
index 274d11b..171ce1c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
@@ -171,7 +171,8 @@ cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl gd) {
   curCGF = nullptr;

   setNonAliasAttributes(gd, fn);
-  assert(!cir::MissingFeatures::opFuncAttributesForDefinition());
+  setCIRFunctionAttributesForDefinition(mlir::cast<FunctionDecl>(gd.getDecl()),
+                                        fn);
   return fn;
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 06f41cd..6d3741c4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -191,6 +191,15 @@ public:
   virtual void emitVTableDefinitions(CIRGenVTables &cgvt,
                                      const CXXRecordDecl *rd) = 0;

+  using DeleteOrMemberCallExpr =
+      llvm::PointerUnion<const CXXDeleteExpr *, const CXXMemberCallExpr *>;
+
+  virtual mlir::Value emitVirtualDestructorCall(CIRGenFunction &cgf,
+                                                const CXXDestructorDecl *dtor,
+                                                CXXDtorType dtorType,
+                                                Address thisAddr,
+                                                DeleteOrMemberCallExpr e) = 0;
+
   /// Emit any tables needed to implement virtual inheritance. For Itanium,
   /// this emits virtual table tables.
   virtual void emitVirtualInheritanceTables(const CXXRecordDecl *rd) = 0;
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 485b2c8..89f4926 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -478,8 +478,7 @@ void CIRGenFunction::getVTablePointers(BaseSubobject base,

   for (const auto &nextBase : rd->bases()) {
     const auto *baseDecl =
-        cast<CXXRecordDecl>(
-            nextBase.getType()->castAs<RecordType>()->getOriginalDecl())
+        cast<CXXRecordDecl>(nextBase.getType()->castAs<RecordType>()->getDecl())
             ->getDefinitionOrSelf();

     // Ignore classes without a vtable.
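The hunk below adds the first piece of deleting-destructor support. For orientation: in the Itanium C++ ABI a virtual destructor is emitted in several variants, and the deleting variant (D0) runs the complete destructor and then calls the class's operator delete; the CallDtorDelete cleanup introduced here emits that final call. A minimal C++ sketch (hypothetical example, not part of this patch) that exercises the new path:

    struct S {
      virtual ~S(); // D0 (deleting), D1 (complete), and D2 (base) variants
    };

    void destroy(S *p) {
      delete p; // dispatches to the deleting destructor, which runs ~S()
                // and then calls S::operator delete on the storage
    }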
@@ -895,6 +894,26 @@ void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
 }

 namespace {
+mlir::Value loadThisForDtorDelete(CIRGenFunction &cgf,
+                                  const CXXDestructorDecl *dd) {
+  if (Expr *thisArg = dd->getOperatorDeleteThisArg())
+    return cgf.emitScalarExpr(thisArg);
+  return cgf.loadCXXThis();
+}
+
+/// Call the operator delete associated with the current destructor.
+struct CallDtorDelete final : EHScopeStack::Cleanup {
+  CallDtorDelete() {}
+
+  void emit(CIRGenFunction &cgf) override {
+    const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(cgf.curFuncDecl);
+    const CXXRecordDecl *classDecl = dtor->getParent();
+    cgf.emitDeleteCall(dtor->getOperatorDelete(),
+                       loadThisForDtorDelete(cgf, dtor),
+                       cgf.getContext().getCanonicalTagType(classDecl));
+  }
+};
+
 class DestroyField final : public EHScopeStack::Cleanup {
   const FieldDecl *field;
   CIRGenFunction::Destroyer *destroyer;
@@ -932,7 +951,18 @@ void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd,
   // The deleting-destructor phase just needs to call the appropriate
   // operator delete that Sema picked up.
   if (dtorType == Dtor_Deleting) {
-    cgm.errorNYI(dd->getSourceRange(), "deleting destructor cleanups");
+    assert(dd->getOperatorDelete() &&
+           "operator delete missing - EnterDtorCleanups");
+    if (cxxStructorImplicitParamValue) {
+      cgm.errorNYI(dd->getSourceRange(), "deleting destructor with vtt");
+    } else {
+      if (dd->getOperatorDelete()->isDestroyingOperatorDelete()) {
+        cgm.errorNYI(dd->getSourceRange(),
+                     "deleting destructor with destroying operator delete");
+      } else {
+        ehStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
+      }
+    }
     return;
   }

@@ -994,7 +1024,7 @@ void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd,

       // Anonymous union members do not have their destructors called.
       const RecordType *rt = type->getAsUnionType();
-      if (rt && rt->getOriginalDecl()->isAnonymousStructOrUnion())
+      if (rt && rt->getDecl()->isAnonymousStructOrUnion())
         continue;

       CleanupKind cleanupKind = getCleanupKind(dtorKind);
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 039d290..4a19d91 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -44,38 +44,70 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d,

   // If the type is variably-modified, emit all the VLA sizes for it.
   if (ty->isVariablyModifiedType())
-    cgm.errorNYI(d.getSourceRange(), "emitAutoVarDecl: variably modified type");
+    emitVariablyModifiedType(ty);

   assert(!cir::MissingFeatures::openMP());

   Address address = Address::invalid();
-  if (!ty->isConstantSizeType())
-    cgm.errorNYI(d.getSourceRange(), "emitAutoVarDecl: non-constant size type");
-
-  // A normal fixed sized variable becomes an alloca in the entry block,
-  // unless:
-  // - it's an NRVO variable.
-  // - we are compiling OpenMP and it's an OpenMP local variable.
-  if (nrvo) {
-    // The named return value optimization: allocate this variable in the
-    // return slot, so that we can elide the copy when returning this
-    // variable (C++0x [class.copy]p34).
-    address = returnValue;
-
-    if (const RecordDecl *rd = ty->getAsRecordDecl()) {
-      if (const auto *cxxrd = dyn_cast<CXXRecordDecl>(rd);
-          (cxxrd && !cxxrd->hasTrivialDestructor()) ||
-          rd->isNonTrivialToPrimitiveDestroy())
-        cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: set NRVO flag");
+  if (ty->isConstantSizeType()) {
+    // A normal fixed sized variable becomes an alloca in the entry block,
+    // unless:
+    // - it's an NRVO variable.
+    // - we are compiling OpenMP and it's an OpenMP local variable.
+    if (nrvo) {
+      // The named return value optimization: allocate this variable in the
+      // return slot, so that we can elide the copy when returning this
+      // variable (C++0x [class.copy]p34).
+      address = returnValue;
+
+      if (const RecordDecl *rd = ty->getAsRecordDecl()) {
+        if (const auto *cxxrd = dyn_cast<CXXRecordDecl>(rd);
+            (cxxrd && !cxxrd->hasTrivialDestructor()) ||
+            rd->isNonTrivialToPrimitiveDestroy())
+          cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: set NRVO flag");
+      }
+    } else {
+      // A normal fixed sized variable becomes an alloca in the entry block,
+      mlir::Type allocaTy = convertTypeForMem(ty);
+      // Create the temp alloca and declare variable using it.
+      address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
+                                 /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
+      declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()),
+              alignment);
     }
   } else {
-    // A normal fixed sized variable becomes an alloca in the entry block,
-    mlir::Type allocaTy = convertTypeForMem(ty);
-    // Create the temp alloca and declare variable using it.
-    address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
-                               /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
-    declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()),
-            alignment);
+    // Non-constant size type
+    assert(!cir::MissingFeatures::openMP());
+    if (!didCallStackSave) {
+      // Save the stack.
+      cir::PointerType defaultTy = AllocaInt8PtrTy;
+      CharUnits align = CharUnits::fromQuantity(
+          cgm.getDataLayout().getAlignment(defaultTy, false));
+      Address stack = createTempAlloca(defaultTy, align, loc, "saved_stack");
+
+      mlir::Value v = builder.createStackSave(loc, defaultTy);
+      assert(v.getType() == AllocaInt8PtrTy);
+      builder.createStore(loc, v, stack);
+
+      didCallStackSave = true;
+
+      // Push a cleanup block and restore the stack there.
+      // FIXME: in general circumstances, this should be an EH cleanup.
+      pushStackRestore(NormalCleanup, stack);
+    }
+
+    VlaSizePair vlaSize = getVLASize(ty);
+    mlir::Type memTy = convertTypeForMem(vlaSize.type);
+
+    // Allocate memory for the array.
+    address =
+        createTempAlloca(memTy, alignment, loc, d.getName(), vlaSize.numElts,
+                         /*alloca=*/nullptr, builder.saveInsertionPoint());
+
+    // If we have debug info enabled, properly describe the VLA dimensions for
+    // this type by registering the vla size expression for each of the
+    // dimensions.
+    assert(!cir::MissingFeatures::generateDebugInfo());
   }

   emission.addr = address;
@@ -696,6 +728,16 @@ struct DestroyObject final : EHScopeStack::Cleanup {
     cgf.emitDestroy(addr, type, destroyer);
   }
 };
+
+struct CallStackRestore final : EHScopeStack::Cleanup {
+  Address stack;
+  CallStackRestore(Address stack) : stack(stack) {}
+  void emit(CIRGenFunction &cgf) override {
+    mlir::Location loc = stack.getPointer().getLoc();
+    mlir::Value v = cgf.getBuilder().createLoad(loc, stack);
+    cgf.getBuilder().createStackRestore(loc, v);
+  }
+};
 } // namespace

 void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
@@ -805,6 +847,10 @@ CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
   llvm_unreachable("Unknown DestructionKind");
 }

+void CIRGenFunction::pushStackRestore(CleanupKind kind, Address spMem) {
+  ehStack.pushCleanup<CallStackRestore>(kind, spMem);
+}
+
 /// Enter a destroy cleanup for the given local variable.
 void CIRGenFunction::emitAutoVarTypeCleanup(
     const CIRGenFunction::AutoVarEmission &emission,
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index f416571..4897c29 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -2068,7 +2068,7 @@ mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
       mlir::OpBuilder::InsertionGuard guard(builder);
       builder.restoreInsertionPoint(ip);
       addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
-                                  /*var type*/ ty, name, alignIntAttr);
+                                  /*var type*/ ty, name, alignIntAttr, arraySize);
       assert(!cir::MissingFeatures::astVarDeclInterface());
     }
     return addr;
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 97c0944..b1e9e76 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -130,13 +130,11 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
   const CXXMethodDecl *calleeDecl =
       devirtualizedMethod ? devirtualizedMethod : md;
   const CIRGenFunctionInfo *fInfo = nullptr;
-  if (isa<CXXDestructorDecl>(calleeDecl)) {
-    cgm.errorNYI(ce->getSourceRange(),
-                 "emitCXXMemberOrOperatorMemberCallExpr: destructor call");
-    return RValue::get(nullptr);
-  }
-
-  fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(calleeDecl);
+  if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl))
+    fInfo = &cgm.getTypes().arrangeCXXStructorDeclaration(
+        GlobalDecl(dtor, Dtor_Complete));
+  else
+    fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(calleeDecl);

   cir::FuncType ty = cgm.getTypes().getFunctionType(*fInfo);

@@ -151,9 +149,34 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
   // because then we know what the type is.
   bool useVirtualCall = canUseVirtualCall && !devirtualizedMethod;

-  if (isa<CXXDestructorDecl>(calleeDecl)) {
-    cgm.errorNYI(ce->getSourceRange(),
-                 "emitCXXMemberOrOperatorMemberCallExpr: destructor call");
+  if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl)) {
+    assert(ce->arg_begin() == ce->arg_end() &&
+           "Destructor shouldn't have explicit parameters");
+    assert(returnValue.isNull() && "Destructor shouldn't have return value");
+    if (useVirtualCall) {
+      cgm.getCXXABI().emitVirtualDestructorCall(*this, dtor, Dtor_Complete,
+                                                thisPtr.getAddress(),
+                                                cast<CXXMemberCallExpr>(ce));
+    } else {
+      GlobalDecl globalDecl(dtor, Dtor_Complete);
+      CIRGenCallee callee;
+      assert(!cir::MissingFeatures::appleKext());
+      if (!devirtualizedMethod) {
+        callee = CIRGenCallee::forDirect(
+            cgm.getAddrOfCXXStructor(globalDecl, fInfo, ty), globalDecl);
+      } else {
+        cgm.errorNYI(ce->getSourceRange(), "devirtualized destructor call");
+        return RValue::get(nullptr);
+      }
+
+      QualType thisTy =
+          isArrow ? base->getType()->getPointeeType() : base->getType();
+      // CIRGen does not pass CallOrInvoke here (different from OG LLVM
+      // codegen) because in practice it is always null even in OG.
+      emitCXXDestructorCall(globalDecl, callee, thisPtr.getPointer(), thisTy,
+                            /*implicitParam=*/nullptr,
+                            /*implicitParamTy=*/QualType(), ce);
+    }
     return RValue::get(nullptr);
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 89e9ec4..19ed656 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -614,7 +614,7 @@ bool ConstRecordBuilder::applyZeroInitPadding(const ASTRecordLayout &layout,
 bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
   RecordDecl *rd = ile->getType()
                        ->castAs<clang::RecordType>()
-                       ->getOriginalDecl()
+                       ->getDecl()
                        ->getDefinitionOrSelf();
   const ASTRecordLayout &layout = cgm.getASTContext().getASTRecordLayout(rd);

@@ -817,9 +817,8 @@ bool ConstRecordBuilder::build(const APValue &val, const RecordDecl *rd,

 mlir::Attribute ConstRecordBuilder::finalize(QualType type) {
   type = type.getNonReferenceType();
-  RecordDecl *rd = type->castAs<clang::RecordType>()
-                       ->getOriginalDecl()
-                       ->getDefinitionOrSelf();
+  RecordDecl *rd =
+      type->castAs<clang::RecordType>()->getDecl()->getDefinitionOrSelf();
   mlir::Type valTy = cgm.convertType(type);
   return builder.build(valTy, rd->hasFlexibleArrayMember());
 }
@@ -842,9 +841,8 @@ mlir::Attribute ConstRecordBuilder::buildRecord(ConstantEmitter &emitter,
   ConstantAggregateBuilder constant(emitter.cgm);
   ConstRecordBuilder builder(emitter, constant, CharUnits::Zero());

-  const RecordDecl *rd = valTy->castAs<clang::RecordType>()
-                             ->getOriginalDecl()
-                             ->getDefinitionOrSelf();
+  const RecordDecl *rd =
+      valTy->castAs<clang::RecordType>()->getDecl()->getDefinitionOrSelf();
   const CXXRecordDecl *cd = dyn_cast<CXXRecordDecl>(rd);
   if (!builder.build(val, rd, false, cd, CharUnits::Zero()))
     return nullptr;
@@ -873,7 +871,7 @@ bool ConstRecordBuilder::updateRecord(ConstantEmitter &emitter,
 class ConstExprEmitter
     : public StmtVisitor<ConstExprEmitter, mlir::Attribute, QualType> {
   CIRGenModule &cgm;
-  LLVM_ATTRIBUTE_UNUSED ConstantEmitter &emitter;
+  [[maybe_unused]] ConstantEmitter &emitter;

 public:
   ConstExprEmitter(ConstantEmitter &emitter)
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 637f9ef..138082b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1734,9 +1734,9 @@ mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
   // LLVM we shall take VLA's, division by element size, etc.
   //
   // See more in `EmitSub` in CGExprScalar.cpp.
-  assert(!cir::MissingFeatures::ptrDiffOp());
-  cgf.cgm.errorNYI("ptrdiff");
-  return {};
+  assert(!cir::MissingFeatures::llvmLoweringPtrDiffConsidersPointee());
+  return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.PtrDiffTy,
+                                ops.lhs, ops.rhs);
 }

 mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 7a774e0..ba36cbe 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -410,6 +410,8 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
   curFn = fn;

   const Decl *d = gd.getDecl();
+
+  didCallStackSave = false;
   curCodeDecl = d;
   const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
   curFuncDecl = d->getNonClosureContext();
@@ -678,7 +680,13 @@ void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
   // possible to delegate the destructor body to the complete
   // destructor.  Do so.
   if (dtorType == Dtor_Deleting) {
-    cgm.errorNYI(dtor->getSourceRange(), "deleting destructor");
+    RunCleanupsScope dtorEpilogue(*this);
+    enterDtorCleanups(dtor, Dtor_Deleting);
+    if (haveInsertPoint()) {
+      QualType thisTy = dtor->getFunctionObjectParameterType();
+      emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
+                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
+    }
     return;
   }

@@ -1000,6 +1008,41 @@ mlir::Value CIRGenFunction::emitAlignmentAssumption(
                                  offsetValue);
 }

+CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) {
+  const VariableArrayType *vla =
+      cgm.getASTContext().getAsVariableArrayType(type);
+  assert(vla && "type was not a variable array type!");
+  return getVLASize(vla);
+}
+
+CIRGenFunction::VlaSizePair
+CIRGenFunction::getVLASize(const VariableArrayType *type) {
+  // The number of elements so far; always size_t.
+  mlir::Value numElements;
+
+  QualType elementType;
+  do {
+    elementType = type->getElementType();
+    mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
+    assert(vlaSize && "no size for VLA!");
+    assert(vlaSize.getType() == SizeTy);
+
+    if (!numElements) {
+      numElements = vlaSize;
+    } else {
+      // It's undefined behavior if this wraps around, so mark it that way.
+      // FIXME: Teach -fsanitize=undefined to trap this.
+      numElements =
+          builder.createMul(numElements.getLoc(), numElements, vlaSize,
+                            cir::OverflowBehavior::NoUnsignedWrap);
+    }
+  } while ((type = getContext().getAsVariableArrayType(elementType)));
+
+  assert(numElements && "Undefined elements number");
+  return {numElements, elementType};
+}
+
 // TODO(cir): Most of this function can be shared between CIRGen
 // and traditional LLVM codegen
 void CIRGenFunction::emitVariablyModifiedType(QualType type) {
@@ -1080,7 +1123,26 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
       break;

     case Type::VariableArray: {
-      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
+      // Losing element qualification here is fine.
+      const VariableArrayType *vat = cast<clang::VariableArrayType>(ty);
+
+      // Unknown size indication requires no size computation.
+      // Otherwise, evaluate and record it.
+      if (const Expr *sizeExpr = vat->getSizeExpr()) {
+        // It's possible that we might have emitted this already,
+        // e.g. with a typedef and a pointer to it.
+        mlir::Value &entry = vlaSizeMap[sizeExpr];
+        if (!entry) {
+          mlir::Value size = emitScalarExpr(sizeExpr);
+          assert(!cir::MissingFeatures::sanitizers());
+
+          // Always zexting here would be wrong if it weren't
+          // undefined behavior to have a negative bound.
+          // FIXME: What about when size's type is larger than size_t?
+          entry = builder.createIntCast(size, SizeTy);
+        }
+      }
+      type = vat->getElementType();
       break;
     }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index d71de2f..3c36f5c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -149,6 +149,10 @@ public:
   using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
   SymTableTy symbolTable;

+  /// Whether a cir.stacksave operation has been added. Used to avoid
+  /// inserting cir.stacksave for multiple VLAs in the same scope.
+  bool didCallStackSave = false;
+
   /// Whether or not a Microsoft-style asm block has been processed within
   /// this function. These can potentially set the return value.
   bool sawAsmBlock = false;
@@ -188,6 +192,14 @@ public:
   llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
   llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;

+  // This keeps track of the associated size for each VLA type.
+  // We track this by the size expression rather than the type itself because
+  // in certain situations, like a const qualifier applied to a VLA typedef,
+  // multiple VLA types can share the same size expression.
+  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+  // enter/leave scopes.
+  llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
+
 public:
   /// A non-RAII class containing all the information about a bound
   /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
@@ -436,6 +448,20 @@ public:
     }
   };

+  struct VlaSizePair {
+    mlir::Value numElts;
+    QualType type;
+
+    VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
+  };
+
+  /// Returns an MLIR::Value+QualType pair that corresponds to the size,
+  /// in non-variably-sized elements, of a variable length array type,
+  /// plus that largest non-variably-sized element type. Assumes that
+  /// the type has already been emitted with emitVariablyModifiedType.
+  VlaSizePair getVLASize(const VariableArrayType *type);
+  VlaSizePair getVLASize(QualType type);
+
   void finishFunction(SourceLocation endLoc);

   /// Determine whether the given initializer is trivial in the sense
@@ -583,6 +609,8 @@ public:
     return needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup;
   }

+  void pushStackRestore(CleanupKind kind, Address spMem);
+
   /// Set the address of a local variable.
   void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
     assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
@@ -854,6 +882,7 @@ public:

   protected:
     bool performCleanup;
+    bool oldDidCallStackSave;

   private:
     RunCleanupsScope(const RunCleanupsScope &) = delete;
@@ -867,6 +896,8 @@ public:
     explicit RunCleanupsScope(CIRGenFunction &cgf)
         : performCleanup(true), cgf(cgf) {
       cleanupStackDepth = cgf.ehStack.stable_begin();
+      oldDidCallStackSave = cgf.didCallStackSave;
+      cgf.didCallStackSave = false;
       oldCleanupStackDepth = cgf.currentCleanupStackDepth;
       cgf.currentCleanupStackDepth = cleanupStackDepth;
     }
@@ -883,6 +914,7 @@ public:
       assert(performCleanup && "Already forced cleanup");
       {
         mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+        cgf.didCallStackSave = oldDidCallStackSave;
         cgf.popCleanupBlocks(cleanupStackDepth);
         performCleanup = false;
         cgf.currentCleanupStackDepth = oldCleanupStackDepth;
       }
@@ -1281,10 +1313,10 @@ public:

   mlir::Value emitCXXNewExpr(const CXXNewExpr *e);

-  void emitNewArrayInitializer(const CXXNewExpr *E, QualType ElementType,
-                               mlir::Type ElementTy, Address BeginPtr,
-                               mlir::Value NumElements,
-                               mlir::Value AllocSizeWithoutCookie);
+  void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
+                               mlir::Type elementTy, Address beginPtr,
+                               mlir::Value numElements,
+                               mlir::Value allocSizeWithoutCookie);

   RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
                                        const CXXMethodDecl *md,
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 9e490c6d..c184d4a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -95,7 +95,10 @@ public:
                                      clang::GlobalDecl gd, Address thisAddr,
                                      mlir::Type ty, SourceLocation loc) override;
-
+  mlir::Value emitVirtualDestructorCall(CIRGenFunction &cgf,
+                                        const CXXDestructorDecl *dtor,
+                                        CXXDtorType dtorType, Address thisAddr,
+                                        DeleteOrMemberCallExpr e) override;
   mlir::Value getVTableAddressPoint(BaseSubobject base,
                                     const CXXRecordDecl *vtableClass) override;
   mlir::Value getVTableAddressPointInStructorWithVTT(
@@ -465,6 +468,29 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &cgvt,
   }
 }

+mlir::Value CIRGenItaniumCXXABI::emitVirtualDestructorCall(
+    CIRGenFunction &cgf, const CXXDestructorDecl *dtor, CXXDtorType dtorType,
+    Address thisAddr, DeleteOrMemberCallExpr expr) {
+  auto *callExpr = dyn_cast<const CXXMemberCallExpr *>(expr);
+  auto *delExpr = dyn_cast<const CXXDeleteExpr *>(expr);
+  assert((callExpr != nullptr) ^ (delExpr != nullptr));
+  assert(callExpr == nullptr || callExpr->arg_begin() == callExpr->arg_end());
+  assert(dtorType == Dtor_Deleting || dtorType == Dtor_Complete);
+
+  GlobalDecl globalDecl(dtor, dtorType);
+  const CIRGenFunctionInfo *fnInfo =
+      &cgm.getTypes().arrangeCXXStructorDeclaration(globalDecl);
+  const cir::FuncType &fnTy = cgm.getTypes().getFunctionType(*fnInfo);
+  auto callee = CIRGenCallee::forVirtual(callExpr, globalDecl, thisAddr, fnTy);
+
+  QualType thisTy =
+      callExpr ? callExpr->getObjectType() : delExpr->getDestroyedType();
+
+  cgf.emitCXXDestructorCall(globalDecl, callee, thisAddr.emitRawPointer(),
+                            thisTy, nullptr, QualType(), nullptr);
+  return nullptr;
+}
+
 void CIRGenItaniumCXXABI::emitVirtualInheritanceTables(
     const CXXRecordDecl *rd) {
   CIRGenVTables &vtables = cgm.getVTables();
@@ -718,8 +744,8 @@ static bool shouldUseExternalRttiDescriptor(CIRGenModule &cgm, QualType ty) {
     return false;

   if (const auto *recordTy = dyn_cast<RecordType>(ty)) {
-    const CXXRecordDecl *rd =
-        cast<CXXRecordDecl>(recordTy->getOriginalDecl())->getDefinitionOrSelf();
+    const auto *rd =
+        cast<CXXRecordDecl>(recordTy->getDecl())->getDefinitionOrSelf();
     if (!rd->hasDefinition())
       return false;

@@ -833,9 +859,7 @@ static bool canUseSingleInheritance(const CXXRecordDecl *rd) {

 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
 static bool isIncompleteClassType(const RecordType *recordTy) {
-  return !recordTy->getOriginalDecl()
-              ->getDefinitionOrSelf()
-              ->isCompleteDefinition();
+  return !recordTy->getDecl()->getDefinitionOrSelf()->isCompleteDefinition();
 }

 /// Returns whether the given type contains an
@@ -913,8 +937,7 @@ const char *vTableClassNameForType(const CIRGenModule &cgm, const Type *ty) {
   case Type::Atomic:
   // FIXME: GCC treats block pointers as fundamental types?!
   case Type::BlockPointer:
-    cgm.errorNYI("VTableClassNameForType: __fundamental_type_info");
-    break;
+    return "_ZTVN10__cxxabiv123__fundamental_type_infoE";
   case Type::ConstantArray:
   case Type::IncompleteArray:
   case Type::VariableArray:
     break;
@@ -927,13 +950,11 @@
   case Type::Enum:
-    cgm.errorNYI("VTableClassNameForType: Enum");
-    break;
+    return "_ZTVN10__cxxabiv116__enum_type_infoE";

   case Type::Record: {
-    const CXXRecordDecl *rd =
-        cast<CXXRecordDecl>(cast<RecordType>(ty)->getOriginalDecl())
-            ->getDefinitionOrSelf();
+    const auto *rd = cast<CXXRecordDecl>(cast<RecordType>(ty)->getDecl())
+                         ->getDefinitionOrSelf();

     if (!rd->hasDefinition() || !rd->getNumBases()) {
       return classTypeInfo;
@@ -1005,8 +1026,8 @@ static cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &cgm,
     return cir::GlobalLinkageKind::LinkOnceODRLinkage;

   if (const RecordType *record = dyn_cast<RecordType>(ty)) {
-    const CXXRecordDecl *rd =
-        cast<CXXRecordDecl>(record->getOriginalDecl())->getDefinitionOrSelf();
+    const auto *rd =
+        cast<CXXRecordDecl>(record->getDecl())->getDefinitionOrSelf();
     if (rd->hasAttr<WeakAttr>())
       return cir::GlobalLinkageKind::WeakODRLinkage;
@@ -1356,9 +1377,8 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo(
     break;

   case Type::Record: {
-    const auto *rd =
-        cast<CXXRecordDecl>(cast<RecordType>(ty)->getOriginalDecl())
-            ->getDefinitionOrSelf();
+    const auto *rd = cast<CXXRecordDecl>(cast<RecordType>(ty)->getDecl())
+                         ->getDefinitionOrSelf();
     if (!rd->hasDefinition() || !rd->getNumBases()) {
       // We don't need to emit any fields.
       break;
     }
@@ -1625,8 +1645,7 @@ void CIRGenItaniumCXXABI::emitThrow(CIRGenFunction &cgf,
   // Lowering pass to skip passing the trivial function.
   //
   if (const RecordType *recordTy = clangThrowType->getAs<RecordType>()) {
-    CXXRecordDecl *rec =
-        cast<CXXRecordDecl>(recordTy->getOriginalDecl()->getDefinition());
+    auto *rec = cast<CXXRecordDecl>(recordTy->getDecl()->getDefinition());
     assert(!cir::MissingFeatures::isTrivialCtorOrDtor());
     if (!rec->hasTrivialDestructor()) {
       cgm.errorNYI("emitThrow: non-trivial destructor");
@@ -1925,6 +1944,15 @@ static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &cgf) {
   return cgf.cgm.createRuntimeFunction(FTy, "__dynamic_cast");
 }

+static Address emitDynamicCastToVoid(CIRGenFunction &cgf, mlir::Location loc,
+                                     QualType srcRecordTy, Address src) {
+  bool vtableUsesRelativeLayout =
+      cgf.cgm.getItaniumVTableContext().isRelativeLayout();
+  mlir::Value ptr = cgf.getBuilder().createDynCastToVoid(
+      loc, src.getPointer(), vtableUsesRelativeLayout);
+  return Address{ptr, src.getAlignment()};
+}
+
 static cir::DynamicCastInfoAttr emitDynamicCastInfo(CIRGenFunction &cgf,
                                                     mlir::Location loc,
                                                     QualType srcRecordTy,
@@ -1959,10 +1987,8 @@ mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
   bool isCastToVoid = destRecordTy.isNull();
   assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference");

-  if (isCastToVoid) {
-    cgm.errorNYI(loc, "emitDynamicCastToVoid");
-    return {};
-  }
+  if (isCastToVoid)
+    return emitDynamicCastToVoid(cgf, loc, srcRecordTy, src).getPointer();

   // If the destination is effectively final, the cast succeeds if and only
   // if the dynamic type of the pointer is exactly the destination type.
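The emitDynamicCastToVoid helper added above covers the `dynamic_cast<void *>` form, which the Itanium ABI resolves without calling __dynamic_cast: the pointer is adjusted to the most-derived object using the offset-to-top entry of the vtable (hence the relative-layout flag). A short sketch (hypothetical example) of source that now lowers through cir::DynamicCastOp instead of hitting the old errorNYI:

    struct Base {
      virtual ~Base(); // polymorphic, so dynamic_cast is allowed
    };

    void *mostDerived(Base *b) {
      return dynamic_cast<void *>(b); // pointer to the most-derived object
    }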
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index fe1ea56..127f763 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -88,6 +88,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
   FP80Ty = cir::FP80Type::get(&getMLIRContext());
   FP128Ty = cir::FP128Type::get(&getMLIRContext());

+  AllocaInt8PtrTy = cir::PointerType::get(UInt8Ty, cirAllocaAddressSpace);
+
   PointerAlignInBytes =
       astContext
           .toCharUnitsFromBits(
@@ -449,17 +451,49 @@ void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd,
   curCGF = nullptr;

   setNonAliasAttributes(gd, funcOp);
-  assert(!cir::MissingFeatures::opFuncAttributesForDefinition());
+  setCIRFunctionAttributesForDefinition(funcDecl, funcOp);
+
+  auto getPriority = [this](const auto *attr) -> int {
+    Expr *e = attr->getPriority();
+    if (e)
+      return e->EvaluateKnownConstInt(this->getASTContext()).getExtValue();
+    return attr->DefaultPriority;
+  };

-  if (funcDecl->getAttr<ConstructorAttr>())
-    errorNYI(funcDecl->getSourceRange(), "constructor attribute");
-  if (funcDecl->getAttr<DestructorAttr>())
-    errorNYI(funcDecl->getSourceRange(), "destructor attribute");
+  if (const ConstructorAttr *ca = funcDecl->getAttr<ConstructorAttr>())
+    addGlobalCtor(funcOp, getPriority(ca));
+  if (const DestructorAttr *da = funcDecl->getAttr<DestructorAttr>())
+    addGlobalDtor(funcOp, getPriority(da));

   if (funcDecl->getAttr<AnnotateAttr>())
     errorNYI(funcDecl->getSourceRange(), "deferredAnnotations");
 }

+/// Track functions to be called before main() runs.
+void CIRGenModule::addGlobalCtor(cir::FuncOp ctor,
+                                 std::optional<int> priority) {
+  assert(!cir::MissingFeatures::globalCtorLexOrder());
+  assert(!cir::MissingFeatures::globalCtorAssociatedData());
+
+  // Traditional LLVM codegen directly adds the function to the list of global
+  // ctors. In CIR we just add a global_ctor attribute to the function. The
+  // global list is created in LoweringPrepare.
+  //
+  // FIXME(from traditional LLVM): Type coercion of void()* types.
+  ctor.setGlobalCtorPriority(priority);
+}
+
+/// Add a function to the list that will be called when the module is unloaded.
+void CIRGenModule::addGlobalDtor(cir::FuncOp dtor,
+                                 std::optional<int> priority) {
+  if (codeGenOpts.RegisterGlobalDtorsWithAtExit &&
+      (!getASTContext().getTargetInfo().getTriple().isOSAIX()))
+    errorNYI(dtor.getLoc(), "registerGlobalDtorsWithAtExit");
+
+  // FIXME(from traditional LLVM): Type coercion of void()* types.
+  dtor.setGlobalDtorPriority(priority);
+}
+
 void CIRGenModule::handleCXXStaticMemberVarInstantiation(VarDecl *vd) {
   VarDecl::DefinitionKind dk = vd->isThisDeclarationADefinition();
   if (dk == VarDecl::Definition && vd->hasAttr<DLLImportAttr>())
@@ -1885,6 +1919,91 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl,
   }
 }

+void CIRGenModule::setCIRFunctionAttributesForDefinition(
+    const clang::FunctionDecl *decl, cir::FuncOp f) {
+  assert(!cir::MissingFeatures::opFuncUnwindTablesAttr());
+  assert(!cir::MissingFeatures::stackProtector());
+
+  std::optional<cir::InlineKind> existingInlineKind = f.getInlineKind();
+  bool isNoInline =
+      existingInlineKind && *existingInlineKind == cir::InlineKind::NoInline;
+  bool isAlwaysInline = existingInlineKind &&
+                        *existingInlineKind == cir::InlineKind::AlwaysInline;
+
+  if (!decl) {
+    assert(!cir::MissingFeatures::hlsl());
+
+    if (!isAlwaysInline &&
+        codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
+      // If inlining is disabled and we don't have a declaration to control
+      // inlining, mark the function as 'noinline' unless it is explicitly
+      // marked as 'alwaysinline'.
+      f.setInlineKindAttr(
+          cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+    }
+
+    return;
+  }
+
+  assert(!cir::MissingFeatures::opFuncArmStreamingAttr());
+  assert(!cir::MissingFeatures::opFuncArmNewAttr());
+  assert(!cir::MissingFeatures::opFuncOptNoneAttr());
+  assert(!cir::MissingFeatures::opFuncMinSizeAttr());
+  assert(!cir::MissingFeatures::opFuncNakedAttr());
+  assert(!cir::MissingFeatures::opFuncNoDuplicateAttr());
+  assert(!cir::MissingFeatures::hlsl());
+
+  // Handle inline attributes
+  if (decl->hasAttr<NoInlineAttr>() && !isAlwaysInline) {
+    // Add noinline if the function isn't always_inline.
+    f.setInlineKindAttr(
+        cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+  } else if (decl->hasAttr<AlwaysInlineAttr>() && !isNoInline) {
+    // Don't override AlwaysInline with NoInline, or vice versa, since we can't
+    // specify both in IR.
+    f.setInlineKindAttr(
+        cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::AlwaysInline));
+  } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
+    // If inlining is disabled, force everything that isn't always_inline
+    // to carry an explicit noinline attribute.
+    if (!isAlwaysInline) {
+      f.setInlineKindAttr(
+          cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+    }
+  } else {
+    // Otherwise, propagate the inline hint attribute and potentially use its
+    // absence to mark things as noinline.
+    // Search function and template pattern redeclarations for inline.
+    if (auto *fd = dyn_cast<FunctionDecl>(decl)) {
+      // TODO: Share this checkForInline implementation with classic codegen.
+      // This logic is likely to change over time, so sharing would help ensure
+      // consistency.
+      auto checkForInline = [](const FunctionDecl *decl) {
+        auto checkRedeclForInline = [](const FunctionDecl *redecl) {
+          return redecl->isInlineSpecified();
+        };
+        if (any_of(decl->redecls(), checkRedeclForInline))
+          return true;
+        const FunctionDecl *pattern = decl->getTemplateInstantiationPattern();
+        if (!pattern)
+          return false;
+        return any_of(pattern->redecls(), checkRedeclForInline);
+      };
+      if (checkForInline(fd)) {
+        f.setInlineKindAttr(cir::InlineAttr::get(&getMLIRContext(),
+                                                 cir::InlineKind::InlineHint));
+      } else if (codeGenOpts.getInlining() ==
+                     CodeGenOptions::OnlyHintInlining &&
+                 !fd->isInlined() && !isAlwaysInline) {
+        f.setInlineKindAttr(
+            cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+      }
+    }
+  }
+
+  assert(!cir::MissingFeatures::opFuncColdHotAttr());
+}
+
 cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
     StringRef mangledName, mlir::Type funcType, GlobalDecl gd, bool forVTable,
     bool dontDefer, bool isThunk, ForDefinition_t isForDefinition,
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index f627bae..1fc116d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -159,6 +159,13 @@ public:
                                bool isConstant = false,
                                mlir::Operation *insertPoint = nullptr);

+  /// Add a global constructor or destructor to the module.
+  /// The priority is optional; if not specified, the default priority is used.
+  void addGlobalCtor(cir::FuncOp ctor,
+                     std::optional<int> priority = std::nullopt);
+  void addGlobalDtor(cir::FuncOp dtor,
+                     std::optional<int> priority = std::nullopt);
+
   bool shouldZeroInitPadding() const {
     // In C23 (N3096) $6.7.10:
     // """
@@ -422,6 +429,10 @@ public:
   void setFunctionAttributes(GlobalDecl gd, cir::FuncOp f,
                              bool isIncompleteFunction, bool isThunk);

+  /// Set extra attributes (inline, etc.) for a function.
+  void setCIRFunctionAttributesForDefinition(const clang::FunctionDecl *fd,
+                                             cir::FuncOp f);
+
   void emitGlobalDefinition(clang::GlobalDecl gd,
                             mlir::Operation *op = nullptr);
   void emitGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op);
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
index 3d86f71..ce4ae7e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
@@ -1005,7 +1005,7 @@ public:
           /*temporary=*/nullptr, OpenACCReductionOperator::Invalid,
           Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
           opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
-          privateOp);
+          privateOp, /*reductionCombinerRecipes=*/{});
       // TODO: OpenACC: The dialect is going to change in the near future to
       // have these be on a different operation, so when that changes, we
       // probably need to change these here.
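The reduction hunks that follow thread the new CombinerRecipes through recipe creation and then emit the combiner region itself. For orientation, a minimal OpenACC reduction (hypothetical example, not part of this patch) whose scalar `+` combiner the code below generates:

    void sum(const int *a, int n) {
      int total = 0;
    #pragma acc parallel loop reduction(+:total)
      for (int i = 0; i < n; ++i)
        total += a[i];
    }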
@@ -1046,7 +1046,7 @@ public:
           OpenACCReductionOperator::Invalid,
           Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
           opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
-          firstPrivateOp);
+          firstPrivateOp, /*reductionCombinerRecipe=*/{});

       // TODO: OpenACC: The dialect is going to change in the near future to
       // have these be on a different operation, so when that changes, we
@@ -1088,7 +1088,7 @@ public:
           /*temporary=*/nullptr, clause.getReductionOp(),
           Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
           opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
-          reductionOp);
+          reductionOp, varRecipe.CombinerRecipes);

       operation.addReduction(builder.getContext(), reductionOp, recipe);
     }
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index 24a5fc2..f638d39 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -398,6 +398,7 @@ void OpenACCRecipeBuilderBase::createRecipeDestroySection(
     emitDestroy(block->getArgument(1), elementTy);
   }

+  ls.forceCleanup();
   mlir::acc::YieldOp::create(builder, locEnd);
 }

 void OpenACCRecipeBuilderBase::makeBoundsInit(
@@ -480,6 +481,7 @@ void OpenACCRecipeBuilderBase::createInitRecipe(
                      /*isInitSection=*/true);
   }

+  ls.forceCleanup();
   mlir::acc::YieldOp::create(builder, locEnd);
 }

@@ -518,6 +520,7 @@ void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy(
   cgf.emitAutoVarInit(tempDeclEmission);

   builder.setInsertionPointToEnd(&copyRegion.back());
+  ls.forceCleanup();
   mlir::acc::YieldOp::create(builder, locEnd);
 }

@@ -527,16 +530,143 @@ void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy(
 // doesn't restore it afterwards.
 void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
     mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
-    mlir::acc::ReductionRecipeOp recipe, size_t numBounds) {
+    mlir::acc::ReductionRecipeOp recipe, size_t numBounds, QualType origType,
+    llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe> combinerRecipes) {
   mlir::Block *block =
       createRecipeBlock(recipe.getCombinerRegion(), mainOp.getType(), loc,
                         numBounds, /*isInit=*/false);
   builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
   CIRGenFunction::LexicalScope ls(cgf, loc, block);

-  mlir::BlockArgument lhsArg = block->getArgument(0);
+  mlir::Value lhsArg = block->getArgument(0);
+  mlir::Value rhsArg = block->getArgument(1);
+  llvm::MutableArrayRef<mlir::BlockArgument> boundsRange =
+      block->getArguments().drop_front(2);
+
+  if (llvm::any_of(combinerRecipes, [](auto &r) { return r.Op == nullptr; })) {
+    cgf.cgm.errorNYI(loc, "OpenACC Reduction combiner not generated");
+    mlir::acc::YieldOp::create(builder, locEnd, block->getArgument(0));
+    return;
+  }
+
+  // Apply the bounds so that we can get our bounds emitted correctly.
+  for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange))
+    std::tie(lhsArg, rhsArg) =
+        createBoundsLoop(lhsArg, rhsArg, boundArg, loc, /*inverse=*/false);
+
+  // Emitter for when we know this isn't a struct or array we have to loop
+  // through. This should work for the 'field' once the get-element call has
+  // been made.
+  auto emitSingleCombiner =
+      [&](mlir::Value lhsArg, mlir::Value rhsArg,
+          const OpenACCReductionRecipe::CombinerRecipe &combiner) {
+        mlir::Type elementTy =
+            mlir::cast<cir::PointerType>(lhsArg.getType()).getPointee();
+        CIRGenFunction::DeclMapRevertingRAII declMapRAIILhs{cgf, combiner.LHS};
+        cgf.setAddrOfLocalVar(
+            combiner.LHS, Address{lhsArg, elementTy,
+                                  cgf.getContext().getDeclAlign(combiner.LHS)});
+        CIRGenFunction::DeclMapRevertingRAII declMapRAIIRhs{cgf, combiner.RHS};
+        cgf.setAddrOfLocalVar(
+            combiner.RHS, Address{rhsArg, elementTy,
+                                  cgf.getContext().getDeclAlign(combiner.RHS)});
+
+        [[maybe_unused]] mlir::LogicalResult stmtRes =
+            cgf.emitStmt(combiner.Op, /*useCurrentScope=*/true);
+      };
+
+  // Emitter for when we know this is either a non-array or element of an array
+  // (which also shouldn't be an array type?). This function should generate the
+  // initialization code for an entire 'array-element'/non-array, including
+  // diving into each element of a struct (if necessary).
+  auto emitCombiner = [&](mlir::Value lhsArg, mlir::Value rhsArg, QualType ty) {
+    assert(!ty->isArrayType() && "Array type shouldn't get here");
+    if (const auto *rd = ty->getAsRecordDecl()) {
+      if (combinerRecipes.size() == 1 &&
+          cgf.getContext().hasSameType(ty, combinerRecipes[0].LHS->getType())) {
+        // If this is a 'top level' operator on the type we can just emit this
+        // as a simple one.
+        emitSingleCombiner(lhsArg, rhsArg, combinerRecipes[0]);
+      } else {
+        // Else we have to handle each individual field after a get-element.
+        for (const auto &[field, combiner] :
+             llvm::zip_equal(rd->fields(), combinerRecipes)) {
+          mlir::Type fieldType = cgf.convertType(field->getType());
+          auto fieldPtr = cir::PointerType::get(fieldType);
+
+          mlir::Value lhsField = builder.createGetMember(
+              loc, fieldPtr, lhsArg, field->getName(), field->getFieldIndex());
+          mlir::Value rhsField = builder.createGetMember(
+              loc, fieldPtr, rhsArg, field->getName(), field->getFieldIndex());
+
+          emitSingleCombiner(lhsField, rhsField, combiner);
+        }
+      }
+
+    } else {
+      // If this is a single-thing (because we should know this isn't an array,
+      // as Sema wouldn't let us get here), we can just do a normal emit call.
+      emitSingleCombiner(lhsArg, rhsArg, combinerRecipes[0]);
+    }
+  };
+
+  if (const auto *cat = cgf.getContext().getAsConstantArrayType(origType)) {
+    // If we're in an array, we have to emit the combiner for each element of
+    // the array.
+    auto itrTy = mlir::cast<cir::IntType>(cgf.PtrDiffTy);
+    auto itrPtrTy = cir::PointerType::get(itrTy);
+
+    mlir::Value zero =
+        builder.getConstInt(loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 0);
+    mlir::Value itr =
+        cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr",
+                              cgf.cgm.getSize(cgf.getPointerAlign()));
+    builder.CIRBaseBuilderTy::createStore(loc, zero, itr);
+
+    builder.setInsertionPointAfter(builder.createFor(
+        loc,
+        /*condBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          auto loadItr = cir::LoadOp::create(builder, loc, {itr});
+          mlir::Value arraySize = builder.getConstInt(
+              loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy),
+              cat->getZExtSize());
+          auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadItr,
+                                           arraySize);
+          builder.createCondition(cmp);
+        },
+        /*bodyBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          auto loadItr = cir::LoadOp::create(builder, loc, {itr});
+          auto lhsElt = builder.getArrayElement(
+              loc, loc, lhsArg, cgf.convertType(cat->getElementType()),
+              loadItr, /*shouldDecay=*/true);
+          auto rhsElt = builder.getArrayElement(
+              loc, loc, rhsArg, cgf.convertType(cat->getElementType()),
+              loadItr, /*shouldDecay=*/true);
+
+          emitCombiner(lhsElt, rhsElt, cat->getElementType());
+          builder.createYield(loc);
+        },
+        /*stepBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          auto loadItr = cir::LoadOp::create(builder, loc, {itr});
+          auto inc = cir::UnaryOp::create(builder, loc, loadItr.getType(),
+                                          cir::UnaryOpKind::Inc, loadItr);
+          builder.CIRBaseBuilderTy::createStore(loc, inc, itr);
+          builder.createYield(loc);
+        }));

-  mlir::acc::YieldOp::create(builder, locEnd, lhsArg);
+  } else if (origType->isArrayType()) {
+    cgf.cgm.errorNYI(loc,
+                     "OpenACC Reduction combiner non-constant array recipe");
+  } else {
+    emitCombiner(lhsArg, rhsArg, origType);
+  }
+
+  builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
+  ls.forceCleanup();
+  mlir::acc::YieldOp::create(builder, locEnd, block->getArgument(0));
 }
 } // namespace clang::CIRGen
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
index a5da744..745d424 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
@@ -64,10 +64,10 @@ protected:
   // that this function is not 'insertion point' clean, in that it alters the
   // insertion point to be inside of the 'combiner' section of the recipe, but
   // doesn't restore it afterwards.
-  void createReductionRecipeCombiner(mlir::Location loc, mlir::Location locEnd,
-                                     mlir::Value mainOp,
-                                     mlir::acc::ReductionRecipeOp recipe,
-                                     size_t numBounds);
+  void createReductionRecipeCombiner(
+      mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
+      mlir::acc::ReductionRecipeOp recipe, size_t numBounds, QualType origType,
+      llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe> combinerRecipes);

   void createInitRecipe(mlir::Location loc, mlir::Location locEnd,
                         SourceRange exprRange, mlir::Value mainOp,
@@ -169,7 +169,9 @@ public:
                     const Expr *varRef, const VarDecl *varRecipe,
                     const VarDecl *temporary,
                     OpenACCReductionOperator reductionOp, DeclContext *dc,
                     QualType origType, size_t numBounds,
                     llvm::ArrayRef<QualType> boundTypes, QualType baseType,
-                    mlir::Value mainOp) {
+                    mlir::Value mainOp,
+                    llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe>
+                        reductionCombinerRecipes) {
     assert(!varRecipe->getType()->isSpecificBuiltinType(
                BuiltinType::ArraySection) &&
            "array section shouldn't make it to recipe creation");
@@ -208,7 +210,8 @@ public:
       createInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
                        recipe.getInitRegion(), numBounds, boundTypes, varRecipe,
                        origType, /*emitInitExpr=*/true);
-      createReductionRecipeCombiner(loc, locEnd, mainOp, recipe, numBounds);
+      createReductionRecipeCombiner(loc, locEnd, mainOp, recipe, numBounds,
+                                    origType, reductionCombinerRecipes);
     } else {
       static_assert(std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp>);
       createInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index cfd48a2..5ba64dd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -536,7 +536,7 @@ mlir::LogicalResult CIRGenFunction::emitLabel(const clang::LabelDecl &d) {
   mlir::Block *currBlock = builder.getBlock();
   mlir::Block *labelBlock = currBlock;

-  if (!currBlock->empty()) {
+  if (!currBlock->empty() || currBlock->isEntryBlock()) {
     {
       mlir::OpBuilder::InsertionGuard guard(builder);
       labelBlock = builder.createBlock(builder.getBlock()->getParent());
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index 273ec7f..b5612d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -65,6 +65,9 @@ struct CIRGenTypeCache {
   cir::PointerType VoidPtrTy;
   cir::PointerType UInt8PtrTy;

+  /// void* in alloca address space
+  cir::PointerType AllocaInt8PtrTy;
+
   /// The size and alignment of a pointer into the generic address space.
   union {
     unsigned char PointerAlignInBytes;
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index e65896a..d1b91d0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -159,7 +159,7 @@ isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
   for (const clang::CXXBaseSpecifier &i : crd->bases())
     if (!isSafeToConvert(i.getType()
                              ->castAs<RecordType>()
-                             ->getOriginalDecl()
+                             ->getDecl()
                              ->getDefinitionOrSelf(),
                          cgt, alreadyChecked))
       return false;
@@ -279,8 +279,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {

   // Process record types before the type cache lookup.
   if (const auto *recordType = dyn_cast<RecordType>(type))
-    return convertRecordDeclType(
-        recordType->getOriginalDecl()->getDefinitionOrSelf());
+    return convertRecordDeclType(recordType->getDecl()->getDefinitionOrSelf());

   // Has the type already been processed?
   TypeCacheTy::iterator tci = typeCache.find(ty);
@@ -421,6 +420,16 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
     break;
   }

+  case Type::VariableArray: {
+    const VariableArrayType *a = cast<VariableArrayType>(ty);
+    if (a->getIndexTypeCVRQualifiers() != 0)
+      cgm.errorNYI(SourceLocation(), "non trivial array types", type);
+    // VLAs resolve to the innermost element type; this matches
+    // the return of alloca, and there isn't any obviously better choice.
+    resultType = convertTypeForMem(a->getElementType());
+    break;
+  }
+
   case Type::IncompleteArray: {
     const IncompleteArrayType *arrTy = cast<IncompleteArrayType>(ty);
     if (arrTy->getIndexTypeCVRQualifiers() != 0)
@@ -619,10 +628,8 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeGlobalDeclaration(GlobalDecl gd) {
   const auto *fd = cast<FunctionDecl>(gd.getDecl());

   if (isa<CXXConstructorDecl>(gd.getDecl()) ||
-      isa<CXXDestructorDecl>(gd.getDecl())) {
-    cgm.errorNYI(SourceLocation(),
-                 "arrangeGlobalDeclaration for C++ constructor or destructor");
-  }
+      isa<CXXDestructorDecl>(gd.getDecl()))
+    return arrangeCXXStructorDeclaration(gd);

   return arrangeFunctionDeclaration(fd);
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
index 84f5977..36bab62 100644
--- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
@@ -120,12 +120,6 @@ mlir::Attribute CIRGenVTables::getVTableComponent(
   assert(!cir::MissingFeatures::vtableRelativeLayout());

   switch (component.getKind()) {
-  case VTableComponent::CK_CompleteDtorPointer:
-    cgm.errorNYI("getVTableComponent: CompleteDtorPointer");
-    return mlir::Attribute();
-  case VTableComponent::CK_DeletingDtorPointer:
-    cgm.errorNYI("getVTableComponent: DeletingDtorPointer");
-    return mlir::Attribute();
   case VTableComponent::CK_UnusedFunctionPointer:
     cgm.errorNYI("getVTableComponent: UnusedFunctionPointer");
     return mlir::Attribute();
@@ -148,7 +142,9 @@ mlir::Attribute CIRGenVTables::getVTableComponent(
            "expected GlobalViewAttr or ConstPtrAttr");
     return rtti;

-  case VTableComponent::CK_FunctionPointer: {
+  case VTableComponent::CK_FunctionPointer:
+  case VTableComponent::CK_CompleteDtorPointer:
+  case VTableComponent::CK_DeletingDtorPointer: {
     GlobalDecl gd = component.getGlobalDecl();

     assert(!cir::MissingFeatures::cudaSupport());
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index 25b6ecb..c05142e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -307,8 +307,8 @@ class AggValueSlot {
   /// This is set to true if some external code is responsible for setting up a
   /// destructor for the slot. Otherwise the code which constructs it should
   /// push the appropriate cleanup.
-  LLVM_PREFERRED_TYPE(bool)
-  LLVM_ATTRIBUTE_UNUSED unsigned destructedFlag : 1;
+  [[maybe_unused]]
+  LLVM_PREFERRED_TYPE(bool) unsigned destructedFlag : 1;

   /// This is set to true if the memory in the slot is known to be zero before
   /// the assignment into it. This means that zero fields don't need to be set.
@@ -326,16 +326,16 @@ class AggValueSlot {
   /// over. Since it's invalid in general to memcpy a non-POD C++
   /// object, it's important that this flag never be set when
   /// evaluating an expression which constructs such an object.
- LLVM_PREFERRED_TYPE(bool) - LLVM_ATTRIBUTE_UNUSED unsigned aliasedFlag : 1; + [[maybe_unused]] + LLVM_PREFERRED_TYPE(bool) unsigned aliasedFlag : 1; /// This is set to true if the tail padding of this slot might overlap /// another object that may have already been initialized (and whose /// value must be preserved by this initialization). If so, we may only /// store up to the dsize of the type. Otherwise we can widen stores to /// the size of the type. - LLVM_PREFERRED_TYPE(bool) - LLVM_ATTRIBUTE_UNUSED unsigned overlapFlag : 1; + [[maybe_unused]] + LLVM_PREFERRED_TYPE(bool) unsigned overlapFlag : 1; public: enum IsDestructed_t { IsNotDestructed, IsDestructed }; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 5f88590..b4c3704 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -15,6 +15,7 @@ #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "mlir/IR/DialectImplementation.h" #include "mlir/Interfaces/ControlFlowInterfaces.h" #include "mlir/Interfaces/FunctionImplementation.h" #include "mlir/Support/LLVM.h" @@ -1720,6 +1721,73 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { hasAlias = true; } + auto parseGlobalDtorCtor = + [&](StringRef keyword, + llvm::function_ref<void(std::optional<int> prio)> createAttr) + -> mlir::LogicalResult { + if (mlir::succeeded(parser.parseOptionalKeyword(keyword))) { + std::optional<int> priority; + if (mlir::succeeded(parser.parseOptionalLParen())) { + auto parsedPriority = mlir::FieldParser<int>::parse(parser); + if (mlir::failed(parsedPriority)) + return parser.emitError(parser.getCurrentLocation(), + "failed to parse 'priority', of type 'int'"); + priority = parsedPriority.value_or(int()); + // Parse literal ')' + if (parser.parseRParen()) + return failure(); + } + createAttr(priority); + } + return success(); + }; + + if (parseGlobalDtorCtor("global_ctor", [&](std::optional<int> priority) { + mlir::IntegerAttr globalCtorPriorityAttr = + builder.getI32IntegerAttr(priority.value_or(65535)); + state.addAttribute(getGlobalCtorPriorityAttrName(state.name), + globalCtorPriorityAttr); + }).failed()) + return failure(); + + if (parseGlobalDtorCtor("global_dtor", [&](std::optional<int> priority) { + mlir::IntegerAttr globalDtorPriorityAttr = + builder.getI32IntegerAttr(priority.value_or(65535)); + state.addAttribute(getGlobalDtorPriorityAttrName(state.name), + globalDtorPriorityAttr); + }).failed()) + return failure(); + + // Parse optional inline kind: inline(never|always|hint) + if (parser.parseOptionalKeyword("inline").succeeded()) { + if (parser.parseLParen().failed()) + return failure(); + + llvm::StringRef inlineKindStr; + const std::array<llvm::StringRef, cir::getMaxEnumValForInlineKind()> + allowedInlineKindStrs{ + cir::stringifyInlineKind(cir::InlineKind::NoInline), + cir::stringifyInlineKind(cir::InlineKind::AlwaysInline), + cir::stringifyInlineKind(cir::InlineKind::InlineHint), + }; + if (parser.parseOptionalKeyword(&inlineKindStr, allowedInlineKindStrs) + .failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected 'never', 'always', or 'hint'"); + + std::optional<InlineKind> inlineKind = + cir::symbolizeInlineKind(inlineKindStr); + if (!inlineKind) + return parser.emitError(parser.getCurrentLocation(), + "invalid inline kind"); + + state.addAttribute(getInlineKindAttrName(state.name), + cir::InlineAttr::get(builder.getContext(), *inlineKind)); + + if 
(parser.parseRParen().failed()) + return failure(); + } + // Parse the optional function body. auto *body = state.addRegion(); OptionalParseResult parseResult = parser.parseOptionalRegion( @@ -1801,6 +1869,22 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p << ")"; } + if (auto globalCtorPriority = getGlobalCtorPriority()) { + p << " global_ctor"; + if (globalCtorPriority.value() != 65535) + p << "(" << globalCtorPriority.value() << ")"; + } + + if (auto globalDtorPriority = getGlobalDtorPriority()) { + p << " global_dtor"; + if (globalDtorPriority.value() != 65535) + p << "(" << globalDtorPriority.value() << ")"; + } + + if (cir::InlineAttr inlineAttr = getInlineKindAttr()) { + p << " inline(" << cir::stringifyInlineKind(inlineAttr.getValue()) << ")"; + } + // Print the body if this is not an external function. Region &body = getOperation()->getRegion(0); if (!body.empty()) { @@ -2851,31 +2935,144 @@ mlir::LogicalResult cir::ThrowOp::verify() { } //===----------------------------------------------------------------------===// -// AtomicCmpXchg +// TypeInfoAttr //===----------------------------------------------------------------------===// -LogicalResult cir::AtomicCmpXchg::verify() { - mlir::Type pointeeType = getPtr().getType().getPointee(); +LogicalResult cir::TypeInfoAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::Type type, ::mlir::ArrayAttr typeInfoData) { - if (pointeeType != getExpected().getType() || - pointeeType != getDesired().getType()) - return emitOpError("ptr, expected and desired types must match"); + if (cir::ConstRecordAttr::verify(emitError, type, typeInfoData).failed()) + return failure(); return success(); } //===----------------------------------------------------------------------===// -// TypeInfoAttr +// TryOp //===----------------------------------------------------------------------===// -LogicalResult cir::TypeInfoAttr::verify( - ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::mlir::Type type, ::mlir::ArrayAttr typeInfoData) { +void cir::TryOp::getSuccessorRegions( + mlir::RegionBranchPoint point, + llvm::SmallVectorImpl<mlir::RegionSuccessor> ®ions) { + // The `try` and the `catchers` region branch back to the parent operation. + if (!point.isParent()) { + regions.push_back(mlir::RegionSuccessor()); + return; + } - if (cir::ConstRecordAttr::verify(emitError, type, typeInfoData).failed()) - return failure(); + regions.push_back(mlir::RegionSuccessor(&getTryRegion())); - return success(); + // TODO(CIR): If we know a target function never throws a specific type, we + // can remove the catch handler. 
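Since cir.try handlers come up again just below, a hypothetical C++ input shows how source-level handlers map onto the printed regions (a sketch; the exact RTTI attribute spelling depends on the caught type):

  // Hypothetical input: the typed handler becomes a `catch [type ...]`
  // region carrying int's RTTI attribute, and the ellipsis handler becomes
  // a `catch all` region; an `unwind` region is used instead when the
  // exception must propagate out of this frame.
  void may_throw();
  void wrapper() {
    try {
      may_throw();
    } catch (const int &) {
    } catch (...) {
    }
  }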
+ for (mlir::Region &handlerRegion : this->getHandlerRegions()) + regions.push_back(mlir::RegionSuccessor(&handlerRegion)); +} + +static void +printTryHandlerRegions(mlir::OpAsmPrinter &printer, cir::TryOp op, + mlir::MutableArrayRef<mlir::Region> handlerRegions, + mlir::ArrayAttr handlerTypes) { + if (!handlerTypes) + return; + + for (const auto [typeIdx, typeAttr] : llvm::enumerate(handlerTypes)) { + if (typeIdx) + printer << " "; + + if (mlir::isa<cir::CatchAllAttr>(typeAttr)) { + printer << "catch all "; + } else if (mlir::isa<cir::UnwindAttr>(typeAttr)) { + printer << "unwind "; + } else { + printer << "catch [type "; + printer.printAttribute(typeAttr); + printer << "] "; + } + + printer.printRegion(handlerRegions[typeIdx], + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/true); + } +} + +static mlir::ParseResult parseTryHandlerRegions( + mlir::OpAsmParser &parser, + llvm::SmallVectorImpl<std::unique_ptr<mlir::Region>> &handlerRegions, + mlir::ArrayAttr &handlerTypes) { + + auto parseCheckedCatcherRegion = [&]() -> mlir::ParseResult { + handlerRegions.emplace_back(new mlir::Region); + + mlir::Region &currRegion = *handlerRegions.back(); + mlir::SMLoc regionLoc = parser.getCurrentLocation(); + if (parser.parseRegion(currRegion)) { + handlerRegions.clear(); + return failure(); + } + + if (currRegion.empty()) + return parser.emitError(regionLoc, "handler region shall not be empty"); + + if (!(currRegion.back().mightHaveTerminator() && + currRegion.back().getTerminator())) + return parser.emitError( + regionLoc, "blocks are expected to be explicitly terminated"); + + return success(); + }; + + bool hasCatchAll = false; + llvm::SmallVector<mlir::Attribute, 4> catcherAttrs; + while (parser.parseOptionalKeyword("catch").succeeded()) { + bool hasLSquare = parser.parseOptionalLSquare().succeeded(); + + llvm::StringRef attrStr; + if (parser.parseOptionalKeyword(&attrStr, {"all", "type"}).failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected 'all' or 'type' keyword"); + + bool isCatchAll = attrStr == "all"; + if (isCatchAll) { + if (hasCatchAll) + return parser.emitError(parser.getCurrentLocation(), + "can't have more than one catch all"); + hasCatchAll = true; + } + + mlir::Attribute exceptionRTTIAttr; + if (!isCatchAll && parser.parseAttribute(exceptionRTTIAttr).failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected valid RTTI info attribute"); + + catcherAttrs.push_back(isCatchAll + ? 
cir::CatchAllAttr::get(parser.getContext()) + : exceptionRTTIAttr); + + if (hasLSquare && isCatchAll) + return parser.emitError(parser.getCurrentLocation(), + "catch all doesn't need an RTTI info attribute"); + + if (hasLSquare && parser.parseRSquare().failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected `]` after RTTI info attribute"); + + if (parseCheckedCatcherRegion().failed()) + return mlir::failure(); + } + + if (parser.parseOptionalKeyword("unwind").succeeded()) { + if (hasCatchAll) + return parser.emitError(parser.getCurrentLocation(), + "unwind can't be used with catch all"); + + catcherAttrs.push_back(cir::UnwindAttr::get(parser.getContext())); + if (parseCheckedCatcherRegion().failed()) + return mlir::failure(); + } + + handlerTypes = parser.getBuilder().getArrayAttr(catcherAttrs); + return mlir::success(); } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index dbff0b9..d99c362 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -105,6 +105,8 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> { /// List of ctors and their priorities to be called before main() llvm::SmallVector<std::pair<std::string, uint32_t>, 4> globalCtorList; + /// List of dtors and their priorities to be called when unloading the module. + llvm::SmallVector<std::pair<std::string, uint32_t>, 4> globalDtorList; void setASTContext(clang::ASTContext *c) { astCtx = c; @@ -823,10 +825,13 @@ void LoweringPreparePass::buildGlobalCtorDtorList() { mlir::ArrayAttr::get(&getContext(), globalCtors)); } - // We will eventual need to populate a global_dtor list, but that's not - // needed for globals with destructors. It will only be needed for functions - // that are marked as global destructors with an attribute.
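The removed comment above is now acted on: functions carrying GNU constructor/destructor attributes reach LoweringPrepare as cir.func ops with global_ctor/global_dtor priorities. A hypothetical C++ input (assuming the usual attribute mapping; 65535 is the default priority, which the printer elides):

  // Hypothetical input: `init` is expected to round-trip as
  // `cir.func ... global_ctor(101)` and `fini` as `cir.func ... global_dtor`,
  // and both end up in the llvm.global_ctors/llvm.global_dtors arrays.
  __attribute__((constructor(101))) void init() {}
  __attribute__((destructor)) void fini() {}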
- assert(!cir::MissingFeatures::opGlobalDtorList()); + if (!globalDtorList.empty()) { + llvm::SmallVector<mlir::Attribute> globalDtors = + prepareCtorDtorAttrList<cir::GlobalDtorAttr>(&getContext(), + globalDtorList); + mlirModule->setAttr(cir::CIRDialect::getGlobalDtorsAttrName(), + mlir::ArrayAttr::get(&getContext(), globalDtors)); + } } void LoweringPreparePass::buildCXXGlobalInitFunc() { @@ -975,22 +980,28 @@ void LoweringPreparePass::lowerArrayCtor(cir::ArrayCtor op) { } void LoweringPreparePass::runOnOp(mlir::Operation *op) { - if (auto arrayCtor = dyn_cast<ArrayCtor>(op)) + if (auto arrayCtor = dyn_cast<cir::ArrayCtor>(op)) { lowerArrayCtor(arrayCtor); - else if (auto arrayDtor = dyn_cast<cir::ArrayDtor>(op)) + } else if (auto arrayDtor = dyn_cast<cir::ArrayDtor>(op)) { lowerArrayDtor(arrayDtor); - else if (auto cast = mlir::dyn_cast<cir::CastOp>(op)) + } else if (auto cast = mlir::dyn_cast<cir::CastOp>(op)) { lowerCastOp(cast); - else if (auto complexDiv = mlir::dyn_cast<cir::ComplexDivOp>(op)) + } else if (auto complexDiv = mlir::dyn_cast<cir::ComplexDivOp>(op)) { lowerComplexDivOp(complexDiv); - else if (auto complexMul = mlir::dyn_cast<cir::ComplexMulOp>(op)) + } else if (auto complexMul = mlir::dyn_cast<cir::ComplexMulOp>(op)) { lowerComplexMulOp(complexMul); - else if (auto glob = mlir::dyn_cast<cir::GlobalOp>(op)) + } else if (auto glob = mlir::dyn_cast<cir::GlobalOp>(op)) { lowerGlobalOp(glob); - else if (auto dynamicCast = mlir::dyn_cast<cir::DynamicCastOp>(op)) + } else if (auto dynamicCast = mlir::dyn_cast<cir::DynamicCastOp>(op)) { lowerDynamicCastOp(dynamicCast); - else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op)) + } else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op)) { lowerUnaryOp(unary); + } else if (auto fnOp = dyn_cast<cir::FuncOp>(op)) { + if (auto globalCtor = fnOp.getGlobalCtorPriority()) + globalCtorList.emplace_back(fnOp.getName(), globalCtor.value()); + else if (auto globalDtor = fnOp.getGlobalDtorPriority()) + globalDtorList.emplace_back(fnOp.getName(), globalDtor.value()); + } } void LoweringPreparePass::runOnOperation() { @@ -1003,7 +1014,7 @@ void LoweringPreparePass::runOnOperation() { op->walk([&](mlir::Operation *op) { if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp, cir::ComplexMulOp, cir::ComplexDivOp, cir::DynamicCastOp, - cir::GlobalOp, cir::UnaryOp>(op)) + cir::FuncOp, cir::GlobalOp, cir::UnaryOp>(op)) opsToTransform.push_back(op); }); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp index 7d3c711..11ce2a8 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp @@ -92,7 +92,53 @@ static mlir::Value buildDynamicCastToVoidAfterNullCheck(cir::CIRBaseBuilderTy &builder, clang::ASTContext &astCtx, cir::DynamicCastOp op) { - llvm_unreachable("dynamic cast to void is NYI"); + mlir::Location loc = op.getLoc(); + bool vtableUsesRelativeLayout = op.getRelativeLayout(); + + // TODO(cir): consider address space in this function. 
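The body that follows implements the Itanium ABI rule for dynamic_cast to void*: load the object's vptr, read the offset-to-top entry stored two slots before the address point (hence the -2 stride below), and displace the source pointer by that many bytes. A hypothetical C++ use of this path:

  // Hypothetical input: after the null check, the cast loads b's vptr,
  // reads the offset-to-top slot, and adds it to b to reach the
  // most-derived object.
  struct Base { virtual ~Base(); };
  struct Derived : Base { int payload; };
  void *to_most_derived(Base *b) {
    return dynamic_cast<void *>(b);
  }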
+ assert(!cir::MissingFeatures::addressSpace()); + + mlir::Type vtableElemTy; + uint64_t vtableElemAlign; + if (vtableUsesRelativeLayout) { + vtableElemTy = builder.getSIntNTy(32); + vtableElemAlign = 4; + } else { + const auto &targetInfo = astCtx.getTargetInfo(); + auto ptrdiffTy = targetInfo.getPtrDiffType(clang::LangAS::Default); + bool ptrdiffTyIsSigned = clang::TargetInfo::isTypeSigned(ptrdiffTy); + uint64_t ptrdiffTyWidth = targetInfo.getTypeWidth(ptrdiffTy); + + vtableElemTy = cir::IntType::get(builder.getContext(), ptrdiffTyWidth, + ptrdiffTyIsSigned); + vtableElemAlign = + llvm::divideCeil(targetInfo.getPointerAlign(clang::LangAS::Default), 8); + } + + // Access vtable to get the offset from the given object to its containing + // complete object. + // TODO: Add a specialized operation to get the object offset? + auto vptrTy = cir::VPtrType::get(builder.getContext()); + cir::PointerType vptrPtrTy = builder.getPointerTo(vptrTy); + auto vptrPtr = + cir::VTableGetVPtrOp::create(builder, loc, vptrPtrTy, op.getSrc()); + mlir::Value vptr = builder.createLoad(loc, vptrPtr); + mlir::Value elementPtr = + builder.createBitcast(vptr, builder.getPointerTo(vtableElemTy)); + mlir::Value minusTwo = builder.getSignedInt(loc, -2, 64); + auto offsetToTopSlotPtr = cir::PtrStrideOp::create( + builder, loc, builder.getPointerTo(vtableElemTy), elementPtr, minusTwo); + mlir::Value offsetToTop = + builder.createAlignedLoad(loc, offsetToTopSlotPtr, vtableElemAlign); + + // Add the offset to the given pointer to get the cast result. + // Cast the input pointer to a uint8_t* to allow pointer arithmetic. + cir::PointerType u8PtrTy = builder.getPointerTo(builder.getUIntNTy(8)); + mlir::Value srcBytePtr = builder.createBitcast(op.getSrc(), u8PtrTy); + auto dstBytePtr = + cir::PtrStrideOp::create(builder, loc, u8PtrTy, srcBytePtr, offsetToTop); + // Cast the result to a void*. + return builder.createBitcast(dstBytePtr, builder.getVoidPtrTy()); } mlir::Value diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 26e0ba9..0243bf1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -694,8 +694,8 @@ getLLVMMemOrder(std::optional<cir::MemOrder> memorder) { llvm_unreachable("unknown memory order"); } -mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite( - cir::AtomicCmpXchg op, OpAdaptor adaptor, +mlir::LogicalResult CIRToLLVMAtomicCmpXchgOpLowering::matchAndRewrite( + cir::AtomicCmpXchgOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { mlir::Value expected = adaptor.getExpected(); mlir::Value desired = adaptor.getDesired(); @@ -719,8 +719,8 @@ mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite( return mlir::success(); } -mlir::LogicalResult CIRToLLVMAtomicXchgLowering::matchAndRewrite( - cir::AtomicXchg op, OpAdaptor adaptor, +mlir::LogicalResult CIRToLLVMAtomicXchgOpLowering::matchAndRewrite( + cir::AtomicXchgOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { assert(!cir::MissingFeatures::atomicSyncScopeID()); mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(adaptor.getMemOrder()); @@ -1499,6 +1499,54 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( return mlir::success(); } +static uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) { + mlir::DataLayout layout(op.getParentOfType<mlir::ModuleOp>()); + // For LLVM purposes we treat void as u8. 
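getTypeSize sizes void as a single byte, which feeds the PtrDiffOp lowering that follows: the two pointers are converted with ptrtoint, subtracted, and the byte difference is divided by the pointee size with an exact division, skipped entirely when that size is 1. A hypothetical C++ input:

  // Hypothetical input: `a - b` lowers to ptrtoint + sub followed by an
  // exact sdiv by sizeof(int) (typically 4); for char or void pointees the
  // size is 1 and the division is omitted.
  #include <cstddef>
  std::ptrdiff_t elem_distance(int *a, int *b) { return a - b; }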
+ if (isa<cir::VoidType>(type)) + type = cir::IntType::get(type.getContext(), 8, /*isSigned=*/false); + return llvm::divideCeil(layout.getTypeSizeInBits(type), 8); +} + +mlir::LogicalResult CIRToLLVMPtrDiffOpLowering::matchAndRewrite( + cir::PtrDiffOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto dstTy = mlir::cast<cir::IntType>(op.getType()); + mlir::Type llvmDstTy = getTypeConverter()->convertType(dstTy); + + auto lhs = rewriter.create<mlir::LLVM::PtrToIntOp>(op.getLoc(), llvmDstTy, + adaptor.getLhs()); + auto rhs = rewriter.create<mlir::LLVM::PtrToIntOp>(op.getLoc(), llvmDstTy, + adaptor.getRhs()); + + auto diff = + rewriter.create<mlir::LLVM::SubOp>(op.getLoc(), llvmDstTy, lhs, rhs); + + cir::PointerType ptrTy = op.getLhs().getType(); + assert(!cir::MissingFeatures::llvmLoweringPtrDiffConsidersPointee()); + uint64_t typeSize = getTypeSize(ptrTy.getPointee(), *op); + + // Avoid silly division by 1. + mlir::Value resultVal = diff.getResult(); + if (typeSize != 1) { + auto typeSizeVal = rewriter.create<mlir::LLVM::ConstantOp>( + op.getLoc(), llvmDstTy, typeSize); + + if (dstTy.isUnsigned()) { + auto uDiv = + rewriter.create<mlir::LLVM::UDivOp>(op.getLoc(), diff, typeSizeVal); + uDiv.setIsExact(true); + resultVal = uDiv.getResult(); + } else { + auto sDiv = + rewriter.create<mlir::LLVM::SDivOp>(op.getLoc(), diff, typeSizeVal); + sDiv.setIsExact(true); + resultVal = sDiv.getResult(); + } + } + rewriter.replaceOp(op, resultVal); + return mlir::success(); +} + mlir::LogicalResult CIRToLLVMExpectOpLowering::matchAndRewrite( cir::ExpectOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -1539,6 +1587,7 @@ void CIRToLLVMFuncOpLowering::lowerFuncAttributes( attr.getName() == getLinkageAttrNameString() || attr.getName() == func.getGlobalVisibilityAttrName() || attr.getName() == func.getDsoLocalAttrName() || + attr.getName() == func.getInlineKindAttrName() || (filterArgAndResAttrs && (attr.getName() == func.getArgAttrsAttrName() || attr.getName() == func.getResAttrsAttrName()))) @@ -1623,6 +1672,12 @@ mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite( assert(!cir::MissingFeatures::opFuncMultipleReturnVals()); + if (auto inlineKind = op.getInlineKind()) { + fn.setNoInline(inlineKind == cir::InlineKind::NoInline); + fn.setInlineHint(inlineKind == cir::InlineKind::InlineHint); + fn.setAlwaysInline(inlineKind == cir::InlineKind::AlwaysInline); + } + fn.setVisibility_Attr(mlir::LLVM::VisibilityAttr::get( getContext(), lowerCIRVisibilityToLLVMVisibility( op.getGlobalVisibilityAttr().getValue()))); @@ -1793,12 +1848,20 @@ CIRToLLVMGlobalOpLowering::getComdatAttr(cir::GlobalOp &op, if (!comdatOp) { builder.setInsertionPointToStart(module.getBody()); comdatOp = - builder.create<mlir::LLVM::ComdatOp>(module.getLoc(), comdatName); + mlir::LLVM::ComdatOp::create(builder, module.getLoc(), comdatName); + } + + if (auto comdatSelector = comdatOp.lookupSymbol<mlir::LLVM::ComdatSelectorOp>( + op.getSymName())) { + return mlir::SymbolRefAttr::get( + builder.getContext(), comdatName, + mlir::FlatSymbolRefAttr::get(comdatSelector.getSymNameAttr())); } builder.setInsertionPointToStart(&comdatOp.getBody().back()); - auto selectorOp = builder.create<mlir::LLVM::ComdatSelectorOp>( - comdatOp.getLoc(), op.getSymName(), mlir::LLVM::comdat::Comdat::Any); + auto selectorOp = mlir::LLVM::ComdatSelectorOp::create( + builder, comdatOp.getLoc(), op.getSymName(), + mlir::LLVM::comdat::Comdat::Any); return mlir::SymbolRefAttr::get( builder.getContext(), 
comdatName, mlir::FlatSymbolRefAttr::get(selectorOp.getSymNameAttr())); @@ -2598,7 +2661,13 @@ void ConvertCIRToLLVMPass::runOnOperation() { return std::make_pair(ctorAttr.getName(), ctorAttr.getPriority()); }); - assert(!cir::MissingFeatures::opGlobalDtorList()); + // Emit the llvm.global_dtors array. + buildCtorDtorList(module, cir::CIRDialect::getGlobalDtorsAttrName(), + "llvm.global_dtors", [](mlir::Attribute attr) { + auto dtorAttr = mlir::cast<cir::GlobalDtorAttr>(attr); + return std::make_pair(dtorAttr.getName(), + dtorAttr.getPriority()); + }); } mlir::LogicalResult CIRToLLVMBrOpLowering::matchAndRewrite( |