Diffstat (limited to 'clang/lib/CIR/CodeGen')
27 files changed, 665 insertions, 143 deletions
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index fb74aa0..a67cbad 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -17,6 +17,7 @@
 #include "mlir/IR/Value.h"
 #include "clang/AST/CharUnits.h"
 #include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/MissingFeatures.h"
 #include "llvm/ADT/PointerIntPair.h"
 
 namespace clang::CIRGen {
@@ -90,6 +91,13 @@ public:
     return getPointer();
   }
 
+  /// Return the pointer contained in this class after authenticating it and
+  /// adding offset to it if necessary.
+  mlir::Value emitRawPointer() const {
+    assert(!cir::MissingFeatures::addressPointerAuthInfo());
+    return getBasePointer();
+  }
+
   mlir::Type getType() const {
     assert(mlir::cast<cir::PointerType>(
                pointerAndKnownNonNull.getPointer().getType())
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 0f4d6d2..a9983f8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -255,7 +255,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
   mlir::Value expected = builder.createLoad(loc, val1);
   mlir::Value desired = builder.createLoad(loc, val2);
 
-  auto cmpxchg = cir::AtomicCmpXchg::create(
+  auto cmpxchg = cir::AtomicCmpXchgOp::create(
       builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
       expected, desired,
       cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
@@ -404,7 +404,7 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   case AtomicExpr::AO__c11_atomic_exchange:
   case AtomicExpr::AO__atomic_exchange_n:
   case AtomicExpr::AO__atomic_exchange:
-    opName = cir::AtomicXchg::getOperationName();
+    opName = cir::AtomicXchgOp::getOperationName();
     break;
 
   case AtomicExpr::AO__opencl_atomic_init:
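Both renames above follow the CIR convention that op classes carry an Op suffix; behavior is unchanged. A minimal sketch of C++ input that should reach the two ops via the __atomic_* builtin paths handled in this file (hypothetical test input, not part of the commit):

    bool try_lock(int *flag) {
      int expected = 0;
      // AO__atomic_compare_exchange_n path -> cir::AtomicCmpXchgOp
      return __atomic_compare_exchange_n(flag, &expected, 1, /*weak=*/false,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
    }

    int swap_value(int *v, int next) {
      // AO__atomic_exchange_n path -> cir::AtomicXchgOp
      return __atomic_exchange_n(v, next, __ATOMIC_SEQ_CST);
    }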
+    assert(!cir::MissingFeatures::addressSpace());
+    cir::PointerType destTy = getVoidPtrTy();
+    return cir::DynamicCastOp::create(
+        *this, loc, destTy, cir::DynamicCastKind::Ptr, src,
+        cir::DynamicCastInfoAttr{}, vtableUseRelativeLayout);
+  }
+
   Address createBaseClassAddr(mlir::Location loc, Address addr,
                               mlir::Type destType, unsigned offset,
                               bool assumeNotNull) {
@@ -525,6 +529,14 @@ public:
     return createGlobal(module, loc, uniqueName, type, isConstant, linkage);
   }
 
+  cir::StackSaveOp createStackSave(mlir::Location loc, mlir::Type ty) {
+    return cir::StackSaveOp::create(*this, loc, ty);
+  }
+
+  cir::StackRestoreOp createStackRestore(mlir::Location loc, mlir::Value v) {
+    return cir::StackRestoreOp::create(*this, loc, v);
+  }
+
   mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
                                 Address dstAddr, mlir::Type storageType,
                                 mlir::Value src, const CIRGenBitFieldInfo &info,
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 4cfa91e..ea31871 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -463,7 +463,9 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
     return emitLibraryCall(*this, fd, e,
                            cgm.getBuiltinLibFunction(fd, builtinID));
 
-  cgm.errorNYI(e->getSourceRange(), "unimplemented builtin call");
+  cgm.errorNYI(e->getSourceRange(),
+               std::string("unimplemented builtin call: ") +
+                   getContext().BuiltinInfo.getName(builtinID));
   return getUndefRValue(e->getType());
 }
 
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
index 274d11b..171ce1c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
@@ -171,7 +171,8 @@ cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl gd) {
   curCGF = nullptr;
 
   setNonAliasAttributes(gd, fn);
-  assert(!cir::MissingFeatures::opFuncAttributesForDefinition());
+  setCIRFunctionAttributesForDefinition(mlir::cast<FunctionDecl>(gd.getDecl()),
+                                        fn);
   return fn;
 }
 
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 06f41cd..6d3741c4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -191,6 +191,15 @@ public:
   virtual void emitVTableDefinitions(CIRGenVTables &cgvt,
                                      const CXXRecordDecl *rd) = 0;
 
+  using DeleteOrMemberCallExpr =
+      llvm::PointerUnion<const CXXDeleteExpr *, const CXXMemberCallExpr *>;
+
+  virtual mlir::Value emitVirtualDestructorCall(CIRGenFunction &cgf,
+                                                const CXXDestructorDecl *dtor,
+                                                CXXDtorType dtorType,
+                                                Address thisAddr,
+                                                DeleteOrMemberCallExpr e) = 0;
+
   /// Emit any tables needed to implement virtual inheritance. For Itanium,
   /// this emits virtual table tables.
   virtual void emitVirtualInheritanceTables(const CXXRecordDecl *rd) = 0;
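The new pure-virtual emitVirtualDestructorCall hook is what the destructor-call changes in CIRGenExprCXX.cpp and the Itanium ABI implementation below plug into. A sketch of input that should take the virtual path, assuming no devirtualization kicks in (hypothetical, not from the commit):

    struct Base {
      virtual ~Base() {}
    };

    void destroy_in_place(Base *b) {
      b->~Base(); // virtual destructor call, dispatched through the ABI hook
    }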
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 485b2c8..89f4926 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -478,8 +478,7 @@ void CIRGenFunction::getVTablePointers(BaseSubobject base,
   for (const auto &nextBase : rd->bases()) {
     const auto *baseDecl =
-        cast<CXXRecordDecl>(
-            nextBase.getType()->castAs<RecordType>()->getOriginalDecl())
+        cast<CXXRecordDecl>(nextBase.getType()->castAs<RecordType>()->getDecl())
             ->getDefinitionOrSelf();
 
     // Ignore classes without a vtable.
@@ -895,6 +894,26 @@ void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
 }
 
 namespace {
+mlir::Value loadThisForDtorDelete(CIRGenFunction &cgf,
+                                  const CXXDestructorDecl *dd) {
+  if (Expr *thisArg = dd->getOperatorDeleteThisArg())
+    return cgf.emitScalarExpr(thisArg);
+  return cgf.loadCXXThis();
+}
+
+/// Call the operator delete associated with the current destructor.
+struct CallDtorDelete final : EHScopeStack::Cleanup {
+  CallDtorDelete() {}
+
+  void emit(CIRGenFunction &cgf) override {
+    const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(cgf.curFuncDecl);
+    const CXXRecordDecl *classDecl = dtor->getParent();
+    cgf.emitDeleteCall(dtor->getOperatorDelete(),
+                       loadThisForDtorDelete(cgf, dtor),
+                       cgf.getContext().getCanonicalTagType(classDecl));
+  }
+};
+
 class DestroyField final : public EHScopeStack::Cleanup {
   const FieldDecl *field;
   CIRGenFunction::Destroyer *destroyer;
@@ -932,7 +951,18 @@ void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd,
   // The deleting-destructor phase just needs to call the appropriate
   // operator delete that Sema picked up.
   if (dtorType == Dtor_Deleting) {
-    cgm.errorNYI(dd->getSourceRange(), "deleting destructor cleanups");
+    assert(dd->getOperatorDelete() &&
+           "operator delete missing - EnterDtorCleanups");
+    if (cxxStructorImplicitParamValue) {
+      cgm.errorNYI(dd->getSourceRange(), "deleting destructor with vtt");
+    } else {
+      if (dd->getOperatorDelete()->isDestroyingOperatorDelete()) {
+        cgm.errorNYI(dd->getSourceRange(),
+                     "deleting destructor with destroying operator delete");
+      } else {
+        ehStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
+      }
+    }
     return;
   }
 
@@ -994,7 +1024,7 @@ void CIRGenFunction::enterDtorCleanups(const CXXDestructorDecl *dd,
 
       // Anonymous union members do not have their destructors called.
      const RecordType *rt = type->getAsUnionType();
-      if (rt && rt->getOriginalDecl()->isAnonymousStructOrUnion())
+      if (rt && rt->getDecl()->isAnonymousStructOrUnion())
         continue;
 
       CleanupKind cleanupKind = getCleanupKind(dtorKind);
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 039d290..4a19d91 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -44,38 +44,70 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d,
 
   // If the type is variably-modified, emit all the VLA sizes for it.
   if (ty->isVariablyModifiedType())
-    cgm.errorNYI(d.getSourceRange(), "emitAutoVarDecl: variably modified type");
+    emitVariablyModifiedType(ty);
 
   assert(!cir::MissingFeatures::openMP());
 
   Address address = Address::invalid();
-  if (!ty->isConstantSizeType())
-    cgm.errorNYI(d.getSourceRange(), "emitAutoVarDecl: non-constant size type");
-
-  // A normal fixed sized variable becomes an alloca in the entry block,
-  // unless:
-  // - it's an NRVO variable.
-  // - we are compiling OpenMP and it's an OpenMP local variable.
-  if (nrvo) {
-    // The named return value optimization: allocate this variable in the
-    // return slot, so that we can elide the copy when returning this
-    // variable (C++0x [class.copy]p34).
-    address = returnValue;
-
-    if (const RecordDecl *rd = ty->getAsRecordDecl()) {
-      if (const auto *cxxrd = dyn_cast<CXXRecordDecl>(rd);
-          (cxxrd && !cxxrd->hasTrivialDestructor()) ||
-          rd->isNonTrivialToPrimitiveDestroy())
-        cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: set NRVO flag");
+  if (ty->isConstantSizeType()) {
+    // A normal fixed sized variable becomes an alloca in the entry block,
+    // unless:
+    // - it's an NRVO variable.
+    // - we are compiling OpenMP and it's an OpenMP local variable.
+    if (nrvo) {
+      // The named return value optimization: allocate this variable in the
+      // return slot, so that we can elide the copy when returning this
+      // variable (C++0x [class.copy]p34).
+      address = returnValue;
+
+      if (const RecordDecl *rd = ty->getAsRecordDecl()) {
+        if (const auto *cxxrd = dyn_cast<CXXRecordDecl>(rd);
+            (cxxrd && !cxxrd->hasTrivialDestructor()) ||
+            rd->isNonTrivialToPrimitiveDestroy())
+          cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: set NRVO flag");
+      }
+    } else {
+      // A normal fixed sized variable becomes an alloca in the entry block,
+      mlir::Type allocaTy = convertTypeForMem(ty);
+      // Create the temp alloca and declare variable using it.
+      address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
+                                 /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
+      declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()),
+              alignment);
     }
   } else {
-    // A normal fixed sized variable becomes an alloca in the entry block,
-    mlir::Type allocaTy = convertTypeForMem(ty);
-    // Create the temp alloca and declare variable using it.
-    address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
-                               /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
-    declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()),
-            alignment);
+    // Non-constant size type
+    assert(!cir::MissingFeatures::openMP());
+    if (!didCallStackSave) {
+      // Save the stack.
+      cir::PointerType defaultTy = AllocaInt8PtrTy;
+      CharUnits align = CharUnits::fromQuantity(
+          cgm.getDataLayout().getAlignment(defaultTy, false));
+      Address stack = createTempAlloca(defaultTy, align, loc, "saved_stack");
+
+      mlir::Value v = builder.createStackSave(loc, defaultTy);
+      assert(v.getType() == AllocaInt8PtrTy);
+      builder.createStore(loc, v, stack);
+
+      didCallStackSave = true;
+
+      // Push a cleanup block and restore the stack there.
+      // FIXME: in general circumstances, this should be an EH cleanup.
+      pushStackRestore(NormalCleanup, stack);
+    }
+
+    VlaSizePair vlaSize = getVLASize(ty);
+    mlir::Type memTy = convertTypeForMem(vlaSize.type);
+
+    // Allocate memory for the array.
+    address =
+        createTempAlloca(memTy, alignment, loc, d.getName(), vlaSize.numElts,
+                         /*alloca=*/nullptr, builder.saveInsertionPoint());
+
+    // If we have debug info enabled, properly describe the VLA dimensions for
+    // this type by registering the vla size expression for each of the
+    // dimensions.
+    assert(!cir::MissingFeatures::generateDebugInfo());
   }
 
   emission.addr = address;
@@ -696,6 +728,16 @@ struct DestroyObject final : EHScopeStack::Cleanup {
     cgf.emitDestroy(addr, type, destroyer);
   }
 };
+
+struct CallStackRestore final : EHScopeStack::Cleanup {
+  Address stack;
+  CallStackRestore(Address stack) : stack(stack) {}
+  void emit(CIRGenFunction &cgf) override {
+    mlir::Location loc = stack.getPointer().getLoc();
+    mlir::Value v = cgf.getBuilder().createLoad(loc, stack);
+    cgf.getBuilder().createStackRestore(loc, v);
+  }
+};
 } // namespace
 
 void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
@@ -805,6 +847,10 @@ CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
   llvm_unreachable("Unknown DestructionKind");
 }
 
+void CIRGenFunction::pushStackRestore(CleanupKind kind, Address spMem) {
+  ehStack.pushCleanup<CallStackRestore>(kind, spMem);
+}
+
 /// Enter a destroy cleanup for the given local variable.
 void CIRGenFunction::emitAutoVarTypeCleanup(
     const CIRGenFunction::AutoVarEmission &emission,
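With the non-constant-size branch filled in, a plain VLA should now produce a saved_stack temporary plus a cir.stacksave on first use and a cir.stackrestore cleanup when the scope ends, instead of hitting errorNYI. A sketch (hypothetical test input, not from the commit):

    void use_vla(int n) {
      int buf[n]; // first VLA in scope: cir.stacksave stored into "saved_stack"
      for (int i = 0; i < n; ++i)
        buf[i] = i;
    } // scope cleanup: cir.stackrestore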
diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp
index 6453843..f9ff37b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenException.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp
@@ -64,3 +64,11 @@ void CIRGenFunction::emitAnyExprToExn(const Expr *e, Address addr) {
   // Deactivate the cleanup block.
   assert(!cir::MissingFeatures::ehCleanupScope());
 }
+
+mlir::LogicalResult CIRGenFunction::emitCXXTryStmt(const CXXTryStmt &s) {
+  if (s.getTryBlock()->body_empty())
+    return mlir::LogicalResult::success();
+
+  cgm.errorNYI("emitCXXTryStmt: CXXTryStmt with non-empty body");
+  return mlir::LogicalResult::success();
+}
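Only the degenerate try statement is handled so far: when the try block has no statements, the whole construct (handlers included) folds away; anything else still reports NYI. A sketch of the accepted case (hypothetical):

    void f() {
      try {
        // empty body: emitCXXTryStmt returns success and emits nothing
      } catch (...) {
      }
    }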
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index f416571..4897c29 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -2068,7 +2068,7 @@ mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
     mlir::OpBuilder::InsertionGuard guard(builder);
     builder.restoreInsertionPoint(ip);
     addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
-                                /*var type*/ ty, name, alignIntAttr);
+                                /*var type*/ ty, name, alignIntAttr, arraySize);
     assert(!cir::MissingFeatures::astVarDeclInterface());
   }
   return addr;
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 97c0944..b1e9e76 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -130,13 +130,11 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
   const CXXMethodDecl *calleeDecl =
       devirtualizedMethod ? devirtualizedMethod : md;
   const CIRGenFunctionInfo *fInfo = nullptr;
-  if (isa<CXXDestructorDecl>(calleeDecl)) {
-    cgm.errorNYI(ce->getSourceRange(),
-                 "emitCXXMemberOrOperatorMemberCallExpr: destructor call");
-    return RValue::get(nullptr);
-  }
-
-  fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(calleeDecl);
+  if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl))
+    fInfo = &cgm.getTypes().arrangeCXXStructorDeclaration(
+        GlobalDecl(dtor, Dtor_Complete));
+  else
+    fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(calleeDecl);
 
   cir::FuncType ty = cgm.getTypes().getFunctionType(*fInfo);
 
@@ -151,9 +149,34 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
   // because then we know what the type is.
   bool useVirtualCall = canUseVirtualCall && !devirtualizedMethod;
 
-  if (isa<CXXDestructorDecl>(calleeDecl)) {
-    cgm.errorNYI(ce->getSourceRange(),
-                 "emitCXXMemberOrOperatorMemberCallExpr: destructor call");
+  if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl)) {
+    assert(ce->arg_begin() == ce->arg_end() &&
+           "Destructor shouldn't have explicit parameters");
+    assert(returnValue.isNull() && "Destructor shouldn't have return value");
+    if (useVirtualCall) {
+      cgm.getCXXABI().emitVirtualDestructorCall(*this, dtor, Dtor_Complete,
+                                                thisPtr.getAddress(),
+                                                cast<CXXMemberCallExpr>(ce));
+    } else {
+      GlobalDecl globalDecl(dtor, Dtor_Complete);
+      CIRGenCallee callee;
+      assert(!cir::MissingFeatures::appleKext());
+      if (!devirtualizedMethod) {
+        callee = CIRGenCallee::forDirect(
+            cgm.getAddrOfCXXStructor(globalDecl, fInfo, ty), globalDecl);
+      } else {
+        cgm.errorNYI(ce->getSourceRange(), "devirtualized destructor call");
+        return RValue::get(nullptr);
+      }
+
+      QualType thisTy =
+          isArrow ? base->getType()->getPointeeType() : base->getType();
+      // CIRGen does not pass CallOrInvoke here (different from OG LLVM
+      // codegen) because in practice it is always null even in OG.
+      emitCXXDestructorCall(globalDecl, callee, thisPtr.getPointer(), thisTy,
+                            /*implicitParam=*/nullptr,
+                            /*implicitParamTy=*/QualType(), ce);
+    }
     return RValue::get(nullptr);
   }
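Explicit destructor calls spelled as member calls are now arranged against the complete-object structor (Dtor_Complete) and emitted directly when no virtual dispatch is needed. A sketch of the direct case (hypothetical):

    struct Widget {
      ~Widget() {} // non-virtual
    };

    void reset(Widget *w) {
      w->~Widget(); // direct call to the D1 (complete) destructor
    }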
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 89e9ec4..19ed656 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -614,7 +614,7 @@ bool ConstRecordBuilder::applyZeroInitPadding(const ASTRecordLayout &layout,
 bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
   RecordDecl *rd = ile->getType()
                        ->castAs<clang::RecordType>()
-                       ->getOriginalDecl()
+                       ->getDecl()
                        ->getDefinitionOrSelf();
   const ASTRecordLayout &layout = cgm.getASTContext().getASTRecordLayout(rd);
 
@@ -817,9 +817,8 @@ bool ConstRecordBuilder::build(const APValue &val, const RecordDecl *rd,
 mlir::Attribute ConstRecordBuilder::finalize(QualType type) {
   type = type.getNonReferenceType();
-  RecordDecl *rd = type->castAs<clang::RecordType>()
-                       ->getOriginalDecl()
-                       ->getDefinitionOrSelf();
+  RecordDecl *rd =
+      type->castAs<clang::RecordType>()->getDecl()->getDefinitionOrSelf();
   mlir::Type valTy = cgm.convertType(type);
   return builder.build(valTy, rd->hasFlexibleArrayMember());
 }
@@ -842,9 +841,8 @@ mlir::Attribute ConstRecordBuilder::buildRecord(ConstantEmitter &emitter,
   ConstantAggregateBuilder constant(emitter.cgm);
   ConstRecordBuilder builder(emitter, constant, CharUnits::Zero());
 
-  const RecordDecl *rd = valTy->castAs<clang::RecordType>()
-                             ->getOriginalDecl()
-                             ->getDefinitionOrSelf();
+  const RecordDecl *rd =
+      valTy->castAs<clang::RecordType>()->getDecl()->getDefinitionOrSelf();
   const CXXRecordDecl *cd = dyn_cast<CXXRecordDecl>(rd);
   if (!builder.build(val, rd, false, cd, CharUnits::Zero()))
     return nullptr;
@@ -873,7 +871,7 @@ bool ConstRecordBuilder::updateRecord(ConstantEmitter &emitter,
 class ConstExprEmitter
     : public StmtVisitor<ConstExprEmitter, mlir::Attribute, QualType> {
   CIRGenModule &cgm;
-  LLVM_ATTRIBUTE_UNUSED ConstantEmitter &emitter;
+  [[maybe_unused]] ConstantEmitter &emitter;
 
 public:
   ConstExprEmitter(ConstantEmitter &emitter)
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 637f9ef..138082b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1734,9 +1734,9 @@ mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
   // LLVM we shall take VLA's, division by element size, etc.
   //
   // See more in `EmitSub` in CGExprScalar.cpp.
-  assert(!cir::MissingFeatures::ptrDiffOp());
-  cgf.cgm.errorNYI("ptrdiff");
-  return {};
+  assert(!cir::MissingFeatures::llvmLoweringPtrDiffConsidersPointee());
+  return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.PtrDiffTy,
+                                ops.lhs, ops.rhs);
 }
 
 mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
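Pointer subtraction now emits a single cir.ptr_diff at the CIR level; the division by the pointee size that classic codegen performs up front is deferred to LLVM lowering, as the renamed MissingFeatures marker records. A sketch (hypothetical):

    long count(const int *first, const int *last) {
      return last - first; // one cir.ptr_diff op; scaling happens at lowering
    }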
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 7a774e0..ba36cbe 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -410,6 +410,8 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
   curFn = fn;
 
   const Decl *d = gd.getDecl();
+
+  didCallStackSave = false;
   curCodeDecl = d;
   const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
   curFuncDecl = d->getNonClosureContext();
@@ -678,7 +680,13 @@ void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
   // possible to delegate the destructor body to the complete
   // destructor.  Do so.
   if (dtorType == Dtor_Deleting) {
-    cgm.errorNYI(dtor->getSourceRange(), "deleting destructor");
+    RunCleanupsScope dtorEpilogue(*this);
+    enterDtorCleanups(dtor, Dtor_Deleting);
+    if (haveInsertPoint()) {
+      QualType thisTy = dtor->getFunctionObjectParameterType();
+      emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
+                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
+    }
     return;
   }
 
@@ -1000,6 +1008,41 @@ mlir::Value CIRGenFunction::emitAlignmentAssumption(
                                offsetValue);
 }
 
+CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) {
+  const VariableArrayType *vla =
+      cgm.getASTContext().getAsVariableArrayType(type);
+  assert(vla && "type was not a variable array type!");
+  return getVLASize(vla);
+}
+
+CIRGenFunction::VlaSizePair
+CIRGenFunction::getVLASize(const VariableArrayType *type) {
+  // The number of elements so far; always size_t.
+  mlir::Value numElements;
+
+  QualType elementType;
+  do {
+    elementType = type->getElementType();
+    mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
+    assert(vlaSize && "no size for VLA!");
+    assert(vlaSize.getType() == SizeTy);
+
+    if (!numElements) {
+      numElements = vlaSize;
+    } else {
+      // It's undefined behavior if this wraps around, so mark it that way.
+      // FIXME: Teach -fsanitize=undefined to trap this.
+      numElements =
+          builder.createMul(numElements.getLoc(), numElements, vlaSize,
+                            cir::OverflowBehavior::NoUnsignedWrap);
+    }
+  } while ((type = getContext().getAsVariableArrayType(elementType)));
+
+  assert(numElements && "Undefined elements number");
+  return {numElements, elementType};
+}
+
 // TODO(cir): Most of this function can be shared between CIRGen
 // and traditional LLVM codegen
 void CIRGenFunction::emitVariablyModifiedType(QualType type) {
@@ -1080,7 +1123,26 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
       break;
 
     case Type::VariableArray: {
-      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
+      // Losing element qualification here is fine.
+      const VariableArrayType *vat = cast<clang::VariableArrayType>(ty);
+
+      // Unknown size indication requires no size computation.
+      // Otherwise, evaluate and record it.
+      if (const Expr *sizeExpr = vat->getSizeExpr()) {
+        // It's possible that we might have emitted this already,
+        // e.g. with a typedef and a pointer to it.
+        mlir::Value &entry = vlaSizeMap[sizeExpr];
+        if (!entry) {
+          mlir::Value size = emitScalarExpr(sizeExpr);
+          assert(!cir::MissingFeatures::sanitizers());
+
+          // Always zexting here would be wrong if it weren't
+          // undefined behavior to have a negative bound.
+          // FIXME: What about when size's type is larger than size_t?
+          entry = builder.createIntCast(size, SizeTy);
+        }
+      }
+      type = vat->getElementType();
       break;
     }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 7a606ee..3c36f5c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -149,6 +149,10 @@ public:
   using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
   SymTableTy symbolTable;
 
+  /// Whether a cir.stacksave operation has been added. Used to avoid
+  /// inserting cir.stacksave for multiple VLAs in the same scope.
+  bool didCallStackSave = false;
+
   /// Whether or not a Microsoft-style asm block has been processed within
   /// this function. These can potentially set the return value.
   bool sawAsmBlock = false;
@@ -188,6 +192,14 @@ public:
   llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
   llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
 
+  // This keeps track of the associated size for each VLA type.
+  // We track this by the size expression rather than the type itself because
+  // in certain situations, like a const qualifier applied to a VLA typedef,
+  // multiple VLA types can share the same size expression.
+  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+  // enter/leave scopes.
+  llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
+
 public:
   /// A non-RAII class containing all the information about a bound
   /// opaque value.  OpaqueValueMapping, below, is a RAII wrapper for
@@ -436,6 +448,20 @@ public:
     }
   };
 
+  struct VlaSizePair {
+    mlir::Value numElts;
+    QualType type;
+
+    VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
+  };
+
+  /// Returns an MLIR::Value+QualType pair that corresponds to the size,
+  /// in non-variably-sized elements, of a variable length array type,
+  /// plus that largest non-variably-sized element type. Assumes that
+  /// the type has already been emitted with emitVariablyModifiedType.
+  VlaSizePair getVLASize(const VariableArrayType *type);
+  VlaSizePair getVLASize(QualType type);
+
   void finishFunction(SourceLocation endLoc);
 
   /// Determine whether the given initializer is trivial in the sense
@@ -583,6 +609,8 @@ public:
     return needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup;
   }
 
+  void pushStackRestore(CleanupKind kind, Address spMem);
+
   /// Set the address of a local variable.
   void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
     assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
@@ -854,6 +882,7 @@ public:
 
   protected:
     bool performCleanup;
+    bool oldDidCallStackSave;
 
   private:
     RunCleanupsScope(const RunCleanupsScope &) = delete;
@@ -867,6 +896,8 @@ public:
     explicit RunCleanupsScope(CIRGenFunction &cgf)
         : performCleanup(true), cgf(cgf) {
       cleanupStackDepth = cgf.ehStack.stable_begin();
+      oldDidCallStackSave = cgf.didCallStackSave;
+      cgf.didCallStackSave = false;
       oldCleanupStackDepth = cgf.currentCleanupStackDepth;
       cgf.currentCleanupStackDepth = cleanupStackDepth;
     }
@@ -883,6 +914,7 @@ public:
       assert(performCleanup && "Already forced cleanup");
       {
         mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+        cgf.didCallStackSave = oldDidCallStackSave;
         cgf.popCleanupBlocks(cleanupStackDepth);
         performCleanup = false;
         cgf.currentCleanupStackDepth = oldCleanupStackDepth;
@@ -1281,10 +1313,10 @@ public:
 
   mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
 
-  void emitNewArrayInitializer(const CXXNewExpr *E, QualType ElementType,
-                               mlir::Type ElementTy, Address BeginPtr,
-                               mlir::Value NumElements,
-                               mlir::Value AllocSizeWithoutCookie);
+  void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
+                               mlir::Type elementTy, Address beginPtr,
+                               mlir::Value numElements,
+                               mlir::Value allocSizeWithoutCookie);
 
   RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
                                        const CXXMethodDecl *md,
@@ -1297,6 +1329,8 @@ public:
 
   void emitCXXThrowExpr(const CXXThrowExpr *e);
 
+  mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
+
   void emitCtorPrologue(const clang::CXXConstructorDecl *ctor,
                         clang::CXXCtorType ctorType, FunctionArgList &args);
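Because RunCleanupsScope saves and clears didCallStackSave, each scope that contains a VLA gets its own stacksave/stackrestore pair rather than reusing an enclosing scope's save. A sketch where that matters (hypothetical):

    void per_iteration(int n) {
      for (int i = 1; i <= n; ++i) {
        char scratch[i]; // fresh cir.stacksave inside the loop-body scope
        scratch[0] = 0;
      } // the iteration's cleanup restores the stack each time around
    }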
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 9e490c6d..c184d4a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -95,7 +95,10 @@ public:
                                        clang::GlobalDecl gd, Address thisAddr,
                                        mlir::Type ty,
                                        SourceLocation loc) override;
-
+  mlir::Value emitVirtualDestructorCall(CIRGenFunction &cgf,
+                                        const CXXDestructorDecl *dtor,
+                                        CXXDtorType dtorType, Address thisAddr,
+                                        DeleteOrMemberCallExpr e) override;
   mlir::Value getVTableAddressPoint(BaseSubobject base,
                                     const CXXRecordDecl *vtableClass) override;
   mlir::Value getVTableAddressPointInStructorWithVTT(
@@ -465,6 +468,29 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &cgvt,
   }
 }
 
+mlir::Value CIRGenItaniumCXXABI::emitVirtualDestructorCall(
+    CIRGenFunction &cgf, const CXXDestructorDecl *dtor, CXXDtorType dtorType,
+    Address thisAddr, DeleteOrMemberCallExpr expr) {
+  auto *callExpr = dyn_cast<const CXXMemberCallExpr *>(expr);
+  auto *delExpr = dyn_cast<const CXXDeleteExpr *>(expr);
+  assert((callExpr != nullptr) ^ (delExpr != nullptr));
+  assert(callExpr == nullptr || callExpr->arg_begin() == callExpr->arg_end());
+  assert(dtorType == Dtor_Deleting || dtorType == Dtor_Complete);
+
+  GlobalDecl globalDecl(dtor, dtorType);
+  const CIRGenFunctionInfo *fnInfo =
+      &cgm.getTypes().arrangeCXXStructorDeclaration(globalDecl);
+  const cir::FuncType &fnTy = cgm.getTypes().getFunctionType(*fnInfo);
+  auto callee = CIRGenCallee::forVirtual(callExpr, globalDecl, thisAddr, fnTy);
+
+  QualType thisTy =
+      callExpr ? callExpr->getObjectType() : delExpr->getDestroyedType();
+
+  cgf.emitCXXDestructorCall(globalDecl, callee, thisAddr.emitRawPointer(),
+                            thisTy, nullptr, QualType(), nullptr);
+  return nullptr;
+}
+
 void CIRGenItaniumCXXABI::emitVirtualInheritanceTables(
     const CXXRecordDecl *rd) {
   CIRGenVTables &vtables = cgm.getVTables();
@@ -718,8 +744,8 @@ static bool shouldUseExternalRttiDescriptor(CIRGenModule &cgm, QualType ty) {
     return false;
 
   if (const auto *recordTy = dyn_cast<RecordType>(ty)) {
-    const CXXRecordDecl *rd =
-        cast<CXXRecordDecl>(recordTy->getOriginalDecl())->getDefinitionOrSelf();
+    const auto *rd =
+        cast<CXXRecordDecl>(recordTy->getDecl())->getDefinitionOrSelf();
     if (!rd->hasDefinition())
       return false;
 
@@ -833,9 +859,7 @@ static bool canUseSingleInheritance(const CXXRecordDecl *rd) {
 
 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
 static bool isIncompleteClassType(const RecordType *recordTy) {
-  return !recordTy->getOriginalDecl()
-              ->getDefinitionOrSelf()
-              ->isCompleteDefinition();
+  return !recordTy->getDecl()->getDefinitionOrSelf()->isCompleteDefinition();
 }
 
 /// Returns whether the given type contains an
@@ -913,8 +937,7 @@ const char *vTableClassNameForType(const CIRGenModule &cgm, const Type *ty) {
   case Type::Atomic:
   // FIXME: GCC treats block pointers as fundamental types?!
   case Type::BlockPointer:
-    cgm.errorNYI("VTableClassNameForType: __fundamental_type_info");
-    break;
+    return "_ZTVN10__cxxabiv123__fundamental_type_infoE";
   case Type::ConstantArray:
   case Type::IncompleteArray:
   case Type::VariableArray:
     break;
 
   case Type::Enum:
-    cgm.errorNYI("VTableClassNameForType: Enum");
-    break;
+    return "_ZTVN10__cxxabiv116__enum_type_infoE";
 
   case Type::Record: {
-    const CXXRecordDecl *rd =
-        cast<CXXRecordDecl>(cast<RecordType>(ty)->getOriginalDecl())
-            ->getDefinitionOrSelf();
+    const auto *rd = cast<CXXRecordDecl>(cast<RecordType>(ty)->getDecl())
+                         ->getDefinitionOrSelf();
 
     if (!rd->hasDefinition() || !rd->getNumBases()) {
       return classTypeInfo;
@@ -1005,8 +1026,8 @@ static cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &cgm,
     return cir::GlobalLinkageKind::LinkOnceODRLinkage;
 
   if (const RecordType *record = dyn_cast<RecordType>(ty)) {
-    const CXXRecordDecl *rd =
-        cast<CXXRecordDecl>(record->getOriginalDecl())->getDefinitionOrSelf();
+    const auto *rd =
+        cast<CXXRecordDecl>(record->getDecl())->getDefinitionOrSelf();
     if (rd->hasAttr<WeakAttr>())
       return cir::GlobalLinkageKind::WeakODRLinkage;
 
@@ -1356,9 +1377,8 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo(
     break;
 
   case Type::Record: {
-    const auto *rd =
-        cast<CXXRecordDecl>(cast<RecordType>(ty)->getOriginalDecl())
-            ->getDefinitionOrSelf();
+    const auto *rd = cast<CXXRecordDecl>(cast<RecordType>(ty)->getDecl())
+                         ->getDefinitionOrSelf();
 
     if (!rd->hasDefinition() || !rd->getNumBases()) {
       // We don't need to emit any fields.
       break;
@@ -1625,8 +1645,7 @@ void CIRGenItaniumCXXABI::emitThrow(CIRGenFunction &cgf,
   // Lowering pass to skip passing the trivial function.
   //
   if (const RecordType *recordTy = clangThrowType->getAs<RecordType>()) {
-    CXXRecordDecl *rec =
-        cast<CXXRecordDecl>(recordTy->getOriginalDecl()->getDefinition());
+    auto *rec = cast<CXXRecordDecl>(recordTy->getDecl()->getDefinition());
     assert(!cir::MissingFeatures::isTrivialCtorOrDtor());
     if (!rec->hasTrivialDestructor()) {
       cgm.errorNYI("emitThrow: non-trivial destructor");
@@ -1925,6 +1944,15 @@ static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &cgf) {
   return cgf.cgm.createRuntimeFunction(FTy, "__dynamic_cast");
 }
 
+static Address emitDynamicCastToVoid(CIRGenFunction &cgf, mlir::Location loc,
+                                     QualType srcRecordTy, Address src) {
+  bool vtableUsesRelativeLayout =
+      cgf.cgm.getItaniumVTableContext().isRelativeLayout();
+  mlir::Value ptr = cgf.getBuilder().createDynCastToVoid(
+      loc, src.getPointer(), vtableUsesRelativeLayout);
+  return Address{ptr, src.getAlignment()};
+}
+
 static cir::DynamicCastInfoAttr emitDynamicCastInfo(CIRGenFunction &cgf,
                                                     mlir::Location loc,
                                                     QualType srcRecordTy,
@@ -1959,10 +1987,8 @@ mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
   bool isCastToVoid = destRecordTy.isNull();
   assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference");
 
-  if (isCastToVoid) {
-    cgm.errorNYI(loc, "emitDynamicCastToVoid");
-    return {};
-  }
+  if (isCastToVoid)
+    return emitDynamicCastToVoid(cgf, loc, srcRecordTy, src).getPointer();
 
   // If the destination is effectively final, the cast succeeds if and only
   // if the dynamic type of the pointer is exactly the destination type.
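dynamic_cast to void* no longer errors: it lowers through the new createDynCastToVoid builder helper to a cir.dyn_cast that resolves to the most-derived object. A sketch (hypothetical):

    struct Poly {
      virtual ~Poly() {}
    };

    void *most_derived(Poly *p) {
      return dynamic_cast<void *>(p); // emitDynamicCastToVoid path
    }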
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index fe1ea56..127f763 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -88,6 +88,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
   FP80Ty = cir::FP80Type::get(&getMLIRContext());
   FP128Ty = cir::FP128Type::get(&getMLIRContext());
 
+  AllocaInt8PtrTy = cir::PointerType::get(UInt8Ty, cirAllocaAddressSpace);
+
   PointerAlignInBytes =
       astContext
           .toCharUnitsFromBits(
@@ -449,17 +451,49 @@ void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd,
   curCGF = nullptr;
 
   setNonAliasAttributes(gd, funcOp);
-  assert(!cir::MissingFeatures::opFuncAttributesForDefinition());
+  setCIRFunctionAttributesForDefinition(funcDecl, funcOp);
+
+  auto getPriority = [this](const auto *attr) -> int {
+    Expr *e = attr->getPriority();
+    if (e)
+      return e->EvaluateKnownConstInt(this->getASTContext()).getExtValue();
+    return attr->DefaultPriority;
+  };
 
-  if (funcDecl->getAttr<ConstructorAttr>())
-    errorNYI(funcDecl->getSourceRange(), "constructor attribute");
-  if (funcDecl->getAttr<DestructorAttr>())
-    errorNYI(funcDecl->getSourceRange(), "destructor attribute");
+  if (const ConstructorAttr *ca = funcDecl->getAttr<ConstructorAttr>())
+    addGlobalCtor(funcOp, getPriority(ca));
+  if (const DestructorAttr *da = funcDecl->getAttr<DestructorAttr>())
+    addGlobalDtor(funcOp, getPriority(da));
 
   if (funcDecl->getAttr<AnnotateAttr>())
     errorNYI(funcDecl->getSourceRange(), "deferredAnnotations");
 }
 
+/// Track functions to be called before main() runs.
+void CIRGenModule::addGlobalCtor(cir::FuncOp ctor,
+                                 std::optional<int> priority) {
+  assert(!cir::MissingFeatures::globalCtorLexOrder());
+  assert(!cir::MissingFeatures::globalCtorAssociatedData());
+
+  // Traditional LLVM codegen directly adds the function to the list of global
+  // ctors. In CIR we just add a global_ctor attribute to the function. The
+  // global list is created in LoweringPrepare.
+  //
+  // FIXME(from traditional LLVM): Type coercion of void()* types.
+  ctor.setGlobalCtorPriority(priority);
+}
+
+/// Add a function to the list that will be called when the module is
+/// unloaded.
+void CIRGenModule::addGlobalDtor(cir::FuncOp dtor,
+                                 std::optional<int> priority) {
+  if (codeGenOpts.RegisterGlobalDtorsWithAtExit &&
+      (!getASTContext().getTargetInfo().getTriple().isOSAIX()))
+    errorNYI(dtor.getLoc(), "registerGlobalDtorsWithAtExit");
+
+  // FIXME(from traditional LLVM): Type coercion of void()* types.
+  dtor.setGlobalDtorPriority(priority);
+}
+
 void CIRGenModule::handleCXXStaticMemberVarInstantiation(VarDecl *vd) {
   VarDecl::DefinitionKind dk = vd->isThisDeclarationADefinition();
   if (dk == VarDecl::Definition && vd->hasAttr<DLLImportAttr>())
@@ -1885,6 +1919,91 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl,
   }
 }
 
+void CIRGenModule::setCIRFunctionAttributesForDefinition(
+    const clang::FunctionDecl *decl, cir::FuncOp f) {
+  assert(!cir::MissingFeatures::opFuncUnwindTablesAttr());
+  assert(!cir::MissingFeatures::stackProtector());
+
+  std::optional<cir::InlineKind> existingInlineKind = f.getInlineKind();
+  bool isNoInline =
+      existingInlineKind && *existingInlineKind == cir::InlineKind::NoInline;
+  bool isAlwaysInline = existingInlineKind &&
+                        *existingInlineKind == cir::InlineKind::AlwaysInline;
+
+  if (!decl) {
+    assert(!cir::MissingFeatures::hlsl());
+
+    if (!isAlwaysInline &&
+        codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
+      // If inlining is disabled and we don't have a declaration to control
+      // inlining, mark the function as 'noinline' unless it is explicitly
+      // marked as 'alwaysinline'.
+      f.setInlineKindAttr(
+          cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+    }
+
+    return;
+  }
+
+  assert(!cir::MissingFeatures::opFuncArmStreamingAttr());
+  assert(!cir::MissingFeatures::opFuncArmNewAttr());
+  assert(!cir::MissingFeatures::opFuncOptNoneAttr());
+  assert(!cir::MissingFeatures::opFuncMinSizeAttr());
+  assert(!cir::MissingFeatures::opFuncNakedAttr());
+  assert(!cir::MissingFeatures::opFuncNoDuplicateAttr());
+  assert(!cir::MissingFeatures::hlsl());
+
+  // Handle inline attributes
+  if (decl->hasAttr<NoInlineAttr>() && !isAlwaysInline) {
+    // Add noinline if the function isn't always_inline.
+    f.setInlineKindAttr(
+        cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+  } else if (decl->hasAttr<AlwaysInlineAttr>() && !isNoInline) {
+    // Don't override AlwaysInline with NoInline, or vice versa, since we
+    // can't specify both in IR.
+    f.setInlineKindAttr(cir::InlineAttr::get(&getMLIRContext(),
+                                             cir::InlineKind::AlwaysInline));
+  } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
+    // If inlining is disabled, force everything that isn't always_inline
+    // to carry an explicit noinline attribute.
+    if (!isAlwaysInline) {
+      f.setInlineKindAttr(
+          cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+    }
+  } else {
+    // Otherwise, propagate the inline hint attribute and potentially use its
+    // absence to mark things as noinline.
+    // Search function and template pattern redeclarations for inline.
+    if (auto *fd = dyn_cast<FunctionDecl>(decl)) {
+      // TODO: Share this checkForInline implementation with classic codegen.
+      // This logic is likely to change over time, so sharing would help
+      // ensure consistency.
+      auto checkForInline = [](const FunctionDecl *decl) {
+        auto checkRedeclForInline = [](const FunctionDecl *redecl) {
+          return redecl->isInlineSpecified();
+        };
+        if (any_of(decl->redecls(), checkRedeclForInline))
+          return true;
+        const FunctionDecl *pattern = decl->getTemplateInstantiationPattern();
+        if (!pattern)
+          return false;
+        return any_of(pattern->redecls(), checkRedeclForInline);
+      };
+      if (checkForInline(fd)) {
+        f.setInlineKindAttr(cir::InlineAttr::get(&getMLIRContext(),
+                                                 cir::InlineKind::InlineHint));
+      } else if (codeGenOpts.getInlining() ==
+                     CodeGenOptions::OnlyHintInlining &&
+                 !fd->isInlined() && !isAlwaysInline) {
+        f.setInlineKindAttr(
+            cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline));
+      }
+    }
+  }
+
+  assert(!cir::MissingFeatures::opFuncColdHotAttr());
+}
+
 cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
     StringRef mangledName, mlir::Type funcType, GlobalDecl gd, bool forVTable,
     bool dontDefer, bool isThunk, ForDefinition_t isForDefinition,
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index f627bae..1fc116d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -159,6 +159,13 @@ public:
                                  bool isConstant = false,
                                  mlir::Operation *insertPoint = nullptr);
 
+  /// Add a global constructor or destructor to the module.
+  /// The priority is optional; if not specified, the default priority is
+  /// used.
+  void addGlobalCtor(cir::FuncOp ctor,
+                     std::optional<int> priority = std::nullopt);
+  void addGlobalDtor(cir::FuncOp dtor,
+                     std::optional<int> priority = std::nullopt);
+
   bool shouldZeroInitPadding() const {
     // In C23 (N3096) $6.7.10:
     // """
@@ -422,6 +429,10 @@ public:
   void setFunctionAttributes(GlobalDecl gd, cir::FuncOp f,
                              bool isIncompleteFunction, bool isThunk);
 
+  /// Set extra attributes (inline, etc.) for a function.
+  void setCIRFunctionAttributesForDefinition(const clang::FunctionDecl *fd,
+                                             cir::FuncOp f);
+
   void emitGlobalDefinition(clang::GlobalDecl gd,
                             mlir::Operation *op = nullptr);
   void emitGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op);
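Taken together, the CIRGenModule changes mean definitions like these should now be annotated instead of rejected: constructor/destructor attributes become global_ctor/global_dtor priorities on the cir.func (the module-level ctor list is materialized later in LoweringPrepare), and each definition gets its inline kind resolved. A sketch (hypothetical):

    __attribute__((constructor(101))) void early_init() {}
    __attribute__((destructor)) void teardown() {}

    __attribute__((noinline)) int opaque(int x) { return x; }
    inline int hinted(int x) { return x + 1; } // InlineHint via checkForInline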
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
index 4cf2237..5ba6bcb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
@@ -73,7 +73,7 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) {
   // Array sections are special, and we have to treat them that way.
   if (const auto *section =
           dyn_cast<ArraySectionExpr>(curVarExpr->IgnoreParenImpCasts()))
-    origType = ArraySectionExpr::getBaseOriginalType(section);
+    origType = section->getElementType();
 
   mlir::Location exprLoc = cgm.getLoc(curVarExpr->getBeginLoc());
   llvm::SmallVector<mlir::Value> bounds;
@@ -84,16 +84,10 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) {
   e->printPretty(os, nullptr, getContext().getPrintingPolicy());
 
   auto addBoundType = [&](const Expr *e) {
-    if (const auto *section = dyn_cast<ArraySectionExpr>(curVarExpr)) {
-      QualType baseTy = ArraySectionExpr::getBaseOriginalType(
-          section->getBase()->IgnoreParenImpCasts());
-      if (auto *at = getContext().getAsArrayType(baseTy))
-        boundTypes.push_back(at->getElementType());
-      else
-        boundTypes.push_back(baseTy->getPointeeType());
-    } else {
+    if (const auto *section = dyn_cast<ArraySectionExpr>(curVarExpr))
+      boundTypes.push_back(section->getElementType());
+    else
       boundTypes.push_back(curVarExpr->getType());
-    }
   };
 
   addBoundType(curVarExpr);
@@ -113,8 +107,7 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) {
       if (const Expr *len = section->getLength()) {
         extent = emitOpenACCIntExpr(len);
       } else {
-        QualType baseTy = ArraySectionExpr::getBaseOriginalType(
-            section->getBase()->IgnoreParenImpCasts());
+        QualType baseTy = section->getBaseType();
         // We know this is the case as implicit lengths are only allowed for
         // array types with a constant size, or a dependent size. AND since
         // we are codegen we know we're not dependent.
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
index 3d86f71..ce4ae7e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
@@ -1005,7 +1005,7 @@ public:
           /*temporary=*/nullptr, OpenACCReductionOperator::Invalid,
           Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
           opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
-          privateOp);
+          privateOp, /*reductionCombinerRecipes=*/{});
       // TODO: OpenACC: The dialect is going to change in the near future to
      // have these be on a different operation, so when that changes, we
      // probably need to change these here.
@@ -1046,7 +1046,7 @@ public:
           OpenACCReductionOperator::Invalid,
           Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
           opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
-          firstPrivateOp);
+          firstPrivateOp, /*reductionCombinerRecipe=*/{});
 
       // TODO: OpenACC: The dialect is going to change in the near future to
       // have these be on a different operation, so when that changes, we
@@ -1088,7 +1088,7 @@ public:
           /*temporary=*/nullptr, clause.getReductionOp(),
           Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
           opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
-          reductionOp);
+          reductionOp, varRecipe.CombinerRecipes);
 
       operation.addReduction(builder.getContext(), reductionOp, recipe);
     }
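Passing the Sema-built CombinerRecipes through to the recipe builder means a scalar reduction like the one below should now get a real combiner region (the lhs-combine-rhs statement emitted from the recipe) rather than a pass-through yield of the LHS. A sketch (hypothetical):

    float dot(const float *a, const float *b, int n) {
      float sum = 0.0f;
    #pragma acc parallel loop reduction(+ : sum)
      for (int i = 0; i < n; ++i)
        sum += a[i] * b[i];
      return sum;
    }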
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index 24a5fc2..f638d39 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -398,6 +398,7 @@ void OpenACCRecipeBuilderBase::createRecipeDestroySection(
     emitDestroy(block->getArgument(1), elementTy);
   }
 
+  ls.forceCleanup();
   mlir::acc::YieldOp::create(builder, locEnd);
 }
 void OpenACCRecipeBuilderBase::makeBoundsInit(
@@ -480,6 +481,7 @@ void OpenACCRecipeBuilderBase::createInitRecipe(
                       /*isInitSection=*/true);
   }
 
+  ls.forceCleanup();
   mlir::acc::YieldOp::create(builder, locEnd);
 }
 
@@ -518,6 +520,7 @@ void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy(
   cgf.emitAutoVarInit(tempDeclEmission);
 
   builder.setInsertionPointToEnd(&copyRegion.back());
+  ls.forceCleanup();
   mlir::acc::YieldOp::create(builder, locEnd);
 }
 
@@ -527,16 +530,143 @@ void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy(
 // doesn't restore it afterwards.
 void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
     mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
-    mlir::acc::ReductionRecipeOp recipe, size_t numBounds) {
+    mlir::acc::ReductionRecipeOp recipe, size_t numBounds, QualType origType,
+    llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe> combinerRecipes) {
   mlir::Block *block =
       createRecipeBlock(recipe.getCombinerRegion(), mainOp.getType(), loc,
                         numBounds, /*isInit=*/false);
   builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
   CIRGenFunction::LexicalScope ls(cgf, loc, block);
 
-  mlir::BlockArgument lhsArg = block->getArgument(0);
+  mlir::Value lhsArg = block->getArgument(0);
+  mlir::Value rhsArg = block->getArgument(1);
+  llvm::MutableArrayRef<mlir::BlockArgument> boundsRange =
+      block->getArguments().drop_front(2);
+
+  if (llvm::any_of(combinerRecipes, [](auto &r) { return r.Op == nullptr; })) {
+    cgf.cgm.errorNYI(loc, "OpenACC Reduction combiner not generated");
+    mlir::acc::YieldOp::create(builder, locEnd, block->getArgument(0));
+    return;
+  }
+
+  // Apply the bounds so that we can get our bounds emitted correctly.
+  for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange))
+    std::tie(lhsArg, rhsArg) =
+        createBoundsLoop(lhsArg, rhsArg, boundArg, loc, /*inverse=*/false);
+
+  // Emitter for when we know this isn't a struct or array we have to loop
+  // through. This should work for the 'field' once the get-element call has
+  // been made.
+  auto emitSingleCombiner =
+      [&](mlir::Value lhsArg, mlir::Value rhsArg,
+          const OpenACCReductionRecipe::CombinerRecipe &combiner) {
+        mlir::Type elementTy =
+            mlir::cast<cir::PointerType>(lhsArg.getType()).getPointee();
+        CIRGenFunction::DeclMapRevertingRAII declMapRAIILhs{cgf, combiner.LHS};
+        cgf.setAddrOfLocalVar(
+            combiner.LHS,
+            Address{lhsArg, elementTy,
+                    cgf.getContext().getDeclAlign(combiner.LHS)});
+        CIRGenFunction::DeclMapRevertingRAII declMapRAIIRhs{cgf, combiner.RHS};
+        cgf.setAddrOfLocalVar(
+            combiner.RHS,
+            Address{rhsArg, elementTy,
+                    cgf.getContext().getDeclAlign(combiner.RHS)});
+
+        [[maybe_unused]] mlir::LogicalResult stmtRes =
+            cgf.emitStmt(combiner.Op, /*useCurrentScope=*/true);
+      };
+
+  // Emitter for when we know this is either a non-array or an element of an
+  // array (which also shouldn't be an array type?). This function should
+  // generate the initialization code for an entire 'array-element'/non-array,
+  // including diving into each element of a struct (if necessary).
+  auto emitCombiner = [&](mlir::Value lhsArg, mlir::Value rhsArg,
+                          QualType ty) {
+    assert(!ty->isArrayType() && "Array type shouldn't get here");
+    if (const auto *rd = ty->getAsRecordDecl()) {
+      if (combinerRecipes.size() == 1 &&
+          cgf.getContext().hasSameType(ty,
+                                       combinerRecipes[0].LHS->getType())) {
+        // If this is a 'top level' operator on the type we can just emit this
+        // as a simple one.
+        emitSingleCombiner(lhsArg, rhsArg, combinerRecipes[0]);
+      } else {
+        // Else we have to handle each individual field after a get-element.
+        for (const auto &[field, combiner] :
+             llvm::zip_equal(rd->fields(), combinerRecipes)) {
+          mlir::Type fieldType = cgf.convertType(field->getType());
+          auto fieldPtr = cir::PointerType::get(fieldType);
+
+          mlir::Value lhsField = builder.createGetMember(
+              loc, fieldPtr, lhsArg, field->getName(), field->getFieldIndex());
+          mlir::Value rhsField = builder.createGetMember(
+              loc, fieldPtr, rhsArg, field->getName(), field->getFieldIndex());
+
+          emitSingleCombiner(lhsField, rhsField, combiner);
+        }
+      }
+    } else {
+      // If this is a single thing (because we should know this isn't an
+      // array, as Sema wouldn't let us get here), we can just do a normal
+      // emit call.
+      emitSingleCombiner(lhsArg, rhsArg, combinerRecipes[0]);
+    }
+  };
+
+  if (const auto *cat = cgf.getContext().getAsConstantArrayType(origType)) {
+    // If we're in an array, we have to emit the combiner for each element of
+    // the array.
+    auto itrTy = mlir::cast<cir::IntType>(cgf.PtrDiffTy);
+    auto itrPtrTy = cir::PointerType::get(itrTy);
+
+    mlir::Value zero =
+        builder.getConstInt(loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 0);
+    mlir::Value itr =
+        cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr",
+                              cgf.cgm.getSize(cgf.getPointerAlign()));
+    builder.CIRBaseBuilderTy::createStore(loc, zero, itr);
+
+    builder.setInsertionPointAfter(builder.createFor(
+        loc,
+        /*condBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          auto loadItr = cir::LoadOp::create(builder, loc, {itr});
+          mlir::Value arraySize =
+              builder.getConstInt(loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy),
+                                  cat->getZExtSize());
+          auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadItr,
+                                           arraySize);
+          builder.createCondition(cmp);
+        },
+        /*bodyBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          auto loadItr = cir::LoadOp::create(builder, loc, {itr});
+          auto lhsElt = builder.getArrayElement(
+              loc, loc, lhsArg, cgf.convertType(cat->getElementType()),
+              loadItr, /*shouldDecay=*/true);
+          auto rhsElt = builder.getArrayElement(
+              loc, loc, rhsArg, cgf.convertType(cat->getElementType()),
+              loadItr, /*shouldDecay=*/true);
+
+          emitCombiner(lhsElt, rhsElt, cat->getElementType());
+          builder.createYield(loc);
+        },
+        /*stepBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          auto loadItr = cir::LoadOp::create(builder, loc, {itr});
+          auto inc = cir::UnaryOp::create(builder, loc, loadItr.getType(),
+                                          cir::UnaryOpKind::Inc, loadItr);
+          builder.CIRBaseBuilderTy::createStore(loc, inc, itr);
+          builder.createYield(loc);
+        }));
-  mlir::acc::YieldOp::create(builder, locEnd, lhsArg);
+  } else if (origType->isArrayType()) {
+    cgf.cgm.errorNYI(loc,
+                     "OpenACC Reduction combiner non-constant array recipe");
+  } else {
+    emitCombiner(lhsArg, rhsArg, origType);
+  }
+
+  builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
+  ls.forceCleanup();
+  mlir::acc::YieldOp::create(builder, locEnd, block->getArgument(0));
 }
 } // namespace clang::CIRGen
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
index a5da744..745d424 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
@@ -64,10 +64,10 @@ protected:
   // that this function is not 'insertion point' clean, in that it alters the
   // insertion point to be inside of the 'combiner' section of the recipe, but
   // doesn't restore it afterwards.
-  void createReductionRecipeCombiner(mlir::Location loc, mlir::Location locEnd,
-                                     mlir::Value mainOp,
-                                     mlir::acc::ReductionRecipeOp recipe,
-                                     size_t numBounds);
+  void createReductionRecipeCombiner(
+      mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
+      mlir::acc::ReductionRecipeOp recipe, size_t numBounds, QualType origType,
+      llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe> combinerRecipes);
 
   void createInitRecipe(mlir::Location loc, mlir::Location locEnd,
                         SourceRange exprRange, mlir::Value mainOp,
@@ -169,7 +169,9 @@ public:
                      const Expr *varRef, const VarDecl *varRecipe,
                      const VarDecl *temporary,
                      OpenACCReductionOperator reductionOp, DeclContext *dc,
                      QualType origType, size_t numBounds,
                      llvm::ArrayRef<QualType> boundTypes, QualType baseType,
-                     mlir::Value mainOp) {
+                     mlir::Value mainOp,
+                     llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe>
+                         reductionCombinerRecipes) {
     assert(!varRecipe->getType()->isSpecificBuiltinType(
                BuiltinType::ArraySection) &&
            "array section shouldn't make it to recipe creation");
@@ -208,7 +210,8 @@ public:
       createInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
                        recipe.getInitRegion(), numBounds, boundTypes,
                        varRecipe, origType, /*emitInitExpr=*/true);
-      createReductionRecipeCombiner(loc, locEnd, mainOp, recipe, numBounds);
+      createReductionRecipeCombiner(loc, locEnd, mainOp, recipe, numBounds,
+                                    origType, reductionCombinerRecipes);
     } else {
       static_assert(std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp>);
       createInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 0b8f8bf..5ba64dd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -154,6 +154,8 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
     return emitWhileStmt(cast<WhileStmt>(*s));
   case Stmt::DoStmtClass:
     return emitDoStmt(cast<DoStmt>(*s));
+  case Stmt::CXXTryStmtClass:
+    return emitCXXTryStmt(cast<CXXTryStmt>(*s));
   case Stmt::CXXForRangeStmtClass:
     return emitCXXForRangeStmt(cast<CXXForRangeStmt>(*s), attr);
   case Stmt::OpenACCComputeConstructClass:
@@ -199,7 +201,6 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
   case Stmt::CoroutineBodyStmtClass:
     return emitCoroutineBody(cast<CoroutineBodyStmt>(*s));
   case Stmt::CoreturnStmtClass:
-  case Stmt::CXXTryStmtClass:
   case Stmt::IndirectGotoStmtClass:
   case Stmt::OMPParallelDirectiveClass:
   case Stmt::OMPTaskwaitDirectiveClass:
@@ -535,7 +536,7 @@ mlir::LogicalResult CIRGenFunction::emitLabel(const clang::LabelDecl &d) {
   mlir::Block *currBlock = builder.getBlock();
   mlir::Block *labelBlock = currBlock;
 
-  if (!currBlock->empty()) {
+  if (!currBlock->empty() || currBlock->isEntryBlock()) {
     {
       mlir::OpBuilder::InsertionGuard guard(builder);
       labelBlock = builder.createBlock(builder.getBlock()->getParent());
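The emitLabel change covers a label appearing as the first statement of a function: MLIR does not allow a region's entry block to be a branch target, so even an empty entry block must be split before it can host the label. A sketch of input that needs this (hypothetical):

    int spin(int n) {
    again: // lands at the top of the entry block; a new block is split off
      if (n > 0) {
        --n;
        goto again; // branches back to the label block
      }
      return n;
    }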
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index 273ec7f..b5612d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -65,6 +65,9 @@ struct CIRGenTypeCache {
   cir::PointerType VoidPtrTy;
   cir::PointerType UInt8PtrTy;
 
+  /// void* in alloca address space
+  cir::PointerType AllocaInt8PtrTy;
+
   /// The size and alignment of a pointer into the generic address space.
   union {
     unsigned char PointerAlignInBytes;
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index e65896a..d1b91d0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -159,7 +159,7 @@ isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
     for (const clang::CXXBaseSpecifier &i : crd->bases())
       if (!isSafeToConvert(i.getType()
                                ->castAs<RecordType>()
-                               ->getOriginalDecl()
+                               ->getDecl()
                                ->getDefinitionOrSelf(),
                            cgt, alreadyChecked))
         return false;
 
@@ -279,8 +279,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
   // Process record types before the type cache lookup.
   if (const auto *recordType = dyn_cast<RecordType>(type))
-    return convertRecordDeclType(
-        recordType->getOriginalDecl()->getDefinitionOrSelf());
+    return convertRecordDeclType(recordType->getDecl()->getDefinitionOrSelf());
 
   // Has the type already been processed?
   TypeCacheTy::iterator tci = typeCache.find(ty);
@@ -421,6 +420,16 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
     break;
   }
 
+  case Type::VariableArray: {
+    const VariableArrayType *a = cast<VariableArrayType>(ty);
+    if (a->getIndexTypeCVRQualifiers() != 0)
+      cgm.errorNYI(SourceLocation(), "non trivial array types", type);
+    // VLAs resolve to the innermost element type; this matches
+    // the return of alloca, and there isn't any obviously better choice.
+    resultType = convertTypeForMem(a->getElementType());
+    break;
+  }
+
   case Type::IncompleteArray: {
     const IncompleteArrayType *arrTy = cast<IncompleteArrayType>(ty);
     if (arrTy->getIndexTypeCVRQualifiers() != 0)
@@ -619,10 +628,8 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeGlobalDeclaration(GlobalDecl gd) {
   const auto *fd = cast<FunctionDecl>(gd.getDecl());
 
   if (isa<CXXConstructorDecl>(gd.getDecl()) ||
-      isa<CXXDestructorDecl>(gd.getDecl())) {
-    cgm.errorNYI(SourceLocation(),
-                 "arrangeGlobalDeclaration for C++ constructor or destructor");
-  }
+      isa<CXXDestructorDecl>(gd.getDecl()))
+    return arrangeCXXStructorDeclaration(gd);
 
   return arrangeFunctionDeclaration(fd);
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
index 84f5977..36bab62 100644
--- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
@@ -120,12 +120,6 @@ mlir::Attribute CIRGenVTables::getVTableComponent(
   assert(!cir::MissingFeatures::vtableRelativeLayout());
 
   switch (component.getKind()) {
-  case VTableComponent::CK_CompleteDtorPointer:
-    cgm.errorNYI("getVTableComponent: CompleteDtorPointer");
-    return mlir::Attribute();
-  case VTableComponent::CK_DeletingDtorPointer:
-    cgm.errorNYI("getVTableComponent: DeletingDtorPointer");
-    return mlir::Attribute();
   case VTableComponent::CK_UnusedFunctionPointer:
     cgm.errorNYI("getVTableComponent: UnusedFunctionPointer");
     return mlir::Attribute();
@@ -148,7 +142,9 @@ mlir::Attribute CIRGenVTables::getVTableComponent(
            "expected GlobalViewAttr or ConstPtrAttr");
     return rtti;
 
-  case VTableComponent::CK_FunctionPointer: {
+  case VTableComponent::CK_FunctionPointer:
+  case VTableComponent::CK_CompleteDtorPointer:
+  case VTableComponent::CK_DeletingDtorPointer: {
     GlobalDecl gd = component.getGlobalDecl();
 
     assert(!cir::MissingFeatures::cudaSupport());
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index 25b6ecb..c05142e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -307,8 +307,8 @@ class AggValueSlot {
   /// This is set to true if some external code is responsible for setting up a
   /// destructor for the slot. Otherwise the code which constructs it should
   /// push the appropriate cleanup.
-  LLVM_PREFERRED_TYPE(bool)
-  LLVM_ATTRIBUTE_UNUSED unsigned destructedFlag : 1;
+  [[maybe_unused]]
+  LLVM_PREFERRED_TYPE(bool) unsigned destructedFlag : 1;
 
   /// This is set to true if the memory in the slot is known to be zero before
   /// the assignment into it. This means that zero fields don't need to be set.
@@ -326,16 +326,16 @@ class AggValueSlot {
   /// over. Since it's invalid in general to memcpy a non-POD C++
   /// object, it's important that this flag never be set when
   /// evaluating an expression which constructs such an object.
-  LLVM_PREFERRED_TYPE(bool)
-  LLVM_ATTRIBUTE_UNUSED unsigned aliasedFlag : 1;
+  [[maybe_unused]]
+  LLVM_PREFERRED_TYPE(bool) unsigned aliasedFlag : 1;
 
   /// This is set to true if the tail padding of this slot might overlap
   /// another object that may have already been initialized (and whose
   /// value must be preserved by this initialization). If so, we may only
   /// store up to the dsize of the type. Otherwise we can widen stores to
   /// the size of the type.
-  LLVM_PREFERRED_TYPE(bool)
-  LLVM_ATTRIBUTE_UNUSED unsigned overlapFlag : 1;
+  [[maybe_unused]]
+  LLVM_PREFERRED_TYPE(bool) unsigned overlapFlag : 1;
 
 public:
   enum IsDestructed_t { IsNotDestructed, IsDestructed };
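With CK_CompleteDtorPointer and CK_DeletingDtorPointer folded into the CK_FunctionPointer handling in CIRGenVTables.cpp above, vtables for classes with virtual destructors can now be emitted: both destructor slots resolve through the same GlobalDecl-based path. A sketch of a class whose vtable carries both entries (hypothetical):

    struct Disposable {
      virtual ~Disposable() {} // vtable gets D1 (complete) and D0 (deleting) slots
    };

    Disposable sentinel; // forces the vtable to be emitted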