Diffstat (limited to 'clang/lib/CIR/CodeGen')
-rw-r--r--  clang/lib/CIR/CodeGen/Address.h                     |  11
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCall.h                  |   2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenClass.cpp               |  19
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp                |  59
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp                | 166
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp         |  88
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp          |  30
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp            |  10
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h              |  29
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp              |  19
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp             |   4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp       | 136
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp | 115
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp                |  17
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenerator.cpp              |  21
15 files changed, 660 insertions(+), 66 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index 6f76c3e..6c927e9 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -101,6 +101,17 @@ public:
   }
 
   clang::CharUnits getAlignment() const { return alignment; }
+
+  /// Get the operation which defines this address.
+  mlir::Operation *getDefiningOp() const {
+    if (!isValid())
+      return nullptr;
+    return getPointer().getDefiningOp();
+  }
+
+  template <typename OpTy> OpTy getDefiningOp() const {
+    return mlir::dyn_cast_or_null<OpTy>(getDefiningOp());
+  }
 };
 
 } // namespace clang::CIRGen
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index a78956b..28576a1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -137,7 +137,7 @@ private:
 
   /// A data-flow flag to make sure getRValue and/or copyInto are not
   /// called twice for duplicated IR emission.
-  mutable bool isUsed;
+  [[maybe_unused]] mutable bool isUsed;
 
 public:
   clang::QualType ty;
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 50cca0e..72b9d17 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -349,12 +349,16 @@ void CIRGenFunction::emitCXXAggrConstructorCall(
   // doesn't happen, but it's not clear that it's worth it.
 
   // Optimize for a constant count.
-  auto constantCount = dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
-  if (constantCount) {
-    auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constantCount.getValue());
-    // Just skip out if the constant count is zero.
-    if (constIntAttr && constIntAttr.getUInt() == 0)
-      return;
+  if (auto constantCount = numElements.getDefiningOp<cir::ConstantOp>()) {
+    if (auto constIntAttr = constantCount.getValueAttr<cir::IntAttr>()) {
+      // Just skip out if the constant count is zero.
+      if (constIntAttr.getUInt() == 0)
+        return;
+      // Otherwise, emit the check.
+    }
+
+    if (constantCount.use_empty())
+      constantCount.erase();
   } else {
     // Otherwise, emit the check.
     cgm.errorNYI(e->getSourceRange(), "dynamic-length array expression");
@@ -417,9 +421,6 @@ void CIRGenFunction::emitCXXAggrConstructorCall(
       builder.create<cir::YieldOp>(loc);
     });
   }
-
-  if (constantCount.use_empty())
-    constantCount.erase();
 }
 
 void CIRGenFunction::emitDelegateCXXConstructorCall(
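The two getDefiningOp overloads added to Address mirror mlir::Value::getDefiningOp<OpTy>(), which the CIRGenClass.cpp hunk above switches to. A minimal sketch of the call-site pattern this enables; the surrounding helper is hypothetical, not part of the patch:

    // Hypothetical helper: peek through an Address to the alloca backing it.
    // getDefiningOp<OpTy>() yields a null op when the pointer is a block
    // argument or the Address is invalid, so the condition is safe either way.
    void inspectLocalSlot(clang::CIRGen::Address addr) {
      if (auto allocaOp = addr.getDefiningOp<cir::AllocaOp>())
        allocaOp->dump(); // e.g. inspect or rewrite the backing cir.alloca
    }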
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 6527fb5..9cdbebe 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -24,7 +24,8 @@ using namespace clang;
 using namespace clang::CIRGen;
 
 CIRGenFunction::AutoVarEmission
-CIRGenFunction::emitAutoVarAlloca(const VarDecl &d) {
+CIRGenFunction::emitAutoVarAlloca(const VarDecl &d,
+                                  mlir::OpBuilder::InsertPoint ip) {
   QualType ty = d.getType();
   if (ty.getAddressSpace() != LangAS::Default)
     cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: address space");
@@ -50,7 +51,8 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d) {
     // A normal fixed sized variable becomes an alloca in the entry block,
     mlir::Type allocaTy = convertTypeForMem(ty);
     // Create the temp alloca and declare variable using it.
-    address = createTempAlloca(allocaTy, alignment, loc, d.getName());
+    address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
+                               /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
     declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()),
             alignment);
 
   emission.Addr = address;
@@ -520,7 +522,7 @@ void CIRGenFunction::emitExprAsInit(const Expr *init, const ValueDecl *d,
   llvm_unreachable("bad evaluation kind");
 }
 
-void CIRGenFunction::emitDecl(const Decl &d) {
+void CIRGenFunction::emitDecl(const Decl &d, bool evaluateConditionDecl) {
   switch (d.getKind()) {
   case Decl::BuiltinTemplate:
   case Decl::TranslationUnit:
@@ -608,11 +610,14 @@ void CIRGenFunction::emitDecl(const Decl &d) {
   case Decl::UsingDirective: // using namespace X; [C++]
     assert(!cir::MissingFeatures::generateDebugInfo());
     return;
-  case Decl::Var: {
+  case Decl::Var:
+  case Decl::Decomposition: {
    const VarDecl &vd = cast<VarDecl>(d);
    assert(vd.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    emitVarDecl(vd);
+    if (evaluateConditionDecl)
+      maybeEmitDeferredVarDeclInit(&vd);
    return;
   }
   case Decl::OpenACCDeclare:
@@ -632,7 +637,6 @@ void CIRGenFunction::emitDecl(const Decl &d) {
   case Decl::ImplicitConceptSpecialization:
   case Decl::TopLevelStmt:
   case Decl::UsingPack:
-  case Decl::Decomposition: // This could be moved to join Decl::Var
   case Decl::OMPDeclareReduction:
   case Decl::OMPDeclareMapper:
     cgm.errorNYI(d.getSourceRange(),
@@ -649,6 +653,27 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
   assert(!cir::MissingFeatures::sanitizers());
 }
 
+namespace {
+struct DestroyObject final : EHScopeStack::Cleanup {
+  DestroyObject(Address addr, QualType type,
+                CIRGenFunction::Destroyer *destroyer)
+      : addr(addr), type(type), destroyer(destroyer) {}
+
+  Address addr;
+  QualType type;
+  CIRGenFunction::Destroyer *destroyer;
+
+  void emit(CIRGenFunction &cgf) override {
+    cgf.emitDestroy(addr, type, destroyer);
+  }
+};
+} // namespace
+
+void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
+                                 QualType type, Destroyer *destroyer) {
+  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
+}
+
 /// Destroys all the elements of the given array, beginning from last to first.
 /// The array cannot be zero-length.
 ///
@@ -736,22 +761,6 @@ CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
   llvm_unreachable("Unknown DestructionKind");
 }
 
-namespace {
-struct DestroyObject final : EHScopeStack::Cleanup {
-  DestroyObject(Address addr, QualType type,
-                CIRGenFunction::Destroyer *destroyer)
-      : addr(addr), type(type), destroyer(destroyer) {}
-
-  Address addr;
-  QualType type;
-  CIRGenFunction::Destroyer *destroyer;
-
-  void emit(CIRGenFunction &cgf) override {
-    cgf.emitDestroy(addr, type, destroyer);
-  }
-};
-} // namespace
-
 /// Enter a destroy cleanup for the given local variable.
 void CIRGenFunction::emitAutoVarTypeCleanup(
     const CIRGenFunction::AutoVarEmission &emission,
@@ -797,3 +806,11 @@ void CIRGenFunction::emitAutoVarTypeCleanup(
   assert(!cir::MissingFeatures::ehCleanupFlags());
   ehStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
 }
+
+void CIRGenFunction::maybeEmitDeferredVarDeclInit(const VarDecl *vd) {
+  if (auto *dd = dyn_cast_if_present<DecompositionDecl>(vd)) {
+    for (auto *b : dd->flat_bindings())
+      if (auto *hd = b->getHoldingVar())
+        emitVarDecl(*hd);
+  }
+}
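For reference, a snippet that now reaches the Decl::Decomposition path (illustrative only, not part of the patch): emitDecl emits the DecompositionDecl itself, and maybeEmitDeferredVarDeclInit later emits any holding variables of its bindings.

    struct Pair { int first, second; };
    Pair make();

    void use() {
      auto [a, b] = make(); // DecompositionDecl handled by emitDecl; holding
                            // vars, if any, are emitted afterwards by
                            // maybeEmitDeferredVarDeclInit.
      (void)a;
      (void)b;
    }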
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index c18498f..761d8d3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -584,6 +584,15 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
     return lv;
   }
 
+  if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
+    if (e->refersToEnclosingVariableOrCapture()) {
+      assert(!cir::MissingFeatures::lambdaCaptures());
+      cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
+      return LValue();
+    }
+    return emitLValue(bd->getBinding());
+  }
+
   cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
   return LValue();
 }
@@ -712,8 +721,8 @@ static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
 
 static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
   // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
-  if (auto constantOp = dyn_cast<cir::ConstantOp>(idx.getDefiningOp()))
-    return mlir::dyn_cast<cir::IntAttr>(constantOp.getValue());
+  if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
+    return constantOp.getValueAttr<cir::IntAttr>();
   return {};
 }
 
@@ -721,8 +730,7 @@ static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
                                       CharUnits eltSize) {
   // If we have a constant index, we can use the exact offset of the
   // element we're accessing.
-  const cir::IntAttr constantIdx = getConstantIndexOrNull(idx);
-  if (constantIdx) {
+  if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
     const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
     return arrayAlign.alignmentAtOffset(offset);
   }
@@ -1096,6 +1104,151 @@ void CIRGenFunction::emitAnyExprToMem(const Expr *e, Address location,
   llvm_unreachable("bad evaluation kind");
 }
 
+static Address createReferenceTemporary(CIRGenFunction &cgf,
+                                        const MaterializeTemporaryExpr *m,
+                                        const Expr *inner) {
+  // TODO(cir): cgf.getTargetHooks();
+  switch (m->getStorageDuration()) {
+  case SD_FullExpression:
+  case SD_Automatic: {
+    QualType ty = inner->getType();
+
+    assert(!cir::MissingFeatures::mergeAllConstants());
+
+    // The temporary memory should be created in the same scope as the
+    // extending declaration of the temporary materialization expression.
+    cir::AllocaOp extDeclAlloca;
+    if (const ValueDecl *extDecl = m->getExtendingDecl()) {
+      auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
+      if (extDeclAddrIter != cgf.localDeclMap.end())
+        extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
+    }
+    mlir::OpBuilder::InsertPoint ip;
+    if (extDeclAlloca)
+      ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
+    return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
+                             cgf.getCounterRefTmpAsString(),
+                             /*alloca=*/nullptr, ip);
+  }
+  case SD_Thread:
+  case SD_Static: {
+    cgf.cgm.errorNYI(
+        m->getSourceRange(),
+        "createReferenceTemporary: static/thread storage duration");
+    return Address::invalid();
+  }
+
+  case SD_Dynamic:
+    llvm_unreachable("temporary can't have dynamic storage duration");
+  }
+  llvm_unreachable("unknown storage duration");
+}
+
+static void pushTemporaryCleanup(CIRGenFunction &cgf,
+                                 const MaterializeTemporaryExpr *m,
+                                 const Expr *e, Address referenceTemporary) {
+  // Objective-C++ ARC:
+  //   If we are binding a reference to a temporary that has ownership, we
+  //   need to perform retain/release operations on the temporary.
+  //
+  // FIXME(ogcg): This should be looking at e, not m.
+  if (m->getType().getObjCLifetime()) {
+    cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
+    return;
+  }
+
+  CXXDestructorDecl *referenceTemporaryDtor = nullptr;
+  if (const clang::RecordType *rt = e->getType()
+                                        ->getBaseElementTypeUnsafe()
+                                        ->getAs<clang::RecordType>()) {
+    // Get the destructor for the reference temporary.
+    auto *classDecl = cast<CXXRecordDecl>(rt->getDecl());
+    if (!classDecl->hasTrivialDestructor())
+      referenceTemporaryDtor = classDecl->getDestructor();
+  }
+
+  if (!referenceTemporaryDtor)
+    return;
+
+  // Call the destructor for the temporary.
+  switch (m->getStorageDuration()) {
+  case SD_Static:
+  case SD_Thread:
+    cgf.cgm.errorNYI(e->getSourceRange(),
+                     "pushTemporaryCleanup: static/thread storage duration");
+    return;
+
+  case SD_FullExpression:
+    cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
+                    CIRGenFunction::destroyCXXObject);
+    break;
+
+  case SD_Automatic:
+    cgf.cgm.errorNYI(e->getSourceRange(),
+                     "pushTemporaryCleanup: automatic storage duration");
+    break;
+
+  case SD_Dynamic:
+    llvm_unreachable("temporary cannot have dynamic storage duration");
+  }
+}
+
+LValue CIRGenFunction::emitMaterializeTemporaryExpr(
+    const MaterializeTemporaryExpr *m) {
+  const Expr *e = m->getSubExpr();
+
+  assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
+          !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
+         "Reference should never be pseudo-strong!");
+
+  // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
+  // as that will cause the lifetime adjustment to be lost for ARC
+  auto ownership = m->getType().getObjCLifetime();
+  if (ownership != Qualifiers::OCL_None &&
+      ownership != Qualifiers::OCL_ExplicitNone) {
+    cgm.errorNYI(e->getSourceRange(),
+                 "emitMaterializeTemporaryExpr: ObjCLifetime");
+    return {};
+  }
+
+  SmallVector<const Expr *, 2> commaLHSs;
+  SmallVector<SubobjectAdjustment, 2> adjustments;
+  e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
+
+  for (const Expr *ignored : commaLHSs)
+    emitIgnoredExpr(ignored);
+
+  if (isa<OpaqueValueExpr>(e)) {
+    cgm.errorNYI(e->getSourceRange(),
+                 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
+    return {};
+  }
+
+  // Create and initialize the reference temporary.
+  Address object = createReferenceTemporary(*this, m, e);
+
+  if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
+    // TODO(cir): add something akin to stripPointerCasts() to ptr above
+    cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
+    return {};
+  } else {
+    assert(!cir::MissingFeatures::emitLifetimeMarkers());
+    emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
+  }
+  pushTemporaryCleanup(*this, m, e, object);
+
+  // Perform derived-to-base casts and/or field accesses, to get from the
+  // temporary object we created (and, potentially, for which we extended
+  // the lifetime) to the subobject we're binding the reference to.
+  if (!adjustments.empty()) {
+    cgm.errorNYI(e->getSourceRange(),
+                 "emitMaterializeTemporaryExpr: Adjustments");
+    return {};
+  }
+
+  return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
+}
+
 LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr *e) {
   if (e->isFileScope()) {
     cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
@@ -1472,9 +1625,10 @@ Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e) {
   if (e->getType()->isVariableArrayType())
     return addr;
 
-  auto pointeeTy = mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
+  [[maybe_unused]] auto pointeeTy =
+      mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
 
-  mlir::Type arrayTy = convertType(e->getType());
+  [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
   assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
   assert(pointeeTy == arrayTy);
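A source-level example of what the new MaterializeTemporaryExpr path covers (illustrative, not from the patch): binding a reference to a prvalue materializes a "ref.tmp" temporary, initializes it through emitAnyExprToMem, and registers its destructor via pushTemporaryCleanup.

    struct S { S(); ~S(); };
    void take(const S &s);

    void caller() {
      // SD_FullExpression: a ref.tmp alloca is created and initialized, and
      // destroyCXXObject is pushed as a NormalAndEHCleanup that runs at the
      // end of the full-expression.
      take(S());
    }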
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index a09d739..3aa170e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -91,6 +91,14 @@ public:
   }
 
   mlir::Value VisitUnaryDeref(const Expr *e);
+
+  mlir::Value VisitUnaryPlus(const UnaryOperator *e);
+
+  mlir::Value VisitPlusMinus(const UnaryOperator *e, cir::UnaryOpKind kind,
+                             QualType promotionType);
+
+  mlir::Value VisitUnaryMinus(const UnaryOperator *e);
+
   mlir::Value VisitUnaryNot(const UnaryOperator *e);
 
   struct BinOpInfo {
@@ -110,6 +118,7 @@ public:
 
   mlir::Value emitBinAdd(const BinOpInfo &op);
   mlir::Value emitBinSub(const BinOpInfo &op);
+  mlir::Value emitBinMul(const BinOpInfo &op);
 
   QualType getPromotionType(QualType ty, bool isDivOpCode = false) {
     if (auto *complexTy = ty->getAs<ComplexType>()) {
@@ -142,16 +151,20 @@ public:
 
   HANDLEBINOP(Add)
   HANDLEBINOP(Sub)
+  HANDLEBINOP(Mul)
 #undef HANDLEBINOP
 };
 } // namespace
 
+#ifndef NDEBUG
+// Only used in asserts
 static const ComplexType *getComplexType(QualType type) {
   type = type.getCanonicalType();
   if (const ComplexType *comp = dyn_cast<ComplexType>(type))
     return comp;
   return cast<ComplexType>(cast<AtomicType>(type)->getValueType());
 }
+#endif // NDEBUG
 
 LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
                                                mlir::Value &value) {
@@ -282,6 +295,41 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
   llvm_unreachable("unknown cast resulting in complex value");
 }
 
+mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e) {
+  QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
+  mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Plus, promotionTy);
+  if (!promotionTy.isNull()) {
+    cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryPlus emitUnPromotedValue");
+    return {};
+  }
+  return result;
+}
+
+mlir::Value ComplexExprEmitter::VisitPlusMinus(const UnaryOperator *e,
+                                               cir::UnaryOpKind kind,
+                                               QualType promotionType) {
+  assert((kind == cir::UnaryOpKind::Plus ||
+          kind == cir::UnaryOpKind::Minus) &&
+         "Invalid UnaryOp kind for ComplexType Plus or Minus");
+
+  mlir::Value op;
+  if (!promotionType.isNull())
+    op = cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
+  else
+    op = Visit(e->getSubExpr());
+  return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()), kind, op);
+}
+
+mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e) {
+  QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
+  mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Minus, promotionTy);
+  if (!promotionTy.isNull()) {
+    cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryMinus emitUnPromotedValue");
+    return {};
+  }
+  return result;
+}
+
 mlir::Value ComplexExprEmitter::emitConstant(
     const CIRGenFunction::ConstantEmission &constant, Expr *e) {
   assert(constant && "not a constant");
@@ -534,13 +582,22 @@ mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
     return emitBin##OP(emitBinOps(bo, promotionTy));
       HANDLE_BINOP(Add)
       HANDLE_BINOP(Sub)
+      HANDLE_BINOP(Mul)
 #undef HANDLE_BINOP
     default:
       break;
     }
-  } else if (isa<UnaryOperator>(e)) {
-    cgf.cgm.errorNYI("emitPromoted UnaryOperator");
-    return {};
+  } else if (const auto *unaryOp = dyn_cast<UnaryOperator>(e)) {
+    switch (unaryOp->getOpcode()) {
+    case UO_Minus:
+    case UO_Plus: {
+      auto kind = unaryOp->getOpcode() == UO_Plus ? cir::UnaryOpKind::Plus
+                                                  : cir::UnaryOpKind::Minus;
+      return VisitPlusMinus(unaryOp, kind, promotionTy);
+    }
+    default:
+      break;
+    }
   }
 
   mlir::Value result = Visit(const_cast<Expr *>(e));
@@ -585,6 +642,31 @@ mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &op) {
   return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
 }
 
+static cir::ComplexRangeKind
+getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
+  switch (range) {
+  case LangOptions::CX_Full:
+    return cir::ComplexRangeKind::Full;
+  case LangOptions::CX_Improved:
+    return cir::ComplexRangeKind::Improved;
+  case LangOptions::CX_Promoted:
+    return cir::ComplexRangeKind::Promoted;
+  case LangOptions::CX_Basic:
+    return cir::ComplexRangeKind::Basic;
+  case LangOptions::CX_None:
+    // The default value for ComplexRangeKind is Full if no option is selected.
+    return cir::ComplexRangeKind::Full;
+  }
+}
+
+mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &op) {
+  assert(!cir::MissingFeatures::fastMathFlags());
+  assert(!cir::MissingFeatures::cgFPOptionsRAII());
+  cir::ComplexRangeKind rangeKind =
+      getComplexRangeAttr(op.fpFeatures.getComplexRange());
+  return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
+}
+
 LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
   assert(e->getOpcode() == BO_Assign && "Expected assign op");
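Minimal source forms that exercise the new complex emitter paths (illustrative; the comments summarize the ops used above, not exact CIR assembly):

    void f(float _Complex a, float _Complex b) {
      float _Complex p = +a;    // VisitUnaryPlus  -> cir unary plus
      float _Complex n = -a;    // VisitUnaryMinus -> cir unary minus
      float _Complex m = a * b; // emitBinMul -> cir::ComplexMulOp, range kind
                                // from -fcomplex-arithmetic= (CX_None
                                // defaults to Full)
    }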
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 2523b0f..32c1c1a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -48,8 +48,8 @@ struct BinOpInfo {
   /// Check if the binop can result in integer overflow.
   bool mayHaveIntegerOverflow() const {
     // Without constant input, we can't rule out overflow.
-    auto lhsci = dyn_cast<cir::ConstantOp>(lhs.getDefiningOp());
-    auto rhsci = dyn_cast<cir::ConstantOp>(rhs.getDefiningOp());
+    auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
+    auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
     if (!lhsci || !rhsci)
       return true;
 
@@ -439,7 +439,7 @@ public:
       value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
     } else if (type->isIntegerType()) {
       QualType promotedType;
-      bool canPerformLossyDemotionCheck = false;
+      [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
       if (cgf.getContext().isPromotableIntegerType(type)) {
         promotedType = cgf.getContext().getPromotedIntegerType(type);
         assert(promotedType != type && "Shouldn't promote to the same type.");
@@ -626,6 +626,7 @@ public:
 
   mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
 
+  mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
   mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
     return cgf.emitCXXNewExpr(e);
   }
@@ -1217,6 +1218,29 @@ mlir::Value ScalarExprEmitter::emitCompoundAssign(
   return emitLoadOfLValue(lhs, e->getExprLoc());
 }
 
+mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
+  mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
+  mlir::OpBuilder &builder = cgf.builder;
+
+  auto scope = cir::ScopeOp::create(
+      builder, scopeLoc,
+      /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
+        CIRGenFunction::LexicalScope lexScope{cgf, loc,
+                                              builder.getInsertionBlock()};
+        mlir::Value scopeYieldVal = Visit(e->getSubExpr());
+        if (scopeYieldVal) {
+          // Defend against dominance problems caused by jumps out of
+          // expression evaluation through the shared cleanup block.
+          lexScope.forceCleanup();
+          cir::YieldOp::create(builder, loc, scopeYieldVal);
+          yieldTy = scopeYieldVal.getType();
+        }
+      });
+
+  return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
+}
+
 } // namespace
 
 LValue
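Rough shape of what VisitExprWithCleanups produces; the CIR assembly below is approximate (it assumes the usual cir.scope/cir.yield printing) and is not taken from the patch:

    // C++:  int r = makeS().get();   // ExprWithCleanups around the call
    //
    // CIR (approximate):
    //   %r = cir.scope {
    //     ... materialize the temporary, call get(), run cleanups ...
    //     cir.yield %ret : !s32i
    //   } : !s32i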
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index c65d025..3ed1e30 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -216,8 +216,7 @@ void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
 void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
                              mlir::Location loc, CharUnits alignment,
                              bool isParam) {
-  const auto *namedVar = dyn_cast_or_null<NamedDecl>(var);
-  assert(namedVar && "Needs a named decl");
+  assert(isa<NamedDecl>(var) && "Needs a named decl");
   assert(!cir::MissingFeatures::cgfSymbolTable());
 
   auto allocaOp = cast<cir::AllocaOp>(addrVal.getDefiningOp());
@@ -801,6 +800,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
   case Expr::CXXDynamicCastExprClass:
   case Expr::ImplicitCastExprClass:
     return emitCastLValue(cast<CastExpr>(e));
+  case Expr::MaterializeTemporaryExprClass:
+    return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
   }
 }
 
@@ -811,6 +812,10 @@ static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
   return std::string(out.str());
 }
 
+std::string CIRGenFunction::getCounterRefTmpAsString() {
+  return getVersionedTmpName("ref.tmp", counterRefTmp++);
+}
+
 std::string CIRGenFunction::getCounterAggTmpAsString() {
   return getVersionedTmpName("agg.tmp", counterAggTmp++);
 }
@@ -943,6 +948,7 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
   case Type::HLSLInlineSpirv:
   case Type::PredefinedSugar:
     cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
+    break;
 
 #define TYPE(Class, Base)
 #define ABSTRACT_TYPE(Class, Base)
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 603f750..68d54bb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -325,7 +325,9 @@ public:
   };
 
   /// Hold counters for incrementally naming temporaries
+  unsigned counterRefTmp = 0;
   unsigned counterAggTmp = 0;
+  std::string getCounterRefTmpAsString();
   std::string getCounterAggTmpAsString();
 
   /// Helpers to convert Clang's SourceLocation to an MLIR Location.
@@ -604,6 +606,19 @@ public:
   void popCleanupBlocks(size_t oldCleanupStackDepth);
   void popCleanupBlock();
 
+  /// Push a cleanup to be run at the end of the current full-expression. Safe
+  /// against the possibility that we're currently inside a
+  /// conditionally-evaluated expression.
+  template <class T, class... As>
+  void pushFullExprCleanup(CleanupKind kind, As... a) {
+    // If we're not in a conditional branch, or if none of the
+    // arguments requires saving, then use the unconditional cleanup.
+    if (!isInConditionalBranch())
+      return ehStack.pushCleanup<T>(kind, a...);
+
+    cgm.errorNYI("pushFullExprCleanup in conditional branch");
+  }
+
   /// Enters a new scope for capturing cleanups, all of which
   /// will be executed once the scope is exited.
   class RunCleanupsScope {
@@ -619,6 +634,7 @@ public:
 
   protected:
     CIRGenFunction &cgf;
+
   public:
     /// Enter a new cleanup scope.
     explicit RunCleanupsScope(CIRGenFunction &cgf)
        : performCleanup(true), cgf(cgf) {}
@@ -801,6 +817,9 @@ public:
 
   static Destroyer destroyCXXObject;
 
+  void pushDestroy(CleanupKind kind, Address addr, QualType type,
+                   Destroyer *destroyer);
+
   Destroyer *getDestroyer(clang::QualType::DestructionKind kind);
 
   /// ----------------------
@@ -858,7 +877,8 @@ public:
 
   Address emitArrayToPointerDecay(const Expr *array);
 
-  AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d);
+  AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
+                                    mlir::OpBuilder::InsertPoint ip = {});
 
   /// Emit code and set up symbol table for a variable declaration with auto,
   /// register, or no storage class specifier. These turn into simple stack
@@ -870,6 +890,8 @@ public:
   void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
                               clang::QualType::DestructionKind dtorKind);
 
+  void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
+
   void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
                            CXXCtorInitializer *baseInit);
 
@@ -1059,7 +1081,7 @@ public:
 
   void emitCompoundStmtWithoutScope(const clang::CompoundStmt &s);
 
-  void emitDecl(const clang::Decl &d);
+  void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
 
   mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
   LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
@@ -1136,6 +1158,8 @@ public:
                         const clang::FieldDecl *field,
                         llvm::StringRef fieldName);
 
+  LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
+
   LValue emitMemberExpr(const MemberExpr *e);
 
   /// Given an expression with a pointer type, emit the value and compute our
@@ -1375,6 +1399,7 @@ public:
     mlir::Location beginLoc;
     mlir::Value varValue;
     std::string name;
+    QualType baseType;
    llvm::SmallVector<mlir::Value> bounds;
   };
   // Gets the collection of info required to lower an OpenACC clause or cache
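A case that distinguishes the two branches of pushFullExprCleanup (illustrative only): an unconditionally evaluated temporary goes straight to ehStack.pushCleanup, while a temporary on the right of && is conditionally evaluated and currently hits the errorNYI path.

    struct S { S(); ~S(); };
    bool pred(const S &);

    void f(bool cond) {
      bool r1 = pred(S());         // unconditional: ehStack.pushCleanup<T>(...)
      bool r2 = cond && pred(S()); // conditional branch: errorNYI for now
    }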
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 623b84f..b143682 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -656,8 +656,6 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty,
 
 void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd,
                                            bool isTentative) {
-  const QualType astTy = vd->getType();
-
   if (getLangOpts().OpenCL || getLangOpts().OpenMPIsTargetDevice) {
     errorNYI(vd->getSourceRange(), "emit OpenCL/OpenMP global variable");
     return;
@@ -701,7 +699,7 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd,
       // never attempt to emit a tentative definition if a real one
      // exists. A use may still exist, however, so we still may need
      // to do a RAUW.
-      assert(!astTy->isIncompleteType() && "Unexpected incomplete type");
+      assert(!vd->getType()->isIncompleteType() && "Unexpected incomplete type");
       init = builder.getZeroInitAttr(convertType(vd->getType()));
     } else {
       emitter.emplace(*this);
@@ -1308,8 +1306,13 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
     break;
   }
 
-  case Decl::Var: {
+  case Decl::Var:
+  case Decl::Decomposition: {
     auto *vd = cast<VarDecl>(decl);
+    if (isa<DecompositionDecl>(decl)) {
+      errorNYI(decl->getSourceRange(), "global variable decompositions");
+      break;
+    }
     emitGlobal(vd);
     break;
   }
@@ -1331,8 +1334,14 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
     break;
 
   // No code generation needed.
-  case Decl::UsingShadow:
+  case Decl::ClassTemplate:
+  case Decl::Concept:
+  case Decl::CXXDeductionGuide:
   case Decl::Empty:
+  case Decl::FunctionTemplate:
+  case Decl::StaticAssert:
+  case Decl::TypeAliasTemplate:
+  case Decl::UsingShadow:
     break;
 
   case Decl::CXXConstructor:
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
index 49ff124..32095cb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
@@ -119,7 +119,7 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) {
 
   if (const auto *memExpr = dyn_cast<MemberExpr>(curVarExpr))
     return {exprLoc, emitMemberExpr(memExpr).getPointer(), exprString,
-            std::move(bounds)};
+            curVarExpr->getType(), std::move(bounds)};
 
   // Sema has made sure that only 4 types of things can get here, array
   // subscript, array section, member expr, or DRE to a var decl (or the
@@ -127,5 +127,5 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) {
   // right.
   const auto *dre = cast<DeclRefExpr>(curVarExpr);
   return {exprLoc, emitDeclRefLValue(dre).getPointer(), exprString,
-          std::move(bounds)};
+          curVarExpr->getType(), std::move(bounds)};
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
index e45d3b8f..5a6e665 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
@@ -12,6 +12,7 @@
 
 #include <type_traits>
 
+#include "CIRGenCXXABI.h"
 #include "CIRGenFunction.h"
 
 #include "clang/AST/ExprCXX.h"
@@ -355,6 +356,110 @@ class OpenACCClauseCIREmitter final
     }
   }
 
+  template <typename RecipeTy>
+  RecipeTy getOrCreateRecipe(ASTContext &astCtx, const Expr *varRef,
+                             DeclContext *dc, QualType baseType,
+                             mlir::Value mainOp) {
+    mlir::ModuleOp mod =
+        builder.getBlock()->getParent()->getParentOfType<mlir::ModuleOp>();
+
+    std::string recipeName;
+    {
+      llvm::raw_string_ostream stream(recipeName);
+      if constexpr (std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
+        stream << "privatization_";
+      } else if constexpr (std::is_same_v<RecipeTy,
+                                          mlir::acc::FirstprivateRecipeOp>) {
+        stream << "firstprivatization_";
+
+      } else if constexpr (std::is_same_v<RecipeTy,
+                                          mlir::acc::ReductionRecipeOp>) {
+        stream << "reduction_";
+        // We don't have the reduction operation here well enough to know how
+        // to spell this correctly (+ == 'add', etc), so when we implement
+        // 'reduction' we have to do that here.
+        cgf.cgm.errorNYI(varRef->getSourceRange(),
+                         "OpenACC reduction recipe creation");
+      } else {
+        static_assert(!sizeof(RecipeTy), "Unknown Recipe op kind");
+      }
+
+      MangleContext &mc = cgf.cgm.getCXXABI().getMangleContext();
+      mc.mangleCanonicalTypeName(baseType, stream);
+    }
+
+    if (auto recipe = mod.lookupSymbol<RecipeTy>(recipeName))
+      return recipe;
+
+    mlir::Location loc = cgf.cgm.getLoc(varRef->getBeginLoc());
+    mlir::Location locEnd = cgf.cgm.getLoc(varRef->getEndLoc());
+
+    mlir::OpBuilder modBuilder(mod.getBodyRegion());
+    auto recipe =
+        RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType());
+
+    // Magic-up a var-decl so we can use normal init/destruction operations for
+    // a variable declaration.
+    VarDecl &tempDecl = *VarDecl::Create(
+        astCtx, dc, varRef->getBeginLoc(), varRef->getBeginLoc(),
+        &astCtx.Idents.get("openacc.private.init"), baseType,
+        astCtx.getTrivialTypeSourceInfo(baseType), SC_Auto);
+    CIRGenFunction::AutoVarEmission tempDeclEmission{
+        CIRGenFunction::AutoVarEmission::invalid()};
+
+    // Init section.
+    {
+      llvm::SmallVector<mlir::Type> argsTys{mainOp.getType()};
+      llvm::SmallVector<mlir::Location> argsLocs{loc};
+      builder.createBlock(&recipe.getInitRegion(), recipe.getInitRegion().end(),
+                          argsTys, argsLocs);
+      builder.setInsertionPointToEnd(&recipe.getInitRegion().back());
+
+      if constexpr (!std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
+        // We have only implemented 'init' for private, so make this NYI until
+        // we have explicitly implemented everything.
+        cgf.cgm.errorNYI(varRef->getSourceRange(),
+                         "OpenACC non-private recipe init");
+      }
+
+      tempDeclEmission =
+          cgf.emitAutoVarAlloca(tempDecl, builder.saveInsertionPoint());
+      cgf.emitAutoVarInit(tempDeclEmission);
+
+      mlir::acc::YieldOp::create(builder, locEnd);
+    }
+
+    // Copy section.
+    if constexpr (std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp> ||
+                  std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
+      // TODO: OpenACC: 'private' doesn't emit this, but for the other two we
+      // have to figure out what 'copy' means here.
+      cgf.cgm.errorNYI(varRef->getSourceRange(),
+                       "OpenACC record type privatization copy section");
+    }
+
+    // Destroy section (doesn't currently exist).
+    if (tempDecl.needsDestruction(cgf.getContext())) {
+      llvm::SmallVector<mlir::Type> argsTys{mainOp.getType()};
+      llvm::SmallVector<mlir::Location> argsLocs{loc};
+      mlir::Block *block = builder.createBlock(&recipe.getDestroyRegion(),
+                                               recipe.getDestroyRegion().end(),
+                                               argsTys, argsLocs);
+      builder.setInsertionPointToEnd(&recipe.getDestroyRegion().back());
+
+      mlir::Type elementTy =
+          mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
+      Address addr{block->getArgument(0), elementTy,
+                   cgf.getContext().getDeclAlign(&tempDecl)};
+      cgf.emitDestroy(addr, baseType,
+                      cgf.getDestroyer(QualType::DK_cxx_destructor));
+
+      mlir::acc::YieldOp::create(builder, locEnd);
+    }
+
+    return recipe;
+  }
+
 public:
   OpenACCClauseCIREmitter(OpTy &operation, CIRGen::CIRGenFunction &cgf,
                           CIRGen::CIRGenBuilderTy &builder,
@@ -971,6 +1076,37 @@ public:
       llvm_unreachable("Unknown construct kind in VisitAttachClause");
     }
   }
+
+  void VisitPrivateClause(const OpenACCPrivateClause &clause) {
+    if constexpr (isOneOfTypes<OpTy, mlir::acc::ParallelOp, mlir::acc::SerialOp,
+                               mlir::acc::LoopOp>) {
+      for (const Expr *var : clause.getVarList()) {
+        CIRGenFunction::OpenACCDataOperandInfo opInfo =
+            cgf.getOpenACCDataOperandInfo(var);
+        auto privateOp = mlir::acc::PrivateOp::create(
+            builder, opInfo.beginLoc, opInfo.varValue, /*structured=*/true,
+            /*implicit=*/false, opInfo.name, opInfo.bounds);
+        privateOp.setDataClause(mlir::acc::DataClause::acc_private);
+
+        {
+          mlir::OpBuilder::InsertionGuard guardCase(builder);
+          auto recipe = getOrCreateRecipe<mlir::acc::PrivateRecipeOp>(
+              cgf.getContext(), var, Decl::castToDeclContext(cgf.curFuncDecl),
+              opInfo.baseType, privateOp.getResult());
+          // TODO: OpenACC: The dialect is going to change in the near future
+          // to have these be on a different operation, so when that changes,
+          // we probably need to change these here.
+          operation.addPrivatization(builder.getContext(), privateOp, recipe);
+        }
+      }
+    } else if constexpr (isCombinedType<OpTy>) {
+      // Despite this being valid on ParallelOp or SerialOp, for a combined
+      // type the clause applies to the 'loop'.
+      applyToLoopOp(clause);
+    } else {
+      llvm_unreachable("Unknown construct kind in VisitPrivateClause");
+    }
+  }
 };
 
 template <typename OpTy>
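A minimal OpenACC use of the new private-clause lowering (illustrative): each private variable gets an acc.private data operand plus a module-level acc.private.recipe, named "privatization_" followed by the mangled type name, whose init region is built with emitAutoVarAlloca/emitAutoVarInit as above.

    void f(int n) {
      int x = 0;
    #pragma acc parallel private(x)
      {
        x = n;
      }
    }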
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index e4ec380..8b01d41a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -41,7 +41,7 @@ struct CIRRecordLowering final {
   // member type that ensures correct rounding.
   struct MemberInfo final {
     CharUnits offset;
-    enum class InfoKind { Field, Base } kind;
+    enum class InfoKind { VFPtr, Field, Base } kind;
     mlir::Type data;
     union {
       const FieldDecl *fieldDecl;
@@ -87,10 +87,15 @@ struct CIRRecordLowering final {
   accumulateBitFields(RecordDecl::field_iterator field,
                       RecordDecl::field_iterator fieldEnd);
 
+  mlir::Type getVFPtrType();
+
   bool isAAPCS() const {
     return astContext.getTargetInfo().getABI().starts_with("aapcs");
   }
 
+  /// Helper function to check if the target machine is BigEndian.
+  bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }
+
   CharUnits bitsToCharUnits(uint64_t bitOffset) {
     return astContext.toCharUnitsFromBits(bitOffset);
   }
@@ -771,7 +776,104 @@ void CIRRecordLowering::computeVolatileBitfields() {
       !cirGenTypes.getCGModule().getCodeGenOpts().AAPCSBitfieldWidth)
     return;
 
-  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+  for (auto &[field, info] : bitFields) {
+    mlir::Type resLTy = cirGenTypes.convertTypeForMem(field->getType());
+
+    if (astContext.toBits(astRecordLayout.getAlignment()) <
+        getSizeInBits(resLTy).getQuantity())
+      continue;
+
+    // CIRRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
+    // for big-endian targets, but it assumes a container of width
+    // info.storageSize. Since AAPCS uses a different container size (width
+    // of the type), we first undo that calculation here and redo it once
+    // the bit-field offset within the new container is calculated.
+    const unsigned oldOffset =
+        isBigEndian() ? info.storageSize - (info.offset + info.size)
+                      : info.offset;
+    // Offset to the bit-field from the beginning of the struct.
+    const unsigned absoluteOffset =
+        astContext.toBits(info.storageOffset) + oldOffset;
+
+    // Container size is the width of the bit-field type.
+    const unsigned storageSize = getSizeInBits(resLTy).getQuantity();
+    // Nothing to do if the access uses the desired
+    // container width and is naturally aligned.
+    if (info.storageSize == storageSize && (oldOffset % storageSize == 0))
+      continue;
+
+    // Offset within the container.
+    unsigned offset = absoluteOffset & (storageSize - 1);
+    // Bail out if an aligned load of the container cannot cover the entire
+    // bit-field. This can happen, for example, if the bit-field is part of a
+    // packed struct. AAPCS does not define access rules for such cases, so we
+    // let clang follow its own rules.
+    if (offset + info.size > storageSize)
+      continue;
+
+    // Re-adjust offsets for big-endian targets.
+    if (isBigEndian())
+      offset = storageSize - (offset + info.size);
+
+    const CharUnits storageOffset =
+        astContext.toCharUnitsFromBits(absoluteOffset & ~(storageSize - 1));
+    const CharUnits end = storageOffset +
+                          astContext.toCharUnitsFromBits(storageSize) -
+                          CharUnits::One();
+
+    const ASTRecordLayout &layout =
+        astContext.getASTRecordLayout(field->getParent());
+    // If we access memory outside the record, then bail out.
+    const CharUnits recordSize = layout.getSize();
+    if (end >= recordSize)
+      continue;
+
+    // Bail out if performing this load would access non-bit-field members.
+    bool conflict = false;
+    for (const auto *f : recordDecl->fields()) {
+      // Allow sized bit-field overlaps.
+      if (f->isBitField() && !f->isZeroLengthBitField())
+        continue;
+
+      const CharUnits fOffset = astContext.toCharUnitsFromBits(
+          layout.getFieldOffset(f->getFieldIndex()));
+
+      // As C11 defines, a zero sized bit-field defines a barrier, so
+      // fields after and before it should be race condition free.
+      // The AAPCS acknowledges it and imposes no restrictions when the
+      // natural container overlaps a zero-length bit-field.
+      if (f->isZeroLengthBitField()) {
+        if (end > fOffset && storageOffset < fOffset) {
+          conflict = true;
+          break;
+        }
+      }
+
+      const CharUnits fEnd =
+          fOffset +
+          astContext.toCharUnitsFromBits(astContext.toBits(
+              getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())))) -
+          CharUnits::One();
+      // If no overlap, continue.
+      if (end < fOffset || fEnd < storageOffset)
+        continue;
+
+      // The desired load overlaps a non-bit-field member, bail out.
+      conflict = true;
+      break;
+    }
+
+    if (conflict)
+      continue;
+    // Write the new bit-field access parameters.
+    // As the storage offset now is defined as the number of elements from the
+    // start of the structure, we should divide the Offset by the element size.
+    info.volatileStorageOffset =
+        storageOffset /
+        astContext.toCharUnitsFromBits(storageSize).getQuantity();
+    info.volatileStorageSize = storageSize;
+    info.volatileOffset = offset;
+  }
 }
 
 void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
@@ -802,9 +904,14 @@ void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
 
 void CIRRecordLowering::accumulateVPtrs() {
   if (astRecordLayout.hasOwnVFPtr())
-    cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
-                                       "accumulateVPtrs: hasOwnVFPtr");
+    members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::VFPtr,
+                                 getVFPtrType()));
+
   if (astRecordLayout.hasOwnVBPtr())
     cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                        "accumulateVPtrs: hasOwnVBPtr");
 }
+
+mlir::Type CIRRecordLowering::getVFPtrType() {
+  return cir::VPtrType::get(builder.getContext());
+}
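A sketch of the access change computeVolatileBitfields implements for AAPCS targets (illustrative; -faapcs-bitfield-width is the driver flag behind the AAPCSBitfieldWidth option checked above):

    struct S {
      volatile int a : 3; // declared with a 32-bit container type
      volatile int b : 5;
    };
    // On AAPCS, loads and stores of 'a' and 'b' must use a container as wide
    // as 'int', so volatileStorageSize becomes 32 and volatileOffset /
    // volatileStorageOffset are recomputed relative to that container.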
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 21bee33..50642e7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -79,14 +79,15 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
 #define EXPR(Type, Base) case Stmt::Type##Class:
 #include "clang/AST/StmtNodes.inc"
     {
-      // Remember the block we came in on.
-      mlir::Block *incoming = builder.getInsertionBlock();
-      assert(incoming && "expression emission must have an insertion point");
+      assert(builder.getInsertionBlock() &&
+             "expression emission must have an insertion point");
 
       emitIgnoredExpr(cast<Expr>(s));
 
-      mlir::Block *outgoing = builder.getInsertionBlock();
-      assert(outgoing && "expression emission cleared block!");
+      // Classic codegen has a check here to see if the emitter created a new
+      // block that isn't used (comparing the incoming and outgoing insertion
+      // points) and deletes the outgoing block if it's not used. In CIR, we
+      // will handle that during the cir.canonicalize pass.
 
       return mlir::success();
     }
   case Stmt::IfStmtClass:
@@ -363,8 +364,8 @@ mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
 mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
   assert(builder.getInsertionBlock() && "expected valid insertion point");
 
-  for (const Decl *I : s.decls())
-    emitDecl(*I);
+  for (const Decl *i : s.decls())
+    emitDecl(*i, /*evaluateConditionDecl=*/true);
 
   return mlir::success();
 }
@@ -875,7 +876,7 @@ mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const clang::SwitchStmt &s) {
     return mlir::failure();
 
   if (s.getConditionVariable())
-    emitDecl(*s.getConditionVariable());
+    emitDecl(*s.getConditionVariable(), /*evaluateConditionDecl=*/true);
 
   mlir::Value condV = emitScalarExpr(s.getCond());
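An example of declarations now emitted with evaluateConditionDecl=true (illustrative): both ordinary DeclStmts and switch condition variables flow through emitDecl, which then calls maybeEmitDeferredVarDeclInit for any decomposition holding variables.

    struct Pair { int value, status; };
    Pair next();

    void f() {
      auto [value, status] = next(); // DeclStmt: emitDecl(..., true)
      switch (int c = value) {       // condition variable: emitDecl(..., true)
      default:
        break;
      }
    }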
diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
index 99d6528..b0357d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
@@ -152,9 +152,30 @@ void CIRGenerator::HandleTagDeclDefinition(TagDecl *d) {
     cgm->errorNYI(d->getSourceRange(), "HandleTagDeclDefinition: OpenMP");
 }
 
+void CIRGenerator::HandleTagDeclRequiredDefinition(const TagDecl *D) {
+  if (diags.hasErrorOccurred())
+    return;
+
+  assert(!cir::MissingFeatures::generateDebugInfo());
+}
+
+void CIRGenerator::HandleCXXStaticMemberVarInstantiation(VarDecl *D) {
+  if (diags.hasErrorOccurred())
+    return;
+
+  cgm->errorNYI(D->getSourceRange(), "HandleCXXStaticMemberVarInstantiation");
+}
+
 void CIRGenerator::CompleteTentativeDefinition(VarDecl *d) {
   if (diags.hasErrorOccurred())
     return;
 
   cgm->emitTentativeDefinition(d);
 }
+
+void CIRGenerator::HandleVTable(CXXRecordDecl *rd) {
+  if (diags.hasErrorOccurred())
+    return;
+
+  cgm->errorNYI(rd->getSourceRange(), "HandleVTable");
+}