Diffstat (limited to 'clang/lib/CIR/CodeGen')
-rw-r--r--  clang/lib/CIR/CodeGen/Address.h                         11
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenClass.cpp                   19
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp                    48
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp                   167
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp              28
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp                10
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h                  25
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp                  4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp          136
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp     19
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenerator.cpp                  21
11 files changed, 432 insertions, 56 deletions
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index 6f76c3e..6c927e9 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -101,6 +101,17 @@ public:
}
clang::CharUnits getAlignment() const { return alignment; }
+
+ /// Get the operation which defines this address.
+ mlir::Operation *getDefiningOp() const {
+ if (!isValid())
+ return nullptr;
+ return getPointer().getDefiningOp();
+ }
+
+ template <typename OpTy> OpTy getDefiningOp() const {
+ return mlir::dyn_cast_or_null<OpTy>(getDefiningOp());
+ }
};
} // namespace clang::CIRGen
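The new Address::getDefiningOp helpers replace the repeated "dyn_cast_or_null on getPointer().getDefiningOp()" pattern used by the callers updated below. A minimal before/after sketch of a call site (the variable names are illustrative, not taken from the patch):

    // Before: unwrap the pointer value, then cast its defining operation.
    auto allocaOp =
        mlir::dyn_cast_or_null<cir::AllocaOp>(addr.getPointer().getDefiningOp());

    // After: ask the Address directly; yields a null op when the address is
    // invalid or its pointer is not produced by a cir.alloca.
    auto allocaOp = addr.getDefiningOp<cir::AllocaOp>();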
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 50cca0e..72b9d17 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -349,12 +349,16 @@ void CIRGenFunction::emitCXXAggrConstructorCall(
// doesn't happen, but it's not clear that it's worth it.
// Optimize for a constant count.
- auto constantCount = dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
- if (constantCount) {
- auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constantCount.getValue());
- // Just skip out if the constant count is zero.
- if (constIntAttr && constIntAttr.getUInt() == 0)
- return;
+ if (auto constantCount = numElements.getDefiningOp<cir::ConstantOp>()) {
+ if (auto constIntAttr = constantCount.getValueAttr<cir::IntAttr>()) {
+ // Just skip out if the constant count is zero.
+ if (constIntAttr.getUInt() == 0)
+ return;
+ // Otherwise, emit the check.
+ }
+
+ if (constantCount.use_empty())
+ constantCount.erase();
} else {
// Otherwise, emit the check.
cgm.errorNYI(e->getSourceRange(), "dynamic-length array expression");
@@ -417,9 +421,6 @@ void CIRGenFunction::emitCXXAggrConstructorCall(
builder.create<cir::YieldOp>(loc);
});
}
-
- if (constantCount.use_empty())
- constantCount.erase();
}
void CIRGenFunction::emitDelegateCXXConstructorCall(
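For reference, a small piece of C++ input that reaches emitCXXAggrConstructorCall with a constant element count (an illustrative sketch, not part of the patch):

    // The array bound is emitted as a cir.const, so the rewritten
    // constant-count branch applies; a zero bound would skip emitting the
    // construction loop entirely.
    struct Widget {
      Widget();
    };

    void makeWidgets() {
      Widget ws[4];
    }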
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 9e8eaa5..78d375c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -24,7 +24,8 @@ using namespace clang;
using namespace clang::CIRGen;
CIRGenFunction::AutoVarEmission
-CIRGenFunction::emitAutoVarAlloca(const VarDecl &d) {
+CIRGenFunction::emitAutoVarAlloca(const VarDecl &d,
+ mlir::OpBuilder::InsertPoint ip) {
QualType ty = d.getType();
if (ty.getAddressSpace() != LangAS::Default)
cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: address space");
@@ -50,7 +51,8 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d) {
// A normal fixed sized variable becomes an alloca in the entry block,
mlir::Type allocaTy = convertTypeForMem(ty);
// Create the temp alloca and declare variable using it.
- address = createTempAlloca(allocaTy, alignment, loc, d.getName());
+ address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
+ /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()), alignment);
emission.Addr = address;
@@ -156,7 +158,7 @@ void CIRGenFunction::emitAutoVarInit(
// out of it while trying to build the expression, mark it as such.
mlir::Value val = lv.getAddress().getPointer();
assert(val && "Should have an address");
- auto allocaOp = dyn_cast_or_null<cir::AllocaOp>(val.getDefiningOp());
+ auto allocaOp = val.getDefiningOp<cir::AllocaOp>();
assert(allocaOp && "Address should come straight out of the alloca");
if (!allocaOp.use_empty())
@@ -410,7 +412,8 @@ void CIRGenFunction::emitStaticVarDecl(const VarDecl &d,
// TODO(cir): we should have a way to represent global ops as values without
// having to emit a get global op. Sometimes these emissions are not used.
mlir::Value addr = builder.createGetGlobal(globalOp);
- auto getAddrOp = mlir::cast<cir::GetGlobalOp>(addr.getDefiningOp());
+ auto getAddrOp = addr.getDefiningOp<cir::GetGlobalOp>();
+ assert(getAddrOp && "expected cir::GetGlobalOp");
CharUnits alignment = getContext().getDeclAlign(&d);
@@ -651,6 +654,27 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
assert(!cir::MissingFeatures::sanitizers());
}
+namespace {
+struct DestroyObject final : EHScopeStack::Cleanup {
+ DestroyObject(Address addr, QualType type,
+ CIRGenFunction::Destroyer *destroyer)
+ : addr(addr), type(type), destroyer(destroyer) {}
+
+ Address addr;
+ QualType type;
+ CIRGenFunction::Destroyer *destroyer;
+
+ void emit(CIRGenFunction &cgf) override {
+ cgf.emitDestroy(addr, type, destroyer);
+ }
+};
+} // namespace
+
+void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
+ QualType type, Destroyer *destroyer) {
+ pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
+}
+
/// Destroys all the elements of the given array, beginning from last to first.
/// The array cannot be zero-length.
///
@@ -738,22 +762,6 @@ CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
llvm_unreachable("Unknown DestructionKind");
}
-namespace {
-struct DestroyObject final : EHScopeStack::Cleanup {
- DestroyObject(Address addr, QualType type,
- CIRGenFunction::Destroyer *destroyer)
- : addr(addr), type(type), destroyer(destroyer) {}
-
- Address addr;
- QualType type;
- CIRGenFunction::Destroyer *destroyer;
-
- void emit(CIRGenFunction &cgf) override {
- cgf.emitDestroy(addr, type, destroyer);
- }
-};
-} // namespace
-
/// Enter a destroy cleanup for the given local variable.
void CIRGenFunction::emitAutoVarTypeCleanup(
const CIRGenFunction::AutoVarEmission &emission,
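The DestroyObject cleanup now sits next to the new pushDestroy entry point that uses it. A minimal C++ input whose automatic variable ends up registering such a cleanup (illustrative only):

    // Guard has a non-trivial destructor, so the auto-var cleanup path pushes
    // a DestroyObject cleanup that runs the destructor on scope exit.
    struct Guard {
      ~Guard();
    };

    void useGuard() {
      Guard g;
    }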
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index d267504..cd37a2b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -303,8 +303,7 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
// Update the alloca with more info on initialization.
assert(addr.getPointer() && "expected pointer to exist");
- auto srcAlloca =
- dyn_cast_or_null<cir::AllocaOp>(addr.getPointer().getDefiningOp());
+ auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
if (currVarDecl && srcAlloca) {
const VarDecl *vd = currVarDecl;
assert(vd && "VarDecl expected");
@@ -635,10 +634,8 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
// Tag 'load' with deref attribute.
// FIXME: This misses some dereference cases and has problematic interactions
// with other operators.
- if (auto loadOp =
- dyn_cast<cir::LoadOp>(addr.getPointer().getDefiningOp())) {
+ if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
- }
LValue lv = makeAddrLValue(addr, t, baseInfo);
assert(!cir::MissingFeatures::addressSpace());
@@ -721,8 +718,8 @@ static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
// TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr?
- if (auto constantOp = dyn_cast<cir::ConstantOp>(idx.getDefiningOp()))
- return mlir::dyn_cast<cir::IntAttr>(constantOp.getValue());
+ if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
+ return constantOp.getValueAttr<cir::IntAttr>();
return {};
}
@@ -730,8 +727,7 @@ static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
CharUnits eltSize) {
// If we have a constant index, we can use the exact offset of the
// element we're accessing.
- const cir::IntAttr constantIdx = getConstantIndexOrNull(idx);
- if (constantIdx) {
+ if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
return arrayAlign.alignmentAtOffset(offset);
}
@@ -1105,6 +1101,151 @@ void CIRGenFunction::emitAnyExprToMem(const Expr *e, Address location,
llvm_unreachable("bad evaluation kind");
}
+static Address createReferenceTemporary(CIRGenFunction &cgf,
+ const MaterializeTemporaryExpr *m,
+ const Expr *inner) {
+ // TODO(cir): cgf.getTargetHooks();
+ switch (m->getStorageDuration()) {
+ case SD_FullExpression:
+ case SD_Automatic: {
+ QualType ty = inner->getType();
+
+ assert(!cir::MissingFeatures::mergeAllConstants());
+
+ // The temporary memory should be created in the same scope as the extending
+ // declaration of the temporary materialization expression.
+ cir::AllocaOp extDeclAlloca;
+ if (const ValueDecl *extDecl = m->getExtendingDecl()) {
+ auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
+ if (extDeclAddrIter != cgf.localDeclMap.end())
+ extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
+ }
+ mlir::OpBuilder::InsertPoint ip;
+ if (extDeclAlloca)
+ ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
+ return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
+ cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
+ ip);
+ }
+ case SD_Thread:
+ case SD_Static: {
+ cgf.cgm.errorNYI(
+ m->getSourceRange(),
+ "createReferenceTemporary: static/thread storage duration");
+ return Address::invalid();
+ }
+
+ case SD_Dynamic:
+ llvm_unreachable("temporary can't have dynamic storage duration");
+ }
+ llvm_unreachable("unknown storage duration");
+}
+
+static void pushTemporaryCleanup(CIRGenFunction &cgf,
+ const MaterializeTemporaryExpr *m,
+ const Expr *e, Address referenceTemporary) {
+ // Objective-C++ ARC:
+ // If we are binding a reference to a temporary that has ownership, we
+ // need to perform retain/release operations on the temporary.
+ //
+ // FIXME(ogcg): This should be looking at e, not m.
+ if (m->getType().getObjCLifetime()) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
+ return;
+ }
+
+ CXXDestructorDecl *referenceTemporaryDtor = nullptr;
+ if (const clang::RecordType *rt = e->getType()
+ ->getBaseElementTypeUnsafe()
+ ->getAs<clang::RecordType>()) {
+ // Get the destructor for the reference temporary.
+ auto *classDecl = cast<CXXRecordDecl>(rt->getDecl());
+ if (!classDecl->hasTrivialDestructor())
+ referenceTemporaryDtor = classDecl->getDestructor();
+ }
+
+ if (!referenceTemporaryDtor)
+ return;
+
+ // Call the destructor for the temporary.
+ switch (m->getStorageDuration()) {
+ case SD_Static:
+ case SD_Thread:
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "pushTemporaryCleanup: static/thread storage duration");
+ return;
+
+ case SD_FullExpression:
+ cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
+ CIRGenFunction::destroyCXXObject);
+ break;
+
+ case SD_Automatic:
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "pushTemporaryCleanup: automatic storage duration");
+ break;
+
+ case SD_Dynamic:
+ llvm_unreachable("temporary cannot have dynamic storage duration");
+ }
+}
+
+LValue CIRGenFunction::emitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *m) {
+ const Expr *e = m->getSubExpr();
+
+ assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
+ !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
+ "Reference should never be pseudo-strong!");
+
+ // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so
+ // as that will cause the lifetime adjustment to be lost for ARC
+ auto ownership = m->getType().getObjCLifetime();
+ if (ownership != Qualifiers::OCL_None &&
+ ownership != Qualifiers::OCL_ExplicitNone) {
+ cgm.errorNYI(e->getSourceRange(),
+ "emitMaterializeTemporaryExpr: ObjCLifetime");
+ return {};
+ }
+
+ SmallVector<const Expr *, 2> commaLHSs;
+ SmallVector<SubobjectAdjustment, 2> adjustments;
+ e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
+
+ for (const Expr *ignored : commaLHSs)
+ emitIgnoredExpr(ignored);
+
+ if (isa<OpaqueValueExpr>(e)) {
+ cgm.errorNYI(e->getSourceRange(),
+ "emitMaterializeTemporaryExpr: OpaqueValueExpr");
+ return {};
+ }
+
+ // Create and initialize the reference temporary.
+ Address object = createReferenceTemporary(*this, m, e);
+
+ if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
+ // TODO(cir): add something akin to stripPointerCasts() to ptr above
+ cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
+ return {};
+ } else {
+ assert(!cir::MissingFeatures::emitLifetimeMarkers());
+ emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
+ }
+ pushTemporaryCleanup(*this, m, e, object);
+
+ // Perform derived-to-base casts and/or field accesses, to get from the
+ // temporary object we created (and, potentially, for which we extended
+ // the lifetime) to the subobject we're binding the reference to.
+ if (!adjustments.empty()) {
+ cgm.errorNYI(e->getSourceRange(),
+ "emitMaterializeTemporaryExpr: Adjustments");
+ return {};
+ }
+
+ return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
+}
+
LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr *e) {
if (e->isFileScope()) {
cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
@@ -1862,9 +2003,9 @@ cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
const Twine &name,
mlir::Value arraySize,
bool insertIntoFnEntryBlock) {
- return cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
- insertIntoFnEntryBlock, arraySize)
- .getDefiningOp());
+ return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
+ insertIntoFnEntryBlock, arraySize)
+ .getDefiningOp());
}
/// This creates an alloca and inserts it into the provided insertion point
@@ -1874,7 +2015,7 @@ cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
mlir::OpBuilder::InsertPoint ip,
mlir::Value arraySize) {
assert(ip.isSet() && "Insertion point is not set");
- return cast<cir::AllocaOp>(
+ return mlir::cast<cir::AllocaOp>(
emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
.getDefiningOp());
}
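A short C++ example of the two storage durations the new emitMaterializeTemporaryExpr path handles, full-expression temporaries and reference-extended automatic temporaries (illustrative input, not code from the patch):

    struct Token {
      ~Token();
      int value() const;
    };

    Token makeToken();

    int useTokens() {
      // SD_FullExpression: the temporary is destroyed at the end of the full
      // expression; pushTemporaryCleanup registers its destructor.
      int v = makeToken().value();

      // SD_Automatic: binding a reference extends the temporary to the scope
      // of 't'; createReferenceTemporary places the backing alloca next to
      // the extending declaration's alloca.
      const Token &t = makeToken();
      return v + t.value();
    }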
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index f62be49..32c1c1a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -48,8 +48,8 @@ struct BinOpInfo {
/// Check if the binop can result in integer overflow.
bool mayHaveIntegerOverflow() const {
// Without constant input, we can't rule out overflow.
- auto lhsci = dyn_cast<cir::ConstantOp>(lhs.getDefiningOp());
- auto rhsci = dyn_cast<cir::ConstantOp>(rhs.getDefiningOp());
+ auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
+ auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
if (!lhsci || !rhsci)
return true;
@@ -626,6 +626,7 @@ public:
mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
+ mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
return cgf.emitCXXNewExpr(e);
}
@@ -1217,6 +1218,29 @@ mlir::Value ScalarExprEmitter::emitCompoundAssign(
return emitLoadOfLValue(lhs, e->getExprLoc());
}
+mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
+ mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
+ mlir::OpBuilder &builder = cgf.builder;
+
+ auto scope = cir::ScopeOp::create(
+ builder, scopeLoc,
+ /*scopeBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
+ CIRGenFunction::LexicalScope lexScope{cgf, loc,
+ builder.getInsertionBlock()};
+ mlir::Value scopeYieldVal = Visit(e->getSubExpr());
+ if (scopeYieldVal) {
+ // Defend against dominance problems caused by jumps out of expression
+ // evaluation through the shared cleanup block.
+ lexScope.forceCleanup();
+ cir::YieldOp::create(builder, loc, scopeYieldVal);
+ yieldTy = scopeYieldVal.getType();
+ }
+ });
+
+ return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
+}
+
} // namespace
LValue
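A scalar full expression with cleanups that now goes through the new VisitExprWithCleanups (illustrative input only):

    struct Buffer {
      ~Buffer();
      int size() const;
    };

    int currentSize() {
      // The full expression owns a Buffer temporary; the visitor wraps its
      // evaluation in a cir.scope, forces the cleanups, and yields the int.
      return Buffer().size();
    }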
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 0c9bc38..f8e7347 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -219,7 +219,9 @@ void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
assert(isa<NamedDecl>(var) && "Needs a named decl");
assert(!cir::MissingFeatures::cgfSymbolTable());
- auto allocaOp = cast<cir::AllocaOp>(addrVal.getDefiningOp());
+ auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
+ assert(allocaOp && "expected cir::AllocaOp");
+
if (isParam)
allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
if (ty->isReferenceType() || ty.isConstQualified())
@@ -800,6 +802,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
case Expr::CXXDynamicCastExprClass:
case Expr::ImplicitCastExprClass:
return emitCastLValue(cast<CastExpr>(e));
+ case Expr::MaterializeTemporaryExprClass:
+ return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
}
}
@@ -810,6 +814,10 @@ static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
return std::string(out.str());
}
+std::string CIRGenFunction::getCounterRefTmpAsString() {
+ return getVersionedTmpName("ref.tmp", counterRefTmp++);
+}
+
std::string CIRGenFunction::getCounterAggTmpAsString() {
return getVersionedTmpName("agg.tmp", counterAggTmp++);
}
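Reference temporaries are named with the new counter; the two bindings below would get versioned names along the lines of "ref.tmp0" and "ref.tmp1" (a naming sketch based on getVersionedTmpName; the exact spelling is an assumption):

    void bindTemporaries() {
      const int &a = 1; // materialized temporary, e.g. "ref.tmp0"
      const int &b = 2; // materialized temporary, e.g. "ref.tmp1"
      (void)a;
      (void)b;
    }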
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index f9c8636..68d54bb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -325,7 +325,9 @@ public:
};
/// Hold counters for incrementally naming temporaries
+ unsigned counterRefTmp = 0;
unsigned counterAggTmp = 0;
+ std::string getCounterRefTmpAsString();
std::string getCounterAggTmpAsString();
/// Helpers to convert Clang's SourceLocation to a MLIR Location.
@@ -604,6 +606,19 @@ public:
void popCleanupBlocks(size_t oldCleanupStackDepth);
void popCleanupBlock();
+ /// Push a cleanup to be run at the end of the current full-expression. Safe
+ /// against the possibility that we're currently inside a
+ /// conditionally-evaluated expression.
+ template <class T, class... As>
+ void pushFullExprCleanup(CleanupKind kind, As... a) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch())
+ return ehStack.pushCleanup<T>(kind, a...);
+
+ cgm.errorNYI("pushFullExprCleanup in conditional branch");
+ }
+
/// Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
class RunCleanupsScope {
@@ -619,6 +634,7 @@ public:
protected:
CIRGenFunction &cgf;
+ public:
/// Enter a new cleanup scope.
explicit RunCleanupsScope(CIRGenFunction &cgf)
: performCleanup(true), cgf(cgf) {
@@ -801,6 +817,9 @@ public:
static Destroyer destroyCXXObject;
+ void pushDestroy(CleanupKind kind, Address addr, QualType type,
+ Destroyer *destroyer);
+
Destroyer *getDestroyer(clang::QualType::DestructionKind kind);
/// ----------------------
@@ -858,7 +877,8 @@ public:
Address emitArrayToPointerDecay(const Expr *array);
- AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d);
+ AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
+ mlir::OpBuilder::InsertPoint ip = {});
/// Emit code and set up symbol table for a variable declaration with auto,
/// register, or no storage class specifier. These turn into simple stack
@@ -1138,6 +1158,8 @@ public:
const clang::FieldDecl *field,
llvm::StringRef fieldName);
+ LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
+
LValue emitMemberExpr(const MemberExpr *e);
/// Given an expression with a pointer type, emit the value and compute our
@@ -1377,6 +1399,7 @@ public:
mlir::Location beginLoc;
mlir::Value varValue;
std::string name;
+ QualType baseType;
llvm::SmallVector<mlir::Value> bounds;
};
// Gets the collection of info required to lower an OpenACC clause or cache
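The pushFullExprCleanup template is the building block for pushDestroy declared above; a minimal sketch of a call, reusing the DestroyObject cleanup added in CIRGenDecl.cpp:

    // Outside a conditional branch this forwards directly to
    // ehStack.pushCleanup<DestroyObject>(...); the conditional case is NYI.
    pushFullExprCleanup<DestroyObject>(NormalAndEHCleanup, addr, type,
                                       CIRGenFunction::destroyCXXObject);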
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
index 49ff124..32095cb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
@@ -119,7 +119,7 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) {
if (const auto *memExpr = dyn_cast<MemberExpr>(curVarExpr))
return {exprLoc, emitMemberExpr(memExpr).getPointer(), exprString,
- std::move(bounds)};
+ curVarExpr->getType(), std::move(bounds)};
// Sema has made sure that only 4 types of things can get here, array
// subscript, array section, member expr, or DRE to a var decl (or the
@@ -127,5 +127,5 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) {
// right.
const auto *dre = cast<DeclRefExpr>(curVarExpr);
return {exprLoc, emitDeclRefLValue(dre).getPointer(), exprString,
- std::move(bounds)};
+ curVarExpr->getType(), std::move(bounds)};
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
index e45d3b8f..5a6e665 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
@@ -12,6 +12,7 @@
#include <type_traits>
+#include "CIRGenCXXABI.h"
#include "CIRGenFunction.h"
#include "clang/AST/ExprCXX.h"
@@ -355,6 +356,110 @@ class OpenACCClauseCIREmitter final
}
}
+ template <typename RecipeTy>
+ RecipeTy getOrCreateRecipe(ASTContext &astCtx, const Expr *varRef,
+ DeclContext *dc, QualType baseType,
+ mlir::Value mainOp) {
+ mlir::ModuleOp mod =
+ builder.getBlock()->getParent()->getParentOfType<mlir::ModuleOp>();
+
+ std::string recipeName;
+ {
+ llvm::raw_string_ostream stream(recipeName);
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
+ stream << "privatization_";
+ } else if constexpr (std::is_same_v<RecipeTy,
+ mlir::acc::FirstprivateRecipeOp>) {
+ stream << "firstprivatization_";
+
+ } else if constexpr (std::is_same_v<RecipeTy,
+ mlir::acc::ReductionRecipeOp>) {
+ stream << "reduction_";
+ // We don't have the reduction operation here well enough to know how to
+ // spell this correctly (+ == 'add', etc), so when we implement
+ // 'reduction' we have to do that here.
+ cgf.cgm.errorNYI(varRef->getSourceRange(),
+ "OpeNACC reduction recipe creation");
+ } else {
+ static_assert(!sizeof(RecipeTy), "Unknown Recipe op kind");
+ }
+
+ MangleContext &mc = cgf.cgm.getCXXABI().getMangleContext();
+ mc.mangleCanonicalTypeName(baseType, stream);
+ }
+
+ if (auto recipe = mod.lookupSymbol<RecipeTy>(recipeName))
+ return recipe;
+
+ mlir::Location loc = cgf.cgm.getLoc(varRef->getBeginLoc());
+ mlir::Location locEnd = cgf.cgm.getLoc(varRef->getEndLoc());
+
+ mlir::OpBuilder modBuilder(mod.getBodyRegion());
+ auto recipe =
+ RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType());
+
+ // Magic-up a var-decl so we can use normal init/destruction operations for
+ // a variable declaration.
+ VarDecl &tempDecl = *VarDecl::Create(
+ astCtx, dc, varRef->getBeginLoc(), varRef->getBeginLoc(),
+ &astCtx.Idents.get("openacc.private.init"), baseType,
+ astCtx.getTrivialTypeSourceInfo(baseType), SC_Auto);
+ CIRGenFunction::AutoVarEmission tempDeclEmission{
+ CIRGenFunction::AutoVarEmission::invalid()};
+
+ // Init section.
+ {
+ llvm::SmallVector<mlir::Type> argsTys{mainOp.getType()};
+ llvm::SmallVector<mlir::Location> argsLocs{loc};
+ builder.createBlock(&recipe.getInitRegion(), recipe.getInitRegion().end(),
+ argsTys, argsLocs);
+ builder.setInsertionPointToEnd(&recipe.getInitRegion().back());
+
+ if constexpr (!std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
+ // We have only implemented 'init' for private, so make this NYI until
+ // we have explicitly implemented everything.
+ cgf.cgm.errorNYI(varRef->getSourceRange(),
+ "OpenACC non-private recipe init");
+ }
+
+ tempDeclEmission =
+ cgf.emitAutoVarAlloca(tempDecl, builder.saveInsertionPoint());
+ cgf.emitAutoVarInit(tempDeclEmission);
+
+ mlir::acc::YieldOp::create(builder, locEnd);
+ }
+
+ // Copy section.
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp> ||
+ std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
+ // TODO: OpenACC: 'private' doesn't emit this, but for the other two we
+ // have to figure out what 'copy' means here.
+ cgf.cgm.errorNYI(varRef->getSourceRange(),
+ "OpenACC record type privatization copy section");
+ }
+
+ // Destroy section (doesn't currently exist).
+ if (tempDecl.needsDestruction(cgf.getContext())) {
+ llvm::SmallVector<mlir::Type> argsTys{mainOp.getType()};
+ llvm::SmallVector<mlir::Location> argsLocs{loc};
+ mlir::Block *block = builder.createBlock(&recipe.getDestroyRegion(),
+ recipe.getDestroyRegion().end(),
+ argsTys, argsLocs);
+ builder.setInsertionPointToEnd(&recipe.getDestroyRegion().back());
+
+ mlir::Type elementTy =
+ mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
+ Address addr{block->getArgument(0), elementTy,
+ cgf.getContext().getDeclAlign(&tempDecl)};
+ cgf.emitDestroy(addr, baseType,
+ cgf.getDestroyer(QualType::DK_cxx_destructor));
+
+ mlir::acc::YieldOp::create(builder, locEnd);
+ }
+
+ return recipe;
+ }
+
public:
OpenACCClauseCIREmitter(OpTy &operation, CIRGen::CIRGenFunction &cgf,
CIRGen::CIRGenBuilderTy &builder,
@@ -971,6 +1076,37 @@ public:
llvm_unreachable("Unknown construct kind in VisitAttachClause");
}
}
+
+ void VisitPrivateClause(const OpenACCPrivateClause &clause) {
+ if constexpr (isOneOfTypes<OpTy, mlir::acc::ParallelOp, mlir::acc::SerialOp,
+ mlir::acc::LoopOp>) {
+ for (const Expr *var : clause.getVarList()) {
+ CIRGenFunction::OpenACCDataOperandInfo opInfo =
+ cgf.getOpenACCDataOperandInfo(var);
+ auto privateOp = mlir::acc::PrivateOp::create(
+ builder, opInfo.beginLoc, opInfo.varValue, /*structured=*/true,
+ /*implicit=*/false, opInfo.name, opInfo.bounds);
+ privateOp.setDataClause(mlir::acc::DataClause::acc_private);
+
+ {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+ auto recipe = getOrCreateRecipe<mlir::acc::PrivateRecipeOp>(
+ cgf.getContext(), var, Decl::castToDeclContext(cgf.curFuncDecl),
+ opInfo.baseType, privateOp.getResult());
+ // TODO: OpenACC: The dialect is going to change in the near future to
+ // have these be on a different operation, so when that changes, we
+ // probably need to change these here.
+ operation.addPrivatization(builder.getContext(), privateOp, recipe);
+ }
+ }
+ } else if constexpr (isCombinedType<OpTy>) {
+ // Despite this being valid on ParallelOp or SerialOp, combined type
+ // applies to the 'loop'.
+ applyToLoopOp(clause);
+ } else {
+ llvm_unreachable("Unknown construct kind in VisitPrivateClause");
+ }
+ }
};
template <typename OpTy>
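A small OpenACC example that exercises the new VisitPrivateClause handling (illustrative input; the recipe symbol is "privatization_" followed by the mangled type name):

    void scale(int n, float *data) {
      float tmp;
    // Each variable in private(...) gets an acc.private operation plus a
    // privatization recipe; on a combined construct the clause is applied to
    // the loop operation.
    #pragma acc parallel loop private(tmp)
      for (int i = 0; i < n; ++i) {
        tmp = 2.0f;
        data[i] *= tmp;
      }
    }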
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index 0c8ff4bd..ecf31a7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -41,7 +41,7 @@ struct CIRRecordLowering final {
// member type that ensures correct rounding.
struct MemberInfo final {
CharUnits offset;
- enum class InfoKind { Field, Base } kind;
+ enum class InfoKind { VFPtr, Field, Base } kind;
mlir::Type data;
union {
const FieldDecl *fieldDecl;
@@ -87,6 +87,8 @@ struct CIRRecordLowering final {
accumulateBitFields(RecordDecl::field_iterator field,
RecordDecl::field_iterator fieldEnd);
+ mlir::Type getVFPtrType();
+
bool isAAPCS() const {
return astContext.getTargetInfo().getABI().starts_with("aapcs");
}
@@ -499,11 +501,7 @@ void CIRRecordLowering::accumulateFields() {
fieldEnd = recordDecl->field_end();
field != fieldEnd;) {
if (field->isBitField()) {
- RecordDecl::field_iterator start = field;
- // Iterate to gather the list of bitfields.
- for (++field; field != fieldEnd && field->isBitField(); ++field)
- ;
- field = accumulateBitFields(start, field);
+ field = accumulateBitFields(field, fieldEnd);
assert((field == fieldEnd || !field->isBitField()) &&
"Failed to accumulate all the bitfields");
} else if (!field->isZeroSize(astContext)) {
@@ -902,9 +900,14 @@ void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
void CIRRecordLowering::accumulateVPtrs() {
if (astRecordLayout.hasOwnVFPtr())
- cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
- "accumulateVPtrs: hasOwnVFPtr");
+ members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::VFPtr,
+ getVFPtrType()));
+
if (astRecordLayout.hasOwnVBPtr())
cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
"accumulateVPtrs: hasOwnVBPtr");
}
+
+mlir::Type CIRRecordLowering::getVFPtrType() {
+ return cir::VPtrType::get(builder.getContext());
+}
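An example record that now takes the new vptr path instead of the previous errorNYI (illustrative input only):

    // Shape declares its own virtual function, so hasOwnVFPtr() is true and
    // the lowered record starts with a cir::VPtrType member, followed by
    // 'cached'.
    struct Shape {
      virtual double area() const;
      double cached;
    };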
diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
index 99d6528..b0357d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
@@ -152,9 +152,30 @@ void CIRGenerator::HandleTagDeclDefinition(TagDecl *d) {
cgm->errorNYI(d->getSourceRange(), "HandleTagDeclDefinition: OpenMP");
}
+void CIRGenerator::HandleTagDeclRequiredDefinition(const TagDecl *D) {
+ if (diags.hasErrorOccurred())
+ return;
+
+ assert(!cir::MissingFeatures::generateDebugInfo());
+}
+
+void CIRGenerator::HandleCXXStaticMemberVarInstantiation(VarDecl *D) {
+ if (diags.hasErrorOccurred())
+ return;
+
+ cgm->errorNYI(D->getSourceRange(), "HandleCXXStaticMemberVarInstantiation");
+}
+
void CIRGenerator::CompleteTentativeDefinition(VarDecl *d) {
if (diags.hasErrorOccurred())
return;
cgm->emitTentativeDefinition(d);
}
+
+void CIRGenerator::HandleVTable(CXXRecordDecl *rd) {
+ if (diags.hasErrorOccurred())
+ return;
+
+ cgm->errorNYI(rd->getSourceRange(), "HandleVTable");
+}