Diffstat (limited to 'clang/lib/CIR/CodeGen')
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp         34
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h             7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp       79
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp             6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp    3
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp         32
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp    35
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h           9
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp   25
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.h             2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp    109
11 files changed, 303 insertions, 38 deletions
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 27c4d11..e35100f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -449,10 +449,36 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
}
case Builtin::BI__builtin_coro_free:
case Builtin::BI__builtin_coro_size: {
- cgm.errorNYI(e->getSourceRange(),
- "BI__builtin_coro_free, BI__builtin_coro_size NYI");
- assert(!cir::MissingFeatures::coroSizeBuiltinCall());
- return getUndefRValue(e->getType());
+ GlobalDecl gd{fd};
+ mlir::Type ty = cgm.getTypes().getFunctionType(
+ cgm.getTypes().arrangeGlobalDeclaration(gd));
+ const auto *nd = cast<NamedDecl>(gd.getDecl());
+ cir::FuncOp fnOp =
+ cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
+ fnOp.setBuiltin(true);
+ return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
+ returnValue);
+ }
+ case Builtin::BI__builtin_prefetch: {
+ auto evaluateOperandAsInt = [&](const Expr *arg) {
+ Expr::EvalResult res;
+ [[maybe_unused]] bool evalSucceed =
+ arg->EvaluateAsInt(res, cgm.getASTContext());
+ assert(evalSucceed && "expression should be able to evaluate as int");
+ return res.Val.getInt().getZExtValue();
+ };
+
+ bool isWrite = false;
+ if (e->getNumArgs() > 1)
+ isWrite = evaluateOperandAsInt(e->getArg(1));
+
+ int locality = 3;
+ if (e->getNumArgs() > 2)
+ locality = evaluateOperandAsInt(e->getArg(2));
+
+ mlir::Value address = emitScalarExpr(e->getArg(0));
+ cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
+ return RValue::get(nullptr);
}
}
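Not part of the patch: a minimal source-level sketch of what the new BI__builtin_prefetch lowering accepts. The function name is made up; the defaults (rw = 0, locality = 3) match the fallbacks in the code above.

void warm_cache(const int *data) {
  __builtin_prefetch(data);              // read, locality defaults to 3
  __builtin_prefetch(data + 16, 1);      // write hint
  __builtin_prefetch(data + 32, 0, 1);   // read, low temporal locality
  // The rw and locality operands must be integer constant expressions;
  // the EvaluateAsInt helper above asserts on anything else.
}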
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index c78f9b0..13dc9f3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -124,6 +124,8 @@ public:
virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0;
virtual void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) = 0;
+ virtual void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc) = 0;
+
virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc,
QualType ty) = 0;
@@ -185,6 +187,11 @@ public:
virtual void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
mlir::Value addr) = 0;
+ virtual void emitVirtualObjectDelete(CIRGenFunction &cgf,
+ const CXXDeleteExpr *de, Address ptr,
+ QualType elementType,
+ const CXXDestructorDecl *dtor) = 0;
+
/// Checks if ABI requires extra virtual offset for vtable field.
virtual bool
isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf,
diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
index c25cce4..8723a6e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/MissingFeatures.h"
using namespace clang;
using namespace clang::CIRGen;
@@ -23,6 +24,9 @@ struct clang::CIRGen::CGCoroData {
// Stores the __builtin_coro_id emitted in the function so that we can supply
// it as the first argument to other builtins.
cir::CallOp coroId = nullptr;
+
+ // Stores the result of __builtin_coro_begin call.
+ mlir::Value coroBegin = nullptr;
};
// Defining these here allows to keep CGCoroData private to this file.
@@ -63,6 +67,46 @@ cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc,
nullPtr, nullPtr, nullPtr});
}
+cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) {
+ cir::BoolType boolTy = builder.getBoolTy();
+
+ mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroAlloc);
+
+ cir::FuncOp fnOp;
+ if (!builtin) {
+ fnOp = cgm.createCIRBuiltinFunction(loc, cgm.builtinCoroAlloc,
+ cir::FuncType::get({UInt32Ty}, boolTy),
+ /*fd=*/nullptr);
+ assert(fnOp && "should always succeed");
+ } else {
+ fnOp = cast<cir::FuncOp>(builtin);
+ }
+
+ return builder.createCallOp(
+ loc, fnOp, mlir::ValueRange{curCoro.data->coroId.getResult()});
+}
+
+cir::CallOp
+CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc,
+ mlir::Value coroframeAddr) {
+ mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroBegin);
+
+ cir::FuncOp fnOp;
+ if (!builtin) {
+ fnOp = cgm.createCIRBuiltinFunction(
+ loc, cgm.builtinCoroBegin,
+ cir::FuncType::get({UInt32Ty, VoidPtrTy}, VoidPtrTy),
+ /*fd=*/nullptr);
+ assert(fnOp && "should always succeed");
+ } else {
+ fnOp = cast<cir::FuncOp>(builtin);
+ }
+
+ return builder.createCallOp(
+ loc, fnOp,
+ mlir::ValueRange{curCoro.data->coroId.getResult(), coroframeAddr});
+}
+
mlir::LogicalResult
CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) {
mlir::Location openCurlyLoc = getLoc(s.getBeginLoc());
@@ -73,10 +117,39 @@ CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) {
cir::CallOp coroId = emitCoroIDBuiltinCall(openCurlyLoc, nullPtrCst);
createCoroData(*this, curCoro, coroId);
- assert(!cir::MissingFeatures::coroAllocBuiltinCall());
-
- assert(!cir::MissingFeatures::coroBeginBuiltinCall());
+ // The backend is allowed to elide memory allocations; to help it, emit
+ // auto mem = coro.alloc() ? 0 : ... allocation code ...;
+ cir::CallOp coroAlloc = emitCoroAllocBuiltinCall(openCurlyLoc);
+
+ // Initialize address of coroutine frame to null
+ CanQualType astVoidPtrTy = cgm.getASTContext().VoidPtrTy;
+ mlir::Type allocaTy = convertTypeForMem(astVoidPtrTy);
+ Address coroFrame =
+ createTempAlloca(allocaTy, getContext().getTypeAlignInChars(astVoidPtrTy),
+ openCurlyLoc, "__coro_frame_addr",
+ /*ArraySize=*/nullptr);
+
+ mlir::Value storeAddr = coroFrame.getPointer();
+ builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr);
+ cir::IfOp::create(
+ builder, openCurlyLoc, coroAlloc.getResult(),
+ /*withElseRegion=*/false,
+ /*thenBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) {
+ builder.CIRBaseBuilderTy::createStore(
+ loc, emitScalarExpr(s.getAllocate()), storeAddr);
+ cir::YieldOp::create(builder, loc);
+ });
+ curCoro.data->coroBegin =
+ emitCoroBeginBuiltinCall(
+ openCurlyLoc,
+ cir::LoadOp::create(builder, openCurlyLoc, allocaTy, storeAddr))
+ .getResult();
+
+ // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
+ if (s.getReturnStmtOnAllocFailure())
+ cgm.errorNYI("handle coroutine return alloc failure");
assert(!cir::MissingFeatures::generateDebugInfo());
+ assert(!cir::MissingFeatures::emitBodyAndFallthrough());
return mlir::success();
}
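Not part of the patch: an illustrative C++20 coroutine that exercises the new coro.alloc/coro.begin emission; the task/promise types are hypothetical and only as large as needed to compile.

#include <coroutine>

struct task {
  struct promise_type {
    task get_return_object() { return {}; }
    std::suspend_never initial_suspend() { return {}; }
    std::suspend_never final_suspend() noexcept { return {}; }
    void return_void() {}
    void unhandled_exception() {}
  };
};

// On entry, the frontend now emits coro.id, then
// "frame = coro.alloc(id) ? <allocation code> : null", then coro.begin(id, frame).
task example() { co_return; }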
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 9df88ad..df6ee56 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -2065,7 +2065,11 @@ mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
// a surrounding cir.scope, make sure the alloca ends up in the surrounding
// scope instead. This is necessary in order to guarantee all SSA values are
// reachable during cleanups.
- assert(!cir::MissingFeatures::tryOp());
+ if (auto tryOp =
+ llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
+ if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
+ entryBlock = &scopeOp.getScopeRegion().front();
+ }
return emitAlloca(name, ty, loc, alignment,
builder.getBestAllocaInsertPoint(entryBlock), arraySize);
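Not part of the patch: a hedged example of the situation the tryOp special case above guards against; 'Resource' and 'may_throw' are made-up names.

struct Resource { ~Resource(); };
void may_throw();

void f() {
  try {
    Resource r;   // the alloca for 'r' is hoisted into the surrounding
    may_throw();  // cir.scope so the cleanup emitted for '~Resource()'
  } catch (...) { // can still reference it outside the cir.try region
  }
}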
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index d6d226b..8fe0d9b4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -362,8 +362,7 @@ public:
cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXTypeidExpr");
}
void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "AggExprEmitter: VisitMaterializeTemporaryExpr");
+ Visit(e->getSubExpr());
}
void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(),
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index fe9e210..7a35382 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -565,8 +565,10 @@ static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de,
dtor = rd->getDestructor();
if (dtor->isVirtual()) {
- cgf.cgm.errorNYI(de->getSourceRange(),
- "emitObjectDelete: virtual destructor");
+ assert(!cir::MissingFeatures::devirtualizeDestructor());
+ cgf.cgm.getCXXABI().emitVirtualObjectDelete(cgf, de, ptr, elementType,
+ dtor);
+ return;
}
}
}
@@ -801,6 +803,26 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,
emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs);
}
+static mlir::Value emitDynamicCastToNull(CIRGenFunction &cgf,
+ mlir::Location loc, QualType destTy) {
+ mlir::Type destCIRTy = cgf.convertType(destTy);
+ assert(mlir::isa<cir::PointerType>(destCIRTy) &&
+ "result of dynamic_cast should be a ptr");
+
+ if (!destTy->isPointerType()) {
+ mlir::Region *currentRegion = cgf.getBuilder().getBlock()->getParent();
+ /// C++ [expr.dynamic.cast]p9:
+ /// A failed cast to reference type throws std::bad_cast
+ cgf.cgm.getCXXABI().emitBadCastCall(cgf, loc);
+
+ // The call to bad_cast will terminate the current block. Create a new block
+ // to hold any follow-up code.
+ cgf.getBuilder().createBlock(currentRegion, currentRegion->end());
+ }
+
+ return cgf.getBuilder().getNullPtr(destCIRTy, loc);
+}
+
mlir::Value CIRGenFunction::emitDynamicCast(Address thisAddr,
const CXXDynamicCastExpr *dce) {
mlir::Location loc = getLoc(dce->getSourceRange());
@@ -831,10 +853,8 @@ mlir::Value CIRGenFunction::emitDynamicCast(Address thisAddr,
assert(srcRecordTy->isRecordType() && "source type must be a record type!");
assert(!cir::MissingFeatures::emitTypeCheck());
- if (dce->isAlwaysNull()) {
- cgm.errorNYI(dce->getSourceRange(), "emitDynamicCastToNull");
- return {};
- }
+ if (dce->isAlwaysNull())
+ return emitDynamicCastToNull(*this, loc, destTy);
auto destCirTy = mlir::cast<cir::PointerType>(convertType(destTy));
return cgm.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy,
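Not part of the patch: illustrative casts that Clang can prove always fail (isAlwaysNull), assuming the usual "final source class, unrelated destination" case; the class names are invented.

struct A { virtual ~A(); };
struct B final : A {};
struct C { virtual ~C(); };

C *as_ptr(B *b) { return dynamic_cast<C *>(b); } // pointer form: folds to null
C &as_ref(B &b) { return dynamic_cast<C &>(b); } // reference form: per
                                                 // [expr.dynamic.cast]p9 this
                                                 // emits the bad_cast call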
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 7de3dd0..928e5aa 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -922,9 +922,9 @@ public:
}
mlir::Attribute VisitCastExpr(CastExpr *e, QualType destType) {
- if (isa<ExplicitCastExpr>(e))
- cgm.errorNYI(e->getBeginLoc(),
- "ConstExprEmitter::VisitCastExpr explicit cast");
+ if (const auto *ece = dyn_cast<ExplicitCastExpr>(e))
+ cgm.emitExplicitCastExprType(ece,
+ const_cast<CIRGenFunction *>(emitter.cgf));
Expr *subExpr = e->getSubExpr();
@@ -1078,9 +1078,32 @@ public:
mlir::Attribute VisitCXXConstructExpr(CXXConstructExpr *e, QualType ty) {
if (!e->getConstructor()->isTrivial())
- return nullptr;
- cgm.errorNYI(e->getBeginLoc(), "trivial constructor const handling");
- return {};
+ return {};
+
+ // Only default and copy/move constructors can be trivial.
+ if (e->getNumArgs()) {
+ assert(e->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+ assert(e->getConstructor()->isCopyOrMoveConstructor() &&
+ "trivial ctor has argument but isn't a copy/move ctor");
+
+ Expr *arg = e->getArg(0);
+ assert(cgm.getASTContext().hasSameUnqualifiedType(ty, arg->getType()) &&
+ "argument to copy ctor is of wrong type");
+
+ // Look through the temporary; it's just converting the value to an lvalue
+ // to pass it to the constructor.
+ if (auto const *mte = dyn_cast<MaterializeTemporaryExpr>(arg))
+ return Visit(mte->getSubExpr(), ty);
+
+ // TODO: Investigate whether there are cases that can fall through to here
+ // that need to be handled. This is missing in classic codegen also.
+ assert(!cir::MissingFeatures::ctorConstLvalueToRvalueConversion());
+
+ // Don't try to support arbitrary lvalue-to-rvalue conversions for now.
+ return {};
+ }
+
+ return cgm.getBuilder().getZeroInitAttr(cgm.convertType(ty));
}
mlir::Attribute VisitStringLiteral(StringLiteral *e, QualType t) {
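Not part of the patch: the kind of initializers the updated VisitCXXConstructExpr can now fold, sketched with a made-up aggregate; the copy case assumes the copy construction is still present in the AST (e.g. pre-C++17 semantics, where it is elidable but not mandatory).

struct Agg { int a; float b; };   // trivial default and copy constructors

Agg zeroed = Agg();               // zero arguments: emitted as a zero-init
                                  // constant instead of the old NYI error
Agg copied((Agg()));              // trivial copy of a materialized temporary:
                                  // the emitter looks through the temporary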
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 5f9dbdc..d791130 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -665,6 +665,12 @@ public:
symbolTable.insert(vd, addr.getPointer());
}
+ // Replaces the address of the local variable, if it exists. Else does the
+ // same thing as setAddrOfLocalVar.
+ void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
+ localDeclMap.insert_or_assign(vd, addr);
+ }
+
// A class to allow reverting changes to a var-decl's registration to the
// localDeclMap. This is used in cases where things are being inserted into
// the variable list but don't follow normal lookup/search rules, like in
@@ -1326,6 +1332,9 @@ public:
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
+ cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
+ cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
+ mlir::Value coroframeAddr);
void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index f7c4d18..88fedf1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -74,6 +74,9 @@ public:
QualType thisTy) override;
void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
mlir::Value addr) override;
+ void emitVirtualObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de,
+ Address ptr, QualType elementType,
+ const CXXDestructorDecl *dtor) override;
void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override;
void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) override;
@@ -120,6 +123,8 @@ public:
return true;
}
+ void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc) override;
+
mlir::Value
getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &cgf,
Address thisAddr, const CXXRecordDecl *classDecl,
@@ -1883,6 +1888,11 @@ static void emitCallToBadCast(CIRGenFunction &cgf, mlir::Location loc) {
cgf.getBuilder().clearInsertionPoint();
}
+void CIRGenItaniumCXXABI::emitBadCastCall(CIRGenFunction &cgf,
+ mlir::Location loc) {
+ emitCallToBadCast(cgf, loc);
+}
+
// TODO(cir): This could be shared with classic codegen.
static CharUnits computeOffsetHint(ASTContext &astContext,
const CXXRecordDecl *src,
@@ -2168,6 +2178,21 @@ mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
isRefCast, castInfo);
}
+/// The Itanium ABI always places an offset to the complete object
+/// at entry -2 in the vtable.
+void CIRGenItaniumCXXABI::emitVirtualObjectDelete(
+ CIRGenFunction &cgf, const CXXDeleteExpr *delExpr, Address ptr,
+ QualType elementType, const CXXDestructorDecl *dtor) {
+ bool useGlobalDelete = delExpr->isGlobalDelete();
+ if (useGlobalDelete) {
+ cgf.cgm.errorNYI(delExpr->getSourceRange(),
+ "emitVirtualObjectDelete: global delete");
+ }
+
+ CXXDtorType dtorType = useGlobalDelete ? Dtor_Complete : Dtor_Deleting;
+ emitVirtualDestructorCall(cgf, dtor, dtorType, ptr, delExpr);
+}
+
/************************** Array allocation cookies **************************/
CharUnits CIRGenItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
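Not part of the patch: a delete-expression that now reaches emitVirtualObjectDelete. The Itanium deleting destructor (D0) destroys and frees the complete object, while '::delete' (still NYI above) would instead use the complete-object destructor plus the global operator delete.

struct Base {
  virtual ~Base();
};

void destroy(Base *b) {
  delete b;   // the dynamic type may differ from Base, so the call goes
              // through the vtable's deleting-destructor entry
}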
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 1fc116d..186913d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -496,6 +496,8 @@ public:
bool assumeConvergent = false);
static constexpr const char *builtinCoroId = "__builtin_coro_id";
+ static constexpr const char *builtinCoroAlloc = "__builtin_coro_alloc";
+ static constexpr const char *builtinCoroBegin = "__builtin_coro_begin";
/// Given a builtin id for a function like "__builtin_fabsf", return a
/// Function* for "fabsf".
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
index 77e6f83..9e55bd5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
@@ -304,11 +304,21 @@ CIRGenFunction::emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s) {
return mlir::success();
}
+const VarDecl *getLValueDecl(const Expr *e) {
+ // We are going to assume that, after stripping implicit casts, the LValue
+ // is just a DRE around the var-decl.
+
+ e = e->IgnoreImpCasts();
+
+ const auto *dre = cast<DeclRefExpr>(e);
+ return cast<VarDecl>(dre->getDecl());
+}
+
mlir::LogicalResult
CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
- // For now, we are only support 'read', so diagnose. We can switch on the kind
- // later once we start implementing the other 3 forms.
- if (s.getAtomicKind() != OpenACCAtomicKind::Read) {
+ // For now, we only support 'read'/'write'/'update', so diagnose. We can
+ // switch on the kind later once we implement the 'capture' form.
+ if (s.getAtomicKind() == OpenACCAtomicKind::Capture) {
cgm.errorNYI(s.getSourceRange(), "OpenACC Atomic Construct");
return mlir::failure();
}
@@ -317,18 +327,85 @@ CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
// expression it is associated with rather than emitting it inside of it. So
// it has custom emit logic.
mlir::Location start = getLoc(s.getSourceRange().getBegin());
+ mlir::Location end = getLoc(s.getSourceRange().getEnd());
OpenACCAtomicConstruct::StmtInfo inf = s.getAssociatedStmtInfo();
- // Atomic 'read' only permits 'v = x', where v and x are both scalar L values.
- // The getAssociatedStmtInfo strips off implicit casts, which includes
- // implicit conversions and L-to-R-Value conversions, so we can just emit it
- // as an L value. The Flang implementation has no problem with different
- // types, so it appears that the dialect can handle the conversions.
- mlir::Value v = emitLValue(inf.V).getPointer();
- mlir::Value x = emitLValue(inf.X).getPointer();
- mlir::Type resTy = convertType(inf.V->getType());
- auto op = mlir::acc::AtomicReadOp::create(builder, start, x, v, resTy,
- /*ifCond=*/{});
- emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
- s.clauses());
- return mlir::success();
+
+ switch (s.getAtomicKind()) {
+ case OpenACCAtomicKind::Capture:
+ llvm_unreachable("Unimplemented atomic construct type, should have "
+ "diagnosed/returned above");
+ return mlir::failure();
+ case OpenACCAtomicKind::Read: {
+
+ // Atomic 'read' only permits 'v = x', where v and x are both scalar L
+ // values. The getAssociatedStmtInfo strips off implicit casts, which
+ // includes implicit conversions and L-to-R-Value conversions, so we can
+ // just emit it as an L value. The Flang implementation has no problem with
+ // different types, so it appears that the dialect can handle the
+ // conversions.
+ mlir::Value v = emitLValue(inf.V).getPointer();
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ mlir::Type resTy = convertType(inf.V->getType());
+ auto op = mlir::acc::AtomicReadOp::create(builder, start, x, v, resTy,
+ /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ return mlir::success();
+ }
+ case OpenACCAtomicKind::Write: {
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ mlir::Value expr = emitAnyExpr(inf.RefExpr).getValue();
+ auto op = mlir::acc::AtomicWriteOp::create(builder, start, x, expr,
+ /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ return mlir::success();
+ }
+ case OpenACCAtomicKind::None:
+ case OpenACCAtomicKind::Update: {
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ auto op =
+ mlir::acc::AtomicUpdateOp::create(builder, start, x, /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ mlir::LogicalResult res = mlir::success();
+ {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+ mlir::Type argTy = cast<cir::PointerType>(x.getType()).getPointee();
+ std::array<mlir::Type, 1> recipeType{argTy};
+ std::array<mlir::Location, 1> recipeLoc{start};
+ mlir::Block *recipeBlock = builder.createBlock(
+ &op.getRegion(), op.getRegion().end(), recipeType, recipeLoc);
+ builder.setInsertionPointToEnd(recipeBlock);
+
+ // Since we have an initial value that we know is a scalar type, we can
+ // just emit the entire statement here after sneaking in our 'alloca' in
+ // the right place, then loading out of it. Flang does a lot less work
+ // (probably does its own emitting!), but we have more complicated AST
+ // nodes to worry about, so we can just count on opt to remove the extra
+ // alloca/load/store set.
+ auto alloca = cir::AllocaOp::create(
+ builder, start, x.getType(), argTy, "x_var",
+ cgm.getSize(getContext().getTypeAlignInChars(inf.X->getType())));
+
+ alloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
+ builder.CIRBaseBuilderTy::createStore(start, recipeBlock->getArgument(0),
+ alloca);
+
+ const VarDecl *xval = getLValueDecl(inf.X);
+ CIRGenFunction::DeclMapRevertingRAII declMapRAII{*this, xval};
+ replaceAddrOfLocalVar(
+ xval, Address{alloca, argTy, getContext().getDeclAlign(xval)});
+
+ res = emitStmt(s.getAssociatedStmt(), /*useCurrentScope=*/true);
+
+ auto load = cir::LoadOp::create(builder, start, {alloca});
+ mlir::acc::YieldOp::create(builder, end, {load});
+ }
+
+ return res;
+ }
+ }
+
+ llvm_unreachable("unknown OpenACC atomic kind");
}
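Not part of the patch: the OpenACC atomic forms the rewritten emitOpenACCAtomicConstruct now covers (capture is still diagnosed), shown with made-up scalars.

void atomics(int x, int v, int e) {
#pragma acc atomic read
  v = x;        // lowers to acc.atomic.read

#pragma acc atomic write
  x = e;        // lowers to acc.atomic.write

#pragma acc atomic update
  x = x + e;    // lowers to acc.atomic.update; a bare '#pragma acc atomic'
                // (no clause) is treated as an update as well
}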