Diffstat (limited to 'clang/lib/CIR/CodeGen')
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenClass.cpp          |  42
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp  |   4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp        | 215
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp     |  11
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h         |   5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp         |  13
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp  | 112
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h    |  32
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp           |   1
9 files changed, 393 insertions, 42 deletions
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index cb8fe6c..9d12a13 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -951,28 +951,37 @@ Address CIRGenFunction::getAddressOfBaseClass(
bool nullCheckValue, SourceLocation loc) {
assert(!path.empty() && "Base path should not be empty!");
+ CastExpr::path_const_iterator start = path.begin();
+ const CXXRecordDecl *vBase = nullptr;
+
if ((*path.begin())->isVirtual()) {
- // The implementation here is actually complete, but let's flag this
- // as an error until the rest of the virtual base class support is in place.
- cgm.errorNYI(loc, "getAddrOfBaseClass: virtual base");
- return Address::invalid();
+ vBase = (*start)->getType()->castAsCXXRecordDecl();
+ ++start;
}
// Compute the static offset of the ultimate destination within its
// allocating subobject (the virtual base, if there is one, or else
// the "complete" object that we see).
- CharUnits nonVirtualOffset =
- cgm.computeNonVirtualBaseClassOffset(derived, path);
+ CharUnits nonVirtualOffset = cgm.computeNonVirtualBaseClassOffset(
+ vBase ? vBase : derived, {start, path.end()});
+
+ // If there's a virtual step, we can sometimes "devirtualize" it.
+ // For now, that's limited to when the derived type is final.
+ // TODO: "devirtualize" this for accesses to known-complete objects.
+ if (vBase && derived->hasAttr<FinalAttr>()) {
+ const ASTRecordLayout &layout = getContext().getASTRecordLayout(derived);
+ CharUnits vBaseOffset = layout.getVBaseClassOffset(vBase);
+ nonVirtualOffset += vBaseOffset;
+ vBase = nullptr; // we no longer have a virtual step
+ }
// Get the base pointer type.
mlir::Type baseValueTy = convertType((path.end()[-1])->getType());
assert(!cir::MissingFeatures::addressSpace());
- // The if statement here is redundant now, but it will be needed when we add
- // support for virtual base classes.
// If there is no virtual base, use cir.base_class_addr. It takes care of
// the adjustment and the null pointer check.
- if (nonVirtualOffset.isZero()) {
+ if (nonVirtualOffset.isZero() && !vBase) {
assert(!cir::MissingFeatures::sanitizers());
return builder.createBaseClassAddr(getLoc(loc), value, baseValueTy, 0,
/*assumeNotNull=*/true);
@@ -980,10 +989,17 @@ Address CIRGenFunction::getAddressOfBaseClass(
assert(!cir::MissingFeatures::sanitizers());
- // Apply the offset
- value = builder.createBaseClassAddr(getLoc(loc), value, baseValueTy,
- nonVirtualOffset.getQuantity(),
- /*assumeNotNull=*/true);
+ // Compute the virtual offset.
+ mlir::Value virtualOffset = nullptr;
+ if (vBase) {
+ virtualOffset = cgm.getCXXABI().getVirtualBaseClassOffset(
+ getLoc(loc), *this, value, derived, vBase);
+ }
+
+ // Apply both offsets.
+ value = applyNonVirtualAndVirtualOffset(
+ getLoc(loc), *this, value, nonVirtualOffset, virtualOffset, derived,
+ vBase, baseValueTy, not nullCheckValue);
// Cast to the destination type.
value = value.withElementType(builder, baseValueTy);
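
For context, a minimal C++ sketch (an assumed example, not part of the patch) of the case the new code handles: a derived-to-base conversion whose path starts with a virtual base. When the derived class is final, the virtual step folds into the static non-virtual offset; otherwise getVirtualBaseClassOffset supplies the dynamic part through applyNonVirtualAndVirtualOffset.

struct VBase { int v; };
struct Mid : virtual VBase { int m; };
struct Last final : Mid { int l; };

VBase *devirt(Last *p) { return p; }  // Last is final: vbase offset is known statically
VBase *dynOff(Mid *p) { return p; }   // needs the virtual base offset from the vtable
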
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 4a8aac90..5596499 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -131,9 +131,7 @@ public:
std::string("AggExprEmitter::VisitStmt: ") +
s->getStmtClassName());
}
- void VisitParenExpr(ParenExpr *pe) {
- cgf.cgm.errorNYI(pe->getSourceRange(), "AggExprEmitter: VisitParenExpr");
- }
+ void VisitParenExpr(ParenExpr *pe) { Visit(pe->getSubExpr()); }
void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
cgf.cgm.errorNYI(ge->getSourceRange(),
"AggExprEmitter: VisitGenericSelectionExpr");
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 1f7e3dd..83208bf 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -210,6 +210,60 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorCall(
return emitCall(fnInfo, callee, returnValue, args, nullptr, loc);
}
+namespace {
+/// The parameters to pass to a usual operator delete.
+struct UsualDeleteParams {
+ TypeAwareAllocationMode typeAwareDelete = TypeAwareAllocationMode::No;
+ bool destroyingDelete = false;
+ bool size = false;
+ AlignedAllocationMode alignment = AlignedAllocationMode::No;
+};
+} // namespace
+
+// FIXME(cir): this should be shared with LLVM codegen
+static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *fd) {
+ UsualDeleteParams params;
+
+ const FunctionProtoType *fpt = fd->getType()->castAs<FunctionProtoType>();
+ auto ai = fpt->param_type_begin(), ae = fpt->param_type_end();
+
+ if (fd->isTypeAwareOperatorNewOrDelete()) {
+ params.typeAwareDelete = TypeAwareAllocationMode::Yes;
+ assert(ai != ae);
+ ++ai;
+ }
+
+ // The first argument after the type-identity parameter (if any) is
+ // always a void* (or C* for a destroying operator delete for class
+ // type C).
+ ++ai;
+
+ // The next parameter may be a std::destroying_delete_t.
+ if (fd->isDestroyingOperatorDelete()) {
+ params.destroyingDelete = true;
+ assert(ai != ae);
+ ++ai;
+ }
+
+ // Figure out what other parameters we should be implicitly passing.
+ if (ai != ae && (*ai)->isIntegerType()) {
+ params.size = true;
+ ++ai;
+ } else {
+ assert(!isTypeAwareAllocation(params.typeAwareDelete));
+ }
+
+ if (ai != ae && (*ai)->isAlignValT()) {
+ params.alignment = AlignedAllocationMode::Yes;
+ ++ai;
+ } else {
+ assert(!isTypeAwareAllocation(params.typeAwareDelete));
+ }
+
+ assert(ai == ae && "unexpected usual deallocation function parameter");
+ return params;
+}
+
static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
unsigned minElements,
mlir::Value &numElements,
@@ -332,6 +386,117 @@ static RValue emitNewDeleteCall(CIRGenFunction &cgf,
return rv;
}
+namespace {
+/// Calls the given 'operator delete' on a single object.
+struct CallObjectDelete final : EHScopeStack::Cleanup {
+ mlir::Value ptr;
+ const FunctionDecl *operatorDelete;
+ QualType elementType;
+
+ CallObjectDelete(mlir::Value ptr, const FunctionDecl *operatorDelete,
+ QualType elementType)
+ : ptr(ptr), operatorDelete(operatorDelete), elementType(elementType) {}
+
+ void emit(CIRGenFunction &cgf) override {
+ cgf.emitDeleteCall(operatorDelete, ptr, elementType);
+ }
+
+ // This is a placeholder until EHCleanupScope is implemented.
+ size_t getSize() const override {
+ assert(!cir::MissingFeatures::ehCleanupScope());
+ return sizeof(CallObjectDelete);
+ }
+};
+} // namespace
+
+/// Emit the code for deleting a single object.
+static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de,
+ Address ptr, QualType elementType) {
+ // C++11 [expr.delete]p3:
+ // If the static type of the object to be deleted is different from its
+ // dynamic type, the static type shall be a base class of the dynamic type
+ // of the object to be deleted and the static type shall have a virtual
+ // destructor or the behavior is undefined.
+ assert(!cir::MissingFeatures::emitTypeCheck());
+
+ const FunctionDecl *operatorDelete = de->getOperatorDelete();
+ assert(!operatorDelete->isDestroyingOperatorDelete());
+
+ // Find the destructor for the type, if applicable. If the
+ // destructor is virtual, we'll just emit the vcall and return.
+ const CXXDestructorDecl *dtor = nullptr;
+ if (const auto *rd = elementType->getAsCXXRecordDecl()) {
+ if (rd->hasDefinition() && !rd->hasTrivialDestructor()) {
+ dtor = rd->getDestructor();
+
+ if (dtor->isVirtual()) {
+ cgf.cgm.errorNYI(de->getSourceRange(),
+ "emitObjectDelete: virtual destructor");
+ }
+ }
+ }
+
+ // Make sure that we call delete even if the dtor throws.
+ // This doesn't have to be a conditional cleanup because we're going
+ // to pop it off in a second.
+ cgf.ehStack.pushCleanup<CallObjectDelete>(
+ NormalAndEHCleanup, ptr.getPointer(), operatorDelete, elementType);
+
+ if (dtor) {
+ cgf.emitCXXDestructorCall(dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/false, ptr, elementType);
+ } else if (elementType.getObjCLifetime()) {
+ assert(!cir::MissingFeatures::objCLifetime());
+ cgf.cgm.errorNYI(de->getSourceRange(), "emitObjectDelete: ObjCLifetime");
+ }
+
+ // In traditional LLVM codegen, null checks are emitted to save a delete call.
+ // In CIR we optimize for size by default, so the null check should be added
+ // by this function's callers.
+ assert(!cir::MissingFeatures::emitNullCheckForDeleteCalls());
+
+ cgf.popCleanupBlock();
+}
+
+void CIRGenFunction::emitCXXDeleteExpr(const CXXDeleteExpr *e) {
+ const Expr *arg = e->getArgument();
+ Address ptr = emitPointerWithAlignment(arg);
+
+ // Null check the pointer.
+ //
+ // We could avoid this null check if we can determine that the object
+ // destruction is trivial and doesn't require an array cookie; we can
+ // unconditionally perform the operator delete call in that case. For now, we
+ // assume that deleted pointers are null rarely enough that it's better to
+ // keep the branch. This might be worth revisiting for a -O0 code size win.
+ //
+ // CIR note: emit the code-size-friendly form by default for now, as mentioned
+ // in `emitObjectDelete`.
+ assert(!cir::MissingFeatures::emitNullCheckForDeleteCalls());
+ QualType deleteTy = e->getDestroyedType();
+
+ // A destroying operator delete overrides the entire operation of the
+ // delete expression.
+ if (e->getOperatorDelete()->isDestroyingOperatorDelete()) {
+ cgm.errorNYI(e->getSourceRange(),
+ "emitCXXDeleteExpr: destroying operator delete");
+ return;
+ }
+
+ // We might be deleting a pointer to array.
+ deleteTy = getContext().getBaseElementType(deleteTy);
+ ptr = ptr.withElementType(builder, convertTypeForMem(deleteTy));
+
+ if (e->isArrayForm()) {
+ assert(!cir::MissingFeatures::deleteArray());
+ cgm.errorNYI(e->getSourceRange(), "emitCXXDeleteExpr: array delete");
+ return;
+ } else {
+ emitObjectDelete(*this, e, ptr, deleteTy);
+ }
+}
+
mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
// The element type being allocated.
QualType allocType = getContext().getBaseElementType(e->getAllocatedType());
@@ -443,3 +608,53 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
allocSizeWithoutCookie);
return result.getPointer();
}
+
+void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,
+ mlir::Value ptr, QualType deleteTy) {
+ assert(!cir::MissingFeatures::deleteArray());
+
+ const auto *deleteFTy = deleteFD->getType()->castAs<FunctionProtoType>();
+ CallArgList deleteArgs;
+
+ UsualDeleteParams params = getUsualDeleteParams(deleteFD);
+ auto paramTypeIt = deleteFTy->param_type_begin();
+
+ // Pass std::type_identity tag if present
+ if (isTypeAwareAllocation(params.typeAwareDelete))
+ cgm.errorNYI(deleteFD->getSourceRange(),
+ "emitDeleteCall: type aware delete");
+
+ // Pass the pointer itself.
+ QualType argTy = *paramTypeIt++;
+ mlir::Value deletePtr =
+ builder.createBitcast(ptr.getLoc(), ptr, convertType(argTy));
+ deleteArgs.add(RValue::get(deletePtr), argTy);
+
+ // Pass the std::destroying_delete tag if present.
+ if (params.destroyingDelete)
+ cgm.errorNYI(deleteFD->getSourceRange(),
+ "emitDeleteCall: destroying delete");
+
+ // Pass the size if the delete function has a size_t parameter.
+ if (params.size) {
+ QualType sizeType = *paramTypeIt++;
+ CharUnits deleteTypeSize = getContext().getTypeSizeInChars(deleteTy);
+ assert(mlir::isa<cir::IntType>(convertType(sizeType)) &&
+ "expected cir::IntType");
+ cir::ConstantOp size = builder.getConstInt(
+ *currSrcLoc, convertType(sizeType), deleteTypeSize.getQuantity());
+
+ deleteArgs.add(RValue::get(size), sizeType);
+ }
+
+ // Pass the alignment if the delete function has an align_val_t parameter.
+ if (isAlignedAllocation(params.alignment))
+ cgm.errorNYI(deleteFD->getSourceRange(),
+ "emitDeleteCall: aligned allocation");
+
+ assert(paramTypeIt == deleteFTy->param_type_end() &&
+ "unknown parameter to usual delete function");
+
+ // Emit the call to delete.
+ emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs);
+}
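
A self-contained assumed example exercising the new delete-expression path: a class with a non-virtual destructor and a sized "usual" operator delete, so getUsualDeleteParams reports size = true and emitDeleteCall passes sizeof(Widget) as the second argument (aligned, destroying, and type-aware forms still hit errorNYI):

#include <cstddef>
#include <cstdlib>

struct Widget {
  int data[4];
  ~Widget() {}
  static void *operator new(std::size_t n) { return std::malloc(n); }
  // Sized usual deallocation function; codegen supplies the size argument.
  static void operator delete(void *p, std::size_t /*size*/) { std::free(p); }
};

void destroy(Widget *w) {
  delete w;  // emitCXXDeleteExpr -> emitObjectDelete -> emitDeleteCall
}
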
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index bd09d78..f4bbced 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -676,6 +676,10 @@ public:
mlir::Value VisitRealImag(const UnaryOperator *e,
QualType promotionType = QualType());
+ mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
+ return Visit(e->getSubExpr());
+ }
+
mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
return Visit(die->getExpr());
@@ -687,6 +691,10 @@ public:
mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
return cgf.emitCXXNewExpr(e);
}
+ mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) {
+ cgf.emitCXXDeleteExpr(e);
+ return {};
+ }
mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
cgf.emitCXXThrowExpr(e);
@@ -1274,9 +1282,6 @@ mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
} else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
switch (uo->getOpcode()) {
case UO_Imag:
- cgf.cgm.errorNYI(e->getSourceRange(),
- "ScalarExprEmitter::emitPromoted unary imag");
- return {};
case UO_Real:
return VisitRealImag(uo, promotionType);
case UO_Minus:
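
Assumed examples of the expression forms newly handled by the scalar emitter: GNU __extension__ (forwarded to its operand), __imag__ sharing VisitRealImag with __real__ on the promoted path, and a delete-expression in statement position (emits the call, produces no value):

void scalars(int *p, _Complex float c) {
  int x = __extension__ (42);  // VisitUnaryExtension just visits the operand
  float im = __imag__ c;       // UO_Imag now takes the same path as UO_Real
  delete p;                    // VisitCXXDeleteExpr emits the call and returns nothing
  (void)x; (void)im;
}
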
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 166435f..ef07db3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1197,6 +1197,8 @@ public:
bool delegating, Address thisAddr,
CallArgList &args, clang::SourceLocation loc);
+ void emitCXXDeleteExpr(const CXXDeleteExpr *e);
+
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type,
bool forVirtualBase, bool delegating,
Address thisAddr, QualType thisTy);
@@ -1244,6 +1246,9 @@ public:
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor,
const FunctionArgList &args);
+ void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
+ QualType deleteTy);
+
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
/// Emit an expression as an initializer for an object (variable, field, etc.)
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index eef23a0..c977ff9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -119,6 +119,19 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
cir::OptInfoAttr::get(&mlirContext,
cgo.OptimizationLevel,
cgo.OptimizeSize));
+ // Set the module name to be the name of the main file. TranslationUnitDecl
+ // often contains invalid source locations and isn't a reliable source for the
+ // module location.
+ FileID mainFileId = astContext.getSourceManager().getMainFileID();
+ const FileEntry &mainFile =
+ *astContext.getSourceManager().getFileEntryForID(mainFileId);
+ StringRef path = mainFile.tryGetRealPathName();
+ if (!path.empty()) {
+ theModule.setSymName(path);
+ theModule->setLoc(mlir::FileLineColLoc::get(&mlirContext, path,
+ /*line=*/0,
+ /*column=*/0));
+ }
}
CIRGenModule::~CIRGenModule() = default;
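
Roughly, the effect is that the emitted top-level module now carries the main file's path as its symbol name and location instead of an unknown location. An assumed sketch of the printed form (the path and attribute contents are placeholders; locations only show with -mlir-print-debuginfo):

module @"/tmp/example.cpp" {
  ...
} loc("/tmp/example.cpp":0:0)
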
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index a4c2641..e41c2d85 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -10,6 +10,8 @@
//
//===----------------------------------------------------------------------===//
+#include <numeric>
+
#include "CIRGenOpenACCRecipe.h"
namespace clang::CIRGen {
@@ -35,6 +37,110 @@ mlir::Block *OpenACCRecipeBuilderBase::createRecipeBlock(mlir::Region &region,
return builder.createBlock(&region, region.end(), types, locs);
}
+mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca(
+ mlir::Block *block, SourceRange exprRange, mlir::Location loc,
+ std::string_view allocaName, size_t numBounds,
+ llvm::ArrayRef<QualType> boundTypes) {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+
+ // Get the range of bounds arguments, which are all but the 1st arg.
+ llvm::ArrayRef<mlir::BlockArgument> boundsRange =
+ block->getArguments().drop_front(1);
+
+ // boundTypes contains the type before and after applying each bound, so it
+ // ends up having 1 extra entry. Assert this is the case to ensure we don't
+ // call this in the wrong 'block'.
+ assert(boundsRange.size() + 1 == boundTypes.size());
+
+ mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy);
+ auto idxType = mlir::IndexType::get(&cgf.getMLIRContext());
+
+ auto getUpperBound = [&](mlir::Value bound) {
+ auto upperBoundVal =
+ mlir::acc::GetUpperboundOp::create(builder, loc, idxType, bound);
+ return mlir::UnrealizedConversionCastOp::create(builder, loc, itrTy,
+ upperBoundVal.getResult())
+ .getResult(0);
+ };
+
+ auto isArrayTy = [&](QualType ty) {
+ if (ty->isArrayType() && !ty->isConstantArrayType())
+ cgf.cgm.errorNYI(exprRange, "OpenACC recipe init for VLAs");
+ return ty->isConstantArrayType();
+ };
+
+ mlir::Type topLevelTy = cgf.convertType(boundTypes.back());
+ cir::PointerType topLevelTyPtr = builder.getPointerTo(topLevelTy);
+ // Do an alloca for the 'top' level type without bounds.
+ mlir::Value initialAlloca = builder.createAlloca(
+ loc, topLevelTyPtr, topLevelTy, allocaName,
+ cgf.getContext().getTypeAlignInChars(boundTypes.back()));
+
+ bool lastBoundWasArray = isArrayTy(boundTypes.back());
+
+ // Since we're iterating the types in reverse, this sets up each index
+ // corresponding to boundsRange to be the type 'after application of the
+ // bound'.
+ llvm::ArrayRef<QualType> boundResults = boundTypes.drop_back(1);
+
+ // Collect the 'do we have any allocas needed after this type' list.
+ llvm::SmallVector<bool> allocasLeftArr;
+ llvm::ArrayRef<QualType> resultTypes = boundTypes.drop_front();
+ std::transform_inclusive_scan(
+ resultTypes.begin(), resultTypes.end(),
+ std::back_inserter(allocasLeftArr), std::plus<bool>{},
+ [](QualType ty) { return !ty->isConstantArrayType(); });
+
+ // Keep track of the number of 'elements' that we're allocating. Individual
+ // allocas should multiply this by the size of their current allocation.
+ mlir::Value cumulativeElts;
+ for (auto [bound, resultType, allocasLeft] : llvm::reverse(
+ llvm::zip_equal(boundsRange, boundResults, allocasLeftArr))) {
+
+ // if there is no further 'alloca' operation we need to do, we can skip
+ // creating the UB/multiplications/etc.
+ if (!allocasLeft)
+ break;
+
+ // First: figure out the number of elements in the current 'bound' list.
+ mlir::Value eltsPerSubArray = getUpperBound(bound);
+ mlir::Value eltsToAlloca;
+
+ // If we are in a sub-bounds, the total number of elements to alloca is
+ // the product of that count and the current 'bounds' size. That is, for
+ // arr[5][5] we would need 25 elements, not just 5. Otherwise it is just
+ // the current number of elements.
+ if (cumulativeElts)
+ eltsToAlloca = builder.createMul(loc, eltsPerSubArray, cumulativeElts);
+ else
+ eltsToAlloca = eltsPerSubArray;
+
+ if (!lastBoundWasArray) {
+ // If we have to do an allocation, figure out the size of the
+ // allocation. alloca takes the number of bytes, not elements.
+ TypeInfoChars eltInfo = cgf.getContext().getTypeInfoInChars(resultType);
+ cir::ConstantOp eltSize = builder.getConstInt(
+ loc, itrTy, eltInfo.Width.alignTo(eltInfo.Align).getQuantity());
+ mlir::Value curSize = builder.createMul(loc, eltsToAlloca, eltSize);
+
+ mlir::Type eltTy = cgf.convertType(resultType);
+ cir::PointerType ptrTy = builder.getPointerTo(eltTy);
+ builder.createAlloca(loc, ptrTy, eltTy, "openacc.init.bounds",
+ cgf.getContext().getTypeAlignInChars(resultType),
+ curSize);
+
+ // TODO: OpenACC : At this point we should be copying the addresses of
+ // each element of this to the last allocation. At the moment, that is
+ // not yet implemented.
+ cgf.cgm.errorNYI(exprRange, "OpenACC recipe alloca copying");
+ }
+
+ cumulativeElts = eltsToAlloca;
+ lastBoundWasArray = isArrayTy(resultType);
+ }
+ return initialAlloca;
+}
+
mlir::Value
OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
mlir::Value bound,
@@ -258,7 +364,11 @@ void OpenACCRecipeBuilderBase::createPrivateInitRecipe(
cgf.emitAutoVarAlloca(*allocaDecl, builder.saveInsertionPoint());
cgf.emitAutoVarInit(tempDeclEmission);
} else {
- cgf.cgm.errorNYI(exprRange, "private-init with bounds");
+ makeBoundsAlloca(block, exprRange, loc, "openacc.private.init", numBounds,
+ boundTypes);
+
+ if (initExpr)
+ cgf.cgm.errorNYI(exprRange, "private-init with bounds initialization");
}
mlir::acc::YieldOp::create(builder, locEnd);
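
An assumed OpenACC example that takes the new makeBoundsAlloca path: a private clause with an array-section bound on a pointer, so the init recipe must size the private allocation from the bounds rather than from the declared type (initialization of the privatized section is still NYI):

void fill(int *buf, int n) {
#pragma acc parallel loop private(buf[0:n])
  for (int i = 0; i < n; ++i)
    buf[i] = i;  // each gang works on its own private copy of buf[0:n]
}
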
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
index 978c671..acd187b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
@@ -24,6 +24,13 @@
namespace clang::CIRGen {
class OpenACCRecipeBuilderBase {
+ // This function generates the required alloca, similar to
+ // 'emitAutoVarAlloca', except for the OpenACC array/pointer types.
+ mlir::Value makeBoundsAlloca(mlir::Block *block, SourceRange exprRange,
+ mlir::Location loc, std::string_view allocaName,
+ size_t numBounds,
+ llvm::ArrayRef<QualType> boundTypes);
+
protected:
CIRGen::CIRGenFunction &cgf;
CIRGen::CIRGenBuilderTy &builder;
@@ -165,28 +172,9 @@ class OpenACCRecipeBuilder : OpenACCRecipeBuilderBase {
cgf.emitAutoVarAlloca(*varRecipe, builder.saveInsertionPoint());
// 'firstprivate' doesn't do its initialization in the 'init' section,
- // instead does it in the 'copy' section. SO only do init here.
- // 'reduction' appears to use it too (rather than a 'copy' section), so
- // we probably have to do it here too, but we can do that when we get to
- // reduction implementation.
- if constexpr (std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
- // We are OK with no init for builtins, arrays of builtins, or pointers,
- // else we should NYI so we know to go look for these.
- if (cgf.getContext().getLangOpts().CPlusPlus &&
- !varRecipe->getType()
- ->getPointeeOrArrayElementType()
- ->isBuiltinType() &&
- !varRecipe->getType()->isPointerType() && !varRecipe->getInit()) {
- // If we don't have any initialization recipe, we failed during Sema to
- // initialize this correctly. If we disable the
- // Sema::TentativeAnalysisScopes in SemaOpenACC::CreateInitRecipe, it'll
- // emit an error to tell us. However, emitting those errors during
- // production is a violation of the standard, so we cannot do them.
- cgf.cgm.errorNYI(exprRange, "private default-init recipe");
- }
- cgf.emitAutoVarInit(tempDeclEmission);
- } else if constexpr (std::is_same_v<RecipeTy,
- mlir::acc::ReductionRecipeOp>) {
+ // instead it does it in the 'copy' section. So, only do 'init' here for
+ // reduction.
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
// Unlike Private, the recipe here is always required as it has to do
// init, not just 'default' init.
if (!varRecipe->getInit())
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index e842892..644c383 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -216,6 +216,7 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
case Stmt::OMPSimdDirectiveClass:
case Stmt::OMPTileDirectiveClass:
case Stmt::OMPUnrollDirectiveClass:
+ case Stmt::OMPFuseDirectiveClass:
case Stmt::OMPForDirectiveClass:
case Stmt::OMPForSimdDirectiveClass:
case Stmt::OMPSectionsDirectiveClass: