aboutsummaryrefslogtreecommitdiff
path: root/flang/lib
diff options
context:
space:
mode:
Diffstat (limited to 'flang/lib')
-rw-r--r--flang/lib/Lower/Bridge.cpp70
-rw-r--r--flang/lib/Lower/OpenACC.cpp380
-rw-r--r--flang/lib/Lower/OpenMP/OpenMP.cpp1
-rw-r--r--flang/lib/Optimizer/CodeGen/CodeGen.cpp2
-rw-r--r--flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp225
5 files changed, 574 insertions, 104 deletions
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index 780d56f..50a687c 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -643,6 +643,8 @@ public:
return localSymbols.lookupStorage(sym);
}
+ Fortran::lower::SymMap &getSymbolMap() override final { return localSymbols; }
+
void
overrideExprValues(const Fortran::lower::ExprToValueMap *map) override final {
exprValueOverrides = map;
@@ -3190,15 +3192,20 @@ private:
std::get_if<Fortran::parser::OpenACCCombinedConstruct>(&acc.u);
Fortran::lower::pft::Evaluation *curEval = &getEval();
+ // Determine collapse depth/force and loopCount
+ bool collapseForce = false;
+ uint64_t collapseDepth = 1;
+ uint64_t loopCount = 1;
if (accLoop || accCombined) {
- uint64_t loopCount;
if (accLoop) {
const Fortran::parser::AccBeginLoopDirective &beginLoopDir =
std::get<Fortran::parser::AccBeginLoopDirective>(accLoop->t);
const Fortran::parser::AccClauseList &clauseList =
std::get<Fortran::parser::AccClauseList>(beginLoopDir.t);
loopCount = Fortran::lower::getLoopCountForCollapseAndTile(clauseList);
+ std::tie(collapseDepth, collapseForce) =
+ Fortran::lower::getCollapseSizeAndForce(clauseList);
} else if (accCombined) {
const Fortran::parser::AccBeginCombinedDirective &beginCombinedDir =
std::get<Fortran::parser::AccBeginCombinedDirective>(
@@ -3206,6 +3213,8 @@ private:
const Fortran::parser::AccClauseList &clauseList =
std::get<Fortran::parser::AccClauseList>(beginCombinedDir.t);
loopCount = Fortran::lower::getLoopCountForCollapseAndTile(clauseList);
+ std::tie(collapseDepth, collapseForce) =
+ Fortran::lower::getCollapseSizeAndForce(clauseList);
}
if (curEval->lowerAsStructured()) {
@@ -3215,8 +3224,63 @@ private:
}
}
- for (Fortran::lower::pft::Evaluation &e : curEval->getNestedEvaluations())
- genFIR(e);
+ const bool isStructured = curEval && curEval->lowerAsStructured();
+ if (isStructured && collapseForce && collapseDepth > 1) {
+ // force: collect prologue/epilogue for the first collapseDepth nested
+ // loops and sink them into the innermost loop body at that depth
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> prologue, epilogue;
+ Fortran::lower::pft::Evaluation *parent = &getEval();
+ Fortran::lower::pft::Evaluation *innermostLoopEval = nullptr;
+ for (uint64_t lvl = 0; lvl + 1 < collapseDepth; ++lvl) {
+ epilogue.clear();
+ auto &kids = parent->getNestedEvaluations();
+ // Collect all non-loop statements before the next inner loop as
+ // prologue, then mark remaining siblings as epilogue and descend into
+ // the inner loop.
+ Fortran::lower::pft::Evaluation *childLoop = nullptr;
+ for (auto it = kids.begin(); it != kids.end(); ++it) {
+ if (it->getIf<Fortran::parser::DoConstruct>()) {
+ childLoop = &*it;
+ for (auto it2 = std::next(it); it2 != kids.end(); ++it2)
+ epilogue.push_back(&*it2);
+ break;
+ }
+ prologue.push_back(&*it);
+ }
+ // Semantics guarantees collapseDepth does not exceed nest depth
+ // so childLoop must be found here.
+ assert(childLoop && "Expected inner DoConstruct for collapse");
+ parent = childLoop;
+ innermostLoopEval = childLoop;
+ }
+
+ // Track sunk evaluations (avoid double-lowering)
+ llvm::SmallPtrSet<const Fortran::lower::pft::Evaluation *, 16> sunk;
+ for (auto *e : prologue)
+ sunk.insert(e);
+ for (auto *e : epilogue)
+ sunk.insert(e);
+
+ auto sink =
+ [&](llvm::SmallVector<Fortran::lower::pft::Evaluation *> &lst) {
+ for (auto *e : lst)
+ genFIR(*e);
+ };
+
+ sink(prologue);
+
+ // Lower innermost loop body, skipping sunk
+ for (Fortran::lower::pft::Evaluation &e :
+ innermostLoopEval->getNestedEvaluations())
+ if (!sunk.contains(&e))
+ genFIR(e);
+
+ sink(epilogue);
+ } else {
+ // Normal lowering
+ for (Fortran::lower::pft::Evaluation &e : curEval->getNestedEvaluations())
+ genFIR(e);
+ }
localSymbols.popScope();
builder->restoreInsertionPoint(insertPt);
diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp
index 4a9e494..62e5c0c 100644
--- a/flang/lib/Lower/OpenACC.cpp
+++ b/flang/lib/Lower/OpenACC.cpp
@@ -20,6 +20,7 @@
#include "flang/Lower/PFTBuilder.h"
#include "flang/Lower/StatementContext.h"
#include "flang/Lower/Support/Utils.h"
+#include "flang/Lower/SymbolMap.h"
#include "flang/Optimizer/Builder/BoxValue.h"
#include "flang/Optimizer/Builder/Complex.h"
#include "flang/Optimizer/Builder/FIRBuilder.h"
@@ -33,6 +34,7 @@
#include "flang/Semantics/scope.h"
#include "flang/Semantics/tools.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
+#include "mlir/IR/IRMapping.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/STLExtras.h"
@@ -60,6 +62,16 @@ static llvm::cl::opt<bool> lowerDoLoopToAccLoop(
llvm::cl::desc("Whether to lower do loops as `acc.loop` operations."),
llvm::cl::init(true));
+static llvm::cl::opt<bool> enableSymbolRemapping(
+ "openacc-remap-symbols",
+ llvm::cl::desc("Whether to remap symbols that appears in data clauses."),
+ llvm::cl::init(true));
+
+static llvm::cl::opt<bool> enableDevicePtrRemap(
+ "openacc-remap-device-ptr-symbols",
+ llvm::cl::desc("sub-option of openacc-remap-symbols for deviceptr clause"),
+ llvm::cl::init(false));
+
// Special value for * passed in device_type or gang clauses.
static constexpr std::int64_t starCst = -1;
@@ -624,17 +636,19 @@ void genAtomicCapture(Fortran::lower::AbstractConverter &converter,
}
template <typename Op>
-static void
-genDataOperandOperations(const Fortran::parser::AccObjectList &objectList,
- Fortran::lower::AbstractConverter &converter,
- Fortran::semantics::SemanticsContext &semanticsContext,
- Fortran::lower::StatementContext &stmtCtx,
- llvm::SmallVectorImpl<mlir::Value> &dataOperands,
- mlir::acc::DataClause dataClause, bool structured,
- bool implicit, llvm::ArrayRef<mlir::Value> async,
- llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
- llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes,
- bool setDeclareAttr = false) {
+static void genDataOperandOperations(
+ const Fortran::parser::AccObjectList &objectList,
+ Fortran::lower::AbstractConverter &converter,
+ Fortran::semantics::SemanticsContext &semanticsContext,
+ Fortran::lower::StatementContext &stmtCtx,
+ llvm::SmallVectorImpl<mlir::Value> &dataOperands,
+ mlir::acc::DataClause dataClause, bool structured, bool implicit,
+ llvm::ArrayRef<mlir::Value> async,
+ llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
+ llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes,
+ bool setDeclareAttr = false,
+ llvm::SmallVectorImpl<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ *symbolPairs = nullptr) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
Fortran::evaluate::ExpressionAnalyzer ea{semanticsContext};
const bool unwrapBoxAddr = true;
@@ -655,6 +669,9 @@ genDataOperandOperations(const Fortran::parser::AccObjectList &objectList,
/*strideIncludeLowerExtent=*/strideIncludeLowerExtent);
LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
+ bool isWholeSymbol =
+ !designator || Fortran::evaluate::UnwrapWholeSymbolDataRef(*designator);
+
// If the input value is optional and is not a descriptor, we use the
// rawInput directly.
mlir::Value baseAddr = ((fir::unwrapRefType(info.addr.getType()) !=
@@ -668,6 +685,11 @@ genDataOperandOperations(const Fortran::parser::AccObjectList &objectList,
asyncOnlyDeviceTypes, unwrapBoxAddr, info.isPresent);
dataOperands.push_back(op.getAccVar());
+ // Track the symbol and its corresponding mlir::Value if requested
+ if (symbolPairs && isWholeSymbol)
+ symbolPairs->emplace_back(op.getAccVar(),
+ Fortran::semantics::SymbolRef(symbol));
+
// For UseDeviceOp, if operand is one of a pair resulting from a
// declare operation, create a UseDeviceOp for the other operand as well.
if constexpr (std::is_same_v<Op, mlir::acc::UseDeviceOp>) {
@@ -681,6 +703,8 @@ genDataOperandOperations(const Fortran::parser::AccObjectList &objectList,
asyncDeviceTypes, asyncOnlyDeviceTypes,
unwrapBoxAddr, info.isPresent);
dataOperands.push_back(op.getAccVar());
+          // Not adding this to symbolPairs because it only makes sense to
+          // map the symbol to a single value.
}
}
}
@@ -1264,7 +1288,9 @@ static void genPrivatizationRecipes(
llvm::SmallVector<mlir::Attribute> &privatizationRecipes,
llvm::ArrayRef<mlir::Value> async,
llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
- llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes) {
+ llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes,
+ llvm::SmallVectorImpl<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ *symbolPairs = nullptr) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
Fortran::evaluate::ExpressionAnalyzer ea{semanticsContext};
for (const auto &accObject : objectList.v) {
@@ -1284,6 +1310,9 @@ static void genPrivatizationRecipes(
/*strideIncludeLowerExtent=*/strideIncludeLowerExtent);
LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
+ bool isWholeSymbol =
+ !designator || Fortran::evaluate::UnwrapWholeSymbolDataRef(*designator);
+
RecipeOp recipe;
mlir::Type retTy = getTypeFromBounds(bounds, info.addr.getType());
if constexpr (std::is_same_v<RecipeOp, mlir::acc::PrivateRecipeOp>) {
@@ -1297,6 +1326,11 @@ static void genPrivatizationRecipes(
/*implicit=*/false, mlir::acc::DataClause::acc_private, retTy, async,
asyncDeviceTypes, asyncOnlyDeviceTypes, /*unwrapBoxAddr=*/true);
dataOperands.push_back(op.getAccVar());
+
+ // Track the symbol and its corresponding mlir::Value if requested
+ if (symbolPairs && isWholeSymbol)
+ symbolPairs->emplace_back(op.getAccVar(),
+ Fortran::semantics::SymbolRef(symbol));
} else {
std::string suffix =
areAllBoundConstant(bounds) ? getBoundsString(bounds) : "";
@@ -1310,6 +1344,11 @@ static void genPrivatizationRecipes(
async, asyncDeviceTypes, asyncOnlyDeviceTypes,
/*unwrapBoxAddr=*/true);
dataOperands.push_back(op.getAccVar());
+
+ // Track the symbol and its corresponding mlir::Value if requested
+ if (symbolPairs && isWholeSymbol)
+ symbolPairs->emplace_back(op.getAccVar(),
+ Fortran::semantics::SymbolRef(symbol));
}
privatizationRecipes.push_back(mlir::SymbolRefAttr::get(
builder.getContext(), recipe.getSymName().str()));
@@ -1949,15 +1988,16 @@ mlir::Type getTypeFromIvTypeSize(fir::FirOpBuilder &builder,
return builder.getIntegerType(ivTypeSize * 8);
}
-static void
-privatizeIv(Fortran::lower::AbstractConverter &converter,
- const Fortran::semantics::Symbol &sym, mlir::Location loc,
- llvm::SmallVector<mlir::Type> &ivTypes,
- llvm::SmallVector<mlir::Location> &ivLocs,
- llvm::SmallVector<mlir::Value> &privateOperands,
- llvm::SmallVector<mlir::Value> &ivPrivate,
- llvm::SmallVector<mlir::Attribute> &privatizationRecipes,
- bool isDoConcurrent = false) {
+static void privatizeIv(
+ Fortran::lower::AbstractConverter &converter,
+ const Fortran::semantics::Symbol &sym, mlir::Location loc,
+ llvm::SmallVector<mlir::Type> &ivTypes,
+ llvm::SmallVector<mlir::Location> &ivLocs,
+ llvm::SmallVector<mlir::Value> &privateOperands,
+ llvm::SmallVector<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ &ivPrivate,
+ llvm::SmallVector<mlir::Attribute> &privatizationRecipes,
+ bool isDoConcurrent = false) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
mlir::Type ivTy = getTypeFromIvTypeSize(builder, sym);
@@ -2001,15 +2041,8 @@ privatizeIv(Fortran::lower::AbstractConverter &converter,
builder.getContext(), recipe.getSymName().str()));
}
- // Map the new private iv to its symbol for the scope of the loop. bindSymbol
- // might create a hlfir.declare op, if so, we map its result in order to
- // use the sym value in the scope.
- converter.bindSymbol(sym, mlir::acc::getAccVar(privateOp));
- auto privateValue = converter.getSymbolAddress(sym);
- if (auto declareOp =
- mlir::dyn_cast<hlfir::DeclareOp>(privateValue.getDefiningOp()))
- privateValue = declareOp.getResults()[0];
- ivPrivate.push_back(privateValue);
+ ivPrivate.emplace_back(mlir::acc::getAccVar(privateOp),
+ Fortran::semantics::SymbolRef(sym));
}
static void determineDefaultLoopParMode(
@@ -2088,7 +2121,8 @@ static void processDoLoopBounds(
llvm::SmallVector<mlir::Value> &upperbounds,
llvm::SmallVector<mlir::Value> &steps,
llvm::SmallVector<mlir::Value> &privateOperands,
- llvm::SmallVector<mlir::Value> &ivPrivate,
+ llvm::SmallVector<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ &ivPrivate,
llvm::SmallVector<mlir::Attribute> &privatizationRecipes,
llvm::SmallVector<mlir::Type> &ivTypes,
llvm::SmallVector<mlir::Location> &ivLocs,
@@ -2144,11 +2178,25 @@ static void processDoLoopBounds(
locs.push_back(converter.genLocation(
Fortran::parser::FindSourceLocation(outerDoConstruct)));
} else {
- auto *doCons = crtEval->getIf<Fortran::parser::DoConstruct>();
- assert(doCons && "expect do construct");
- loopControl = &*doCons->GetLoopControl();
+ // Safely locate the next inner DoConstruct within this eval.
+ const Fortran::parser::DoConstruct *innerDo = nullptr;
+ if (crtEval && crtEval->hasNestedEvaluations()) {
+ for (Fortran::lower::pft::Evaluation &child :
+ crtEval->getNestedEvaluations()) {
+ if (auto *stmt = child.getIf<Fortran::parser::DoConstruct>()) {
+ innerDo = stmt;
+ // Prepare to descend for the next iteration
+ crtEval = &child;
+ break;
+ }
+ }
+ }
+ if (!innerDo)
+ break; // No deeper loop; stop collecting collapsed bounds.
+
+ loopControl = &*innerDo->GetLoopControl();
locs.push_back(converter.genLocation(
- Fortran::parser::FindSourceLocation(*doCons)));
+ Fortran::parser::FindSourceLocation(*innerDo)));
}
const Fortran::parser::LoopControl::Bounds *bounds =
@@ -2172,32 +2220,127 @@ static void processDoLoopBounds(
inclusiveBounds.push_back(true);
- if (i < loopsToProcess - 1)
- crtEval = &*std::next(crtEval->getNestedEvaluations().begin());
+ // crtEval already updated when descending; no blind increment here.
}
}
}
-static mlir::acc::LoopOp
-buildACCLoopOp(Fortran::lower::AbstractConverter &converter,
- mlir::Location currentLocation,
- Fortran::semantics::SemanticsContext &semanticsContext,
- Fortran::lower::StatementContext &stmtCtx,
- const Fortran::parser::DoConstruct &outerDoConstruct,
- Fortran::lower::pft::Evaluation &eval,
- llvm::SmallVector<mlir::Value> &privateOperands,
- llvm::SmallVector<mlir::Attribute> &privatizationRecipes,
- llvm::SmallVector<mlir::Value> &gangOperands,
- llvm::SmallVector<mlir::Value> &workerNumOperands,
- llvm::SmallVector<mlir::Value> &vectorOperands,
- llvm::SmallVector<mlir::Value> &tileOperands,
- llvm::SmallVector<mlir::Value> &cacheOperands,
- llvm::SmallVector<mlir::Value> &reductionOperands,
- llvm::SmallVector<mlir::Type> &retTy, mlir::Value yieldValue,
- uint64_t loopsToProcess) {
+/// Remap symbols that appeared in OpenACC data clauses to use the results of
+/// the corresponding data operations. This allows isolating symbol accesses
+/// inside the OpenACC region from accesses in the host and other regions while
+/// preserving Fortran information about the symbols for optimizations.
+template <typename RegionOp>
+static void remapDataOperandSymbols(
+ Fortran::lower::AbstractConverter &converter, fir::FirOpBuilder &builder,
+ RegionOp &regionOp,
+ const llvm::SmallVector<
+ std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ &dataOperandSymbolPairs) {
+ if (!enableSymbolRemapping || dataOperandSymbolPairs.empty())
+ return;
+
+  // Map symbols that appeared inside data clauses to a new hlfir.declare whose
+  // input is the acc data operation result.
+ // This allows isolating all the symbol accesses inside the compute region
+ // from accesses in the host and other regions while preserving the Fortran
+ // information about the symbols for Fortran specific optimizations inside the
+ // region.
+ Fortran::lower::SymMap &symbolMap = converter.getSymbolMap();
+ mlir::OpBuilder::InsertionGuard insertGuard(builder);
+ builder.setInsertionPointToStart(&regionOp.getRegion().front());
+ llvm::SmallPtrSet<const Fortran::semantics::Symbol *, 8> seenSymbols;
+ mlir::IRMapping mapper;
+ for (auto [value, symbol] : dataOperandSymbolPairs) {
+
+  // If a symbol appears on several data clauses, just map it to the first
+  // result (all data operation results for a symbol point to the same
+  // memory, so it does not matter which one is used).
+ if (seenSymbols.contains(&symbol.get()))
+ continue;
+ seenSymbols.insert(&symbol.get());
+ std::optional<fir::FortranVariableOpInterface> hostDef =
+ symbolMap.lookupVariableDefinition(symbol);
+ assert(hostDef.has_value() && llvm::isa<hlfir::DeclareOp>(*hostDef) &&
+ "expected symbol to be mapped to hlfir.declare");
+ auto hostDeclare = llvm::cast<hlfir::DeclareOp>(*hostDef);
+ // Replace base input and DummyScope inputs.
+ mlir::Value hostInput = hostDeclare.getMemref();
+ mlir::Type hostType = hostInput.getType();
+ mlir::Type computeType = value.getType();
+ if (hostType == computeType) {
+ mapper.map(hostInput, value);
+ } else if (llvm::isa<fir::BaseBoxType>(computeType)) {
+ assert(!llvm::isa<fir::BaseBoxType>(hostType) &&
+ "box type mismatch between compute region variable and "
+ "hlfir.declare input unexpected");
+ if (Fortran::semantics::IsOptional(symbol))
+ TODO(regionOp.getLoc(),
+ "remapping OPTIONAL symbol in OpenACC compute region");
+ auto rawValue =
+ fir::BoxAddrOp::create(builder, regionOp.getLoc(), hostType, value);
+ mapper.map(hostInput, rawValue);
+ } else {
+ assert(!llvm::isa<fir::BaseBoxType>(hostType) &&
+ "compute region variable should not be raw address when host "
+ "hlfir.declare input was a box");
+ assert(fir::isBoxAddress(hostType) == fir::isBoxAddress(computeType) &&
+ "compute region variable should be a pointer/allocatable if and "
+ "only if host is");
+ assert(fir::isa_ref_type(hostType) && fir::isa_ref_type(computeType) &&
+ "compute region variable and host variable should both be raw "
+ "addresses");
+ mlir::Value cast =
+ builder.createConvert(regionOp.getLoc(), hostType, value);
+ mapper.map(hostInput, cast);
+ }
+ if (mlir::Value dummyScope = hostDeclare.getDummyScope()) {
+ // Copy the dummy scope into the region so that aliasing rules about
+ // Fortran dummies are understood inside the region and the abstract dummy
+ // scope type does not have to cross the OpenACC compute region boundary.
+ if (!mapper.contains(dummyScope)) {
+ mlir::Operation *hostDummyScopeOp = dummyScope.getDefiningOp();
+ assert(hostDummyScopeOp &&
+ "dummyScope defining operation must be visible in lowering");
+ (void)builder.clone(*hostDummyScopeOp, mapper);
+ }
+ }
+
+ mlir::Operation *computeDef =
+ builder.clone(*hostDeclare.getOperation(), mapper);
+
+ // The input box already went through an hlfir.declare. It has the correct
+ // local lower bounds and attribute. Do not generate a new fir.rebox.
+ if (llvm::isa<fir::BaseBoxType>(hostDeclare.getMemref().getType()))
+ llvm::cast<hlfir::DeclareOp>(*computeDef).setSkipRebox(true);
+
+ symbolMap.addVariableDefinition(
+ symbol, llvm::cast<fir::FortranVariableOpInterface>(computeDef));
+ }
+}
+
+static mlir::acc::LoopOp buildACCLoopOp(
+ Fortran::lower::AbstractConverter &converter,
+ mlir::Location currentLocation,
+ Fortran::semantics::SemanticsContext &semanticsContext,
+ Fortran::lower::StatementContext &stmtCtx,
+ const Fortran::parser::DoConstruct &outerDoConstruct,
+ Fortran::lower::pft::Evaluation &eval,
+ llvm::SmallVector<mlir::Value> &privateOperands,
+ llvm::SmallVector<mlir::Attribute> &privatizationRecipes,
+ llvm::SmallVector<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ &dataOperandSymbolPairs,
+ llvm::SmallVector<mlir::Value> &gangOperands,
+ llvm::SmallVector<mlir::Value> &workerNumOperands,
+ llvm::SmallVector<mlir::Value> &vectorOperands,
+ llvm::SmallVector<mlir::Value> &tileOperands,
+ llvm::SmallVector<mlir::Value> &cacheOperands,
+ llvm::SmallVector<mlir::Value> &reductionOperands,
+ llvm::SmallVector<mlir::Type> &retTy, mlir::Value yieldValue,
+ uint64_t loopsToProcess) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- llvm::SmallVector<mlir::Value> ivPrivate;
+ llvm::SmallVector<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ ivPrivate;
llvm::SmallVector<mlir::Type> ivTypes;
llvm::SmallVector<mlir::Location> ivLocs;
llvm::SmallVector<bool> inclusiveBounds;
@@ -2231,10 +2374,22 @@ buildACCLoopOp(Fortran::lower::AbstractConverter &converter,
builder, builder.getFusedLoc(locs), currentLocation, eval, operands,
operandSegments, /*outerCombined=*/false, retTy, yieldValue, ivTypes,
ivLocs);
-
- for (auto [arg, value] : llvm::zip(
- loopOp.getLoopRegions().front()->front().getArguments(), ivPrivate))
- fir::StoreOp::create(builder, currentLocation, arg, value);
+  // Ensure the iv symbol is mapped to private iv SSA value for the scope of
+  // the loop even if it did not appear explicitly in a PRIVATE clause (if it
+  // appeared explicitly in such a clause, that is also fine because duplicates
+  // in the list are ignored).
+ dataOperandSymbolPairs.append(ivPrivate.begin(), ivPrivate.end());
+ // Remap symbols from data clauses to use data operation results
+ remapDataOperandSymbols(converter, builder, loopOp, dataOperandSymbolPairs);
+
+ for (auto [arg, iv] :
+ llvm::zip(loopOp.getLoopRegions().front()->front().getArguments(),
+ ivPrivate)) {
+ // Store block argument to the related iv private variable.
+ mlir::Value privateValue =
+ converter.getSymbolAddress(std::get<Fortran::semantics::SymbolRef>(iv));
+ fir::StoreOp::create(builder, currentLocation, arg, privateValue);
+ }
loopOp.setInclusiveUpperbound(inclusiveBounds);
@@ -2260,6 +2415,10 @@ static mlir::acc::LoopOp createLoopOp(
llvm::SmallVector<int32_t> tileOperandsSegments, gangOperandsSegments;
llvm::SmallVector<int64_t> collapseValues;
+ // Vector to track mlir::Value results and their corresponding Fortran symbols
+ llvm::SmallVector<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ dataOperandSymbolPairs;
+
llvm::SmallVector<mlir::Attribute> gangArgTypes;
llvm::SmallVector<mlir::Attribute> seqDeviceTypes, independentDeviceTypes,
autoDeviceTypes, vectorOperandsDeviceTypes, workerNumOperandsDeviceTypes,
@@ -2380,7 +2539,8 @@ static mlir::acc::LoopOp createLoopOp(
genPrivatizationRecipes<mlir::acc::PrivateRecipeOp>(
privateClause->v, converter, semanticsContext, stmtCtx,
privateOperands, privatizationRecipes, /*async=*/{},
- /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
+ /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{},
+ &dataOperandSymbolPairs);
} else if (const auto *reductionClause =
std::get_if<Fortran::parser::AccClause::Reduction>(
&clause.u)) {
@@ -2406,10 +2566,6 @@ static mlir::acc::LoopOp createLoopOp(
std::get_if<Fortran::parser::AccClause::Collapse>(
&clause.u)) {
const Fortran::parser::AccCollapseArg &arg = collapseClause->v;
- const auto &force = std::get<bool>(arg.t);
- if (force)
- TODO(clauseLocation, "OpenACC collapse force modifier");
-
const auto &intExpr =
std::get<Fortran::parser::ScalarIntConstantExpr>(arg.t);
const auto *expr = Fortran::semantics::GetExpr(intExpr);
@@ -2436,9 +2592,9 @@ static mlir::acc::LoopOp createLoopOp(
Fortran::lower::getLoopCountForCollapseAndTile(accClauseList);
auto loopOp = buildACCLoopOp(
converter, currentLocation, semanticsContext, stmtCtx, outerDoConstruct,
- eval, privateOperands, privatizationRecipes, gangOperands,
- workerNumOperands, vectorOperands, tileOperands, cacheOperands,
- reductionOperands, retTy, yieldValue, loopsToProcess);
+ eval, privateOperands, privatizationRecipes, dataOperandSymbolPairs,
+ gangOperands, workerNumOperands, vectorOperands, tileOperands,
+ cacheOperands, reductionOperands, retTy, yieldValue, loopsToProcess);
if (!gangDeviceTypes.empty())
loopOp.setGangAttr(builder.getArrayAttr(gangDeviceTypes));
@@ -2568,7 +2724,9 @@ static void genDataOperandOperationsWithModifier(
llvm::ArrayRef<mlir::Value> async,
llvm::ArrayRef<mlir::Attribute> asyncDeviceTypes,
llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes,
- bool setDeclareAttr = false) {
+ bool setDeclareAttr = false,
+ llvm::SmallVectorImpl<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ *symbolPairs = nullptr) {
const Fortran::parser::AccObjectListWithModifier &listWithModifier = x->v;
const auto &accObjectList =
std::get<Fortran::parser::AccObjectList>(listWithModifier.t);
@@ -2581,7 +2739,7 @@ static void genDataOperandOperationsWithModifier(
stmtCtx, dataClauseOperands, dataClause,
/*structured=*/true, /*implicit=*/false, async,
asyncDeviceTypes, asyncOnlyDeviceTypes,
- setDeclareAttr);
+ setDeclareAttr, symbolPairs);
}
template <typename Op>
@@ -2612,6 +2770,10 @@ static Op createComputeOp(
llvm::SmallVector<mlir::Attribute> privatizationRecipes,
firstPrivatizationRecipes, reductionRecipes;
+ // Vector to track mlir::Value results and their corresponding Fortran symbols
+ llvm::SmallVector<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ dataOperandSymbolPairs;
+
// Self clause has optional values but can be present with
// no value as well. When there is no value, the op has an attribute to
// represent the clause.
@@ -2732,7 +2894,8 @@ static Op createComputeOp(
copyClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_copy,
/*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, /*setDeclareAttr=*/false,
+ &dataOperandSymbolPairs);
copyEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *copyinClause =
@@ -2744,7 +2907,8 @@ static Op createComputeOp(
Fortran::parser::AccDataModifier::Modifier::ReadOnly,
dataClauseOperands, mlir::acc::DataClause::acc_copyin,
mlir::acc::DataClause::acc_copyin_readonly, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, /*setDeclareAttr=*/false,
+ &dataOperandSymbolPairs);
copyinEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *copyoutClause =
@@ -2757,7 +2921,8 @@ static Op createComputeOp(
Fortran::parser::AccDataModifier::Modifier::ReadOnly,
dataClauseOperands, mlir::acc::DataClause::acc_copyout,
mlir::acc::DataClause::acc_copyout_zero, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, /*setDeclareAttr=*/false,
+ &dataOperandSymbolPairs);
copyoutEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *createClause =
@@ -2769,7 +2934,8 @@ static Op createComputeOp(
Fortran::parser::AccDataModifier::Modifier::Zero, dataClauseOperands,
mlir::acc::DataClause::acc_create,
mlir::acc::DataClause::acc_create_zero, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, /*setDeclareAttr=*/false,
+ &dataOperandSymbolPairs);
createEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *noCreateClause =
@@ -2780,7 +2946,8 @@ static Op createComputeOp(
noCreateClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_no_create,
/*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, /*setDeclareAttr=*/false,
+ &dataOperandSymbolPairs);
nocreateEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *presentClause =
@@ -2791,17 +2958,21 @@ static Op createComputeOp(
presentClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_present,
/*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, /*setDeclareAttr=*/false,
+ &dataOperandSymbolPairs);
presentEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *devicePtrClause =
std::get_if<Fortran::parser::AccClause::Deviceptr>(
&clause.u)) {
+ llvm::SmallVectorImpl<
+ std::pair<mlir::Value, Fortran::semantics::SymbolRef>> *symPairs =
+ enableDevicePtrRemap ? &dataOperandSymbolPairs : nullptr;
genDataOperandOperations<mlir::acc::DevicePtrOp>(
devicePtrClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_deviceptr,
/*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, /*setDeclareAttr=*/false, symPairs);
} else if (const auto *attachClause =
std::get_if<Fortran::parser::AccClause::Attach>(&clause.u)) {
auto crtDataStart = dataClauseOperands.size();
@@ -2809,7 +2980,8 @@ static Op createComputeOp(
attachClause->v, converter, semanticsContext, stmtCtx,
dataClauseOperands, mlir::acc::DataClause::acc_attach,
/*structured=*/true, /*implicit=*/false, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, /*setDeclareAttr=*/false,
+ &dataOperandSymbolPairs);
attachEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
} else if (const auto *privateClause =
@@ -2819,14 +2991,14 @@ static Op createComputeOp(
genPrivatizationRecipes<mlir::acc::PrivateRecipeOp>(
privateClause->v, converter, semanticsContext, stmtCtx,
privateOperands, privatizationRecipes, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, &dataOperandSymbolPairs);
} else if (const auto *firstprivateClause =
std::get_if<Fortran::parser::AccClause::Firstprivate>(
&clause.u)) {
genPrivatizationRecipes<mlir::acc::FirstprivateRecipeOp>(
firstprivateClause->v, converter, semanticsContext, stmtCtx,
firstprivateOperands, firstPrivatizationRecipes, async,
- asyncDeviceTypes, asyncOnlyDeviceTypes);
+ asyncDeviceTypes, asyncOnlyDeviceTypes, &dataOperandSymbolPairs);
} else if (const auto *reductionClause =
std::get_if<Fortran::parser::AccClause::Reduction>(
&clause.u)) {
@@ -2846,7 +3018,8 @@ static Op createComputeOp(
converter, semanticsContext, stmtCtx, dataClauseOperands,
mlir::acc::DataClause::acc_reduction,
/*structured=*/true, /*implicit=*/true, async, asyncDeviceTypes,
- asyncOnlyDeviceTypes);
+ asyncOnlyDeviceTypes, /*setDeclareAttr=*/false,
+ &dataOperandSymbolPairs);
copyEntryOperands.append(dataClauseOperands.begin() + crtDataStart,
dataClauseOperands.end());
}
@@ -2945,6 +3118,11 @@ static Op createComputeOp(
computeOp.setCombinedAttr(builder.getUnitAttr());
auto insPt = builder.saveInsertionPoint();
+
+ // Remap symbols from data clauses to use data operation results
+ remapDataOperandSymbols(converter, builder, computeOp,
+ dataOperandSymbolPairs);
+
builder.setInsertionPointAfter(computeOp);
// Create the exit operations after the region.
@@ -4860,25 +5038,34 @@ void Fortran::lower::genEarlyReturnInOpenACCLoop(fir::FirOpBuilder &builder,
uint64_t Fortran::lower::getLoopCountForCollapseAndTile(
const Fortran::parser::AccClauseList &clauseList) {
- uint64_t collapseLoopCount = 1;
+ uint64_t collapseLoopCount = getCollapseSizeAndForce(clauseList).first;
uint64_t tileLoopCount = 1;
for (const Fortran::parser::AccClause &clause : clauseList.v) {
- if (const auto *collapseClause =
- std::get_if<Fortran::parser::AccClause::Collapse>(&clause.u)) {
- const parser::AccCollapseArg &arg = collapseClause->v;
- const auto &collapseValue{std::get<parser::ScalarIntConstantExpr>(arg.t)};
- collapseLoopCount = *Fortran::semantics::GetIntValue(collapseValue);
- }
if (const auto *tileClause =
std::get_if<Fortran::parser::AccClause::Tile>(&clause.u)) {
const parser::AccTileExprList &tileExprList = tileClause->v;
- const std::list<parser::AccTileExpr> &listTileExpr = tileExprList.v;
- tileLoopCount = listTileExpr.size();
+ tileLoopCount = tileExprList.v.size();
}
}
- if (tileLoopCount > collapseLoopCount)
- return tileLoopCount;
- return collapseLoopCount;
+ return tileLoopCount > collapseLoopCount ? tileLoopCount : collapseLoopCount;
+}
+
+std::pair<uint64_t, bool> Fortran::lower::getCollapseSizeAndForce(
+ const Fortran::parser::AccClauseList &clauseList) {
+ uint64_t size = 1;
+ bool force = false;
+ for (const Fortran::parser::AccClause &clause : clauseList.v) {
+ if (const auto *collapseClause =
+ std::get_if<Fortran::parser::AccClause::Collapse>(&clause.u)) {
+ const Fortran::parser::AccCollapseArg &arg = collapseClause->v;
+ force = std::get<bool>(arg.t);
+ const auto &collapseValue =
+ std::get<Fortran::parser::ScalarIntConstantExpr>(arg.t);
+ size = *Fortran::semantics::GetIntValue(collapseValue);
+ break;
+ }
+ }
+ return {size, force};
}
/// Create an ACC loop operation for a DO construct when inside ACC compute
@@ -4921,6 +5108,8 @@ mlir::Operation *Fortran::lower::genOpenACCLoopFromDoConstruct(
reductionOperands;
llvm::SmallVector<mlir::Attribute> privatizationRecipes;
llvm::SmallVector<mlir::Type> retTy;
+ llvm::SmallVector<std::pair<mlir::Value, Fortran::semantics::SymbolRef>>
+ dataOperandSymbolPairs;
mlir::Value yieldValue;
uint64_t loopsToProcess = 1; // Single loop construct
@@ -4929,9 +5118,10 @@ mlir::Operation *Fortran::lower::genOpenACCLoopFromDoConstruct(
Fortran::lower::StatementContext stmtCtx;
auto loopOp = buildACCLoopOp(
converter, converter.getCurrentLocation(), semanticsContext, stmtCtx,
- doConstruct, eval, privateOperands, privatizationRecipes, gangOperands,
- workerNumOperands, vectorOperands, tileOperands, cacheOperands,
- reductionOperands, retTy, yieldValue, loopsToProcess);
+ doConstruct, eval, privateOperands, privatizationRecipes,
+ dataOperandSymbolPairs, gangOperands, workerNumOperands, vectorOperands,
+ tileOperands, cacheOperands, reductionOperands, retTy, yieldValue,
+ loopsToProcess);
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
if (!privatizationRecipes.empty())
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index 9e56c2b..bd94651 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -153,6 +153,7 @@ public:
clauseOps.loopLowerBounds = ops.loopLowerBounds;
clauseOps.loopUpperBounds = ops.loopUpperBounds;
clauseOps.loopSteps = ops.loopSteps;
+ clauseOps.collapseNumLoops = ops.collapseNumLoops;
ivOut.append(iv);
return true;
}
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 50603cb..4a05cd9 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -3348,7 +3348,7 @@ struct DoConcurrentSpecifierOpConversion : public fir::FIROpConversion<OpTy> {
mlir::ConversionPatternRewriter &rewriter) const override {
#ifdef EXPENSIVE_CHECKS
auto uses = mlir::SymbolTable::getSymbolUses(
- specifier, specifier->getParentOfType<mlir::ModuleOp>());
+ specifier, specifier->template getParentOfType<mlir::ModuleOp>());
// `fir.local|fir.declare_reduction` ops are not supposed to have any uses
// at this point (i.e. during lowering to LLVM). In case of serialization,
diff --git a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
index e595e61..260e525 100644
--- a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
+++ b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
@@ -77,6 +77,10 @@ class MapInfoFinalizationPass
/// | |
std::map<mlir::Operation *, mlir::Value> localBoxAllocas;
+ // List of deferrable descriptors to process at the end of
+ // the pass.
+ llvm::SmallVector<mlir::Operation *> deferrableDesc;
+
/// Return true if the given path exists in a list of paths.
static bool
containsPath(const llvm::SmallVectorImpl<llvm::SmallVector<int64_t>> &paths,
@@ -183,6 +187,40 @@ class MapInfoFinalizationPass
newMemberIndexPaths.emplace_back(indexPath.begin(), indexPath.end());
}
+ // Check if the declaration operation we have refers to a dummy
+ // function argument.
+ bool isDummyArgument(mlir::Value mappedValue) {
+ if (auto declareOp = mlir::dyn_cast_if_present<hlfir::DeclareOp>(
+ mappedValue.getDefiningOp()))
+ if (auto dummyScope = declareOp.getDummyScope())
+ return true;
+ return false;
+ }
+
+ // Relevant for OpenMP < 5.2, where attach semantics and rules don't exist.
+ // As descriptors were an unspoken implementation detail in these versions
+ // there's certain cases where the user (and the compiler implementation)
+ // can create data mapping errors by having temporary descriptors stuck
+ // in memory. The main example is calling a 'target enter data map'
+ // without a corresponding exit on an assumed shape or size dummy
+ // argument, a local stack descriptor is generated, gets mapped and
+ // is then left on device. A user doesn't realize what they've done as
+ // the OpenMP specification isn't explicit on descriptor handling in
+ // earlier versions and as far as Fortran is concerned this is something
+ // hidden from a user. To avoid this we can defer the descriptor mapping
+ // in these cases until target or target data regions, when we can be
+ // sure they have a clear limited scope on device.
+ bool canDeferDescriptorMapping(mlir::Value descriptor) {
+ if (fir::isAllocatableType(descriptor.getType()) ||
+ fir::isPointerType(descriptor.getType()))
+ return false;
+ if (isDummyArgument(descriptor) &&
+ (fir::isAssumedType(descriptor.getType()) ||
+ fir::isAssumedShape(descriptor.getType())))
+ return true;
+ return false;
+ }
+
/// getMemberUserList gathers all users of a particular MapInfoOp that are
/// other MapInfoOp's and places them into the mapMemberUsers list, which
/// records the map that the current argument MapInfoOp "op" is part of
@@ -234,13 +272,16 @@ class MapInfoFinalizationPass
/// fir::BoxOffsetOp we utilise to access the descriptor datas
/// base address can be utilised.
mlir::Value getDescriptorFromBoxMap(mlir::omp::MapInfoOp boxMap,
- fir::FirOpBuilder &builder) {
+ fir::FirOpBuilder &builder,
+ bool &canDescBeDeferred) {
mlir::Value descriptor = boxMap.getVarPtr();
if (!fir::isTypeWithDescriptor(boxMap.getVarType()))
if (auto addrOp = mlir::dyn_cast_if_present<fir::BoxAddrOp>(
boxMap.getVarPtr().getDefiningOp()))
descriptor = addrOp.getVal();
+ canDescBeDeferred = canDeferDescriptorMapping(descriptor);
+
if (!mlir::isa<fir::BaseBoxType>(descriptor.getType()) &&
!fir::factory::isOptionalArgument(descriptor.getDefiningOp()))
return descriptor;
@@ -391,8 +432,7 @@ class MapInfoFinalizationPass
/// Check if the mapOp is present in the HasDeviceAddr clause on
/// the userOp. Only applies to TargetOp.
- bool isHasDeviceAddr(mlir::omp::MapInfoOp mapOp, mlir::Operation *userOp) {
- assert(userOp && "Expecting non-null argument");
+ bool isHasDeviceAddr(mlir::omp::MapInfoOp mapOp, mlir::Operation &userOp) {
if (auto targetOp = llvm::dyn_cast<mlir::omp::TargetOp>(userOp)) {
for (mlir::Value hda : targetOp.getHasDeviceAddrVars()) {
if (hda.getDefiningOp() == mapOp)
@@ -402,6 +442,26 @@ class MapInfoFinalizationPass
return false;
}
+ bool isUseDeviceAddr(mlir::omp::MapInfoOp mapOp, mlir::Operation &userOp) {
+ if (auto targetDataOp = llvm::dyn_cast<mlir::omp::TargetDataOp>(userOp)) {
+ for (mlir::Value uda : targetDataOp.getUseDeviceAddrVars()) {
+ if (uda.getDefiningOp() == mapOp)
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool isUseDevicePtr(mlir::omp::MapInfoOp mapOp, mlir::Operation &userOp) {
+ if (auto targetDataOp = llvm::dyn_cast<mlir::omp::TargetDataOp>(userOp)) {
+ for (mlir::Value udp : targetDataOp.getUseDevicePtrVars()) {
+ if (udp.getDefiningOp() == mapOp)
+ return true;
+ }
+ }
+ return false;
+ }
+
mlir::omp::MapInfoOp genBoxcharMemberMap(mlir::omp::MapInfoOp op,
fir::FirOpBuilder &builder) {
if (!op.getMembers().empty())
@@ -466,12 +526,14 @@ class MapInfoFinalizationPass
// TODO: map the addendum segment of the descriptor, similarly to the
// base address/data pointer member.
- mlir::Value descriptor = getDescriptorFromBoxMap(op, builder);
+ bool descCanBeDeferred = false;
+ mlir::Value descriptor =
+ getDescriptorFromBoxMap(op, builder, descCanBeDeferred);
mlir::ArrayAttr newMembersAttr;
mlir::SmallVector<mlir::Value> newMembers;
llvm::SmallVector<llvm::SmallVector<int64_t>> memberIndices;
- bool isHasDeviceAddrFlag = isHasDeviceAddr(op, target);
+ bool isHasDeviceAddrFlag = isHasDeviceAddr(op, *target);
if (!mapMemberUsers.empty() || !op.getMembers().empty())
getMemberIndicesAsVectors(
@@ -553,6 +615,10 @@ class MapInfoFinalizationPass
/*partial_map=*/builder.getBoolAttr(false));
op.replaceAllUsesWith(newDescParentMapOp.getResult());
op->erase();
+
+ if (descCanBeDeferred)
+ deferrableDesc.push_back(newDescParentMapOp);
+
return newDescParentMapOp;
}
@@ -701,6 +767,124 @@ class MapInfoFinalizationPass
return nullptr;
}
+ void addImplicitDescriptorMapToTargetDataOp(mlir::omp::MapInfoOp op,
+ fir::FirOpBuilder &builder,
+ mlir::Operation &target) {
+ // Checks if the map is present as an explicit map already on the target
+ // data directive, and not just present on a use_device_addr/ptr, as if
+ // that's the case, we should not need to add an implicit map for the
+ // descriptor.
+ auto explicitMappingPresent = [](mlir::omp::MapInfoOp op,
+ mlir::omp::TargetDataOp tarData) {
+ // Verify that an at-least-equivalent top-level descriptor mapping with
+ // the same varPtr exists; the map type should always be To for a descriptor, which is
+ // all we really care about for this mapping as we aim to make sure the
+ // descriptor is always present on device if we're expecting to access
+ // the underlying data.
+ if (tarData.getMapVars().empty())
+ return false;
+
+ for (mlir::Value mapVar : tarData.getMapVars()) {
+ auto mapOp = llvm::cast<mlir::omp::MapInfoOp>(mapVar.getDefiningOp());
+ if (mapOp.getVarPtr() == op.getVarPtr() &&
+ mapOp.getVarPtrPtr() == op.getVarPtrPtr()) {
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+ // if we're not a top level descriptor with members (e.g. member of a
+ // derived type), we do not want to perform this step.
+ if (!llvm::isa<mlir::omp::TargetDataOp>(target) || op.getMembers().empty())
+ return;
+
+ if (!isUseDeviceAddr(op, target) && !isUseDevicePtr(op, target))
+ return;
+
+ auto targetDataOp = llvm::cast<mlir::omp::TargetDataOp>(target);
+ if (explicitMappingPresent(op, targetDataOp))
+ return;
+
+ mlir::omp::MapInfoOp newDescParentMapOp =
+ builder.create<mlir::omp::MapInfoOp>(
+ op->getLoc(), op.getResult().getType(), op.getVarPtr(),
+ op.getVarTypeAttr(),
+ builder.getIntegerAttr(
+ builder.getIntegerType(64, false),
+ llvm::to_underlying(
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS)),
+ op.getMapCaptureTypeAttr(), /*varPtrPtr=*/mlir::Value{},
+ mlir::SmallVector<mlir::Value>{}, mlir::ArrayAttr{},
+ /*bounds=*/mlir::SmallVector<mlir::Value>{},
+ /*mapperId*/ mlir::FlatSymbolRefAttr(), op.getNameAttr(),
+ /*partial_map=*/builder.getBoolAttr(false));
+
+ targetDataOp.getMapVarsMutable().append({newDescParentMapOp});
+ }
+
+ void removeTopLevelDescriptor(mlir::omp::MapInfoOp op,
+ fir::FirOpBuilder &builder,
+ mlir::Operation *target) {
+ if (llvm::isa<mlir::omp::TargetOp, mlir::omp::TargetDataOp,
+ mlir::omp::DeclareMapperInfoOp>(target))
+ return;
+
+ // if we're not a top level descriptor with members (e.g. member of a
+ // derived type), we do not want to perform this step.
+ if (op.getMembers().empty())
+ return;
+
+ mlir::SmallVector<mlir::Value> members = op.getMembers();
+ mlir::omp::MapInfoOp baseAddr =
+ mlir::dyn_cast_or_null<mlir::omp::MapInfoOp>(
+ members.front().getDefiningOp());
+ assert(baseAddr && "Expected member to be MapInfoOp");
+ members.erase(members.begin());
+
+ llvm::SmallVector<llvm::SmallVector<int64_t>> memberIndices;
+ getMemberIndicesAsVectors(op, memberIndices);
+
+ // Can skip the extra processing if there's only 1 member as it'd
+ // be the base addresses, which we're promoting to the parent.
+ mlir::ArrayAttr membersAttr;
+ if (memberIndices.size() > 1) {
+ memberIndices.erase(memberIndices.begin());
+ membersAttr = builder.create2DI64ArrayAttr(memberIndices);
+ }
+
+ // VarPtrPtr is tied to detecting if something is a pointer in the later
+ // lowering currently, this at the moment comes tied with
+ // OMP_MAP_PTR_AND_OBJ being applied which breaks the problem this tries to
+ // solve by emitting a 8-byte mapping tied to the descriptor address (even
+ // if we only emit a single map). So we circumvent this by removing the
+ // varPtrPtr mapping; however, a side effect of this is that we lose the
+ // additional load from the backend tied to this which is required for
+ // correctness and getting the correct address of the data to perform our
+ // mapping. So we do our load at this stage.
+ // TODO/FIXME: Tidy up the OMP_MAP_PTR_AND_OBJ and varPtrPtr being tied to
+ // if something is a pointer to try and tidy up the implementation a bit.
+ // This is an unfortunate complexity from push-back from upstream. We
+ // could also emit a load at this level for all base addresses as well,
+ // which in turn will simplify the later lowering a bit as well. But first
+ // need to see how well this alteration works.
+ auto loadBaseAddr =
+ builder.loadIfRef(op->getLoc(), baseAddr.getVarPtrPtr());
+ mlir::omp::MapInfoOp newBaseAddrMapOp =
+ builder.create<mlir::omp::MapInfoOp>(
+ op->getLoc(), loadBaseAddr.getType(), loadBaseAddr,
+ baseAddr.getVarTypeAttr(), baseAddr.getMapTypeAttr(),
+ baseAddr.getMapCaptureTypeAttr(), mlir::Value{}, members,
+ membersAttr, baseAddr.getBounds(),
+ /*mapperId*/ mlir::FlatSymbolRefAttr(), op.getNameAttr(),
+ /*partial_map=*/builder.getBoolAttr(false));
+ op.replaceAllUsesWith(newBaseAddrMapOp.getResult());
+ op->erase();
+ baseAddr.erase();
+ }
+
// This pass executes on omp::MapInfoOp's containing descriptor based types
// (allocatables, pointers, assumed shape etc.) and expanding them into
// multiple omp::MapInfoOp's for each pointer member contained within the
@@ -730,6 +914,7 @@ class MapInfoFinalizationPass
// clear all local allocations we made for any boxes in any prior
// iterations from previous function scopes.
localBoxAllocas.clear();
+ deferrableDesc.clear();
// First, walk `omp.map.info` ops to see if any of them have varPtrs
// with an underlying type of fir.char<k, ?>, i.e a character
@@ -1010,6 +1195,36 @@ class MapInfoFinalizationPass
}
});
+ // Now that we've expanded all of our boxes into a descriptor and base
+ // address map where necessary, we check if the map owner is an
+ // enter/exit/target data directive, and if they are we drop the initial
+ // descriptor (top-level parent) and replace it with the
+ // base_address/data.
+ //
+ // This circumvents issues with stack allocated descriptors bound to
+ // device colliding which in Flang is rather trivial for a user to do by
+ // accident due to the rather pervasive local intermediate descriptor
+ // generation that occurs whenever you pass boxes around different scopes.
+ // In OpenMP 6+ mapping these would be a user error as the tools required
+ // to circumvent these issues are provided by the spec (ref_ptr/ptee map
+ // types), but in prior specifications these tools are not available and
+ // it becomes an implementation issue for us to solve.
+ //
+ // We do this by dropping the top-level descriptor which will be the stack
+ // descriptor when we perform enter/exit maps, as we don't want these to
+ // be bound until necessary which is when we utilise the descriptor type
+ // within a target region. At which point we map the relevant descriptor
+ // data and the runtime should correctly associate the data with the
+ // descriptor and bind together and allow clean mapping and execution.
+ for (auto *op : deferrableDesc) {
+ auto mapOp = llvm::dyn_cast<mlir::omp::MapInfoOp>(op);
+ mlir::Operation *targetUser = getFirstTargetUser(mapOp);
+ assert(targetUser && "expected user of map operation was not found");
+ builder.setInsertionPoint(mapOp);
+ removeTopLevelDescriptor(mapOp, builder, targetUser);
+ addImplicitDescriptorMapToTargetDataOp(mapOp, builder, *targetUser);
+ }
+
// Wait until after we have generated all of our maps to add them onto
// the target's block arguments, simplifying the process as there would be
// no need to avoid accidental duplicate additions.