Diffstat (limited to 'flang/lib')
-rw-r--r--  flang/lib/Evaluate/intrinsics.cpp | 23
-rw-r--r--  flang/lib/Evaluate/tools.cpp | 12
-rw-r--r--  flang/lib/Lower/Allocatable.cpp | 13
-rw-r--r--  flang/lib/Lower/Bridge.cpp | 7
-rw-r--r--  flang/lib/Lower/CUDA.cpp | 27
-rw-r--r--  flang/lib/Lower/ConvertExpr.cpp | 2
-rw-r--r--  flang/lib/Lower/IO.cpp | 11
-rw-r--r--  flang/lib/Lower/OpenACC.cpp | 27
-rw-r--r--  flang/lib/Lower/OpenMP/Atomic.cpp | 97
-rw-r--r--  flang/lib/Lower/OpenMP/ClauseProcessor.cpp | 6
-rw-r--r--  flang/lib/Lower/OpenMP/Clauses.cpp | 85
-rw-r--r--  flang/lib/Lower/OpenMP/OpenMP.cpp | 15
-rw-r--r--  flang/lib/Optimizer/Builder/Character.cpp | 2
-rw-r--r--  flang/lib/Optimizer/Builder/IntrinsicCall.cpp | 61
-rw-r--r--  flang/lib/Optimizer/CodeGen/CodeGen.cpp | 18
-rw-r--r--  flang/lib/Optimizer/Dialect/FIRType.cpp | 19
-rw-r--r--  flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp | 62
-rw-r--r--  flang/lib/Optimizer/OpenACC/CMakeLists.txt | 1
-rw-r--r--  flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp | 320
-rw-r--r--  flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp | 191
-rw-r--r--  flang/lib/Optimizer/OpenACC/Transforms/CMakeLists.txt | 12
-rw-r--r--  flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp | 100
-rw-r--r--  flang/lib/Optimizer/Support/Utils.cpp | 10
-rw-r--r--  flang/lib/Optimizer/Transforms/AffinePromotion.cpp | 2
-rw-r--r--  flang/lib/Optimizer/Transforms/CUFOpConversion.cpp | 7
-rw-r--r--  flang/lib/Optimizer/Transforms/StackArrays.cpp | 2
-rw-r--r--  flang/lib/Parser/openmp-parsers.cpp | 25
-rw-r--r--  flang/lib/Parser/parse-tree.cpp | 2
-rw-r--r--  flang/lib/Parser/unparse.cpp | 6
-rw-r--r--  flang/lib/Semantics/assignment.cpp | 3
-rw-r--r--  flang/lib/Semantics/check-allocate.cpp | 13
-rw-r--r--  flang/lib/Semantics/check-case.cpp | 2
-rw-r--r--  flang/lib/Semantics/check-coarray.cpp | 9
-rw-r--r--  flang/lib/Semantics/check-data.cpp | 10
-rw-r--r--  flang/lib/Semantics/check-deallocate.cpp | 3
-rw-r--r--  flang/lib/Semantics/check-declarations.cpp | 2
-rw-r--r--  flang/lib/Semantics/check-do-forall.cpp | 50
-rw-r--r--  flang/lib/Semantics/check-io.cpp | 6
-rw-r--r--  flang/lib/Semantics/check-omp-atomic.cpp | 39
-rw-r--r--  flang/lib/Semantics/check-omp-structure.cpp | 110
-rw-r--r--  flang/lib/Semantics/check-omp-structure.h | 8
-rw-r--r--  flang/lib/Semantics/data-to-inits.cpp | 17
-rw-r--r--  flang/lib/Semantics/expression.cpp | 107
-rw-r--r--  flang/lib/Semantics/mod-file.cpp | 37
-rw-r--r--  flang/lib/Semantics/openmp-modifiers.cpp | 16
-rw-r--r--  flang/lib/Semantics/openmp-utils.cpp | 23
-rw-r--r--  flang/lib/Semantics/resolve-directives.cpp | 225
-rw-r--r--  flang/lib/Semantics/resolve-directives.h | 2
-rw-r--r--  flang/lib/Semantics/resolve-names-utils.cpp | 9
-rw-r--r--  flang/lib/Semantics/resolve-names.cpp | 35
-rw-r--r--  flang/lib/Semantics/symbol.cpp | 32
51 files changed, 1519 insertions(+), 404 deletions(-)
diff --git a/flang/lib/Evaluate/intrinsics.cpp b/flang/lib/Evaluate/intrinsics.cpp
index f204eef..1de5e6b 100644
--- a/flang/lib/Evaluate/intrinsics.cpp
+++ b/flang/lib/Evaluate/intrinsics.cpp
@@ -111,6 +111,7 @@ ENUM_CLASS(KindCode, none, defaultIntegerKind,
atomicIntKind, // atomic_int_kind from iso_fortran_env
atomicIntOrLogicalKind, // atomic_int_kind or atomic_logical_kind
sameAtom, // same type and kind as atom
+ extensibleOrUnlimitedType, // extensible or unlimited polymorphic type
)
struct TypePattern {
@@ -160,7 +161,8 @@ static constexpr TypePattern AnyChar{CharType, KindCode::any};
static constexpr TypePattern AnyLogical{LogicalType, KindCode::any};
static constexpr TypePattern AnyRelatable{RelatableType, KindCode::any};
static constexpr TypePattern AnyIntrinsic{IntrinsicType, KindCode::any};
-static constexpr TypePattern ExtensibleDerived{DerivedType, KindCode::any};
+static constexpr TypePattern ExtensibleDerived{
+ DerivedType, KindCode::extensibleOrUnlimitedType};
static constexpr TypePattern AnyData{AnyType, KindCode::any};
// Type is irrelevant, but not BOZ (for PRESENT(), OPTIONAL(), &c.)
@@ -2103,9 +2105,13 @@ std::optional<SpecificCall> IntrinsicInterface::Match(
}
return std::nullopt;
} else if (!d.typePattern.categorySet.test(type->category())) {
+ const char *expected{
+ d.typePattern.kindCode == KindCode::extensibleOrUnlimitedType
+ ? ", expected extensible or unlimited polymorphic type"
+ : ""};
messages.Say(arg->sourceLocation(),
- "Actual argument for '%s=' has bad type '%s'"_err_en_US, d.keyword,
- type->AsFortran());
+ "Actual argument for '%s=' has bad type '%s'%s"_err_en_US, d.keyword,
+ type->AsFortran(), expected);
return std::nullopt; // argument has invalid type category
}
bool argOk{false};
@@ -2244,6 +2250,17 @@ std::optional<SpecificCall> IntrinsicInterface::Match(
return std::nullopt;
}
break;
+ case KindCode::extensibleOrUnlimitedType:
+ argOk = type->IsUnlimitedPolymorphic() ||
+ (type->category() == TypeCategory::Derived &&
+ IsExtensibleType(GetDerivedTypeSpec(type)));
+ if (!argOk) {
+ messages.Say(arg->sourceLocation(),
+ "Actual argument for '%s=' has type '%s', but was expected to be an extensible or unlimited polymorphic type"_err_en_US,
+ d.keyword, type->AsFortran());
+ return std::nullopt;
+ }
+ break;
default:
CRASH_NO_CASE;
}
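
A minimal Fortran sketch (illustrative, not part of the patch) of what the new KindCode rejects: intrinsics whose arguments use the ExtensibleDerived pattern (for example EXTENDS_TYPE_OF and SAME_TYPE_AS) require an extensible or unlimited polymorphic argument, so a SEQUENCE type should now get the more specific diagnostic added above.

    program demo
      type :: seq_t        ! SEQUENCE types are not extensible
        sequence
        integer :: i
      end type
      type :: ext_t        ! ordinary derived type, extensible
        integer :: i
      end type
      type(seq_t) :: a
      type(ext_t) :: b
      ! expected: "... but was expected to be an extensible or unlimited polymorphic type"
      print *, extends_type_of(a, b)
    end program
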
diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp
index b927fa3..bd06acc 100644
--- a/flang/lib/Evaluate/tools.cpp
+++ b/flang/lib/Evaluate/tools.cpp
@@ -1153,6 +1153,18 @@ bool HasCUDAImplicitTransfer(const Expr<SomeType> &expr) {
return (hasConstant || (hostSymbols.size() > 0)) && deviceSymbols.size() > 0;
}
+bool IsCUDADeviceSymbol(const Symbol &sym) {
+ if (const auto *details =
+ sym.GetUltimate().detailsIf<semantics::ObjectEntityDetails>()) {
+ return details->cudaDataAttr() &&
+ *details->cudaDataAttr() != common::CUDADataAttr::Pinned;
+ } else if (const auto *details =
+ sym.GetUltimate().detailsIf<semantics::AssocEntityDetails>()) {
+ return GetNbOfCUDADeviceSymbols(details->expr()) > 0;
+ }
+ return false;
+}
+
// HasVectorSubscript()
struct HasVectorSubscriptHelper
: public AnyTraverse<HasVectorSubscriptHelper, bool,
diff --git a/flang/lib/Lower/Allocatable.cpp b/flang/lib/Lower/Allocatable.cpp
index 53239cb..e7a6c4d 100644
--- a/flang/lib/Lower/Allocatable.cpp
+++ b/flang/lib/Lower/Allocatable.cpp
@@ -629,6 +629,10 @@ private:
unsigned allocatorIdx = Fortran::lower::getAllocatorIdx(alloc.getSymbol());
fir::ExtendedValue exv = isSource ? sourceExv : moldExv;
+ if (const Fortran::semantics::Symbol *sym{GetLastSymbol(sourceExpr)})
+ if (Fortran::semantics::IsCUDADevice(*sym))
+ TODO(loc, "CUDA Fortran: allocate with device source");
+
// Generate a sequence of runtime calls.
errorManager.genStatCheck(builder, loc);
genAllocateObjectInit(box, allocatorIdx);
@@ -767,6 +771,15 @@ private:
const fir::MutableBoxValue &box,
ErrorManager &errorManager,
const Fortran::semantics::Symbol &sym) {
+
+ if (const Fortran::semantics::DeclTypeSpec *declTypeSpec = sym.GetType())
+ if (const Fortran::semantics::DerivedTypeSpec *derivedTypeSpec =
+ declTypeSpec->AsDerived())
+ if (derivedTypeSpec->HasDefaultInitialization(
+ /*ignoreAllocatable=*/true, /*ignorePointer=*/true))
+ TODO(loc,
+ "CUDA Fortran: allocate on device with default initialization");
+
Fortran::lower::StatementContext stmtCtx;
cuf::DataAttributeAttr cudaAttr =
Fortran::lower::translateSymbolCUFDataAttribute(builder.getContext(),
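
A hedged CUDA Fortran sketch of an ALLOCATE statement that would reach the first new TODO (variable names are illustrative): when the SOURCE= expression resolves to a device symbol, lowering now stops with the TODO message.

    real, device, allocatable :: d_src(:), d_dst(:)
    allocate(d_src(10))
    ! SOURCE= is a device symbol: hits "CUDA Fortran: allocate with device source"
    allocate(d_dst, source=d_src)
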
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index 68adf34..0595ca0 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -4987,11 +4987,8 @@ private:
// host = device
if (!lhsIsDevice && rhsIsDevice) {
- if (Fortran::lower::isTransferWithConversion(rhs)) {
+ if (auto elementalOp = Fortran::lower::isTransferWithConversion(rhs)) {
mlir::OpBuilder::InsertionGuard insertionGuard(builder);
- auto elementalOp =
- mlir::dyn_cast<hlfir::ElementalOp>(rhs.getDefiningOp());
- assert(elementalOp && "expect elemental op");
auto designateOp =
*elementalOp.getBody()->getOps<hlfir::DesignateOp>().begin();
builder.setInsertionPoint(elementalOp);
@@ -6079,7 +6076,7 @@ private:
if (resTy != wrappedSymTy) {
// check size of the pointed to type so we can't overflow by writing
// double precision to a single precision allocation, etc
- LLVM_ATTRIBUTE_UNUSED auto getBitWidth = [this](mlir::Type ty) {
+ [[maybe_unused]] auto getBitWidth = [this](mlir::Type ty) {
// 15.6.2.6.3: differering result types should be integer, real,
// complex or logical
if (auto cmplx = mlir::dyn_cast_or_null<mlir::ComplexType>(ty))
diff --git a/flang/lib/Lower/CUDA.cpp b/flang/lib/Lower/CUDA.cpp
index bb4bdee..9501b0e 100644
--- a/flang/lib/Lower/CUDA.cpp
+++ b/flang/lib/Lower/CUDA.cpp
@@ -68,11 +68,26 @@ cuf::DataAttributeAttr Fortran::lower::translateSymbolCUFDataAttribute(
return cuf::getDataAttribute(mlirContext, cudaAttr);
}
-bool Fortran::lower::isTransferWithConversion(mlir::Value rhs) {
+hlfir::ElementalOp Fortran::lower::isTransferWithConversion(mlir::Value rhs) {
+ auto isConversionElementalOp = [](hlfir::ElementalOp elOp) {
+ return llvm::hasSingleElement(
+ elOp.getBody()->getOps<hlfir::DesignateOp>()) &&
+ llvm::hasSingleElement(elOp.getBody()->getOps<fir::LoadOp>()) == 1 &&
+ llvm::hasSingleElement(elOp.getBody()->getOps<fir::ConvertOp>()) ==
+ 1;
+ };
+ if (auto declOp = mlir::dyn_cast<hlfir::DeclareOp>(rhs.getDefiningOp())) {
+ if (!declOp.getMemref().getDefiningOp())
+ return {};
+ if (auto associateOp = mlir::dyn_cast<hlfir::AssociateOp>(
+ declOp.getMemref().getDefiningOp()))
+ if (auto elOp = mlir::dyn_cast<hlfir::ElementalOp>(
+ associateOp.getSource().getDefiningOp()))
+ if (isConversionElementalOp(elOp))
+ return elOp;
+ }
if (auto elOp = mlir::dyn_cast<hlfir::ElementalOp>(rhs.getDefiningOp()))
- if (llvm::hasSingleElement(elOp.getBody()->getOps<hlfir::DesignateOp>()) &&
- llvm::hasSingleElement(elOp.getBody()->getOps<fir::LoadOp>()) == 1 &&
- llvm::hasSingleElement(elOp.getBody()->getOps<fir::ConvertOp>()) == 1)
- return true;
- return false;
+ if (isConversionElementalOp(elOp))
+ return elOp;
+ return {};
}
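
For context, a hedged Fortran sketch of the transfer pattern isTransferWithConversion looks for (host = device with an implicit kind conversion); as I read it, the extra hlfir.declare/hlfir.associate case handled above covers the situation where the converted elemental result has been associated into a temporary before the transfer.

    real(4), device :: d(100)
    real(8)         :: h(100)
    h = d    ! elemental designate/load/convert on the RHS; detected as a conversion transfer
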
diff --git a/flang/lib/Lower/ConvertExpr.cpp b/flang/lib/Lower/ConvertExpr.cpp
index d7f94e1..a46d219 100644
--- a/flang/lib/Lower/ConvertExpr.cpp
+++ b/flang/lib/Lower/ConvertExpr.cpp
@@ -5603,7 +5603,7 @@ private:
return newIters;
};
if (useTripsForSlice) {
- LLVM_ATTRIBUTE_UNUSED auto vectorSubscriptShape =
+ [[maybe_unused]] auto vectorSubscriptShape =
getShape(arrayOperands.back());
auto undef = fir::UndefOp::create(builder, loc, idxTy);
trips.push_back(undef);
diff --git a/flang/lib/Lower/IO.cpp b/flang/lib/Lower/IO.cpp
index 98dc78f..cd53dc9 100644
--- a/flang/lib/Lower/IO.cpp
+++ b/flang/lib/Lower/IO.cpp
@@ -524,12 +524,18 @@ getNamelistGroup(Fortran::lower::AbstractConverter &converter,
descAddr =
builder.createConvert(loc, builder.getRefType(symType), varAddr);
} else {
+ fir::BaseBoxType boxType;
const auto expr = Fortran::evaluate::AsGenericExpr(s);
fir::ExtendedValue exv = converter.genExprAddr(*expr, stmtCtx);
mlir::Type type = fir::getBase(exv).getType();
+ bool isClassType = mlir::isa<fir::ClassType>(type);
if (mlir::Type baseTy = fir::dyn_cast_ptrOrBoxEleTy(type))
type = baseTy;
- fir::BoxType boxType = fir::BoxType::get(fir::PointerType::get(type));
+
+ if (isClassType)
+ boxType = fir::ClassType::get(fir::PointerType::get(type));
+ else
+ boxType = fir::BoxType::get(fir::PointerType::get(type));
descAddr = builder.createTemporary(loc, boxType);
fir::MutableBoxValue box = fir::MutableBoxValue(descAddr, {}, {});
fir::factory::associateMutableBox(builder, loc, box, exv,
@@ -944,7 +950,8 @@ static void genIoLoop(Fortran::lower::AbstractConverter &converter,
makeNextConditionalOn(builder, loc, checkResult, ok, inLoop);
const auto &itemList = std::get<0>(ioImpliedDo.t);
const auto &control = std::get<1>(ioImpliedDo.t);
- const auto &loopSym = *control.name.thing.thing.symbol;
+ const auto &loopSym =
+ *Fortran::parser::UnwrapRef<Fortran::parser::Name>(control.name).symbol;
mlir::Value loopVar = fir::getBase(converter.genExprAddr(
Fortran::evaluate::AsGenericExpr(loopSym).value(), stmtCtx));
auto genControlValue = [&](const Fortran::parser::ScalarIntExpr &expr) {
diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp
index 62e5c0c..cfb1891 100644
--- a/flang/lib/Lower/OpenACC.cpp
+++ b/flang/lib/Lower/OpenACC.cpp
@@ -978,15 +978,40 @@ static RecipeOp genRecipeOp(
auto mappableTy = mlir::dyn_cast<mlir::acc::MappableType>(ty);
assert(mappableTy &&
"Expected that all variable types are considered mappable");
+ bool needsDestroy = false;
auto retVal = mappableTy.generatePrivateInit(
builder, loc,
mlir::cast<mlir::TypedValue<mlir::acc::MappableType>>(
initBlock->getArgument(0)),
initName,
initBlock->getArguments().take_back(initBlock->getArguments().size() - 1),
- initValue);
+ initValue, needsDestroy);
mlir::acc::YieldOp::create(builder, loc,
retVal ? retVal : initBlock->getArgument(0));
+ // Create destroy region and generate destruction if requested.
+ if (needsDestroy) {
+ llvm::SmallVector<mlir::Type> destroyArgsTy;
+ llvm::SmallVector<mlir::Location> destroyArgsLoc;
+ // original and privatized/reduction value
+ destroyArgsTy.push_back(ty);
+ destroyArgsTy.push_back(ty);
+ destroyArgsLoc.push_back(loc);
+ destroyArgsLoc.push_back(loc);
+ // Append bounds arguments (if any) in the same order as init region
+ if (argsTy.size() > 1) {
+ destroyArgsTy.append(argsTy.begin() + 1, argsTy.end());
+ destroyArgsLoc.insert(destroyArgsLoc.end(), argsTy.size() - 1, loc);
+ }
+
+ builder.createBlock(&recipe.getDestroyRegion(),
+ recipe.getDestroyRegion().end(), destroyArgsTy,
+ destroyArgsLoc);
+ builder.setInsertionPointToEnd(&recipe.getDestroyRegion().back());
+ // Call interface on the privatized/reduction value (2nd argument).
+ (void)mappableTy.generatePrivateDestroy(
+ builder, loc, recipe.getDestroyRegion().front().getArgument(1));
+ mlir::acc::TerminatorOp::create(builder, loc);
+ }
return recipe;
}
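
A hedged Fortran/OpenACC sketch of a privatization that exercises the new destroy region: privatizing a descriptor-backed (boxed) entity allocates heap storage in the recipe's init region, and the destroy region generated above frees it when the recipe is torn down.

    subroutine s(a)
      real :: a(:)          ! assumed-shape: descriptor-backed
      integer :: i
      !$acc parallel loop private(a)
      do i = 1, size(a)
        a(i) = real(i)
      end do
    end subroutine
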
diff --git a/flang/lib/Lower/OpenMP/Atomic.cpp b/flang/lib/Lower/OpenMP/Atomic.cpp
index ff82a36..3ab8a58 100644
--- a/flang/lib/Lower/OpenMP/Atomic.cpp
+++ b/flang/lib/Lower/OpenMP/Atomic.cpp
@@ -20,6 +20,7 @@
#include "flang/Optimizer/Builder/FIRBuilder.h"
#include "flang/Optimizer/Builder/Todo.h"
#include "flang/Parser/parse-tree.h"
+#include "flang/Semantics/openmp-utils.h"
#include "flang/Semantics/semantics.h"
#include "flang/Semantics/type.h"
#include "flang/Support/Fortran.h"
@@ -183,12 +184,8 @@ getMemoryOrderFromRequires(const semantics::Scope &scope) {
// scope.
// For safety, traverse all enclosing scopes and check if their symbol
// contains REQUIRES.
- for (const auto *sc{&scope}; sc->kind() != semantics::Scope::Kind::Global;
- sc = &sc->parent()) {
- const semantics::Symbol *sym = sc->symbol();
- if (!sym)
- continue;
-
+ const semantics::Scope &unitScope = semantics::omp::GetProgramUnit(scope);
+ if (auto *symbol = unitScope.symbol()) {
const common::OmpMemoryOrderType *admo = common::visit(
[](auto &&s) {
using WithOmpDeclarative = semantics::WithOmpDeclarative;
@@ -198,7 +195,8 @@ getMemoryOrderFromRequires(const semantics::Scope &scope) {
}
return static_cast<const common::OmpMemoryOrderType *>(nullptr);
},
- sym->details());
+ symbol->details());
+
if (admo)
return getMemoryOrderKind(*admo);
}
@@ -214,19 +212,83 @@ getDefaultAtomicMemOrder(semantics::SemanticsContext &semaCtx) {
return std::nullopt;
}
-static std::optional<mlir::omp::ClauseMemoryOrderKind>
+static std::pair<std::optional<mlir::omp::ClauseMemoryOrderKind>, bool>
getAtomicMemoryOrder(semantics::SemanticsContext &semaCtx,
const omp::List<omp::Clause> &clauses,
const semantics::Scope &scope) {
for (const omp::Clause &clause : clauses) {
if (auto maybeKind = getMemoryOrderKind(clause.id))
- return *maybeKind;
+ return std::make_pair(*maybeKind, /*canOverride=*/false);
}
if (auto maybeKind = getMemoryOrderFromRequires(scope))
- return *maybeKind;
+ return std::make_pair(*maybeKind, /*canOverride=*/true);
- return getDefaultAtomicMemOrder(semaCtx);
+ return std::make_pair(getDefaultAtomicMemOrder(semaCtx),
+ /*canOverride=*/false);
+}
+
+static std::optional<mlir::omp::ClauseMemoryOrderKind>
+makeValidForAction(std::optional<mlir::omp::ClauseMemoryOrderKind> memOrder,
+ int action0, int action1, unsigned version) {
+ // When the atomic default memory order specified on a REQUIRES directive is
+ // disallowed on a given ATOMIC operation, and it's not ACQ_REL, the order
+ // reverts to RELAXED. ACQ_REL decays to either ACQUIRE or RELEASE, depending
+ // on the operation.
+
+ if (!memOrder) {
+ return memOrder;
+ }
+
+ using Analysis = parser::OpenMPAtomicConstruct::Analysis;
+ // Figure out the main action (i.e. disregard a potential capture operation)
+ int action = action0;
+ if (action1 != Analysis::None)
+ action = action0 == Analysis::Read ? action1 : action0;
+
+ // Available orderings: acquire, acq_rel, relaxed, release, seq_cst
+
+ if (action == Analysis::Read) {
+ // "acq_rel" decays to "acquire"
+ if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acq_rel)
+ return mlir::omp::ClauseMemoryOrderKind::Acquire;
+ } else if (action == Analysis::Write) {
+ // "acq_rel" decays to "release"
+ if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acq_rel)
+ return mlir::omp::ClauseMemoryOrderKind::Release;
+ }
+
+ if (version > 50) {
+ if (action == Analysis::Read) {
+ // "release" prohibited
+ if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Release)
+ return mlir::omp::ClauseMemoryOrderKind::Relaxed;
+ }
+ if (action == Analysis::Write) {
+ // "acquire" prohibited
+ if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acquire)
+ return mlir::omp::ClauseMemoryOrderKind::Relaxed;
+ }
+ } else {
+ if (action == Analysis::Read) {
+ // "release" prohibited
+ if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Release)
+ return mlir::omp::ClauseMemoryOrderKind::Relaxed;
+ } else {
+ if (action & Analysis::Write) { // include "update"
+ // "acquire" prohibited
+ if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acquire)
+ return mlir::omp::ClauseMemoryOrderKind::Relaxed;
+ if (action == Analysis::Update) {
+ // "acq_rel" prohibited
+ if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acq_rel)
+ return mlir::omp::ClauseMemoryOrderKind::Relaxed;
+ }
+ }
+ }
+ }
+
+ return memOrder;
}
static mlir::omp::ClauseMemoryOrderKindAttr
@@ -449,16 +511,19 @@ void Fortran::lower::omp::lowerAtomic(
mlir::Value atomAddr =
fir::getBase(converter.genExprAddr(atom, stmtCtx, &loc));
mlir::IntegerAttr hint = getAtomicHint(converter, clauses);
- std::optional<mlir::omp::ClauseMemoryOrderKind> memOrder =
- getAtomicMemoryOrder(semaCtx, clauses,
- semaCtx.FindScope(construct.source));
+ auto [memOrder, canOverride] = getAtomicMemoryOrder(
+ semaCtx, clauses, semaCtx.FindScope(construct.source));
+
+ unsigned version = semaCtx.langOptions().OpenMPVersion;
+ int action0 = analysis.op0.what & analysis.Action;
+ int action1 = analysis.op1.what & analysis.Action;
+ if (canOverride)
+ memOrder = makeValidForAction(memOrder, action0, action1, version);
if (auto *cond = get(analysis.cond)) {
(void)cond;
TODO(loc, "OpenMP ATOMIC COMPARE");
} else {
- int action0 = analysis.op0.what & analysis.Action;
- int action1 = analysis.op1.what & analysis.Action;
mlir::Operation *captureOp = nullptr;
fir::FirOpBuilder::InsertPoint preAt = builder.saveInsertionPoint();
fir::FirOpBuilder::InsertPoint atomicAt, postAt;
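
A hedged Fortran sketch of the decay rule that makeValidForAction implements: with REQUIRES ATOMIC_DEFAULT_MEM_ORDER(ACQ_REL) and no explicit memory-order clause on the construct, an atomic read decays to ACQUIRE and an atomic write decays to RELEASE.

    module m
      !$omp requires atomic_default_mem_order(acq_rel)
    contains
      subroutine rd(x, v)
        integer :: x, v
        !$omp atomic read     ! acq_rel decays to acquire
        v = x
      end subroutine
      subroutine wr(x, v)
        integer :: x, v
        !$omp atomic write    ! acq_rel decays to release
        x = v
      end subroutine
    end module
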
diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
index 55eda7e..85398be 100644
--- a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
@@ -1343,8 +1343,10 @@ bool ClauseProcessor::processMap(
const parser::CharBlock &source) {
using Map = omp::clause::Map;
mlir::Location clauseLocation = converter.genLocation(source);
- const auto &[mapType, typeMods, refMod, mappers, iterator, objects] =
- clause.t;
+ const auto &[mapType, typeMods, attachMod, refMod, mappers, iterator,
+ objects] = clause.t;
+ if (attachMod)
+ TODO(currentLocation, "ATTACH modifier is not implemented yet");
llvm::omp::OpenMPOffloadMappingFlags mapTypeBits =
llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_NONE;
std::string mapperIdName = "__implicit_mapper";
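
A hedged sketch (OpenMP 6.0 draft syntax, illustrative only) of a MAP clause carrying the ATTACH modifier that processMap now detects and reports as unimplemented:

    integer, pointer :: p(:)
    !$omp target map(attach(never), tofrom: p)
    p(1) = 0
    !$omp end target
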
diff --git a/flang/lib/Lower/OpenMP/Clauses.cpp b/flang/lib/Lower/OpenMP/Clauses.cpp
index fac37a3..ba34212 100644
--- a/flang/lib/Lower/OpenMP/Clauses.cpp
+++ b/flang/lib/Lower/OpenMP/Clauses.cpp
@@ -219,7 +219,6 @@ MAKE_EMPTY_CLASS(AcqRel, AcqRel);
MAKE_EMPTY_CLASS(Acquire, Acquire);
MAKE_EMPTY_CLASS(Capture, Capture);
MAKE_EMPTY_CLASS(Compare, Compare);
-MAKE_EMPTY_CLASS(DynamicAllocators, DynamicAllocators);
MAKE_EMPTY_CLASS(Full, Full);
MAKE_EMPTY_CLASS(Inbranch, Inbranch);
MAKE_EMPTY_CLASS(Mergeable, Mergeable);
@@ -235,13 +234,9 @@ MAKE_EMPTY_CLASS(OmpxBare, OmpxBare);
MAKE_EMPTY_CLASS(Read, Read);
MAKE_EMPTY_CLASS(Relaxed, Relaxed);
MAKE_EMPTY_CLASS(Release, Release);
-MAKE_EMPTY_CLASS(ReverseOffload, ReverseOffload);
MAKE_EMPTY_CLASS(SeqCst, SeqCst);
-MAKE_EMPTY_CLASS(SelfMaps, SelfMaps);
MAKE_EMPTY_CLASS(Simd, Simd);
MAKE_EMPTY_CLASS(Threads, Threads);
-MAKE_EMPTY_CLASS(UnifiedAddress, UnifiedAddress);
-MAKE_EMPTY_CLASS(UnifiedSharedMemory, UnifiedSharedMemory);
MAKE_EMPTY_CLASS(Unknown, Unknown);
MAKE_EMPTY_CLASS(Untied, Untied);
MAKE_EMPTY_CLASS(Weak, Weak);
@@ -775,7 +770,18 @@ Doacross make(const parser::OmpClause::Doacross &inp,
return makeDoacross(inp.v.v, semaCtx);
}
-// DynamicAllocators: empty
+DynamicAllocators make(const parser::OmpClause::DynamicAllocators &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp.v -> std::optional<parser::OmpDynamicAllocatorsClause>
+ auto &&maybeRequired = maybeApply(
+ [&](const parser::OmpDynamicAllocatorsClause &c) {
+ return makeExpr(c.v, semaCtx);
+ },
+ inp.v);
+
+ return DynamicAllocators{/*Required=*/std::move(maybeRequired)};
+}
+
DynGroupprivate make(const parser::OmpClause::DynGroupprivate &inp,
semantics::SemanticsContext &semaCtx) {
@@ -1069,6 +1075,15 @@ Map make(const parser::OmpClause::Map &inp,
);
CLAUSET_ENUM_CONVERT( //
+ convertAttachMod, parser::OmpAttachModifier::Value, Map::AttachModifier,
+ // clang-format off
+ MS(Always, Always)
+ MS(Auto, Auto)
+ MS(Never, Never)
+ // clang-format on
+ );
+
+ CLAUSET_ENUM_CONVERT( //
convertRefMod, parser::OmpRefModifier::Value, Map::RefModifier,
// clang-format off
MS(Ref_Ptee, RefPtee)
@@ -1115,6 +1130,13 @@ Map make(const parser::OmpClause::Map &inp,
if (!modSet.empty())
maybeTypeMods = Map::MapTypeModifiers(modSet.begin(), modSet.end());
+ auto attachMod = [&]() -> std::optional<Map::AttachModifier> {
+ if (auto *t =
+ semantics::OmpGetUniqueModifier<parser::OmpAttachModifier>(mods))
+ return convertAttachMod(t->v);
+ return std::nullopt;
+ }();
+
auto refMod = [&]() -> std::optional<Map::RefModifier> {
if (auto *t = semantics::OmpGetUniqueModifier<parser::OmpRefModifier>(mods))
return convertRefMod(t->v);
@@ -1135,6 +1157,7 @@ Map make(const parser::OmpClause::Map &inp,
return Map{{/*MapType=*/std::move(type),
/*MapTypeModifiers=*/std::move(maybeTypeMods),
+ /*AttachModifier=*/std::move(attachMod),
/*RefModifier=*/std::move(refMod), /*Mapper=*/std::move(mappers),
/*Iterator=*/std::move(iterator),
/*LocatorList=*/makeObjects(t2, semaCtx)}};
@@ -1321,7 +1344,18 @@ Reduction make(const parser::OmpClause::Reduction &inp,
// Relaxed: empty
// Release: empty
-// ReverseOffload: empty
+
+ReverseOffload make(const parser::OmpClause::ReverseOffload &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp.v -> std::optional<parser::OmpReverseOffloadClause>
+ auto &&maybeRequired = maybeApply(
+ [&](const parser::OmpReverseOffloadClause &c) {
+ return makeExpr(c.v, semaCtx);
+ },
+ inp.v);
+
+ return ReverseOffload{/*Required=*/std::move(maybeRequired)};
+}
Safelen make(const parser::OmpClause::Safelen &inp,
semantics::SemanticsContext &semaCtx) {
@@ -1374,6 +1408,18 @@ Schedule make(const parser::OmpClause::Schedule &inp,
// SeqCst: empty
+SelfMaps make(const parser::OmpClause::SelfMaps &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp.v -> std::optional<parser::OmpSelfMapsClause>
+ auto &&maybeRequired = maybeApply(
+ [&](const parser::OmpSelfMapsClause &c) {
+ return makeExpr(c.v, semaCtx);
+ },
+ inp.v);
+
+ return SelfMaps{/*Required=*/std::move(maybeRequired)};
+}
+
Severity make(const parser::OmpClause::Severity &inp,
semantics::SemanticsContext &semaCtx) {
// inp -> empty
@@ -1463,8 +1509,29 @@ To make(const parser::OmpClause::To &inp,
/*LocatorList=*/makeObjects(t3, semaCtx)}};
}
-// UnifiedAddress: empty
-// UnifiedSharedMemory: empty
+UnifiedAddress make(const parser::OmpClause::UnifiedAddress &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp.v -> std::optional<parser::OmpUnifiedAddressClause>
+ auto &&maybeRequired = maybeApply(
+ [&](const parser::OmpUnifiedAddressClause &c) {
+ return makeExpr(c.v, semaCtx);
+ },
+ inp.v);
+
+ return UnifiedAddress{/*Required=*/std::move(maybeRequired)};
+}
+
+UnifiedSharedMemory make(const parser::OmpClause::UnifiedSharedMemory &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp.v -> std::optional<parser::OmpUnifiedSharedMemoryClause>
+ auto &&maybeRequired = maybeApply(
+ [&](const parser::OmpUnifiedSharedMemoryClause &c) {
+ return makeExpr(c.v, semaCtx);
+ },
+ inp.v);
+
+ return UnifiedSharedMemory{/*Required=*/std::move(maybeRequired)};
+}
Uniform make(const parser::OmpClause::Uniform &inp,
semantics::SemanticsContext &semaCtx) {
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index 444f274..f86ee01 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -4208,18 +4208,17 @@ bool Fortran::lower::markOpenMPDeferredDeclareTargetFunctions(
void Fortran::lower::genOpenMPRequires(mlir::Operation *mod,
const semantics::Symbol *symbol) {
using MlirRequires = mlir::omp::ClauseRequires;
- using SemaRequires = semantics::WithOmpDeclarative::RequiresFlag;
if (auto offloadMod =
llvm::dyn_cast<mlir::omp::OffloadModuleInterface>(mod)) {
- semantics::WithOmpDeclarative::RequiresFlags semaFlags;
+ semantics::WithOmpDeclarative::RequiresClauses reqs;
if (symbol) {
common::visit(
[&](const auto &details) {
if constexpr (std::is_base_of_v<semantics::WithOmpDeclarative,
std::decay_t<decltype(details)>>) {
if (details.has_ompRequires())
- semaFlags = *details.ompRequires();
+ reqs = *details.ompRequires();
}
},
symbol->details());
@@ -4228,14 +4227,14 @@ void Fortran::lower::genOpenMPRequires(mlir::Operation *mod,
// Use pre-populated omp.requires module attribute if it was set, so that
// the "-fopenmp-force-usm" compiler option is honored.
MlirRequires mlirFlags = offloadMod.getRequires();
- if (semaFlags.test(SemaRequires::ReverseOffload))
+ if (reqs.test(llvm::omp::Clause::OMPC_dynamic_allocators))
+ mlirFlags = mlirFlags | MlirRequires::dynamic_allocators;
+ if (reqs.test(llvm::omp::Clause::OMPC_reverse_offload))
mlirFlags = mlirFlags | MlirRequires::reverse_offload;
- if (semaFlags.test(SemaRequires::UnifiedAddress))
+ if (reqs.test(llvm::omp::Clause::OMPC_unified_address))
mlirFlags = mlirFlags | MlirRequires::unified_address;
- if (semaFlags.test(SemaRequires::UnifiedSharedMemory))
+ if (reqs.test(llvm::omp::Clause::OMPC_unified_shared_memory))
mlirFlags = mlirFlags | MlirRequires::unified_shared_memory;
- if (semaFlags.test(SemaRequires::DynamicAllocators))
- mlirFlags = mlirFlags | MlirRequires::dynamic_allocators;
offloadMod.setRequires(mlirFlags);
}
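
For reference, a hedged Fortran sketch of the REQUIRES clauses whose presence genOpenMPRequires now forwards (via the RequiresClauses set) as omp.requires flags on the offload module:

    module reqs
      !$omp requires reverse_offload unified_shared_memory dynamic_allocators
    end module
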
diff --git a/flang/lib/Optimizer/Builder/Character.cpp b/flang/lib/Optimizer/Builder/Character.cpp
index a096099..155bc0f 100644
--- a/flang/lib/Optimizer/Builder/Character.cpp
+++ b/flang/lib/Optimizer/Builder/Character.cpp
@@ -92,7 +92,7 @@ getCompileTimeLength(const fir::CharBoxValue &box) {
/// Detect the precondition that the value `str` does not reside in memory. Such
/// values will have a type `!fir.array<...x!fir.char<N>>` or `!fir.char<N>`.
-LLVM_ATTRIBUTE_UNUSED static bool needToMaterialize(mlir::Value str) {
+[[maybe_unused]] static bool needToMaterialize(mlir::Value str) {
return mlir::isa<fir::SequenceType>(str.getType()) ||
fir::isa_char(str.getType());
}
diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
index 7c5c5fb..0195178 100644
--- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
@@ -1020,6 +1020,17 @@ static constexpr IntrinsicHandler handlers[]{
&I::genTMABulkCommitGroup,
{{}},
/*isElemental=*/false},
+ {"tma_bulk_g2s",
+ &I::genTMABulkG2S,
+ {{{"barrier", asAddr},
+ {"src", asAddr},
+ {"dst", asAddr},
+ {"nbytes", asValue}}},
+ /*isElemental=*/false},
+ {"tma_bulk_s2g",
+ &I::genTMABulkS2G,
+ {{{"src", asAddr}, {"dst", asAddr}, {"nbytes", asValue}}},
+ /*isElemental=*/false},
{"tma_bulk_wait_group",
&I::genTMABulkWaitGroup,
{{}},
@@ -2158,7 +2169,8 @@ IntrinsicLibrary::genElementalCall<IntrinsicLibrary::ExtendedGenerator>(
for (const fir::ExtendedValue &arg : args) {
auto *box = arg.getBoxOf<fir::BoxValue>();
if (!arg.getUnboxed() && !arg.getCharBox() &&
- !(box && fir::isScalarBoxedRecordType(fir::getBase(*box).getType())))
+ !(box && (fir::isScalarBoxedRecordType(fir::getBase(*box).getType()) ||
+ fir::isClassStarType(fir::getBase(*box).getType()))))
fir::emitFatalError(loc, "nonscalar intrinsic argument");
}
if (outline)
@@ -3200,17 +3212,17 @@ IntrinsicLibrary::genAssociated(mlir::Type resultType,
return fir::runtime::genAssociated(builder, loc, pointerBox, targetBox);
}
-static mlir::Value convertBarrierToLLVM(fir::FirOpBuilder &builder,
- mlir::Location loc,
- mlir::Value barrier) {
+static mlir::Value convertPtrToNVVMSpace(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ mlir::Value barrier,
+ mlir::NVVM::NVVMMemorySpace space) {
mlir::Value llvmPtr = fir::ConvertOp::create(
builder, loc, mlir::LLVM::LLVMPointerType::get(builder.getContext()),
barrier);
mlir::Value addrCast = mlir::LLVM::AddrSpaceCastOp::create(
builder, loc,
- mlir::LLVM::LLVMPointerType::get(
- builder.getContext(),
- static_cast<unsigned>(mlir::NVVM::NVVMMemorySpace::Shared)),
+ mlir::LLVM::LLVMPointerType::get(builder.getContext(),
+ static_cast<unsigned>(space)),
llvmPtr);
return addrCast;
}
@@ -3220,7 +3232,8 @@ mlir::Value
IntrinsicLibrary::genBarrierArrive(mlir::Type resultType,
llvm::ArrayRef<mlir::Value> args) {
assert(args.size() == 1);
- mlir::Value barrier = convertBarrierToLLVM(builder, loc, args[0]);
+ mlir::Value barrier = convertPtrToNVVMSpace(
+ builder, loc, args[0], mlir::NVVM::NVVMMemorySpace::Shared);
return mlir::NVVM::MBarrierArriveSharedOp::create(builder, loc, resultType,
barrier)
.getResult();
@@ -3231,7 +3244,8 @@ mlir::Value
IntrinsicLibrary::genBarrierArriveCnt(mlir::Type resultType,
llvm::ArrayRef<mlir::Value> args) {
assert(args.size() == 2);
- mlir::Value barrier = convertBarrierToLLVM(builder, loc, args[0]);
+ mlir::Value barrier = convertPtrToNVVMSpace(
+ builder, loc, args[0], mlir::NVVM::NVVMMemorySpace::Shared);
mlir::Value token = fir::AllocaOp::create(builder, loc, resultType);
// TODO: the MBarrierArriveExpectTxOp is not taking the state argument and
// currently just the sink symbol `_`.
@@ -3244,8 +3258,8 @@ IntrinsicLibrary::genBarrierArriveCnt(mlir::Type resultType,
// BARRIER_INIT (CUDA)
void IntrinsicLibrary::genBarrierInit(llvm::ArrayRef<fir::ExtendedValue> args) {
assert(args.size() == 2);
- mlir::Value barrier =
- convertBarrierToLLVM(builder, loc, fir::getBase(args[0]));
+ mlir::Value barrier = convertPtrToNVVMSpace(
+ builder, loc, fir::getBase(args[0]), mlir::NVVM::NVVMMemorySpace::Shared);
mlir::NVVM::MBarrierInitSharedOp::create(builder, loc, barrier,
fir::getBase(args[1]), {});
auto kind = mlir::NVVM::ProxyKindAttr::get(
@@ -9204,6 +9218,31 @@ void IntrinsicLibrary::genTMABulkCommitGroup(
mlir::NVVM::CpAsyncBulkCommitGroupOp::create(builder, loc);
}
+// TMA_BULK_G2S (CUDA)
+void IntrinsicLibrary::genTMABulkG2S(llvm::ArrayRef<fir::ExtendedValue> args) {
+ assert(args.size() == 4);
+ mlir::Value barrier = convertPtrToNVVMSpace(
+ builder, loc, fir::getBase(args[0]), mlir::NVVM::NVVMMemorySpace::Shared);
+ mlir::Value dst =
+ convertPtrToNVVMSpace(builder, loc, fir::getBase(args[2]),
+ mlir::NVVM::NVVMMemorySpace::SharedCluster);
+ mlir::Value src = convertPtrToNVVMSpace(builder, loc, fir::getBase(args[1]),
+ mlir::NVVM::NVVMMemorySpace::Global);
+ mlir::NVVM::CpAsyncBulkGlobalToSharedClusterOp::create(
+ builder, loc, dst, src, barrier, fir::getBase(args[3]), {}, {});
+}
+
+// TMA_BULK_S2G (CUDA)
+void IntrinsicLibrary::genTMABulkS2G(llvm::ArrayRef<fir::ExtendedValue> args) {
+ assert(args.size() == 3);
+ mlir::Value src = convertPtrToNVVMSpace(builder, loc, fir::getBase(args[0]),
+ mlir::NVVM::NVVMMemorySpace::Shared);
+ mlir::Value dst = convertPtrToNVVMSpace(builder, loc, fir::getBase(args[1]),
+ mlir::NVVM::NVVMMemorySpace::Global);
+ mlir::NVVM::CpAsyncBulkSharedCTAToGlobalOp::create(
+ builder, loc, dst, src, fir::getBase(args[2]), {}, {});
+}
+
// TMA_BULK_WAIT_GROUP (CUDA)
void IntrinsicLibrary::genTMABulkWaitGroup(
llvm::ArrayRef<fir::ExtendedValue> args) {
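
A hedged CUDA Fortran sketch of the new TMA bulk-copy intrinsics, following the argument order in the handler table (src, dst, nbytes for tma_bulk_s2g); the kernel and buffer declarations are illustrative, not taken from the patch.

    attributes(global) subroutine flush_tile(g)
      real, device :: g(*)
      real, shared :: tile(256)
      ! ... fill tile ...
      call tma_bulk_s2g(tile, g, 4 * 256)   ! shared -> global bulk copy of 1024 bytes
      call tma_bulk_commit_group()
      call tma_bulk_wait_group()
    end subroutine
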
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 4a05cd9..70bb43a2 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -176,6 +176,19 @@ struct AddrOfOpConversion : public fir::FIROpConversion<fir::AddrOfOp> {
llvm::LogicalResult
matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
+
+ if (auto gpuMod = addr->getParentOfType<mlir::gpu::GPUModuleOp>()) {
+ auto global = gpuMod.lookupSymbol<mlir::LLVM::GlobalOp>(addr.getSymbol());
+ replaceWithAddrOfOrASCast(
+ rewriter, addr->getLoc(),
+ global ? global.getAddrSpace() : getGlobalAddressSpace(rewriter),
+ getProgramAddressSpace(rewriter),
+ global ? global.getSymName()
+ : addr.getSymbol().getRootReference().getValue(),
+ convertType(addr.getType()), addr);
+ return mlir::success();
+ }
+
auto global = addr->getParentOfType<mlir::ModuleOp>()
.lookupSymbol<mlir::LLVM::GlobalOp>(addr.getSymbol());
replaceWithAddrOfOrASCast(
@@ -3229,6 +3242,11 @@ struct GlobalOpConversion : public fir::FIROpConversion<fir::GlobalOp> {
g.setAddrSpace(
static_cast<unsigned>(mlir::NVVM::NVVMMemorySpace::Shared));
+ if (global.getDataAttr() &&
+ *global.getDataAttr() == cuf::DataAttribute::Constant)
+ g.setAddrSpace(
+ static_cast<unsigned>(mlir::NVVM::NVVMMemorySpace::Constant));
+
rewriter.eraseOp(global);
return mlir::success();
}
diff --git a/flang/lib/Optimizer/Dialect/FIRType.cpp b/flang/lib/Optimizer/Dialect/FIRType.cpp
index 4a9579c..48e1622 100644
--- a/flang/lib/Optimizer/Dialect/FIRType.cpp
+++ b/flang/lib/Optimizer/Dialect/FIRType.cpp
@@ -336,6 +336,17 @@ bool isBoxedRecordType(mlir::Type ty) {
return false;
}
+// CLASS(*)
+bool isClassStarType(mlir::Type ty) {
+ if (auto clTy = mlir::dyn_cast<fir::ClassType>(fir::unwrapRefType(ty))) {
+ if (mlir::isa<mlir::NoneType>(clTy.getEleTy()))
+ return true;
+ mlir::Type innerType = clTy.unwrapInnerType();
+ return innerType && mlir::isa<mlir::NoneType>(innerType);
+ }
+ return false;
+}
+
bool isScalarBoxedRecordType(mlir::Type ty) {
if (auto refTy = fir::dyn_cast_ptrEleTy(ty))
ty = refTy;
@@ -398,12 +409,8 @@ bool isPolymorphicType(mlir::Type ty) {
bool isUnlimitedPolymorphicType(mlir::Type ty) {
// CLASS(*)
- if (auto clTy = mlir::dyn_cast<fir::ClassType>(fir::unwrapRefType(ty))) {
- if (mlir::isa<mlir::NoneType>(clTy.getEleTy()))
- return true;
- mlir::Type innerType = clTy.unwrapInnerType();
- return innerType && mlir::isa<mlir::NoneType>(innerType);
- }
+ if (isClassStarType(ty))
+ return true;
// TYPE(*)
return isAssumedType(ty);
}
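
A hedged Fortran illustration of what isClassStarType matches: an unlimited polymorphic dummy such as the one below lowers to !fir.class<none>, which the refactored predicate (now also reused by isUnlimitedPolymorphicType) recognizes.

    subroutine show(x)
      class(*), intent(in) :: x   ! lowers to !fir.class<none>
    end subroutine
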
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp b/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
index a48b7ba..63a5803 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
@@ -21,24 +21,27 @@
//===----------------------------------------------------------------------===//
/// Log RAW or WAW conflict.
-static void LLVM_ATTRIBUTE_UNUSED logConflict(llvm::raw_ostream &os,
- mlir::Value writtenOrReadVarA,
- mlir::Value writtenVarB);
+[[maybe_unused]] static void logConflict(llvm::raw_ostream &os,
+ mlir::Value writtenOrReadVarA,
+ mlir::Value writtenVarB);
/// Log when an expression evaluation must be saved.
-static void LLVM_ATTRIBUTE_UNUSED logSaveEvaluation(llvm::raw_ostream &os,
- unsigned runid,
- mlir::Region &yieldRegion,
- bool anyWrite);
+[[maybe_unused]] static void logSaveEvaluation(llvm::raw_ostream &os,
+ unsigned runid,
+ mlir::Region &yieldRegion,
+ bool anyWrite);
/// Log when an assignment is scheduled.
-static void LLVM_ATTRIBUTE_UNUSED logAssignmentEvaluation(
- llvm::raw_ostream &os, unsigned runid, hlfir::RegionAssignOp assign);
+[[maybe_unused]] static void
+logAssignmentEvaluation(llvm::raw_ostream &os, unsigned runid,
+ hlfir::RegionAssignOp assign);
/// Log when starting to schedule an order assignment tree.
-static void LLVM_ATTRIBUTE_UNUSED logStartScheduling(
- llvm::raw_ostream &os, hlfir::OrderedAssignmentTreeOpInterface root);
+[[maybe_unused]] static void
+logStartScheduling(llvm::raw_ostream &os,
+ hlfir::OrderedAssignmentTreeOpInterface root);
/// Log op if effect value is not known.
-static void LLVM_ATTRIBUTE_UNUSED logIfUnkownEffectValue(
- llvm::raw_ostream &os, mlir::MemoryEffects::EffectInstance effect,
- mlir::Operation &op);
+[[maybe_unused]] static void
+logIfUnkownEffectValue(llvm::raw_ostream &os,
+ mlir::MemoryEffects::EffectInstance effect,
+ mlir::Operation &op);
//===----------------------------------------------------------------------===//
// Scheduling Implementation
@@ -701,23 +704,24 @@ static llvm::raw_ostream &printRegionPath(llvm::raw_ostream &os,
return printRegionId(os, yieldRegion);
}
-static void LLVM_ATTRIBUTE_UNUSED logSaveEvaluation(llvm::raw_ostream &os,
- unsigned runid,
- mlir::Region &yieldRegion,
- bool anyWrite) {
+[[maybe_unused]] static void logSaveEvaluation(llvm::raw_ostream &os,
+ unsigned runid,
+ mlir::Region &yieldRegion,
+ bool anyWrite) {
os << "run " << runid << " save " << (anyWrite ? "(w)" : " ") << ": ";
printRegionPath(os, yieldRegion) << "\n";
}
-static void LLVM_ATTRIBUTE_UNUSED logAssignmentEvaluation(
- llvm::raw_ostream &os, unsigned runid, hlfir::RegionAssignOp assign) {
+[[maybe_unused]] static void
+logAssignmentEvaluation(llvm::raw_ostream &os, unsigned runid,
+ hlfir::RegionAssignOp assign) {
os << "run " << runid << " evaluate: ";
printNodePath(os, assign.getOperation()) << "\n";
}
-static void LLVM_ATTRIBUTE_UNUSED logConflict(llvm::raw_ostream &os,
- mlir::Value writtenOrReadVarA,
- mlir::Value writtenVarB) {
+[[maybe_unused]] static void logConflict(llvm::raw_ostream &os,
+ mlir::Value writtenOrReadVarA,
+ mlir::Value writtenVarB) {
auto printIfValue = [&](mlir::Value var) -> llvm::raw_ostream & {
if (!var)
return os << "<unknown>";
@@ -728,8 +732,9 @@ static void LLVM_ATTRIBUTE_UNUSED logConflict(llvm::raw_ostream &os,
printIfValue(writtenVarB) << "\n";
}
-static void LLVM_ATTRIBUTE_UNUSED logStartScheduling(
- llvm::raw_ostream &os, hlfir::OrderedAssignmentTreeOpInterface root) {
+[[maybe_unused]] static void
+logStartScheduling(llvm::raw_ostream &os,
+ hlfir::OrderedAssignmentTreeOpInterface root) {
os << "------------ scheduling ";
printNodePath(os, root.getOperation());
if (auto funcOp = root->getParentOfType<mlir::func::FuncOp>())
@@ -737,9 +742,10 @@ static void LLVM_ATTRIBUTE_UNUSED logStartScheduling(
os << "------------\n";
}
-static void LLVM_ATTRIBUTE_UNUSED logIfUnkownEffectValue(
- llvm::raw_ostream &os, mlir::MemoryEffects::EffectInstance effect,
- mlir::Operation &op) {
+[[maybe_unused]] static void
+logIfUnkownEffectValue(llvm::raw_ostream &os,
+ mlir::MemoryEffects::EffectInstance effect,
+ mlir::Operation &op) {
if (effect.getValue() != nullptr)
return;
os << "unknown effected value (";
diff --git a/flang/lib/Optimizer/OpenACC/CMakeLists.txt b/flang/lib/Optimizer/OpenACC/CMakeLists.txt
index fc23e64..790b9fd 100644
--- a/flang/lib/Optimizer/OpenACC/CMakeLists.txt
+++ b/flang/lib/Optimizer/OpenACC/CMakeLists.txt
@@ -1 +1,2 @@
add_subdirectory(Support)
+add_subdirectory(Transforms)
diff --git a/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp b/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp
index 89aa010..ed9e41c 100644
--- a/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp
+++ b/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp
@@ -21,6 +21,7 @@
#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Dialect/Support/FIRContext.h"
#include "flang/Optimizer/Dialect/Support/KindMapping.h"
+#include "flang/Optimizer/Support/Utils.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/OpenACC/OpenACC.h"
#include "mlir/IR/BuiltinOps.h"
@@ -352,6 +353,14 @@ getBaseRef(mlir::TypedValue<mlir::acc::PointerLikeType> varPtr) {
// calculation op.
mlir::Value baseRef =
llvm::TypeSwitch<mlir::Operation *, mlir::Value>(op)
+ .Case<fir::DeclareOp>([&](auto op) {
+ // If this declare binds a view with an underlying storage operand,
+ // treat that storage as the base reference. Otherwise, fall back
+ // to the declared memref.
+ if (auto storage = op.getStorage())
+ return storage;
+ return mlir::Value(varPtr);
+ })
.Case<hlfir::DesignateOp>([&](auto op) {
// Get the base object.
return op.getMemref();
@@ -548,14 +557,27 @@ template <typename Ty>
mlir::Value OpenACCMappableModel<Ty>::generatePrivateInit(
mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
mlir::TypedValue<mlir::acc::MappableType> var, llvm::StringRef varName,
- mlir::ValueRange extents, mlir::Value initVal) const {
+ mlir::ValueRange extents, mlir::Value initVal, bool &needsDestroy) const {
+ needsDestroy = false;
mlir::Value retVal;
mlir::Type unwrappedTy = fir::unwrapRefType(type);
mlir::ModuleOp mod = builder.getInsertionBlock()
->getParent()
->getParentOfType<mlir::ModuleOp>();
- fir::FirOpBuilder firBuilder(builder, mod);
+ if (auto recType = llvm::dyn_cast<fir::RecordType>(
+ fir::getFortranElementType(unwrappedTy))) {
+ // Need to make deep copies of allocatable components.
+ if (fir::isRecordWithAllocatableMember(recType))
+ TODO(loc,
+ "OpenACC: privatizing derived type with allocatable components");
+ // Need to decide if user assignment/final routine should be called.
+ if (fir::isRecordWithFinalRoutine(recType, mod).value_or(false))
+ TODO(loc, "OpenACC: privatizing derived type with user assignment or "
+ "final routine ");
+ }
+
+ fir::FirOpBuilder firBuilder(builder, mod);
auto getDeclareOpForType = [&](mlir::Type ty) -> hlfir::DeclareOp {
auto alloca = fir::AllocaOp::create(firBuilder, loc, ty);
return hlfir::DeclareOp::create(firBuilder, loc, alloca, varName);
@@ -615,9 +637,11 @@ mlir::Value OpenACCMappableModel<Ty>::generatePrivateInit(
mlir::Value firClass =
fir::EmboxOp::create(builder, loc, boxTy, allocatedScalar);
fir::StoreOp::create(builder, loc, firClass, retVal);
+ needsDestroy = true;
} else if (mlir::isa<fir::SequenceType>(innerTy)) {
hlfir::Entity source = hlfir::Entity{var};
- auto [temp, cleanup] = hlfir::createTempFromMold(loc, firBuilder, source);
+ auto [temp, cleanupFlag] =
+ hlfir::createTempFromMold(loc, firBuilder, source);
if (fir::isa_ref_type(type)) {
// When the temp is created - it is not a reference - thus we can
// end up with a type inconsistency. Therefore ensure storage is created
@@ -636,6 +660,9 @@ mlir::Value OpenACCMappableModel<Ty>::generatePrivateInit(
} else {
retVal = temp;
}
+ // If heap was allocated, a destroy is required later.
+ if (cleanupFlag)
+ needsDestroy = true;
} else {
TODO(loc, "Unsupported boxed type for OpenACC private-like recipe");
}
@@ -667,23 +694,302 @@ template mlir::Value
OpenACCMappableModel<fir::BaseBoxType>::generatePrivateInit(
mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
mlir::TypedValue<mlir::acc::MappableType> var, llvm::StringRef varName,
- mlir::ValueRange extents, mlir::Value initVal) const;
+ mlir::ValueRange extents, mlir::Value initVal, bool &needsDestroy) const;
template mlir::Value
OpenACCMappableModel<fir::ReferenceType>::generatePrivateInit(
mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
mlir::TypedValue<mlir::acc::MappableType> var, llvm::StringRef varName,
- mlir::ValueRange extents, mlir::Value initVal) const;
+ mlir::ValueRange extents, mlir::Value initVal, bool &needsDestroy) const;
template mlir::Value OpenACCMappableModel<fir::HeapType>::generatePrivateInit(
mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
mlir::TypedValue<mlir::acc::MappableType> var, llvm::StringRef varName,
- mlir::ValueRange extents, mlir::Value initVal) const;
+ mlir::ValueRange extents, mlir::Value initVal, bool &needsDestroy) const;
template mlir::Value
OpenACCMappableModel<fir::PointerType>::generatePrivateInit(
mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
mlir::TypedValue<mlir::acc::MappableType> var, llvm::StringRef varName,
- mlir::ValueRange extents, mlir::Value initVal) const;
+ mlir::ValueRange extents, mlir::Value initVal, bool &needsDestroy) const;
+
+template <typename Ty>
+bool OpenACCMappableModel<Ty>::generatePrivateDestroy(
+ mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::Value privatized) const {
+ mlir::Type unwrappedTy = fir::unwrapRefType(type);
+ // For boxed scalars allocated with AllocMem during init, free the heap.
+ if (auto boxTy = mlir::dyn_cast_or_null<fir::BaseBoxType>(unwrappedTy)) {
+ mlir::Value boxVal = privatized;
+ if (fir::isa_ref_type(boxVal.getType()))
+ boxVal = fir::LoadOp::create(builder, loc, boxVal);
+ mlir::Value addr = fir::BoxAddrOp::create(builder, loc, boxVal);
+ // FreeMem only accepts fir.heap and this may not be represented in the box
+ // type if the privatized entity is not an allocatable.
+ mlir::Type heapType =
+ fir::HeapType::get(fir::unwrapRefType(addr.getType()));
+ if (heapType != addr.getType())
+ addr = fir::ConvertOp::create(builder, loc, heapType, addr);
+ fir::FreeMemOp::create(builder, loc, addr);
+ return true;
+ }
+
+ // Nothing to do for other categories by default, they are stack allocated.
+ return true;
+}
+
+template bool OpenACCMappableModel<fir::BaseBoxType>::generatePrivateDestroy(
+ mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::Value privatized) const;
+template bool OpenACCMappableModel<fir::ReferenceType>::generatePrivateDestroy(
+ mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::Value privatized) const;
+template bool OpenACCMappableModel<fir::HeapType>::generatePrivateDestroy(
+ mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::Value privatized) const;
+template bool OpenACCMappableModel<fir::PointerType>::generatePrivateDestroy(
+ mlir::Type type, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::Value privatized) const;
+
+template <typename Ty>
+mlir::Value OpenACCPointerLikeModel<Ty>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const {
+
+ // Unwrap to get the pointee type.
+ mlir::Type pointeeTy = fir::dyn_cast_ptrEleTy(pointer);
+ assert(pointeeTy && "expected pointee type to be extractable");
+
+ // Box types are descriptors that contain both metadata and a pointer to data.
+ // The `genAllocate` API is designed for simple allocations and cannot
+ // properly handle the dual nature of boxes. Using `generatePrivateInit`
+ // instead can allocate both the descriptor and its referenced data. For use
+ // cases that require an empty descriptor storage, potentially this could be
+ // implemented here.
+ if (fir::isa_box_type(pointeeTy))
+ return {};
+
+ // Unlimited polymorphic (class(*)) cannot be handled - size unknown
+ if (fir::isUnlimitedPolymorphicType(pointeeTy))
+ return {};
+
+ // Return null for dynamic size types because the size of the
+ // allocation cannot be determined simply from the type.
+ if (fir::hasDynamicSize(pointeeTy))
+ return {};
+
+ // Use heap allocation for fir.heap, stack allocation for others (fir.ref,
+ // fir.ptr, fir.llvm_ptr). For fir.ptr, which is supposed to represent a
+ // Fortran pointer type, it feels a bit odd to "allocate" since it is meant
+ // to point to an existing entity - but one can imagine where a pointee is
+ // privatized - thus it makes sense to issue an allocate.
+ mlir::Value allocation;
+ if (std::is_same_v<Ty, fir::HeapType>) {
+ needsFree = true;
+ allocation = fir::AllocMemOp::create(builder, loc, pointeeTy);
+ } else {
+ needsFree = false;
+ allocation = fir::AllocaOp::create(builder, loc, pointeeTy);
+ }
+
+ // Convert to the requested pointer type if needed.
+ // This means converting from a fir.ref to either a fir.llvm_ptr or a fir.ptr.
+ // fir.heap is already correct type in this case.
+ if (allocation.getType() != pointer) {
+ assert(!(std::is_same_v<Ty, fir::HeapType>) &&
+ "fir.heap is already correct type because of allocmem");
+ return fir::ConvertOp::create(builder, loc, pointer, allocation);
+ }
+
+ return allocation;
+}
+
+template mlir::Value OpenACCPointerLikeModel<fir::ReferenceType>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const;
+
+template mlir::Value OpenACCPointerLikeModel<fir::PointerType>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const;
+
+template mlir::Value OpenACCPointerLikeModel<fir::HeapType>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const;
+
+template mlir::Value OpenACCPointerLikeModel<fir::LLVMPointerType>::genAllocate(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ llvm::StringRef varName, mlir::Type varType, mlir::Value originalVar,
+ bool &needsFree) const;
+
+static mlir::Value stripCasts(mlir::Value value, bool stripDeclare = true) {
+ mlir::Value currentValue = value;
+
+ while (currentValue) {
+ auto *definingOp = currentValue.getDefiningOp();
+ if (!definingOp)
+ break;
+
+ if (auto convertOp = mlir::dyn_cast<fir::ConvertOp>(definingOp)) {
+ currentValue = convertOp.getValue();
+ continue;
+ }
+
+ if (auto viewLike = mlir::dyn_cast<mlir::ViewLikeOpInterface>(definingOp)) {
+ currentValue = viewLike.getViewSource();
+ continue;
+ }
+
+ if (stripDeclare) {
+ if (auto declareOp = mlir::dyn_cast<hlfir::DeclareOp>(definingOp)) {
+ currentValue = declareOp.getMemref();
+ continue;
+ }
+
+ if (auto declareOp = mlir::dyn_cast<fir::DeclareOp>(definingOp)) {
+ currentValue = declareOp.getMemref();
+ continue;
+ }
+ }
+ break;
+ }
+
+ return currentValue;
+}
+
+template <typename Ty>
+bool OpenACCPointerLikeModel<Ty>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const {
+
+ // Unwrap to get the pointee type.
+ mlir::Type pointeeTy = fir::dyn_cast_ptrEleTy(pointer);
+ assert(pointeeTy && "expected pointee type to be extractable");
+
+ // Box types contain both a descriptor and data. The `genFree` API
+ // handles simple deallocations and cannot properly manage both parts.
+ // Using `generatePrivateDestroy` instead can free both the descriptor and
+ // its referenced data.
+ if (fir::isa_box_type(pointeeTy))
+ return false;
+
+ // If pointer type is HeapType, assume it's a heap allocation
+ if (std::is_same_v<Ty, fir::HeapType>) {
+ fir::FreeMemOp::create(builder, loc, varToFree);
+ return true;
+ }
+
+ // Use allocRes if provided to determine the allocation type
+ mlir::Value valueToInspect = allocRes ? allocRes : varToFree;
+
+ // Strip casts and declare operations to find the original allocation
+ mlir::Value strippedValue = stripCasts(valueToInspect);
+ mlir::Operation *originalAlloc = strippedValue.getDefiningOp();
+
+ // If we found an AllocMemOp (heap allocation), free it
+ if (mlir::isa_and_nonnull<fir::AllocMemOp>(originalAlloc)) {
+ mlir::Value toFree = varToFree;
+ if (!mlir::isa<fir::HeapType>(valueToInspect.getType()))
+ toFree = fir::ConvertOp::create(
+ builder, loc,
+ fir::HeapType::get(varToFree.getType().getElementType()), toFree);
+ fir::FreeMemOp::create(builder, loc, toFree);
+ return true;
+ }
+
+ // If we found an AllocaOp (stack allocation), no deallocation needed
+ if (mlir::isa_and_nonnull<fir::AllocaOp>(originalAlloc))
+ return true;
+
+ // Unable to determine allocation type
+ return false;
+}
+
+template bool OpenACCPointerLikeModel<fir::ReferenceType>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::PointerType>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::HeapType>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::LLVMPointerType>::genFree(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> varToFree,
+ mlir::Value allocRes, mlir::Type varType) const;
+
+template <typename Ty>
+bool OpenACCPointerLikeModel<Ty>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const {
+
+ // Check that source and destination types match
+ if (source.getType() != destination.getType())
+ return false;
+
+ // Unwrap to get the pointee type.
+ mlir::Type pointeeTy = fir::dyn_cast_ptrEleTy(pointer);
+ assert(pointeeTy && "expected pointee type to be extractable");
+
+ // Box types contain both a descriptor and referenced data. The genCopy API
+ // handles simple copies and cannot properly manage both parts.
+ if (fir::isa_box_type(pointeeTy))
+ return false;
+
+ // Unlimited polymorphic (class(*)) cannot be handled because source and
+ // destination types are not known.
+ if (fir::isUnlimitedPolymorphicType(pointeeTy))
+ return false;
+
+ // Return false for dynamic size types because the copy logic
+ // cannot be determined simply from the type.
+ if (fir::hasDynamicSize(pointeeTy))
+ return false;
+
+ if (fir::isa_trivial(pointeeTy)) {
+ auto loadVal = fir::LoadOp::create(builder, loc, source);
+ fir::StoreOp::create(builder, loc, loadVal, destination);
+ } else {
+ hlfir::AssignOp::create(builder, loc, source, destination);
+ }
+ return true;
+}
+
+template bool OpenACCPointerLikeModel<fir::ReferenceType>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::PointerType>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::HeapType>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const;
+
+template bool OpenACCPointerLikeModel<fir::LLVMPointerType>::genCopy(
+ mlir::Type pointer, mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::TypedValue<mlir::acc::PointerLikeType> destination,
+ mlir::TypedValue<mlir::acc::PointerLikeType> source,
+ mlir::Type varType) const;
} // namespace fir::acc
diff --git a/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp b/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp
new file mode 100644
index 0000000..4840a99
--- /dev/null
+++ b/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp
@@ -0,0 +1,191 @@
+//===- ACCRecipeBufferization.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Bufferize OpenACC recipes that yield fir.box<T> to operate on
+// fir.ref<fir.box<T>> and update uses accordingly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Optimizer/Dialect/FIROps.h"
+#include "flang/Optimizer/OpenACC/Passes.h"
+#include "mlir/Dialect/OpenACC/OpenACC.h"
+#include "mlir/IR/Block.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/SymbolTable.h"
+#include "mlir/IR/Value.h"
+#include "mlir/IR/Visitors.h"
+#include "llvm/ADT/TypeSwitch.h"
+
+namespace fir::acc {
+#define GEN_PASS_DEF_ACCRECIPEBUFFERIZATION
+#include "flang/Optimizer/OpenACC/Passes.h.inc"
+} // namespace fir::acc
+
+namespace {
+
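+// Describes how recipe values are bufferized: which recipe types must be
+// bufferized (fir.box<T> becomes fir.ref<fir.box<T>>), how to read a value
+// back from memory (fir.load), and how to place a value into memory
+// (fir.alloca + fir.store).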
+class BufferizeInterface {
+public:
+ static std::optional<mlir::Type> mustBufferize(mlir::Type recipeType) {
+ if (auto boxTy = llvm::dyn_cast<fir::BaseBoxType>(recipeType))
+ return fir::ReferenceType::get(boxTy);
+ return std::nullopt;
+ }
+
+ static mlir::Operation *load(mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::Value value) {
+ return builder.create<fir::LoadOp>(loc, value);
+ }
+
+ static mlir::Value placeInMemory(mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::Value value) {
+ auto alloca = builder.create<fir::AllocaOp>(loc, value.getType());
+ builder.create<fir::StoreOp>(loc, value, alloca);
+ return alloca;
+ }
+};
+
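+// Rewrite a recipe region so that block arguments of `oldType` (a fir.box)
+// carry the bufferized `newType` (a reference to that box) instead: existing
+// uses read the box back through a fir.load, and values of the old type
+// yielded by acc.yield are stored into a temporary alloca that is yielded in
+// their place.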
+static void bufferizeRegionArgsAndYields(mlir::Region &region,
+ mlir::Location loc, mlir::Type oldType,
+ mlir::Type newType) {
+ if (region.empty())
+ return;
+
+ mlir::OpBuilder builder(&region);
+ for (mlir::BlockArgument arg : region.getArguments()) {
+ if (arg.getType() == oldType) {
+ arg.setType(newType);
+ if (!arg.use_empty()) {
+ mlir::Operation *loadOp = BufferizeInterface::load(builder, loc, arg);
+ arg.replaceAllUsesExcept(loadOp->getResult(0), loadOp);
+ }
+ }
+ }
+ if (auto yield =
+ llvm::dyn_cast<mlir::acc::YieldOp>(region.back().getTerminator())) {
+ llvm::SmallVector<mlir::Value> newOperands;
+ newOperands.reserve(yield.getNumOperands());
+ bool changed = false;
+ for (mlir::Value oldYieldArg : yield.getOperands()) {
+ if (oldYieldArg.getType() == oldType) {
+ builder.setInsertionPoint(yield);
+ mlir::Value alloca =
+ BufferizeInterface::placeInMemory(builder, loc, oldYieldArg);
+ newOperands.push_back(alloca);
+ changed = true;
+ } else {
+ newOperands.push_back(oldYieldArg);
+ }
+ }
+ if (changed)
+ yield->setOperands(newOperands);
+ }
+}
+
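+// For each operand of `computeOp` associated with the bufferized recipe
+// `recipeSymName`, wrap the original variable in an alloca + store so that
+// the acc.private/firstprivate/reduction data operation now produces a
+// reference to the box, and insert fir.load in front of any other user of
+// the old result so it still sees a box value.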
+static void updateRecipeUse(mlir::ArrayAttr recipes, mlir::ValueRange operands,
+ llvm::StringRef recipeSymName,
+ mlir::Operation *computeOp) {
+ if (!recipes)
+ return;
+ for (auto [recipeSym, oldRes] : llvm::zip(recipes, operands)) {
+ if (llvm::cast<mlir::SymbolRefAttr>(recipeSym).getLeafReference() !=
+ recipeSymName)
+ continue;
+
+ mlir::Operation *dataOp = oldRes.getDefiningOp();
+ assert(dataOp && "dataOp must be paired with computeOp");
+ mlir::Location loc = dataOp->getLoc();
+ mlir::OpBuilder builder(dataOp);
+ llvm::TypeSwitch<mlir::Operation *, void>(dataOp)
+ .Case<mlir::acc::PrivateOp, mlir::acc::FirstprivateOp,
+ mlir::acc::ReductionOp>([&](auto privateOp) {
+ builder.setInsertionPointAfterValue(privateOp.getVar());
+ mlir::Value alloca = BufferizeInterface::placeInMemory(
+ builder, loc, privateOp.getVar());
+ privateOp.getVarMutable().assign(alloca);
+ privateOp.getAccVar().setType(alloca.getType());
+ });
+
+ llvm::SmallVector<mlir::Operation *> users(oldRes.getUsers().begin(),
+ oldRes.getUsers().end());
+ for (mlir::Operation *useOp : users) {
+ if (useOp == computeOp)
+ continue;
+ builder.setInsertionPoint(useOp);
+ mlir::Operation *load = BufferizeInterface::load(builder, loc, oldRes);
+ useOp->replaceUsesOfWith(oldRes, load->getResult(0));
+ }
+ }
+}
+
+class ACCRecipeBufferization
+ : public fir::acc::impl::ACCRecipeBufferizationBase<
+ ACCRecipeBufferization> {
+public:
+ void runOnOperation() override {
+ mlir::ModuleOp module = getOperation();
+
+ llvm::SmallVector<llvm::StringRef> recipeNames;
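+    // First pass: bufferize every box-typed private/firstprivate/reduction
+    // recipe declaration and record its symbol name.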
+ module.walk([&](mlir::Operation *recipe) {
+ llvm::TypeSwitch<mlir::Operation *, void>(recipe)
+ .Case<mlir::acc::PrivateRecipeOp, mlir::acc::FirstprivateRecipeOp,
+ mlir::acc::ReductionRecipeOp>([&](auto recipe) {
+ mlir::Type oldType = recipe.getType();
+ auto bufferizedType =
+ BufferizeInterface::mustBufferize(recipe.getType());
+ if (!bufferizedType)
+ return;
+ recipe.setTypeAttr(mlir::TypeAttr::get(*bufferizedType));
+ mlir::Location loc = recipe.getLoc();
+ using RecipeOp = decltype(recipe);
+ bufferizeRegionArgsAndYields(recipe.getInitRegion(), loc, oldType,
+ *bufferizedType);
+ if constexpr (std::is_same_v<RecipeOp,
+ mlir::acc::FirstprivateRecipeOp>)
+ bufferizeRegionArgsAndYields(recipe.getCopyRegion(), loc, oldType,
+ *bufferizedType);
+ if constexpr (std::is_same_v<RecipeOp,
+ mlir::acc::ReductionRecipeOp>)
+ bufferizeRegionArgsAndYields(recipe.getCombinerRegion(), loc,
+ oldType, *bufferizedType);
+ bufferizeRegionArgsAndYields(recipe.getDestroyRegion(), loc,
+ oldType, *bufferizedType);
+ recipeNames.push_back(recipe.getSymName());
+ });
+ });
+ if (recipeNames.empty())
+ return;
+
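+    // Second pass: update every acc.loop/parallel/serial operation (and the
+    // acc data operations feeding it) that references a bufferized recipe.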
+ module.walk([&](mlir::Operation *op) {
+ llvm::TypeSwitch<mlir::Operation *, void>(op)
+ .Case<mlir::acc::LoopOp, mlir::acc::ParallelOp, mlir::acc::SerialOp>(
+ [&](auto computeOp) {
+ for (llvm::StringRef recipeName : recipeNames) {
+ if (computeOp.getPrivatizationRecipes())
+ updateRecipeUse(computeOp.getPrivatizationRecipesAttr(),
+ computeOp.getPrivateOperands(), recipeName,
+ op);
+ if (computeOp.getFirstprivatizationRecipes())
+ updateRecipeUse(
+ computeOp.getFirstprivatizationRecipesAttr(),
+ computeOp.getFirstprivateOperands(), recipeName, op);
+ if (computeOp.getReductionRecipes())
+ updateRecipeUse(computeOp.getReductionRecipesAttr(),
+ computeOp.getReductionOperands(),
+ recipeName, op);
+ }
+ });
+ });
+ }
+};
+
+} // namespace
+
+std::unique_ptr<mlir::Pass> fir::acc::createACCRecipeBufferizationPass() {
+ return std::make_unique<ACCRecipeBufferization>();
+}
diff --git a/flang/lib/Optimizer/OpenACC/Transforms/CMakeLists.txt b/flang/lib/Optimizer/OpenACC/Transforms/CMakeLists.txt
new file mode 100644
index 0000000..2427da0
--- /dev/null
+++ b/flang/lib/Optimizer/OpenACC/Transforms/CMakeLists.txt
@@ -0,0 +1,12 @@
+add_flang_library(FIROpenACCTransforms
+ ACCRecipeBufferization.cpp
+
+ DEPENDS
+ FIROpenACCPassesIncGen
+
+ LINK_LIBS
+ MLIRIR
+ MLIRPass
+ FIRDialect
+ MLIROpenACCDialect
+)
diff --git a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
index 260e525..2bbd803 100644
--- a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
+++ b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
@@ -40,6 +40,7 @@
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
+#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
@@ -128,6 +129,17 @@ class MapInfoFinalizationPass
}
}
+ /// Return true if the module has an OpenMP requires clause that includes
+ /// unified_shared_memory.
+ static bool moduleRequiresUSM(mlir::ModuleOp module) {
+ assert(module && "invalid module");
+ if (auto req = module->getAttrOfType<mlir::omp::ClauseRequiresAttr>(
+ "omp.requires"))
+ return mlir::omp::bitEnumContainsAll(
+ req.getValue(), mlir::omp::ClauseRequires::unified_shared_memory);
+ return false;
+ }
+
/// Create the member map for coordRef and append it (and its index
/// path) to the provided new* vectors, if it is not already present.
void appendMemberMapIfNew(
@@ -425,8 +437,12 @@ class MapInfoFinalizationPass
mapFlags flags = mapFlags::OMP_MAP_TO |
(mapFlags(mapTypeFlag) &
- (mapFlags::OMP_MAP_IMPLICIT | mapFlags::OMP_MAP_CLOSE |
- mapFlags::OMP_MAP_ALWAYS));
+ (mapFlags::OMP_MAP_IMPLICIT | mapFlags::OMP_MAP_ALWAYS));
+  // When unified_shared_memory is required, additionally add `CLOSE` on the
+  // descriptor so that it gets device-local placement under USM + close
+  // semantics.
+ if (moduleRequiresUSM(target->getParentOfType<mlir::ModuleOp>()))
+ flags |= mapFlags::OMP_MAP_CLOSE;
return llvm::to_underlying(flags);
}
@@ -518,6 +534,75 @@ class MapInfoFinalizationPass
return newMapInfoOp;
}
+  // Expand a mapping of type(C_PTR) so that its `__address` field is mapped
+  // explicitly as a single pointer-sized member. This is gated on
+  // unified_shared_memory at the call site, ensuring the pointer-sized
+  // mapping is used in USM scenarios.
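+  // Illustrative Fortran sketch of the situation this targets (assuming the
+  // usual lowering of type(c_ptr) to a record with a single __address field):
+  //   use iso_c_binding
+  //   type(c_ptr) :: p
+  //   !$omp target map(tofrom: p)   ! with "!$omp requires unified_shared_memory"
+  // The map of `p` is rebuilt as a parent map whose only member covers the
+  // pointer-sized __address component.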
+ mlir::omp::MapInfoOp genCptrMemberMap(mlir::omp::MapInfoOp op,
+ fir::FirOpBuilder &builder) {
+ if (!op.getMembers().empty())
+ return op;
+
+ mlir::Type varTy = fir::unwrapRefType(op.getVarPtr().getType());
+ if (!mlir::isa<fir::RecordType>(varTy))
+ return op;
+ auto recTy = mlir::cast<fir::RecordType>(varTy);
+ // If not a builtin C_PTR record, skip.
+ if (!recTy.getName().ends_with("__builtin_c_ptr"))
+ return op;
+
+ // Find the index of the c_ptr address component named "__address".
+ int32_t fieldIdx = recTy.getFieldIndex("__address");
+ if (fieldIdx < 0)
+ return op;
+
+ mlir::Location loc = op.getVarPtr().getLoc();
+ mlir::Type memTy = recTy.getType(fieldIdx);
+ fir::IntOrValue idxConst =
+ mlir::IntegerAttr::get(builder.getI32Type(), fieldIdx);
+ mlir::Value coord = fir::CoordinateOp::create(
+ builder, loc, builder.getRefType(memTy), op.getVarPtr(),
+ llvm::SmallVector<fir::IntOrValue, 1>{idxConst});
+
+ // Child for the `__address` member.
+ llvm::SmallVector<llvm::SmallVector<int64_t>> memberIdx = {{0}};
+ mlir::ArrayAttr newMembersAttr = builder.create2DI64ArrayAttr(memberIdx);
+  // Force CLOSE in USM paths so that the pointer gets device-local placement
+  // under USM + close semantics.
+ uint64_t mapTypeVal =
+ op.getMapType() |
+ llvm::to_underlying(
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_CLOSE);
+ mlir::IntegerAttr mapTypeAttr = builder.getIntegerAttr(
+ builder.getIntegerType(64, /*isSigned=*/false), mapTypeVal);
+
+ mlir::omp::MapInfoOp memberMap = mlir::omp::MapInfoOp::create(
+ builder, loc, coord.getType(), coord,
+ mlir::TypeAttr::get(fir::unwrapRefType(coord.getType())), mapTypeAttr,
+ builder.getAttr<mlir::omp::VariableCaptureKindAttr>(
+ mlir::omp::VariableCaptureKind::ByRef),
+ /*varPtrPtr=*/mlir::Value{},
+ /*members=*/llvm::SmallVector<mlir::Value>{},
+ /*member_index=*/mlir::ArrayAttr{},
+ /*bounds=*/op.getBounds(),
+ /*mapperId=*/mlir::FlatSymbolRefAttr(),
+ /*name=*/op.getNameAttr(),
+ /*partial_map=*/builder.getBoolAttr(false));
+
+ // Rebuild the parent as a container with the `__address` member.
+ mlir::omp::MapInfoOp newParent = mlir::omp::MapInfoOp::create(
+ builder, op.getLoc(), op.getResult().getType(), op.getVarPtr(),
+ op.getVarTypeAttr(), mapTypeAttr, op.getMapCaptureTypeAttr(),
+ /*varPtrPtr=*/mlir::Value{},
+ /*members=*/llvm::SmallVector<mlir::Value>{memberMap},
+ /*member_index=*/newMembersAttr,
+ /*bounds=*/llvm::SmallVector<mlir::Value>{},
+ /*mapperId=*/mlir::FlatSymbolRefAttr(), op.getNameAttr(),
+ /*partial_map=*/builder.getBoolAttr(false));
+ op.replaceAllUsesWith(newParent.getResult());
+ op->erase();
+ return newParent;
+ }
+
mlir::omp::MapInfoOp genDescriptorMemberMaps(mlir::omp::MapInfoOp op,
fir::FirOpBuilder &builder,
mlir::Operation *target) {
@@ -1169,6 +1254,17 @@ class MapInfoFinalizationPass
genBoxcharMemberMap(op, builder);
});
+    // Expand type(C_PTR) mappings only when unified_shared_memory is
+    // required, to get device-visible pointer behavior in USM scenarios
+    // without changing the default mapping elsewhere.
+    func->walk([&](mlir::omp::MapInfoOp op) {
+ if (!moduleRequiresUSM(func->getParentOfType<mlir::ModuleOp>()))
+ return;
+ builder.setInsertionPoint(op);
+ genCptrMemberMap(op, builder);
+ });
+
func->walk([&](mlir::omp::MapInfoOp op) {
// TODO: Currently only supports a single user for the MapInfoOp. This
// is fine for the moment, as the Fortran frontend will generate a
diff --git a/flang/lib/Optimizer/Support/Utils.cpp b/flang/lib/Optimizer/Support/Utils.cpp
index c71642c..92390e4a 100644
--- a/flang/lib/Optimizer/Support/Utils.cpp
+++ b/flang/lib/Optimizer/Support/Utils.cpp
@@ -51,6 +51,16 @@ std::optional<llvm::ArrayRef<int64_t>> fir::getComponentLowerBoundsIfNonDefault(
return std::nullopt;
}
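+// Return whether `recordType` has a final routine, as recorded on its
+// fir.type_info operation; returns std::nullopt if no type info operation
+// can be found for the type.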
+std::optional<bool>
+fir::isRecordWithFinalRoutine(fir::RecordType recordType, mlir::ModuleOp module,
+ const mlir::SymbolTable *symbolTable) {
+ fir::TypeInfoOp typeInfo =
+ fir::lookupTypeInfoOp(recordType, module, symbolTable);
+ if (!typeInfo)
+ return std::nullopt;
+ return !typeInfo.getNoFinal();
+}
+
mlir::LLVM::ConstantOp
fir::genConstantIndex(mlir::Location loc, mlir::Type ity,
mlir::ConversionPatternRewriter &rewriter,
diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
index 061a7d2..bdc3418 100644
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -474,7 +474,7 @@ public:
mlir::PatternRewriter &rewriter) const override {
LLVM_DEBUG(llvm::dbgs() << "AffineLoopConversion: rewriting loop:\n";
loop.dump(););
- LLVM_ATTRIBUTE_UNUSED auto loopAnalysis =
+ [[maybe_unused]] auto loopAnalysis =
functionAnalysis.getChildLoopAnalysis(loop);
if (!loopAnalysis.canPromoteToAffine())
return rewriter.notifyMatchFailure(loop, "cannot promote to affine");
diff --git a/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp b/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
index 609a1fc..759e3a65d 100644
--- a/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
@@ -558,6 +558,7 @@ static mlir::Value emboxSrc(mlir::PatternRewriter &rewriter,
if (srcTy.isInteger(1)) {
// i1 is not a supported type in the descriptor and it is actually coming
// from a LOGICAL constant. Use the destination type to avoid mismatch.
+ assert(dstEleTy && "expect dst element type to be set");
srcTy = dstEleTy;
src = createConvertOp(rewriter, loc, srcTy, src);
addr = builder.createTemporary(loc, srcTy);
@@ -652,7 +653,8 @@ struct CUFDataTransferOpConversion
// Initialization of an array from a scalar value should be implemented
// via a kernel launch. Use the flang runtime via the Assign function
// until we have more infrastructure.
- mlir::Value src = emboxSrc(rewriter, op, symtab);
+ mlir::Type dstEleTy = fir::unwrapInnerType(fir::unwrapRefType(dstTy));
+ mlir::Value src = emboxSrc(rewriter, op, symtab, dstEleTy);
mlir::Value dst = emboxDst(rewriter, op, symtab);
mlir::func::FuncOp func =
fir::runtime::getRuntimeFunc<mkRTKey(CUFDataTransferCstDesc)>(
@@ -739,6 +741,9 @@ struct CUFDataTransferOpConversion
fir::StoreOp::create(builder, loc, val, box);
return box;
}
+ if (mlir::isa<fir::BaseBoxType>(val.getType()))
+ if (auto loadOp = mlir::dyn_cast<fir::LoadOp>(val.getDefiningOp()))
+ return loadOp.getMemref();
return val;
};
diff --git a/flang/lib/Optimizer/Transforms/StackArrays.cpp b/flang/lib/Optimizer/Transforms/StackArrays.cpp
index 80b3f68..8601499 100644
--- a/flang/lib/Optimizer/Transforms/StackArrays.cpp
+++ b/flang/lib/Optimizer/Transforms/StackArrays.cpp
@@ -561,7 +561,7 @@ static mlir::Value convertAllocationType(mlir::PatternRewriter &rewriter,
return stack;
fir::HeapType firHeapTy = mlir::cast<fir::HeapType>(heapTy);
- LLVM_ATTRIBUTE_UNUSED fir::ReferenceType firRefTy =
+ [[maybe_unused]] fir::ReferenceType firRefTy =
mlir::cast<fir::ReferenceType>(stackTy);
assert(firHeapTy.getElementType() == firRefTy.getElementType() &&
"Allocations must have the same type");
diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp
index 9507021..d677e14 100644
--- a/flang/lib/Parser/openmp-parsers.cpp
+++ b/flang/lib/Parser/openmp-parsers.cpp
@@ -548,6 +548,14 @@ TYPE_PARSER(construct<OmpAllocatorSimpleModifier>(scalarIntExpr))
TYPE_PARSER(construct<OmpAlwaysModifier>( //
"ALWAYS" >> pure(OmpAlwaysModifier::Value::Always)))
+TYPE_PARSER(construct<OmpAttachModifier::Value>(
+ "ALWAYS" >> pure(OmpAttachModifier::Value::Always) ||
+ "AUTO" >> pure(OmpAttachModifier::Value::Auto) ||
+ "NEVER" >> pure(OmpAttachModifier::Value::Never)))
+
+TYPE_PARSER(construct<OmpAttachModifier>( //
+ "ATTACH" >> parenthesized(Parser<OmpAttachModifier::Value>{})))
+
TYPE_PARSER(construct<OmpAutomapModifier>(
"AUTOMAP" >> pure(OmpAutomapModifier::Value::Automap)))
@@ -744,6 +752,7 @@ TYPE_PARSER(sourced(
TYPE_PARSER(sourced(construct<OmpMapClause::Modifier>(
sourced(construct<OmpMapClause::Modifier>(Parser<OmpAlwaysModifier>{}) ||
+ construct<OmpMapClause::Modifier>(Parser<OmpAttachModifier>{}) ||
construct<OmpMapClause::Modifier>(Parser<OmpCloseModifier>{}) ||
construct<OmpMapClause::Modifier>(Parser<OmpDeleteModifier>{}) ||
construct<OmpMapClause::Modifier>(Parser<OmpPresentModifier>{}) ||
@@ -1085,7 +1094,7 @@ TYPE_PARSER(construct<OmpBindClause>(
"TEAMS" >> pure(OmpBindClause::Binding::Teams) ||
"THREAD" >> pure(OmpBindClause::Binding::Thread)))
-TYPE_PARSER(construct<OmpAlignClause>(scalarIntExpr))
+TYPE_PARSER(construct<OmpAlignClause>(scalarIntConstantExpr))
TYPE_PARSER(construct<OmpAtClause>(
"EXECUTION" >> pure(OmpAtClause::ActionTime::Execution) ||
@@ -1158,7 +1167,8 @@ TYPE_PARSER( //
"DOACROSS" >>
construct<OmpClause>(parenthesized(Parser<OmpDoacrossClause>{})) ||
"DYNAMIC_ALLOCATORS" >>
- construct<OmpClause>(construct<OmpClause::DynamicAllocators>()) ||
+ construct<OmpClause>(construct<OmpClause::DynamicAllocators>(
+ maybe(parenthesized(scalarLogicalConstantExpr)))) ||
"DYN_GROUPPRIVATE" >>
construct<OmpClause>(construct<OmpClause::DynGroupprivate>(
parenthesized(Parser<OmpDynGroupprivateClause>{}))) ||
@@ -1270,12 +1280,15 @@ TYPE_PARSER( //
"REPLAYABLE" >> construct<OmpClause>(construct<OmpClause::Replayable>(
maybe(parenthesized(Parser<OmpReplayableClause>{})))) ||
"REVERSE_OFFLOAD" >>
- construct<OmpClause>(construct<OmpClause::ReverseOffload>()) ||
+ construct<OmpClause>(construct<OmpClause::ReverseOffload>(
+ maybe(parenthesized(scalarLogicalConstantExpr)))) ||
"SAFELEN" >> construct<OmpClause>(construct<OmpClause::Safelen>(
parenthesized(scalarIntConstantExpr))) ||
"SCHEDULE" >> construct<OmpClause>(construct<OmpClause::Schedule>(
parenthesized(Parser<OmpScheduleClause>{}))) ||
"SEQ_CST" >> construct<OmpClause>(construct<OmpClause::SeqCst>()) ||
+ "SELF_MAPS" >> construct<OmpClause>(construct<OmpClause::SelfMaps>(
+ maybe(parenthesized(scalarLogicalConstantExpr)))) ||
"SEVERITY" >> construct<OmpClause>(construct<OmpClause::Severity>(
parenthesized(Parser<OmpSeverityClause>{}))) ||
"SHARED" >> construct<OmpClause>(construct<OmpClause::Shared>(
@@ -1303,9 +1316,11 @@ TYPE_PARSER( //
construct<OmpClause>(construct<OmpClause::UseDeviceAddr>(
parenthesized(Parser<OmpObjectList>{}))) ||
"UNIFIED_ADDRESS" >>
- construct<OmpClause>(construct<OmpClause::UnifiedAddress>()) ||
+ construct<OmpClause>(construct<OmpClause::UnifiedAddress>(
+ maybe(parenthesized(scalarLogicalConstantExpr)))) ||
"UNIFIED_SHARED_MEMORY" >>
- construct<OmpClause>(construct<OmpClause::UnifiedSharedMemory>()) ||
+ construct<OmpClause>(construct<OmpClause::UnifiedSharedMemory>(
+ maybe(parenthesized(scalarLogicalConstantExpr)))) ||
"UNIFORM" >> construct<OmpClause>(construct<OmpClause::Uniform>(
parenthesized(nonemptyList(name)))) ||
"UNTIED" >> construct<OmpClause>(construct<OmpClause::Untied>()) ||
diff --git a/flang/lib/Parser/parse-tree.cpp b/flang/lib/Parser/parse-tree.cpp
index cb30939..8cbaa39 100644
--- a/flang/lib/Parser/parse-tree.cpp
+++ b/flang/lib/Parser/parse-tree.cpp
@@ -185,7 +185,7 @@ StructureConstructor ArrayElement::ConvertToStructureConstructor(
std::list<ComponentSpec> components;
for (auto &subscript : subscripts) {
components.emplace_back(std::optional<Keyword>{},
- ComponentDataSource{std::move(*Unwrap<Expr>(subscript))});
+ ComponentDataSource{std::move(UnwrapRef<Expr>(subscript))});
}
DerivedTypeSpec spec{std::move(name), std::list<TypeParamSpec>{}};
spec.derivedTypeSpec = &derived;
diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp
index 0511f5b..b172e429 100644
--- a/flang/lib/Parser/unparse.cpp
+++ b/flang/lib/Parser/unparse.cpp
@@ -2384,6 +2384,11 @@ public:
Walk(x.v);
Put(")");
}
+ void Unparse(const OmpAttachModifier &x) {
+ Word("ATTACH(");
+ Walk(x.v);
+ Put(")");
+ }
void Unparse(const OmpOrderClause &x) {
using Modifier = OmpOrderClause::Modifier;
Walk(std::get<std::optional<std::list<Modifier>>>(x.t), ":");
@@ -2820,6 +2825,7 @@ public:
WALK_NESTED_ENUM(OmpMapType, Value) // OMP map-type
WALK_NESTED_ENUM(OmpMapTypeModifier, Value) // OMP map-type-modifier
WALK_NESTED_ENUM(OmpAlwaysModifier, Value)
+ WALK_NESTED_ENUM(OmpAttachModifier, Value)
WALK_NESTED_ENUM(OmpCloseModifier, Value)
WALK_NESTED_ENUM(OmpDeleteModifier, Value)
WALK_NESTED_ENUM(OmpPresentModifier, Value)
diff --git a/flang/lib/Semantics/assignment.cpp b/flang/lib/Semantics/assignment.cpp
index f4aa496..1824a7d 100644
--- a/flang/lib/Semantics/assignment.cpp
+++ b/flang/lib/Semantics/assignment.cpp
@@ -194,7 +194,8 @@ void AssignmentContext::CheckShape(parser::CharBlock at, const SomeExpr *expr) {
template <typename A> void AssignmentContext::PushWhereContext(const A &x) {
const auto &expr{std::get<parser::LogicalExpr>(x.t)};
- CheckShape(expr.thing.value().source, GetExpr(context_, expr));
+ CheckShape(
+ parser::UnwrapRef<parser::Expr>(expr).source, GetExpr(context_, expr));
++whereDepth_;
}
diff --git a/flang/lib/Semantics/check-allocate.cpp b/flang/lib/Semantics/check-allocate.cpp
index 823aa4e..e019bbd 100644
--- a/flang/lib/Semantics/check-allocate.cpp
+++ b/flang/lib/Semantics/check-allocate.cpp
@@ -151,7 +151,9 @@ static std::optional<AllocateCheckerInfo> CheckAllocateOptions(
[&](const parser::MsgVariable &var) {
WarnOnDeferredLengthCharacterScalar(context,
GetExpr(context, var),
- var.v.thing.thing.GetSource(), "ERRMSG=");
+ parser::UnwrapRef<parser::Variable>(var)
+ .GetSource(),
+ "ERRMSG=");
if (info.gotMsg) { // C943
context.Say(
"ERRMSG may not be duplicated in a ALLOCATE statement"_err_en_US);
@@ -439,7 +441,7 @@ static bool HaveCompatibleLengths(
evaluate::ToInt64(type1.characterTypeSpec().length().GetExplicit())};
auto v2{
evaluate::ToInt64(type2.characterTypeSpec().length().GetExplicit())};
- return !v1 || !v2 || *v1 == *v2;
+ return !v1 || !v2 || (*v1 >= 0 ? *v1 : 0) == (*v2 >= 0 ? *v2 : 0);
} else {
return true;
}
@@ -452,7 +454,7 @@ static bool HaveCompatibleLengths(
auto v1{
evaluate::ToInt64(type1.characterTypeSpec().length().GetExplicit())};
auto v2{type2.knownLength()};
- return !v1 || !v2 || *v1 == *v2;
+ return !v1 || !v2 || (*v1 >= 0 ? *v1 : 0) == (*v2 >= 0 ? *v2 : 0);
} else {
return true;
}
@@ -598,7 +600,7 @@ bool AllocationCheckerHelper::RunChecks(SemanticsContext &context) {
std::optional<evaluate::ConstantSubscript> lbound;
if (const auto &lb{std::get<0>(shapeSpec.t)}) {
lbound.reset();
- const auto &lbExpr{lb->thing.thing.value()};
+ const auto &lbExpr{parser::UnwrapRef<parser::Expr>(lb)};
if (const auto *expr{GetExpr(context, lbExpr)}) {
auto folded{
evaluate::Fold(context.foldingContext(), SomeExpr(*expr))};
@@ -609,7 +611,8 @@ bool AllocationCheckerHelper::RunChecks(SemanticsContext &context) {
lbound = 1;
}
if (lbound) {
- const auto &ubExpr{std::get<1>(shapeSpec.t).thing.thing.value()};
+ const auto &ubExpr{
+ parser::UnwrapRef<parser::Expr>(std::get<1>(shapeSpec.t))};
if (const auto *expr{GetExpr(context, ubExpr)}) {
auto folded{
evaluate::Fold(context.foldingContext(), SomeExpr(*expr))};
diff --git a/flang/lib/Semantics/check-case.cpp b/flang/lib/Semantics/check-case.cpp
index 5ce143c..7593154 100644
--- a/flang/lib/Semantics/check-case.cpp
+++ b/flang/lib/Semantics/check-case.cpp
@@ -72,7 +72,7 @@ private:
}
std::optional<Value> GetValue(const parser::CaseValue &caseValue) {
- const parser::Expr &expr{caseValue.thing.thing.value()};
+ const auto &expr{parser::UnwrapRef<parser::Expr>(caseValue)};
auto *x{expr.typedExpr.get()};
if (x && x->v) { // C1147
auto type{x->v->GetType()};
diff --git a/flang/lib/Semantics/check-coarray.cpp b/flang/lib/Semantics/check-coarray.cpp
index 0e444f1..9113369 100644
--- a/flang/lib/Semantics/check-coarray.cpp
+++ b/flang/lib/Semantics/check-coarray.cpp
@@ -112,7 +112,7 @@ static void CheckTeamType(
static void CheckTeamStat(
SemanticsContext &context, const parser::ImageSelectorSpec::Stat &stat) {
- const parser::Variable &var{stat.v.thing.thing.value()};
+ const auto &var{parser::UnwrapRef<parser::Variable>(stat)};
if (parser::GetCoindexedNamedObject(var)) {
context.Say(parser::FindSourceLocation(var), // C931
"Image selector STAT variable must not be a coindexed "
@@ -147,7 +147,8 @@ static void CheckSyncStat(SemanticsContext &context,
},
[&](const parser::MsgVariable &var) {
WarnOnDeferredLengthCharacterScalar(context, GetExpr(context, var),
- var.v.thing.thing.GetSource(), "ERRMSG=");
+ parser::UnwrapRef<parser::Variable>(var).GetSource(),
+ "ERRMSG=");
if (gotMsg) {
context.Say( // C1172
"The errmsg-variable in a sync-stat-list may not be repeated"_err_en_US);
@@ -260,7 +261,9 @@ static void CheckEventWaitSpecList(SemanticsContext &context,
[&](const parser::MsgVariable &var) {
WarnOnDeferredLengthCharacterScalar(context,
GetExpr(context, var),
- var.v.thing.thing.GetSource(), "ERRMSG=");
+ parser::UnwrapRef<parser::Variable>(var)
+ .GetSource(),
+ "ERRMSG=");
if (gotMsg) {
context.Say( // C1178
"A errmsg-variable in a event-wait-spec-list may not be repeated"_err_en_US);
diff --git a/flang/lib/Semantics/check-data.cpp b/flang/lib/Semantics/check-data.cpp
index 5459290..3bcf711 100644
--- a/flang/lib/Semantics/check-data.cpp
+++ b/flang/lib/Semantics/check-data.cpp
@@ -25,9 +25,10 @@ namespace Fortran::semantics {
// Ensures that references to an implied DO loop control variable are
// represented as such in the "body" of the implied DO loop.
void DataChecker::Enter(const parser::DataImpliedDo &x) {
- auto name{std::get<parser::DataImpliedDo::Bounds>(x.t).name.thing.thing};
+ const auto &name{parser::UnwrapRef<parser::Name>(
+ std::get<parser::DataImpliedDo::Bounds>(x.t).name)};
int kind{evaluate::ResultType<evaluate::ImpliedDoIndex>::kind};
- if (const auto dynamicType{evaluate::DynamicType::From(*name.symbol)}) {
+ if (const auto dynamicType{evaluate::DynamicType::From(DEREF(name.symbol))}) {
if (dynamicType->category() == TypeCategory::Integer) {
kind = dynamicType->kind();
}
@@ -36,7 +37,8 @@ void DataChecker::Enter(const parser::DataImpliedDo &x) {
}
void DataChecker::Leave(const parser::DataImpliedDo &x) {
- auto name{std::get<parser::DataImpliedDo::Bounds>(x.t).name.thing.thing};
+ const auto &name{parser::UnwrapRef<parser::Name>(
+ std::get<parser::DataImpliedDo::Bounds>(x.t).name)};
exprAnalyzer_.RemoveImpliedDo(name.source);
}
@@ -211,7 +213,7 @@ void DataChecker::Leave(const parser::DataIDoObject &object) {
std::get_if<parser::Scalar<common::Indirection<parser::Designator>>>(
&object.u)}) {
if (MaybeExpr expr{exprAnalyzer_.Analyze(*designator)}) {
- auto source{designator->thing.value().source};
+ auto source{parser::UnwrapRef<parser::Designator>(*designator).source};
DataVarChecker checker{exprAnalyzer_.context(), source};
if (checker(*expr)) {
if (checker.HasComponentWithoutSubscripts()) { // C880
diff --git a/flang/lib/Semantics/check-deallocate.cpp b/flang/lib/Semantics/check-deallocate.cpp
index c45b585..c1ebc5f 100644
--- a/flang/lib/Semantics/check-deallocate.cpp
+++ b/flang/lib/Semantics/check-deallocate.cpp
@@ -114,7 +114,8 @@ void DeallocateChecker::Leave(const parser::DeallocateStmt &deallocateStmt) {
},
[&](const parser::MsgVariable &var) {
WarnOnDeferredLengthCharacterScalar(context_,
- GetExpr(context_, var), var.v.thing.thing.GetSource(),
+ GetExpr(context_, var),
+ parser::UnwrapRef<parser::Variable>(var).GetSource(),
"ERRMSG=");
if (gotMsg) {
context_.Say(
diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp
index ea5e2c0..31e246c 100644
--- a/flang/lib/Semantics/check-declarations.cpp
+++ b/flang/lib/Semantics/check-declarations.cpp
@@ -3622,6 +3622,7 @@ void CheckHelper::CheckDioDtvArg(const Symbol &proc, const Symbol &subp,
ioKind == common::DefinedIo::ReadUnformatted
? Attr::INTENT_INOUT
: Attr::INTENT_IN);
+ CheckDioDummyIsScalar(subp, *arg);
}
}
@@ -3687,6 +3688,7 @@ void CheckHelper::CheckDioAssumedLenCharacterArg(const Symbol &subp,
"Dummy argument '%s' of a defined input/output procedure must be assumed-length CHARACTER of default kind"_err_en_US,
arg->name());
}
+ CheckDioDummyIsScalar(subp, *arg);
}
}
diff --git a/flang/lib/Semantics/check-do-forall.cpp b/flang/lib/Semantics/check-do-forall.cpp
index a2f3685..8a47340 100644
--- a/flang/lib/Semantics/check-do-forall.cpp
+++ b/flang/lib/Semantics/check-do-forall.cpp
@@ -535,7 +535,8 @@ private:
if (const SomeExpr * expr{GetExpr(context_, scalarExpression)}) {
if (!ExprHasTypeCategory(*expr, TypeCategory::Integer)) {
// No warnings or errors for type INTEGER
- const parser::CharBlock &loc{scalarExpression.thing.value().source};
+ parser::CharBlock loc{
+ parser::UnwrapRef<parser::Expr>(scalarExpression).source};
CheckDoControl(loc, ExprHasTypeCategory(*expr, TypeCategory::Real));
}
}
@@ -552,7 +553,7 @@ private:
CheckDoExpression(*bounds.step);
if (IsZero(*bounds.step)) {
context_.Warn(common::UsageWarning::ZeroDoStep,
- bounds.step->thing.value().source,
+ parser::UnwrapRef<parser::Expr>(bounds.step).source,
"DO step expression should not be zero"_warn_en_US);
}
}
@@ -615,7 +616,7 @@ private:
// C1121 - procedures in mask must be pure
void CheckMaskIsPure(const parser::ScalarLogicalExpr &mask) const {
UnorderedSymbolSet references{
- GatherSymbolsFromExpression(mask.thing.thing.value())};
+ GatherSymbolsFromExpression(parser::UnwrapRef<parser::Expr>(mask))};
for (const Symbol &ref : OrderBySourcePosition(references)) {
if (IsProcedure(ref) && !IsPureProcedure(ref)) {
context_.SayWithDecl(ref, parser::Unwrap<parser::Expr>(mask)->source,
@@ -639,32 +640,33 @@ private:
}
void HasNoReferences(const UnorderedSymbolSet &indexNames,
- const parser::ScalarIntExpr &expr) const {
- CheckNoCollisions(GatherSymbolsFromExpression(expr.thing.thing.value()),
- indexNames,
+ const parser::ScalarIntExpr &scalarIntExpr) const {
+ const auto &expr{parser::UnwrapRef<parser::Expr>(scalarIntExpr)};
+ CheckNoCollisions(GatherSymbolsFromExpression(expr), indexNames,
"%s limit expression may not reference index variable '%s'"_err_en_US,
- expr.thing.thing.value().source);
+ expr.source);
}
// C1129, names in local locality-specs can't be in mask expressions
void CheckMaskDoesNotReferenceLocal(const parser::ScalarLogicalExpr &mask,
const UnorderedSymbolSet &localVars) const {
- CheckNoCollisions(GatherSymbolsFromExpression(mask.thing.thing.value()),
- localVars,
+ const auto &expr{parser::UnwrapRef<parser::Expr>(mask)};
+ CheckNoCollisions(GatherSymbolsFromExpression(expr), localVars,
"%s mask expression references variable '%s'"
" in LOCAL locality-spec"_err_en_US,
- mask.thing.thing.value().source);
+ expr.source);
}
// C1129, names in local locality-specs can't be in limit or step
// expressions
- void CheckExprDoesNotReferenceLocal(const parser::ScalarIntExpr &expr,
+ void CheckExprDoesNotReferenceLocal(
+ const parser::ScalarIntExpr &scalarIntExpr,
const UnorderedSymbolSet &localVars) const {
- CheckNoCollisions(GatherSymbolsFromExpression(expr.thing.thing.value()),
- localVars,
+ const auto &expr{parser::UnwrapRef<parser::Expr>(scalarIntExpr)};
+ CheckNoCollisions(GatherSymbolsFromExpression(expr), localVars,
"%s expression references variable '%s'"
" in LOCAL locality-spec"_err_en_US,
- expr.thing.thing.value().source);
+ expr.source);
}
// C1130, DEFAULT(NONE) locality requires names to be in locality-specs to
@@ -772,7 +774,7 @@ private:
HasNoReferences(indexNames, std::get<2>(control.t));
if (const auto &intExpr{
std::get<std::optional<parser::ScalarIntExpr>>(control.t)}) {
- const parser::Expr &expr{intExpr->thing.thing.value()};
+ const auto &expr{parser::UnwrapRef<parser::Expr>(intExpr)};
CheckNoCollisions(GatherSymbolsFromExpression(expr), indexNames,
"%s step expression may not reference index variable '%s'"_err_en_US,
expr.source);
@@ -840,7 +842,7 @@ private:
}
void CheckForImpureCall(const parser::ScalarIntExpr &x,
std::optional<IndexVarKind> nesting) const {
- const auto &parsedExpr{x.thing.thing.value()};
+ const auto &parsedExpr{parser::UnwrapRef<parser::Expr>(x)};
auto oldLocation{context_.location()};
context_.set_location(parsedExpr.source);
if (const auto &typedExpr{parsedExpr.typedExpr}) {
@@ -1124,7 +1126,8 @@ void DoForallChecker::Leave(const parser::ConnectSpec &connectSpec) {
const auto *newunit{
std::get_if<parser::ConnectSpec::Newunit>(&connectSpec.u)};
if (newunit) {
- context_.CheckIndexVarRedefine(newunit->v.thing.thing);
+ context_.CheckIndexVarRedefine(
+ parser::UnwrapRef<parser::Variable>(newunit));
}
}
@@ -1166,14 +1169,14 @@ void DoForallChecker::Leave(const parser::InquireSpec &inquireSpec) {
const auto *intVar{std::get_if<parser::InquireSpec::IntVar>(&inquireSpec.u)};
if (intVar) {
const auto &scalar{std::get<parser::ScalarIntVariable>(intVar->t)};
- context_.CheckIndexVarRedefine(scalar.thing.thing);
+ context_.CheckIndexVarRedefine(parser::UnwrapRef<parser::Variable>(scalar));
}
}
void DoForallChecker::Leave(const parser::IoControlSpec &ioControlSpec) {
const auto *size{std::get_if<parser::IoControlSpec::Size>(&ioControlSpec.u)};
if (size) {
- context_.CheckIndexVarRedefine(size->v.thing.thing);
+ context_.CheckIndexVarRedefine(parser::UnwrapRef<parser::Variable>(size));
}
}
@@ -1190,16 +1193,19 @@ static void CheckIoImpliedDoIndex(
void DoForallChecker::Leave(const parser::OutputImpliedDo &outputImpliedDo) {
CheckIoImpliedDoIndex(context_,
- std::get<parser::IoImpliedDoControl>(outputImpliedDo.t).name.thing.thing);
+ parser::UnwrapRef<parser::Name>(
+ std::get<parser::IoImpliedDoControl>(outputImpliedDo.t).name));
}
void DoForallChecker::Leave(const parser::InputImpliedDo &inputImpliedDo) {
CheckIoImpliedDoIndex(context_,
- std::get<parser::IoImpliedDoControl>(inputImpliedDo.t).name.thing.thing);
+ parser::UnwrapRef<parser::Name>(
+ std::get<parser::IoImpliedDoControl>(inputImpliedDo.t).name));
}
void DoForallChecker::Leave(const parser::StatVariable &statVariable) {
- context_.CheckIndexVarRedefine(statVariable.v.thing.thing);
+ context_.CheckIndexVarRedefine(
+ parser::UnwrapRef<parser::Variable>(statVariable));
}
} // namespace Fortran::semantics
diff --git a/flang/lib/Semantics/check-io.cpp b/flang/lib/Semantics/check-io.cpp
index a1ff4b9..19059ad 100644
--- a/flang/lib/Semantics/check-io.cpp
+++ b/flang/lib/Semantics/check-io.cpp
@@ -424,8 +424,8 @@ void IoChecker::Enter(const parser::InquireSpec::CharVar &spec) {
specKind = IoSpecKind::Dispose;
break;
}
- const parser::Variable &var{
- std::get<parser::ScalarDefaultCharVariable>(spec.t).thing.thing};
+ const auto &var{parser::UnwrapRef<parser::Variable>(
+ std::get<parser::ScalarDefaultCharVariable>(spec.t))};
std::string what{parser::ToUpperCaseLetters(common::EnumToString(specKind))};
CheckForDefinableVariable(var, what);
WarnOnDeferredLengthCharacterScalar(
@@ -627,7 +627,7 @@ void IoChecker::Enter(const parser::IoUnit &spec) {
}
void IoChecker::Enter(const parser::MsgVariable &msgVar) {
- const parser::Variable &var{msgVar.v.thing.thing};
+ const auto &var{parser::UnwrapRef<parser::Variable>(msgVar)};
if (stmt_ == IoStmtKind::None) {
// allocate, deallocate, image control
CheckForDefinableVariable(var, "ERRMSG");
diff --git a/flang/lib/Semantics/check-omp-atomic.cpp b/flang/lib/Semantics/check-omp-atomic.cpp
index 351af5c..515121a 100644
--- a/flang/lib/Semantics/check-omp-atomic.cpp
+++ b/flang/lib/Semantics/check-omp-atomic.cpp
@@ -519,8 +519,8 @@ private:
/// function references with scalar data pointer result of non-character
/// intrinsic type or variables that are non-polymorphic scalar pointers
/// and any length type parameter must be constant.
-void OmpStructureChecker::CheckAtomicType(
- SymbolRef sym, parser::CharBlock source, std::string_view name) {
+void OmpStructureChecker::CheckAtomicType(SymbolRef sym,
+ parser::CharBlock source, std::string_view name, bool checkTypeOnPointer) {
const DeclTypeSpec *typeSpec{sym->GetType()};
if (!typeSpec) {
return;
@@ -547,6 +547,22 @@ void OmpStructureChecker::CheckAtomicType(
return;
}
+  // Only on intrinsic-assignment paths: an atomic pointer variable must be of
+  // intrinsic numeric or logical type.
+ if (checkTypeOnPointer) {
+ using Category = DeclTypeSpec::Category;
+ Category cat{typeSpec->category()};
+ if (cat != Category::Numeric && cat != Category::Logical) {
+ std::string details = " has the POINTER attribute";
+ if (const auto *derived{typeSpec->AsDerived()}) {
+ details += " and derived type '"s + derived->name().ToString() + "'";
+ }
+ context_.Say(source,
+ "ATOMIC operation requires an intrinsic scalar variable; '%s'%s"_err_en_US,
+ sym->name(), details);
+ return;
+ }
+ }
+
// Go over all length parameters, if any, and check if they are
// explicit.
if (const DerivedTypeSpec *derived{typeSpec->AsDerived()}) {
@@ -562,7 +578,7 @@ void OmpStructureChecker::CheckAtomicType(
}
void OmpStructureChecker::CheckAtomicVariable(
- const SomeExpr &atom, parser::CharBlock source) {
+ const SomeExpr &atom, parser::CharBlock source, bool checkTypeOnPointer) {
if (atom.Rank() != 0) {
context_.Say(source, "Atomic variable %s should be a scalar"_err_en_US,
atom.AsFortran());
@@ -572,7 +588,7 @@ void OmpStructureChecker::CheckAtomicVariable(
assert(dsgs.size() == 1 && "Should have a single top-level designator");
evaluate::SymbolVector syms{evaluate::GetSymbolVector(dsgs.front())};
- CheckAtomicType(syms.back(), source, atom.AsFortran());
+ CheckAtomicType(syms.back(), source, atom.AsFortran(), checkTypeOnPointer);
if (IsAllocatable(syms.back()) && !IsArrayElement(atom)) {
context_.Say(source, "Atomic variable %s cannot be ALLOCATABLE"_err_en_US,
@@ -789,7 +805,8 @@ void OmpStructureChecker::CheckAtomicCaptureAssignment(
if (!IsVarOrFunctionRef(atom)) {
ErrorShouldBeVariable(atom, rsrc);
} else {
- CheckAtomicVariable(atom, rsrc);
+ CheckAtomicVariable(
+ atom, rsrc, /*checkTypeOnPointer=*/!IsPointerAssignment(capture));
// This part should have been checked prior to calling this function.
assert(*GetConvertInput(capture.rhs) == atom &&
"This cannot be a capture assignment");
@@ -808,7 +825,8 @@ void OmpStructureChecker::CheckAtomicReadAssignment(
if (!IsVarOrFunctionRef(atom)) {
ErrorShouldBeVariable(atom, rsrc);
} else {
- CheckAtomicVariable(atom, rsrc);
+ CheckAtomicVariable(
+ atom, rsrc, /*checkTypeOnPointer=*/!IsPointerAssignment(read));
CheckStorageOverlap(atom, {read.lhs}, source);
}
} else {
@@ -829,7 +847,8 @@ void OmpStructureChecker::CheckAtomicWriteAssignment(
if (!IsVarOrFunctionRef(atom)) {
ErrorShouldBeVariable(atom, rsrc);
} else {
- CheckAtomicVariable(atom, lsrc);
+ CheckAtomicVariable(
+ atom, lsrc, /*checkTypeOnPointer=*/!IsPointerAssignment(write));
CheckStorageOverlap(atom, {write.rhs}, source);
}
}
@@ -854,7 +873,8 @@ OmpStructureChecker::CheckAtomicUpdateAssignment(
return std::nullopt;
}
- CheckAtomicVariable(atom, lsrc);
+ CheckAtomicVariable(
+ atom, lsrc, /*checkTypeOnPointer=*/!IsPointerAssignment(update));
auto [hasErrors, tryReassoc]{CheckAtomicUpdateAssignmentRhs(
atom, update.rhs, source, /*suppressDiagnostics=*/true)};
@@ -1017,7 +1037,8 @@ void OmpStructureChecker::CheckAtomicConditionalUpdateAssignment(
return;
}
- CheckAtomicVariable(atom, alsrc);
+ CheckAtomicVariable(
+ atom, alsrc, /*checkTypeOnPointer=*/!IsPointerAssignment(assign));
auto top{GetTopLevelOperationIgnoreResizing(cond)};
// Missing arguments to operations would have been diagnosed by now.
diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp
index d65a89e..ea6fe43 100644
--- a/flang/lib/Semantics/check-omp-structure.cpp
+++ b/flang/lib/Semantics/check-omp-structure.cpp
@@ -1517,19 +1517,42 @@ void OmpStructureChecker::Leave(const parser::OpenMPDepobjConstruct &x) {
void OmpStructureChecker::Enter(const parser::OpenMPRequiresConstruct &x) {
const auto &dirName{x.v.DirName()};
PushContextAndClauseSets(dirName.source, dirName.v);
+ unsigned version{context_.langOptions().OpenMPVersion};
- if (visitedAtomicSource_.empty()) {
- return;
- }
for (const parser::OmpClause &clause : x.v.Clauses().v) {
llvm::omp::Clause id{clause.Id()};
if (id == llvm::omp::Clause::OMPC_atomic_default_mem_order) {
- parser::MessageFormattedText txt(
- "REQUIRES directive with '%s' clause found lexically after atomic operation without a memory order clause"_err_en_US,
- parser::ToUpperCaseLetters(llvm::omp::getOpenMPClauseName(id)));
- parser::Message message(clause.source, txt);
- message.Attach(visitedAtomicSource_, "Previous atomic construct"_en_US);
- context_.Say(std::move(message));
+ if (!visitedAtomicSource_.empty()) {
+ parser::MessageFormattedText txt(
+ "REQUIRES directive with '%s' clause found lexically after atomic operation without a memory order clause"_err_en_US,
+ parser::ToUpperCaseLetters(llvm::omp::getOpenMPClauseName(id)));
+ parser::Message message(clause.source, txt);
+ message.Attach(visitedAtomicSource_, "Previous atomic construct"_en_US);
+ context_.Say(std::move(message));
+ }
+ } else {
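+        // OpenMP 6.0 allows these requirement clauses to take an optional
+        // constant logical argument, e.g. UNIFIED_SHARED_MEMORY(.TRUE.);
+        // using one with an earlier version only triggers a warning below.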
+ bool hasArgument{common::visit(
+ [&](auto &&s) {
+ using TypeS = llvm::remove_cvref_t<decltype(s)>;
+ if constexpr ( //
+ std::is_same_v<TypeS, parser::OmpClause::DynamicAllocators> ||
+ std::is_same_v<TypeS, parser::OmpClause::ReverseOffload> ||
+ std::is_same_v<TypeS, parser::OmpClause::SelfMaps> ||
+ std::is_same_v<TypeS, parser::OmpClause::UnifiedAddress> ||
+ std::is_same_v<TypeS, parser::OmpClause::UnifiedSharedMemory>) {
+ return s.v.has_value();
+ } else {
+ return false;
+ }
+ },
+ clause.u)};
+ if (version < 60 && hasArgument) {
+ context_.Say(clause.source,
+ "An argument to %s is an %s feature, %s"_warn_en_US,
+ parser::ToUpperCaseLetters(
+ llvm::omp::getOpenMPClauseName(clause.Id())),
+ ThisVersion(60), TryVersion(60));
+ }
}
}
}
@@ -1540,9 +1563,8 @@ void OmpStructureChecker::Leave(const parser::OpenMPRequiresConstruct &) {
void OmpStructureChecker::CheckAlignValue(const parser::OmpClause &clause) {
if (auto *align{std::get_if<parser::OmpClause::Align>(&clause.u)}) {
- if (const auto &v{GetIntValue(align->v)}; !v || *v <= 0) {
- context_.Say(clause.source,
- "The alignment value should be a constant positive integer"_err_en_US);
+ if (const auto &v{GetIntValue(align->v)}; v && *v <= 0) {
+ context_.Say(clause.source, "The alignment should be positive"_err_en_US);
}
}
}
@@ -2336,7 +2358,7 @@ private:
}
if (auto &repl{std::get<parser::OmpClause::Replayable>(clause.u).v}) {
// Scalar<Logical<Constant<indirection<Expr>>>>
- const parser::Expr &parserExpr{repl->v.thing.thing.thing.value()};
+ const auto &parserExpr{parser::UnwrapRef<parser::Expr>(repl)};
if (auto &&expr{GetEvaluateExpr(parserExpr)}) {
return GetLogicalValue(*expr).value_or(true);
}
@@ -2350,7 +2372,7 @@ private:
bool isTransparent{true};
if (auto &transp{std::get<parser::OmpClause::Transparent>(clause.u).v}) {
// Scalar<Integer<indirection<Expr>>>
- const parser::Expr &parserExpr{transp->v.thing.thing.value()};
+ const auto &parserExpr{parser::UnwrapRef<parser::Expr>(transp)};
if (auto &&expr{GetEvaluateExpr(parserExpr)}) {
// If the argument is omp_not_impex (defined as 0), then
// the task is not transparent, otherwise it is.
@@ -2389,8 +2411,8 @@ private:
}
}
// Scalar<Logical<indirection<Expr>>>
- auto &parserExpr{
- std::get<parser::ScalarLogicalExpr>(ifc.v.t).thing.thing.value()};
+ const auto &parserExpr{parser::UnwrapRef<parser::Expr>(
+ std::get<parser::ScalarLogicalExpr>(ifc.v.t))};
if (auto &&expr{GetEvaluateExpr(parserExpr)}) {
// If the value is known to be false, an undeferred task will be
// generated.
@@ -3017,8 +3039,8 @@ void OmpStructureChecker::Leave(const parser::OmpClauseList &) {
&objs,
std::string clause) {
for (const auto &obj : objs.v) {
- if (const parser::Name *
- objName{parser::Unwrap<parser::Name>(obj)}) {
+ if (const parser::Name *objName{
+ parser::Unwrap<parser::Name>(obj)}) {
if (&objName->symbol->GetUltimate() == eventHandleSym) {
context_.Say(GetContext().clauseSource,
"A variable: `%s` that appears in a DETACH clause cannot appear on %s clause on the same construct"_err_en_US,
@@ -3637,7 +3659,8 @@ void OmpStructureChecker::CheckReductionModifier(
if (modifier.v == ReductionModifier::Value::Task) {
// "Task" is only allowed on worksharing or "parallel" directive.
static llvm::omp::Directive worksharing[]{
- llvm::omp::Directive::OMPD_do, llvm::omp::Directive::OMPD_scope,
+ llvm::omp::Directive::OMPD_do, //
+ llvm::omp::Directive::OMPD_scope, //
llvm::omp::Directive::OMPD_sections,
// There are more worksharing directives, but they do not apply:
// "for" is C++ only,
@@ -4081,9 +4104,15 @@ void OmpStructureChecker::Enter(const parser::OmpClause::Map &x) {
if (auto *iter{OmpGetUniqueModifier<parser::OmpIterator>(modifiers)}) {
CheckIteratorModifier(*iter);
}
+
+ using Directive = llvm::omp::Directive;
+ Directive dir{GetContext().directive};
+ llvm::ArrayRef<Directive> leafs{llvm::omp::getLeafConstructsOrSelf(dir)};
+ parser::OmpMapType::Value mapType{parser::OmpMapType::Value::Storage};
+
if (auto *type{OmpGetUniqueModifier<parser::OmpMapType>(modifiers)}) {
- using Directive = llvm::omp::Directive;
using Value = parser::OmpMapType::Value;
+ mapType = type->v;
static auto isValidForVersion{
[](parser::OmpMapType::Value t, unsigned version) {
@@ -4120,10 +4149,6 @@ void OmpStructureChecker::Enter(const parser::OmpClause::Map &x) {
return result;
}()};
- llvm::omp::Directive dir{GetContext().directive};
- llvm::ArrayRef<llvm::omp::Directive> leafs{
- llvm::omp::getLeafConstructsOrSelf(dir)};
-
if (llvm::is_contained(leafs, Directive::OMPD_target) ||
llvm::is_contained(leafs, Directive::OMPD_target_data)) {
if (version >= 60) {
@@ -4141,6 +4166,43 @@ void OmpStructureChecker::Enter(const parser::OmpClause::Map &x) {
}
}
+ if (auto *attach{
+ OmpGetUniqueModifier<parser::OmpAttachModifier>(modifiers)}) {
+ bool mapEnteringConstructOrMapper{
+ llvm::is_contained(leafs, Directive::OMPD_target) ||
+ llvm::is_contained(leafs, Directive::OMPD_target_data) ||
+ llvm::is_contained(leafs, Directive::OMPD_target_enter_data) ||
+ llvm::is_contained(leafs, Directive::OMPD_declare_mapper)};
+
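+    // ATTACH is only meaningful where attachment can occur: on a map-entering
+    // construct (TARGET, TARGET DATA, TARGET ENTER DATA) or in a
+    // DECLARE_MAPPER, and only with a map-entering map type.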
+ if (!mapEnteringConstructOrMapper || !IsMapEnteringType(mapType)) {
+ const auto &desc{OmpGetDescriptor<parser::OmpAttachModifier>()};
+ context_.Say(OmpGetModifierSource(modifiers, attach),
+ "The '%s' modifier can only appear on a map-entering construct or on a DECLARE_MAPPER directive"_err_en_US,
+ desc.name.str());
+ }
+
+ auto hasBasePointer{[&](const SomeExpr &item) {
+ evaluate::SymbolVector symbols{evaluate::GetSymbolVector(item)};
+ return llvm::any_of(
+ symbols, [](SymbolRef s) { return IsPointer(s.get()); });
+ }};
+
+ evaluate::ExpressionAnalyzer ea{context_};
+ const auto &objects{std::get<parser::OmpObjectList>(x.v.t)};
+ for (auto &object : objects.v) {
+ if (const parser::Designator *d{GetDesignatorFromObj(object)}) {
+ if (auto &&expr{ea.Analyze(*d)}) {
+ if (hasBasePointer(*expr)) {
+ continue;
+ }
+ }
+ }
+ auto source{GetObjectSource(object)};
+ context_.Say(source ? *source : GetContext().clauseSource,
+ "A list-item that appears in a map clause with the ATTACH modifier must have a base-pointer"_err_en_US);
+ }
+ }
+
auto &&typeMods{
OmpGetRepeatableModifier<parser::OmpMapTypeModifier>(modifiers)};
struct Less {
diff --git a/flang/lib/Semantics/check-omp-structure.h b/flang/lib/Semantics/check-omp-structure.h
index f507278..543642ff 100644
--- a/flang/lib/Semantics/check-omp-structure.h
+++ b/flang/lib/Semantics/check-omp-structure.h
@@ -262,10 +262,10 @@ private:
void CheckStorageOverlap(const evaluate::Expr<evaluate::SomeType> &,
llvm::ArrayRef<evaluate::Expr<evaluate::SomeType>>, parser::CharBlock);
void ErrorShouldBeVariable(const MaybeExpr &expr, parser::CharBlock source);
- void CheckAtomicType(
- SymbolRef sym, parser::CharBlock source, std::string_view name);
- void CheckAtomicVariable(
- const evaluate::Expr<evaluate::SomeType> &, parser::CharBlock);
+ void CheckAtomicType(SymbolRef sym, parser::CharBlock source,
+ std::string_view name, bool checkTypeOnPointer = true);
+ void CheckAtomicVariable(const evaluate::Expr<evaluate::SomeType> &,
+ parser::CharBlock, bool checkTypeOnPointer = true);
std::pair<const parser::ExecutionPartConstruct *,
const parser::ExecutionPartConstruct *>
CheckUpdateCapture(const parser::ExecutionPartConstruct *ec1,
diff --git a/flang/lib/Semantics/data-to-inits.cpp b/flang/lib/Semantics/data-to-inits.cpp
index 1e46dab..bbf3b28 100644
--- a/flang/lib/Semantics/data-to-inits.cpp
+++ b/flang/lib/Semantics/data-to-inits.cpp
@@ -179,13 +179,14 @@ bool DataInitializationCompiler<DSV>::Scan(
template <typename DSV>
bool DataInitializationCompiler<DSV>::Scan(const parser::DataImpliedDo &ido) {
const auto &bounds{std::get<parser::DataImpliedDo::Bounds>(ido.t)};
- auto name{bounds.name.thing.thing};
- const auto *lowerExpr{
- GetExpr(exprAnalyzer_.context(), bounds.lower.thing.thing)};
- const auto *upperExpr{
- GetExpr(exprAnalyzer_.context(), bounds.upper.thing.thing)};
+ const auto &name{parser::UnwrapRef<parser::Name>(bounds.name)};
+ const auto *lowerExpr{GetExpr(
+ exprAnalyzer_.context(), parser::UnwrapRef<parser::Expr>(bounds.lower))};
+ const auto *upperExpr{GetExpr(
+ exprAnalyzer_.context(), parser::UnwrapRef<parser::Expr>(bounds.upper))};
const auto *stepExpr{bounds.step
- ? GetExpr(exprAnalyzer_.context(), bounds.step->thing.thing)
+ ? GetExpr(exprAnalyzer_.context(),
+ parser::UnwrapRef<parser::Expr>(bounds.step))
: nullptr};
if (lowerExpr && upperExpr) {
// Fold the bounds expressions (again) in case any of them depend
@@ -240,7 +241,9 @@ bool DataInitializationCompiler<DSV>::Scan(
return common::visit(
common::visitors{
[&](const parser::Scalar<common::Indirection<parser::Designator>>
- &var) { return Scan(var.thing.value()); },
+ &var) {
+ return Scan(parser::UnwrapRef<parser::Designator>(var));
+ },
[&](const common::Indirection<parser::DataImpliedDo> &ido) {
return Scan(ido.value());
},
diff --git a/flang/lib/Semantics/expression.cpp b/flang/lib/Semantics/expression.cpp
index 2feec98..4aeb9a4 100644
--- a/flang/lib/Semantics/expression.cpp
+++ b/flang/lib/Semantics/expression.cpp
@@ -176,8 +176,8 @@ public:
// Find and return a user-defined operator or report an error.
// The provided message is used if there is no such operator.
- MaybeExpr TryDefinedOp(
- const char *, parser::MessageFixedText, bool isUserOp = false);
+ MaybeExpr TryDefinedOp(const char *, parser::MessageFixedText,
+ bool isUserOp = false, bool checkForNullPointer = true);
template <typename E>
MaybeExpr TryDefinedOp(E opr, parser::MessageFixedText msg) {
return TryDefinedOp(
@@ -211,7 +211,8 @@ private:
void SayNoMatch(
const std::string &, bool isAssignment = false, bool isAmbiguous = false);
std::string TypeAsFortran(std::size_t);
- bool AnyUntypedOrMissingOperand() const;
+ bool AnyUntypedOperand() const;
+ bool AnyMissingOperand() const;
ExpressionAnalyzer &context_;
ActualArguments actuals_;
@@ -1954,9 +1955,10 @@ void ArrayConstructorContext::Add(const parser::AcImpliedDo &impliedDo) {
const auto &control{std::get<parser::AcImpliedDoControl>(impliedDo.t)};
const auto &bounds{std::get<parser::AcImpliedDoControl::Bounds>(control.t)};
exprAnalyzer_.Analyze(bounds.name);
- parser::CharBlock name{bounds.name.thing.thing.source};
+ const auto &parsedName{parser::UnwrapRef<parser::Name>(bounds.name)};
+ parser::CharBlock name{parsedName.source};
int kind{ImpliedDoIntType::kind};
- if (const Symbol * symbol{bounds.name.thing.thing.symbol}) {
+ if (const Symbol *symbol{parsedName.symbol}) {
if (auto dynamicType{DynamicType::From(symbol)}) {
if (dynamicType->category() == TypeCategory::Integer) {
kind = dynamicType->kind();
@@ -1981,7 +1983,7 @@ void ArrayConstructorContext::Add(const parser::AcImpliedDo &impliedDo) {
auto cUpper{ToInt64(upper)};
auto cStride{ToInt64(stride)};
if (!(messageDisplayedSet_ & 0x10) && cStride && *cStride == 0) {
- exprAnalyzer_.SayAt(bounds.step.value().thing.thing.value().source,
+ exprAnalyzer_.SayAt(parser::UnwrapRef<parser::Expr>(bounds.step).source,
"The stride of an implied DO loop must not be zero"_err_en_US);
messageDisplayedSet_ |= 0x10;
}
@@ -2526,7 +2528,7 @@ static const Symbol *GetBindingResolution(
auto ExpressionAnalyzer::AnalyzeProcedureComponentRef(
const parser::ProcComponentRef &pcr, ActualArguments &&arguments,
bool isSubroutine) -> std::optional<CalleeAndArguments> {
- const parser::StructureComponent &sc{pcr.v.thing};
+ const auto &sc{parser::UnwrapRef<parser::StructureComponent>(pcr)};
if (MaybeExpr base{Analyze(sc.base)}) {
if (const Symbol *sym{sc.component.symbol}) {
if (context_.HasError(sym)) {
@@ -3695,11 +3697,12 @@ std::optional<characteristics::Procedure> ExpressionAnalyzer::CheckCall(
MaybeExpr ExpressionAnalyzer::Analyze(const parser::Expr::Parentheses &x) {
if (MaybeExpr operand{Analyze(x.v.value())}) {
- if (const semantics::Symbol *symbol{GetLastSymbol(*operand)}) {
+ if (IsNullPointerOrAllocatable(&*operand)) {
+ Say("NULL() may not be parenthesized"_err_en_US);
+ } else if (const semantics::Symbol *symbol{GetLastSymbol(*operand)}) {
if (const semantics::Symbol *result{FindFunctionResult(*symbol)}) {
if (semantics::IsProcedurePointer(*result)) {
- Say("A function reference that returns a procedure "
- "pointer may not be parenthesized"_err_en_US); // C1003
+ Say("A function reference that returns a procedure pointer may not be parenthesized"_err_en_US); // C1003
}
}
}
@@ -3788,7 +3791,7 @@ MaybeExpr ExpressionAnalyzer::Analyze(const parser::Expr::DefinedUnary &x) {
ArgumentAnalyzer analyzer{*this, name.source};
analyzer.Analyze(std::get<1>(x.t));
return analyzer.TryDefinedOp(name.source.ToString().c_str(),
- "No operator %s defined for %s"_err_en_US, true);
+ "No operator %s defined for %s"_err_en_US, /*isUserOp=*/true);
}
// Binary (dyadic) operations
@@ -3997,7 +4000,9 @@ static bool CheckFuncRefToArrayElement(semantics::SemanticsContext &context,
auto &proc{std::get<parser::ProcedureDesignator>(funcRef.v.t)};
const auto *name{std::get_if<parser::Name>(&proc.u)};
if (!name) {
- name = &std::get<parser::ProcComponentRef>(proc.u).v.thing.component;
+ name = &parser::UnwrapRef<parser::StructureComponent>(
+ std::get<parser::ProcComponentRef>(proc.u))
+ .component;
}
if (!name->symbol) {
return false;
@@ -4047,14 +4052,16 @@ static void FixMisparsedFunctionReference(
}
}
auto &proc{std::get<parser::ProcedureDesignator>(funcRef.v.t)};
- if (Symbol *origSymbol{
- common::visit(common::visitors{
- [&](parser::Name &name) { return name.symbol; },
- [&](parser::ProcComponentRef &pcr) {
- return pcr.v.thing.component.symbol;
- },
- },
- proc.u)}) {
+ if (Symbol *
+ origSymbol{common::visit(
+ common::visitors{
+ [&](parser::Name &name) { return name.symbol; },
+ [&](parser::ProcComponentRef &pcr) {
+ return parser::UnwrapRef<parser::StructureComponent>(pcr)
+ .component.symbol;
+ },
+ },
+ proc.u)}) {
Symbol &symbol{origSymbol->GetUltimate()};
if (symbol.has<semantics::ObjectEntityDetails>() ||
symbol.has<semantics::AssocEntityDetails>()) {
@@ -4176,15 +4183,23 @@ MaybeExpr ExpressionAnalyzer::IterativelyAnalyzeSubexpressions(
} while (!queue.empty());
// Analyze the collected subexpressions in bottom-up order.
// On an error, bail out and leave partial results in place.
- MaybeExpr result;
- for (auto riter{finish.rbegin()}; riter != finish.rend(); ++riter) {
- const parser::Expr &expr{**riter};
- result = ExprOrVariable(expr, expr.source);
- if (!result) {
- return result;
+ if (finish.size() == 1) {
+ const parser::Expr &expr{DEREF(finish.front())};
+ return ExprOrVariable(expr, expr.source);
+ } else {
+    // Checking for NULL() operands is deferred to operation analysis so
+    // that defined operators can accept them.
+ auto restorer{AllowNullPointer()};
+ MaybeExpr result;
+ for (auto riter{finish.rbegin()}; riter != finish.rend(); ++riter) {
+ const parser::Expr &expr{**riter};
+ result = ExprOrVariable(expr, expr.source);
+ if (!result) {
+ return result;
+ }
}
+ return result; // last value was from analysis of "top"
}
- return result; // last value was from analysis of "top"
}
MaybeExpr ExpressionAnalyzer::Analyze(const parser::Expr &expr) {
@@ -4681,7 +4696,7 @@ bool ArgumentAnalyzer::AnyCUDADeviceData() const {
// attribute.
bool ArgumentAnalyzer::HasDeviceDefinedIntrinsicOpOverride(
const char *opr) const {
- if (AnyCUDADeviceData() && !AnyUntypedOrMissingOperand()) {
+ if (AnyCUDADeviceData() && !AnyUntypedOperand() && !AnyMissingOperand()) {
std::string oprNameString{"operator("s + opr + ')'};
parser::CharBlock oprName{oprNameString};
parser::Messages buffer;
@@ -4709,9 +4724,9 @@ bool ArgumentAnalyzer::HasDeviceDefinedIntrinsicOpOverride(
return false;
}
-MaybeExpr ArgumentAnalyzer::TryDefinedOp(
- const char *opr, parser::MessageFixedText error, bool isUserOp) {
- if (AnyUntypedOrMissingOperand()) {
+MaybeExpr ArgumentAnalyzer::TryDefinedOp(const char *opr,
+ parser::MessageFixedText error, bool isUserOp, bool checkForNullPointer) {
+ if (AnyMissingOperand()) {
context_.Say(error, ToUpperCase(opr), TypeAsFortran(0), TypeAsFortran(1));
return std::nullopt;
}
@@ -4790,7 +4805,9 @@ MaybeExpr ArgumentAnalyzer::TryDefinedOp(
context_.Say(
"Operands of %s are not conformable; have rank %d and rank %d"_err_en_US,
ToUpperCase(opr), actuals_[0]->Rank(), actuals_[1]->Rank());
- } else if (CheckForNullPointer() && CheckForAssumedRank()) {
+ } else if (!CheckForAssumedRank()) {
+ } else if (checkForNullPointer && !CheckForNullPointer()) {
+ } else { // use the supplied error
context_.Say(error, ToUpperCase(opr), TypeAsFortran(0), TypeAsFortran(1));
}
return result;
@@ -4808,15 +4825,16 @@ MaybeExpr ArgumentAnalyzer::TryDefinedOp(
for (std::size_t i{0}; i < oprs.size(); ++i) {
parser::Messages buffer;
auto restorer{context_.GetContextualMessages().SetMessages(buffer)};
- if (MaybeExpr thisResult{TryDefinedOp(oprs[i], error)}) {
+ if (MaybeExpr thisResult{TryDefinedOp(oprs[i], error, /*isUserOp=*/false,
+ /*checkForNullPointer=*/false)}) {
result = std::move(thisResult);
hit.push_back(oprs[i]);
hitBuffer = std::move(buffer);
}
}
}
- if (hit.empty()) { // for the error
- result = TryDefinedOp(oprs[0], error);
+ if (hit.empty()) { // run TryDefinedOp() again just to emit errors
+ CHECK(!TryDefinedOp(oprs[0], error).has_value());
} else if (hit.size() > 1) {
context_.Say(
"Matching accessible definitions were found with %zd variant spellings of the generic operator ('%s', '%s')"_err_en_US,
@@ -5232,10 +5250,19 @@ std::string ArgumentAnalyzer::TypeAsFortran(std::size_t i) {
}
}
-bool ArgumentAnalyzer::AnyUntypedOrMissingOperand() const {
+bool ArgumentAnalyzer::AnyUntypedOperand() const {
+ for (const auto &actual : actuals_) {
+ if (actual && !actual->GetType() &&
+ !IsBareNullPointer(actual->UnwrapExpr())) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ArgumentAnalyzer::AnyMissingOperand() const {
for (const auto &actual : actuals_) {
- if (!actual ||
- (!actual->GetType() && !IsBareNullPointer(actual->UnwrapExpr()))) {
+ if (!actual) {
return true;
}
}
@@ -5268,9 +5295,9 @@ void ExprChecker::Post(const parser::DataStmtObject &obj) {
bool ExprChecker::Pre(const parser::DataImpliedDo &ido) {
parser::Walk(std::get<parser::DataImpliedDo::Bounds>(ido.t), *this);
const auto &bounds{std::get<parser::DataImpliedDo::Bounds>(ido.t)};
- auto name{bounds.name.thing.thing};
+ const auto &name{parser::UnwrapRef<parser::Name>(bounds.name)};
int kind{evaluate::ResultType<evaluate::ImpliedDoIndex>::kind};
- if (const auto dynamicType{evaluate::DynamicType::From(*name.symbol)}) {
+ if (const auto dynamicType{evaluate::DynamicType::From(DEREF(name.symbol))}) {
if (dynamicType->category() == TypeCategory::Integer) {
kind = dynamicType->kind();
}
diff --git a/flang/lib/Semantics/mod-file.cpp b/flang/lib/Semantics/mod-file.cpp
index 8074c94..556259d 100644
--- a/flang/lib/Semantics/mod-file.cpp
+++ b/flang/lib/Semantics/mod-file.cpp
@@ -17,6 +17,7 @@
#include "flang/Semantics/semantics.h"
#include "flang/Semantics/symbol.h"
#include "flang/Semantics/tools.h"
+#include "llvm/Frontend/OpenMP/OMP.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
@@ -24,6 +25,7 @@
#include <fstream>
#include <set>
#include <string_view>
+#include <type_traits>
#include <variant>
#include <vector>
@@ -359,6 +361,40 @@ void ModFileWriter::PrepareRenamings(const Scope &scope) {
}
}
+static void PutOpenMPRequirements(llvm::raw_ostream &os, const Symbol &symbol) {
+ using RequiresClauses = WithOmpDeclarative::RequiresClauses;
+ using OmpMemoryOrderType = common::OmpMemoryOrderType;
+
+ const auto [reqs, order]{common::visit(
+ [&](auto &&details)
+ -> std::pair<const RequiresClauses *, const OmpMemoryOrderType *> {
+ if constexpr (std::is_convertible_v<decltype(details),
+ const WithOmpDeclarative &>) {
+ return {details.ompRequires(), details.ompAtomicDefaultMemOrder()};
+ } else {
+ return {nullptr, nullptr};
+ }
+ },
+ symbol.details())};
+
+ if (order) {
+ llvm::omp::Clause admo{llvm::omp::Clause::OMPC_atomic_default_mem_order};
+ os << "!$omp requires "
+ << parser::ToLowerCaseLetters(llvm::omp::getOpenMPClauseName(admo))
+ << '(' << parser::ToLowerCaseLetters(EnumToString(*order)) << ")\n";
+ }
+ if (reqs) {
+ os << "!$omp requires";
+ reqs->IterateOverMembers([&](llvm::omp::Clause f) {
+ if (f != llvm::omp::Clause::OMPC_atomic_default_mem_order) {
+ os << ' '
+ << parser::ToLowerCaseLetters(llvm::omp::getOpenMPClauseName(f));
+ }
+ });
+ os << "\n";
+ }
+}
+
// Put out the visible symbols from scope.
void ModFileWriter::PutSymbols(
const Scope &scope, UnorderedSymbolSet *hermeticModules) {
@@ -396,6 +432,7 @@ void ModFileWriter::PutSymbols(
for (const Symbol &symbol : uses) {
PutUse(symbol);
}
+ PutOpenMPRequirements(decls_, DEREF(scope.symbol()));
for (const auto &set : scope.equivalenceSets()) {
if (!set.empty() &&
!set.front().symbol.test(Symbol::Flag::CompilerCreated)) {
diff --git a/flang/lib/Semantics/openmp-modifiers.cpp b/flang/lib/Semantics/openmp-modifiers.cpp
index af4000c..717fb03 100644
--- a/flang/lib/Semantics/openmp-modifiers.cpp
+++ b/flang/lib/Semantics/openmp-modifiers.cpp
@@ -157,6 +157,22 @@ const OmpModifierDescriptor &OmpGetDescriptor<parser::OmpAlwaysModifier>() {
}
template <>
+const OmpModifierDescriptor &OmpGetDescriptor<parser::OmpAttachModifier>() {
+ static const OmpModifierDescriptor desc{
+ /*name=*/"attach-modifier",
+ /*props=*/
+ {
+ {61, {OmpProperty::Unique}},
+ },
+ /*clauses=*/
+ {
+ {61, {Clause::OMPC_map}},
+ },
+ };
+ return desc;
+}
+
+template <>
const OmpModifierDescriptor &OmpGetDescriptor<parser::OmpAutomapModifier>() {
static const OmpModifierDescriptor desc{
/*name=*/"automap-modifier",
diff --git a/flang/lib/Semantics/openmp-utils.cpp b/flang/lib/Semantics/openmp-utils.cpp
index a8ec4d6..292e73b 100644
--- a/flang/lib/Semantics/openmp-utils.cpp
+++ b/flang/lib/Semantics/openmp-utils.cpp
@@ -13,6 +13,7 @@
#include "flang/Semantics/openmp-utils.h"
#include "flang/Common/Fortran-consts.h"
+#include "flang/Common/idioms.h"
#include "flang/Common/indirection.h"
#include "flang/Common/reference.h"
#include "flang/Common/visit.h"
@@ -59,6 +60,26 @@ const Scope &GetScopingUnit(const Scope &scope) {
return *iter;
}
+const Scope &GetProgramUnit(const Scope &scope) {
+ const Scope *unit{nullptr};
+ for (const Scope *iter{&scope}; !iter->IsTopLevel(); iter = &iter->parent()) {
+ switch (iter->kind()) {
+ case Scope::Kind::BlockData:
+ case Scope::Kind::MainProgram:
+ case Scope::Kind::Module:
+ return *iter;
+ case Scope::Kind::Subprogram:
+ // Ignore subprograms that are nested.
+ unit = iter;
+ break;
+ default:
+ break;
+ }
+ }
+ assert(unit && "Scope not in a program unit");
+ return *unit;
+}
+
SourcedActionStmt GetActionStmt(const parser::ExecutionPartConstruct *x) {
if (x == nullptr) {
return SourcedActionStmt{};
@@ -202,7 +223,7 @@ std::optional<SomeExpr> GetEvaluateExpr(const parser::Expr &parserExpr) {
// ForwardOwningPointer typedExpr
// `- GenericExprWrapper ^.get()
// `- std::optional<Expr> ^->v
- return typedExpr.get()->v;
+ return DEREF(typedExpr.get()).v;
}
std::optional<evaluate::DynamicType> GetDynamicType(
diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp
index 18fc638..7067ed3 100644
--- a/flang/lib/Semantics/resolve-directives.cpp
+++ b/flang/lib/Semantics/resolve-directives.cpp
@@ -435,6 +435,22 @@ public:
return true;
}
+ bool Pre(const parser::UseStmt &x) {
+ if (x.moduleName.symbol) {
+ Scope &thisScope{context_.FindScope(x.moduleName.source)};
+ common::visit(
+ [&](auto &&details) {
+ if constexpr (std::is_convertible_v<decltype(details),
+ const WithOmpDeclarative &>) {
+ AddOmpRequiresToScope(thisScope, details.ompRequires(),
+ details.ompAtomicDefaultMemOrder());
+ }
+ },
+ x.moduleName.symbol->details());
+ }
+ return true;
+ }
+
bool Pre(const parser::OmpMetadirectiveDirective &x) {
PushContext(x.v.source, llvm::omp::Directive::OMPD_metadirective);
return true;
@@ -538,38 +554,55 @@ public:
void Post(const parser::OpenMPFlushConstruct &) { PopContext(); }
bool Pre(const parser::OpenMPRequiresConstruct &x) {
- using Flags = WithOmpDeclarative::RequiresFlags;
- using Requires = WithOmpDeclarative::RequiresFlag;
+ using RequiresClauses = WithOmpDeclarative::RequiresClauses;
PushContext(x.source, llvm::omp::Directive::OMPD_requires);
+ auto getArgument{[&](auto &&maybeClause) {
+ if (maybeClause) {
+ // Scalar<Logical<Constant<common::Indirection<Expr>>>>
+ auto &parserExpr{maybeClause->v.thing.thing.thing.value()};
+ evaluate::ExpressionAnalyzer ea{context_};
+ if (auto &&maybeExpr{ea.Analyze(parserExpr)}) {
+ if (auto v{omp::GetLogicalValue(*maybeExpr)}) {
+ return *v;
+ }
+ }
+ }
+ // If the argument is missing, it is assumed to be true.
+ return true;
+ }};
+
// Gather information from the clauses.
- Flags flags;
- std::optional<common::OmpMemoryOrderType> memOrder;
+ RequiresClauses reqs;
+ const common::OmpMemoryOrderType *memOrder{nullptr};
for (const parser::OmpClause &clause : x.v.Clauses().v) {
- flags |= common::visit(
+ using OmpClause = parser::OmpClause;
+ reqs |= common::visit(
common::visitors{
- [&memOrder](
- const parser::OmpClause::AtomicDefaultMemOrder &atomic) {
- memOrder = atomic.v.v;
- return Flags{};
- },
- [](const parser::OmpClause::ReverseOffload &) {
- return Flags{Requires::ReverseOffload};
+ [&](const OmpClause::AtomicDefaultMemOrder &atomic) {
+ memOrder = &atomic.v.v;
+ return RequiresClauses{};
},
- [](const parser::OmpClause::UnifiedAddress &) {
- return Flags{Requires::UnifiedAddress};
- },
- [](const parser::OmpClause::UnifiedSharedMemory &) {
- return Flags{Requires::UnifiedSharedMemory};
- },
- [](const parser::OmpClause::DynamicAllocators &) {
- return Flags{Requires::DynamicAllocators};
+ [&](auto &&s) {
+ using TypeS = llvm::remove_cvref_t<decltype(s)>;
+ if constexpr ( //
+ std::is_same_v<TypeS, OmpClause::DynamicAllocators> ||
+ std::is_same_v<TypeS, OmpClause::ReverseOffload> ||
+ std::is_same_v<TypeS, OmpClause::SelfMaps> ||
+ std::is_same_v<TypeS, OmpClause::UnifiedAddress> ||
+ std::is_same_v<TypeS, OmpClause::UnifiedSharedMemory>) {
+ if (getArgument(s.v)) {
+ return RequiresClauses{clause.Id()};
+ }
+ }
+ return RequiresClauses{};
},
- [](const auto &) { return Flags{}; }},
+ },
clause.u);
}
+
// Merge clauses into parents' symbols details.
- AddOmpRequiresToScope(currScope(), flags, memOrder);
+ AddOmpRequiresToScope(currScope(), &reqs, memOrder);
return true;
}
void Post(const parser::OpenMPRequiresConstruct &) { PopContext(); }
@@ -1001,8 +1034,9 @@ private:
std::int64_t ordCollapseLevel{0};
- void AddOmpRequiresToScope(Scope &, WithOmpDeclarative::RequiresFlags,
- std::optional<common::OmpMemoryOrderType>);
+ void AddOmpRequiresToScope(Scope &,
+ const WithOmpDeclarative::RequiresClauses *,
+ const common::OmpMemoryOrderType *);
void IssueNonConformanceWarning(llvm::omp::Directive D,
parser::CharBlock source, unsigned EmitFromVersion);
@@ -3309,86 +3343,6 @@ void ResolveOmpParts(
}
}
-void ResolveOmpTopLevelParts(
- SemanticsContext &context, const parser::Program &program) {
- if (!context.IsEnabled(common::LanguageFeature::OpenMP)) {
- return;
- }
-
- // Gather REQUIRES clauses from all non-module top-level program unit symbols,
- // combine them together ensuring compatibility and apply them to all these
- // program units. Modules are skipped because their REQUIRES clauses should be
- // propagated via USE statements instead.
- WithOmpDeclarative::RequiresFlags combinedFlags;
- std::optional<common::OmpMemoryOrderType> combinedMemOrder;
-
- // Function to go through non-module top level program units and extract
- // REQUIRES information to be processed by a function-like argument.
- auto processProgramUnits{[&](auto processFn) {
- for (const parser::ProgramUnit &unit : program.v) {
- if (!std::holds_alternative<common::Indirection<parser::Module>>(
- unit.u) &&
- !std::holds_alternative<common::Indirection<parser::Submodule>>(
- unit.u) &&
- !std::holds_alternative<
- common::Indirection<parser::CompilerDirective>>(unit.u)) {
- Symbol *symbol{common::visit(
- [&context](auto &x) {
- Scope *scope = GetScope(context, x.value());
- return scope ? scope->symbol() : nullptr;
- },
- unit.u)};
- // FIXME There is no symbol defined for MainProgram units in certain
- // circumstances, so REQUIRES information has no place to be stored in
- // these cases.
- if (!symbol) {
- continue;
- }
- common::visit(
- [&](auto &details) {
- if constexpr (std::is_convertible_v<decltype(&details),
- WithOmpDeclarative *>) {
- processFn(*symbol, details);
- }
- },
- symbol->details());
- }
- }
- }};
-
- // Combine global REQUIRES information from all program units except modules
- // and submodules.
- processProgramUnits([&](Symbol &symbol, WithOmpDeclarative &details) {
- if (const WithOmpDeclarative::RequiresFlags *
- flags{details.ompRequires()}) {
- combinedFlags |= *flags;
- }
- if (const common::OmpMemoryOrderType *
- memOrder{details.ompAtomicDefaultMemOrder()}) {
- if (combinedMemOrder && *combinedMemOrder != *memOrder) {
- context.Say(symbol.scope()->sourceRange(),
- "Conflicting '%s' REQUIRES clauses found in compilation "
- "unit"_err_en_US,
- parser::ToUpperCaseLetters(llvm::omp::getOpenMPClauseName(
- llvm::omp::Clause::OMPC_atomic_default_mem_order)
- .str()));
- }
- combinedMemOrder = *memOrder;
- }
- });
-
- // Update all program units except modules and submodules with the combined
- // global REQUIRES information.
- processProgramUnits([&](Symbol &, WithOmpDeclarative &details) {
- if (combinedFlags.any()) {
- details.set_ompRequires(combinedFlags);
- }
- if (combinedMemOrder) {
- details.set_ompAtomicDefaultMemOrder(*combinedMemOrder);
- }
- });
-}
-
static bool IsSymbolThreadprivate(const Symbol &symbol) {
if (const auto *details{symbol.detailsIf<HostAssocDetails>()}) {
return details->symbol().test(Symbol::Flag::OmpThreadprivate);
@@ -3547,42 +3501,39 @@ void OmpAttributeVisitor::CheckLabelContext(const parser::CharBlock source,
}
void OmpAttributeVisitor::AddOmpRequiresToScope(Scope &scope,
- WithOmpDeclarative::RequiresFlags flags,
- std::optional<common::OmpMemoryOrderType> memOrder) {
- Scope *scopeIter = &scope;
- do {
- if (Symbol * symbol{scopeIter->symbol()}) {
- common::visit(
- [&](auto &details) {
- // Store clauses information into the symbol for the parent and
- // enclosing modules, programs, functions and subroutines.
- if constexpr (std::is_convertible_v<decltype(&details),
- WithOmpDeclarative *>) {
- if (flags.any()) {
- if (const WithOmpDeclarative::RequiresFlags *
- otherFlags{details.ompRequires()}) {
- flags |= *otherFlags;
- }
- details.set_ompRequires(flags);
+ const WithOmpDeclarative::RequiresClauses *reqs,
+ const common::OmpMemoryOrderType *memOrder) {
+ const Scope &programUnit{omp::GetProgramUnit(scope)};
+ using RequiresClauses = WithOmpDeclarative::RequiresClauses;
+ RequiresClauses combinedReqs{reqs ? *reqs : RequiresClauses{}};
+
+ if (auto *symbol{const_cast<Symbol *>(programUnit.symbol())}) {
+ common::visit(
+ [&](auto &details) {
+ if constexpr (std::is_convertible_v<decltype(&details),
+ WithOmpDeclarative *>) {
+ if (combinedReqs.any()) {
+ if (const RequiresClauses *otherReqs{details.ompRequires()}) {
+ combinedReqs |= *otherReqs;
}
- if (memOrder) {
- if (details.has_ompAtomicDefaultMemOrder() &&
- *details.ompAtomicDefaultMemOrder() != *memOrder) {
- context_.Say(scopeIter->sourceRange(),
- "Conflicting '%s' REQUIRES clauses found in compilation "
- "unit"_err_en_US,
- parser::ToUpperCaseLetters(llvm::omp::getOpenMPClauseName(
- llvm::omp::Clause::OMPC_atomic_default_mem_order)
- .str()));
- }
- details.set_ompAtomicDefaultMemOrder(*memOrder);
+ details.set_ompRequires(combinedReqs);
+ }
+ if (memOrder) {
+ if (details.has_ompAtomicDefaultMemOrder() &&
+ *details.ompAtomicDefaultMemOrder() != *memOrder) {
+ context_.Say(programUnit.sourceRange(),
+ "Conflicting '%s' REQUIRES clauses found in compilation "
+ "unit"_err_en_US,
+ parser::ToUpperCaseLetters(llvm::omp::getOpenMPClauseName(
+ llvm::omp::Clause::OMPC_atomic_default_mem_order)
+ .str()));
}
+ details.set_ompAtomicDefaultMemOrder(*memOrder);
}
- },
- symbol->details());
- }
- scopeIter = &scopeIter->parent();
- } while (!scopeIter->IsGlobal());
+ }
+ },
+ symbol->details());
+ }
}
void OmpAttributeVisitor::IssueNonConformanceWarning(llvm::omp::Directive D,
diff --git a/flang/lib/Semantics/resolve-directives.h b/flang/lib/Semantics/resolve-directives.h
index 5a890c2..36d3ce9 100644
--- a/flang/lib/Semantics/resolve-directives.h
+++ b/flang/lib/Semantics/resolve-directives.h
@@ -23,7 +23,5 @@ class SemanticsContext;
void ResolveAccParts(
SemanticsContext &, const parser::ProgramUnit &, Scope *topScope);
void ResolveOmpParts(SemanticsContext &, const parser::ProgramUnit &);
-void ResolveOmpTopLevelParts(SemanticsContext &, const parser::Program &);
-
} // namespace Fortran::semantics
#endif
diff --git a/flang/lib/Semantics/resolve-names-utils.cpp b/flang/lib/Semantics/resolve-names-utils.cpp
index 742bb74..ac67799 100644
--- a/flang/lib/Semantics/resolve-names-utils.cpp
+++ b/flang/lib/Semantics/resolve-names-utils.cpp
@@ -492,12 +492,14 @@ bool EquivalenceSets::CheckDesignator(const parser::Designator &designator) {
const auto &range{std::get<parser::SubstringRange>(x.t)};
bool ok{CheckDataRef(designator.source, dataRef)};
if (const auto &lb{std::get<0>(range.t)}) {
- ok &= CheckSubstringBound(lb->thing.thing.value(), true);
+ ok &= CheckSubstringBound(
+ parser::UnwrapRef<parser::Expr>(lb), true);
} else {
currObject_.substringStart = 1;
}
if (const auto &ub{std::get<1>(range.t)}) {
- ok &= CheckSubstringBound(ub->thing.thing.value(), false);
+ ok &= CheckSubstringBound(
+ parser::UnwrapRef<parser::Expr>(ub), false);
}
return ok;
},
@@ -528,7 +530,8 @@ bool EquivalenceSets::CheckDataRef(
return false;
},
[&](const parser::IntExpr &y) {
- return CheckArrayBound(y.thing.value());
+ return CheckArrayBound(
+ parser::UnwrapRef<parser::Expr>(y));
},
},
subscript.u);
diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp
index 86121880..699de41 100644
--- a/flang/lib/Semantics/resolve-names.cpp
+++ b/flang/lib/Semantics/resolve-names.cpp
@@ -1140,7 +1140,7 @@ protected:
std::optional<SourceName> BeginCheckOnIndexUseInOwnBounds(
const parser::DoVariable &name) {
std::optional<SourceName> result{checkIndexUseInOwnBounds_};
- checkIndexUseInOwnBounds_ = name.thing.thing.source;
+ checkIndexUseInOwnBounds_ = parser::UnwrapRef<parser::Name>(name).source;
return result;
}
void EndCheckOnIndexUseInOwnBounds(const std::optional<SourceName> &restore) {
@@ -2130,7 +2130,7 @@ public:
void Post(const parser::SubstringInquiry &);
template <typename A, typename B>
void Post(const parser::LoopBounds<A, B> &x) {
- ResolveName(*parser::Unwrap<parser::Name>(x.name));
+ ResolveName(parser::UnwrapRef<parser::Name>(x.name));
}
void Post(const parser::ProcComponentRef &);
bool Pre(const parser::FunctionReference &);
@@ -2560,7 +2560,7 @@ KindExpr DeclTypeSpecVisitor::GetKindParamExpr(
CHECK(!state_.originalKindParameter);
// Save a pointer to the KIND= expression in the parse tree
// in case we need to reanalyze it during PDT instantiation.
- state_.originalKindParameter = &expr->thing.thing.thing.value();
+ state_.originalKindParameter = parser::Unwrap<parser::Expr>(expr);
}
}
// Inhibit some errors now that will be caught later during instantiations.
@@ -5649,6 +5649,7 @@ bool DeclarationVisitor::Pre(const parser::NamedConstantDef &x) {
if (details->init() || symbol.test(Symbol::Flag::InDataStmt)) {
Say(name, "Named constant '%s' already has a value"_err_en_US);
}
+ parser::CharBlock at{parser::UnwrapRef<parser::Expr>(expr).source};
if (inOldStyleParameterStmt_) {
// non-standard extension PARAMETER statement (no parentheses)
Walk(expr);
@@ -5657,7 +5658,6 @@ bool DeclarationVisitor::Pre(const parser::NamedConstantDef &x) {
SayWithDecl(name, symbol,
"Alternative style PARAMETER '%s' must not already have an explicit type"_err_en_US);
} else if (folded) {
- auto at{expr.thing.value().source};
if (evaluate::IsActuallyConstant(*folded)) {
if (const auto *type{currScope().GetType(*folded)}) {
if (type->IsPolymorphic()) {
@@ -5682,8 +5682,7 @@ bool DeclarationVisitor::Pre(const parser::NamedConstantDef &x) {
// standard-conforming PARAMETER statement (with parentheses)
ApplyImplicitRules(symbol);
Walk(expr);
- if (auto converted{EvaluateNonPointerInitializer(
- symbol, expr, expr.thing.value().source)}) {
+ if (auto converted{EvaluateNonPointerInitializer(symbol, expr, at)}) {
details->set_init(std::move(*converted));
}
}
@@ -6149,7 +6148,7 @@ bool DeclarationVisitor::Pre(const parser::KindParam &x) {
if (const auto *kind{std::get_if<
parser::Scalar<parser::Integer<parser::Constant<parser::Name>>>>(
&x.u)}) {
- const parser::Name &name{kind->thing.thing.thing};
+ const auto &name{parser::UnwrapRef<parser::Name>(kind)};
if (!FindSymbol(name)) {
Say(name, "Parameter '%s' not found"_err_en_US);
}
@@ -7460,7 +7459,7 @@ void DeclarationVisitor::DeclareLocalEntity(
Symbol *DeclarationVisitor::DeclareStatementEntity(
const parser::DoVariable &doVar,
const std::optional<parser::IntegerTypeSpec> &type) {
- const parser::Name &name{doVar.thing.thing};
+ const auto &name{parser::UnwrapRef<parser::Name>(doVar)};
const DeclTypeSpec *declTypeSpec{nullptr};
if (auto *prev{FindSymbol(name)}) {
if (prev->owner() == currScope()) {
@@ -7893,13 +7892,14 @@ bool ConstructVisitor::Pre(const parser::DataIDoObject &x) {
common::visit(
common::visitors{
[&](const parser::Scalar<Indirection<parser::Designator>> &y) {
- Walk(y.thing.value());
- const parser::Name &first{parser::GetFirstName(y.thing.value())};
+ const auto &designator{parser::UnwrapRef<parser::Designator>(y)};
+ Walk(designator);
+ const parser::Name &first{parser::GetFirstName(designator)};
if (first.symbol) {
first.symbol->set(Symbol::Flag::InDataStmt);
}
},
- [&](const Indirection<parser::DataImpliedDo> &y) { Walk(y.value()); },
+ [&](const Indirection<parser::DataImpliedDo> &y) { Walk(y); },
},
x.u);
return false;
@@ -8582,8 +8582,7 @@ public:
void Post(const parser::WriteStmt &) { inAsyncIO_ = false; }
void Post(const parser::IoControlSpec::Size &size) {
if (const auto *designator{
- std::get_if<common::Indirection<parser::Designator>>(
- &size.v.thing.thing.u)}) {
+ parser::Unwrap<common::Indirection<parser::Designator>>(size)}) {
NoteAsyncIODesignator(designator->value());
}
}
@@ -9175,16 +9174,17 @@ bool DeclarationVisitor::CheckNonPointerInitialization(
}
void DeclarationVisitor::NonPointerInitialization(
- const parser::Name &name, const parser::ConstantExpr &expr) {
+ const parser::Name &name, const parser::ConstantExpr &constExpr) {
if (CheckNonPointerInitialization(
name, /*inLegacyDataInitialization=*/false)) {
Symbol &ultimate{name.symbol->GetUltimate()};
auto &details{ultimate.get<ObjectEntityDetails>()};
+ const auto &expr{parser::UnwrapRef<parser::Expr>(constExpr)};
if (ultimate.owner().IsParameterizedDerivedType()) {
// Save the expression for per-instantiation analysis.
- details.set_unanalyzedPDTComponentInit(&expr.thing.value());
+ details.set_unanalyzedPDTComponentInit(&expr);
} else if (MaybeExpr folded{EvaluateNonPointerInitializer(
- ultimate, expr, expr.thing.value().source)}) {
+ ultimate, constExpr, expr.source)}) {
details.set_init(std::move(*folded));
ultimate.set(Symbol::Flag::InDataStmt, false);
}
@@ -10687,9 +10687,6 @@ void ResolveNamesVisitor::Post(const parser::Program &x) {
CHECK(!attrs_);
CHECK(!cudaDataAttr_);
CHECK(!GetDeclTypeSpec());
- // Top-level resolution to propagate information across program units after
- // each of them has been resolved separately.
- ResolveOmpTopLevelParts(context(), x);
}
// A singleton instance of the scope -> IMPLICIT rules mapping is
diff --git a/flang/lib/Semantics/symbol.cpp b/flang/lib/Semantics/symbol.cpp
index 69169469..0ec44b7 100644
--- a/flang/lib/Semantics/symbol.cpp
+++ b/flang/lib/Semantics/symbol.cpp
@@ -70,6 +70,32 @@ static void DumpList(llvm::raw_ostream &os, const char *label, const T &list) {
}
}
+llvm::raw_ostream &operator<<(
+ llvm::raw_ostream &os, const WithOmpDeclarative &x) {
+ if (x.has_ompRequires() || x.has_ompAtomicDefaultMemOrder()) {
+ os << " OmpRequirements:(";
+ if (const common::OmpMemoryOrderType *admo{x.ompAtomicDefaultMemOrder()}) {
+ os << parser::ToLowerCaseLetters(llvm::omp::getOpenMPClauseName(
+ llvm::omp::Clause::OMPC_atomic_default_mem_order))
+ << '(' << parser::ToLowerCaseLetters(EnumToString(*admo)) << ')';
+ if (x.has_ompRequires()) {
+ os << ',';
+ }
+ }
+ if (const WithOmpDeclarative::RequiresClauses *reqs{x.ompRequires()}) {
+ size_t num{0}, size{reqs->count()};
+ reqs->IterateOverMembers([&](llvm::omp::Clause f) {
+ os << parser::ToLowerCaseLetters(llvm::omp::getOpenMPClauseName(f));
+ if (++num < size) {
+ os << ',';
+ }
+ });
+ }
+ os << ')';
+ }
+ return os;
+}
+
void SubprogramDetails::set_moduleInterface(Symbol &symbol) {
CHECK(!moduleInterface_);
moduleInterface_ = &symbol;
@@ -150,6 +176,7 @@ llvm::raw_ostream &operator<<(
os << x;
}
}
+ os << static_cast<const WithOmpDeclarative &>(x);
return os;
}
@@ -580,7 +607,9 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const Details &details) {
common::visit( //
common::visitors{
[&](const UnknownDetails &) {},
- [&](const MainProgramDetails &) {},
+ [&](const MainProgramDetails &x) {
+ os << static_cast<const WithOmpDeclarative &>(x);
+ },
[&](const ModuleDetails &x) {
if (x.isSubmodule()) {
os << " (";
@@ -599,6 +628,7 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const Details &details) {
if (x.isDefaultPrivate()) {
os << " isDefaultPrivate";
}
+ os << static_cast<const WithOmpDeclarative &>(x);
},
[&](const SubprogramNameDetails &x) {
os << ' ' << EnumToString(x.kind());