diff options
Diffstat (limited to 'flang/lib/Lower')
-rw-r--r-- | flang/lib/Lower/Allocatable.cpp | 13 | ||||
-rw-r--r-- | flang/lib/Lower/Bridge.cpp | 5 | ||||
-rw-r--r-- | flang/lib/Lower/CUDA.cpp | 27 | ||||
-rw-r--r-- | flang/lib/Lower/IO.cpp | 3 | ||||
-rw-r--r-- | flang/lib/Lower/OpenACC.cpp | 27 | ||||
-rw-r--r-- | flang/lib/Lower/OpenMP/Atomic.cpp | 97 | ||||
-rw-r--r-- | flang/lib/Lower/OpenMP/ClauseProcessor.cpp | 6 | ||||
-rw-r--r-- | flang/lib/Lower/OpenMP/Clauses.cpp | 85 | ||||
-rw-r--r-- | flang/lib/Lower/OpenMP/OpenMP.cpp | 15 |
9 files changed, 231 insertions, 47 deletions
diff --git a/flang/lib/Lower/Allocatable.cpp b/flang/lib/Lower/Allocatable.cpp index 53239cb..e7a6c4d 100644 --- a/flang/lib/Lower/Allocatable.cpp +++ b/flang/lib/Lower/Allocatable.cpp @@ -629,6 +629,10 @@ private: unsigned allocatorIdx = Fortran::lower::getAllocatorIdx(alloc.getSymbol()); fir::ExtendedValue exv = isSource ? sourceExv : moldExv; + if (const Fortran::semantics::Symbol *sym{GetLastSymbol(sourceExpr)}) + if (Fortran::semantics::IsCUDADevice(*sym)) + TODO(loc, "CUDA Fortran: allocate with device source"); + // Generate a sequence of runtime calls. errorManager.genStatCheck(builder, loc); genAllocateObjectInit(box, allocatorIdx); @@ -767,6 +771,15 @@ private: const fir::MutableBoxValue &box, ErrorManager &errorManager, const Fortran::semantics::Symbol &sym) { + + if (const Fortran::semantics::DeclTypeSpec *declTypeSpec = sym.GetType()) + if (const Fortran::semantics::DerivedTypeSpec *derivedTypeSpec = + declTypeSpec->AsDerived()) + if (derivedTypeSpec->HasDefaultInitialization( + /*ignoreAllocatable=*/true, /*ignorePointer=*/true)) + TODO(loc, + "CUDA Fortran: allocate on device with default initialization"); + Fortran::lower::StatementContext stmtCtx; cuf::DataAttributeAttr cudaAttr = Fortran::lower::translateSymbolCUFDataAttribute(builder.getContext(), diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index 68adf34..525fb0e 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -4987,11 +4987,8 @@ private: // host = device if (!lhsIsDevice && rhsIsDevice) { - if (Fortran::lower::isTransferWithConversion(rhs)) { + if (auto elementalOp = Fortran::lower::isTransferWithConversion(rhs)) { mlir::OpBuilder::InsertionGuard insertionGuard(builder); - auto elementalOp = - mlir::dyn_cast<hlfir::ElementalOp>(rhs.getDefiningOp()); - assert(elementalOp && "expect elemental op"); auto designateOp = *elementalOp.getBody()->getOps<hlfir::DesignateOp>().begin(); builder.setInsertionPoint(elementalOp); diff --git 
a/flang/lib/Lower/CUDA.cpp b/flang/lib/Lower/CUDA.cpp index bb4bdee..9501b0e 100644 --- a/flang/lib/Lower/CUDA.cpp +++ b/flang/lib/Lower/CUDA.cpp @@ -68,11 +68,26 @@ cuf::DataAttributeAttr Fortran::lower::translateSymbolCUFDataAttribute( return cuf::getDataAttribute(mlirContext, cudaAttr); } -bool Fortran::lower::isTransferWithConversion(mlir::Value rhs) { +hlfir::ElementalOp Fortran::lower::isTransferWithConversion(mlir::Value rhs) { + auto isConversionElementalOp = [](hlfir::ElementalOp elOp) { + return llvm::hasSingleElement( + elOp.getBody()->getOps<hlfir::DesignateOp>()) && + llvm::hasSingleElement(elOp.getBody()->getOps<fir::LoadOp>()) == 1 && + llvm::hasSingleElement(elOp.getBody()->getOps<fir::ConvertOp>()) == + 1; + }; + if (auto declOp = mlir::dyn_cast<hlfir::DeclareOp>(rhs.getDefiningOp())) { + if (!declOp.getMemref().getDefiningOp()) + return {}; + if (auto associateOp = mlir::dyn_cast<hlfir::AssociateOp>( + declOp.getMemref().getDefiningOp())) + if (auto elOp = mlir::dyn_cast<hlfir::ElementalOp>( + associateOp.getSource().getDefiningOp())) + if (isConversionElementalOp(elOp)) + return elOp; + } if (auto elOp = mlir::dyn_cast<hlfir::ElementalOp>(rhs.getDefiningOp())) - if (llvm::hasSingleElement(elOp.getBody()->getOps<hlfir::DesignateOp>()) && - llvm::hasSingleElement(elOp.getBody()->getOps<fir::LoadOp>()) == 1 && - llvm::hasSingleElement(elOp.getBody()->getOps<fir::ConvertOp>()) == 1) - return true; - return false; + if (isConversionElementalOp(elOp)) + return elOp; + return {}; } diff --git a/flang/lib/Lower/IO.cpp b/flang/lib/Lower/IO.cpp index 604b137..cd53dc9 100644 --- a/flang/lib/Lower/IO.cpp +++ b/flang/lib/Lower/IO.cpp @@ -950,7 +950,8 @@ static void genIoLoop(Fortran::lower::AbstractConverter &converter, makeNextConditionalOn(builder, loc, checkResult, ok, inLoop); const auto &itemList = std::get<0>(ioImpliedDo.t); const auto &control = std::get<1>(ioImpliedDo.t); - const auto &loopSym = *control.name.thing.thing.symbol; + const auto &loopSym 
= + *Fortran::parser::UnwrapRef<Fortran::parser::Name>(control.name).symbol; mlir::Value loopVar = fir::getBase(converter.genExprAddr( Fortran::evaluate::AsGenericExpr(loopSym).value(), stmtCtx)); auto genControlValue = [&](const Fortran::parser::ScalarIntExpr &expr) { diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp index 62e5c0c..cfb1891 100644 --- a/flang/lib/Lower/OpenACC.cpp +++ b/flang/lib/Lower/OpenACC.cpp @@ -978,15 +978,40 @@ static RecipeOp genRecipeOp( auto mappableTy = mlir::dyn_cast<mlir::acc::MappableType>(ty); assert(mappableTy && "Expected that all variable types are considered mappable"); + bool needsDestroy = false; auto retVal = mappableTy.generatePrivateInit( builder, loc, mlir::cast<mlir::TypedValue<mlir::acc::MappableType>>( initBlock->getArgument(0)), initName, initBlock->getArguments().take_back(initBlock->getArguments().size() - 1), - initValue); + initValue, needsDestroy); mlir::acc::YieldOp::create(builder, loc, retVal ? retVal : initBlock->getArgument(0)); + // Create destroy region and generate destruction if requested. + if (needsDestroy) { + llvm::SmallVector<mlir::Type> destroyArgsTy; + llvm::SmallVector<mlir::Location> destroyArgsLoc; + // original and privatized/reduction value + destroyArgsTy.push_back(ty); + destroyArgsTy.push_back(ty); + destroyArgsLoc.push_back(loc); + destroyArgsLoc.push_back(loc); + // Append bounds arguments (if any) in the same order as init region + if (argsTy.size() > 1) { + destroyArgsTy.append(argsTy.begin() + 1, argsTy.end()); + destroyArgsLoc.insert(destroyArgsLoc.end(), argsTy.size() - 1, loc); + } + + builder.createBlock(&recipe.getDestroyRegion(), + recipe.getDestroyRegion().end(), destroyArgsTy, + destroyArgsLoc); + builder.setInsertionPointToEnd(&recipe.getDestroyRegion().back()); + // Call interface on the privatized/reduction value (2nd argument). 
+ (void)mappableTy.generatePrivateDestroy( + builder, loc, recipe.getDestroyRegion().front().getArgument(1)); + mlir::acc::TerminatorOp::create(builder, loc); + } return recipe; } diff --git a/flang/lib/Lower/OpenMP/Atomic.cpp b/flang/lib/Lower/OpenMP/Atomic.cpp index ff82a36..3ab8a58 100644 --- a/flang/lib/Lower/OpenMP/Atomic.cpp +++ b/flang/lib/Lower/OpenMP/Atomic.cpp @@ -20,6 +20,7 @@ #include "flang/Optimizer/Builder/FIRBuilder.h" #include "flang/Optimizer/Builder/Todo.h" #include "flang/Parser/parse-tree.h" +#include "flang/Semantics/openmp-utils.h" #include "flang/Semantics/semantics.h" #include "flang/Semantics/type.h" #include "flang/Support/Fortran.h" @@ -183,12 +184,8 @@ getMemoryOrderFromRequires(const semantics::Scope &scope) { // scope. // For safety, traverse all enclosing scopes and check if their symbol // contains REQUIRES. - for (const auto *sc{&scope}; sc->kind() != semantics::Scope::Kind::Global; - sc = &sc->parent()) { - const semantics::Symbol *sym = sc->symbol(); - if (!sym) - continue; - + const semantics::Scope &unitScope = semantics::omp::GetProgramUnit(scope); + if (auto *symbol = unitScope.symbol()) { const common::OmpMemoryOrderType *admo = common::visit( [](auto &&s) { using WithOmpDeclarative = semantics::WithOmpDeclarative; @@ -198,7 +195,8 @@ getMemoryOrderFromRequires(const semantics::Scope &scope) { } return static_cast<const common::OmpMemoryOrderType *>(nullptr); }, - sym->details()); + symbol->details()); + if (admo) return getMemoryOrderKind(*admo); } @@ -214,19 +212,83 @@ getDefaultAtomicMemOrder(semantics::SemanticsContext &semaCtx) { return std::nullopt; } -static std::optional<mlir::omp::ClauseMemoryOrderKind> +static std::pair<std::optional<mlir::omp::ClauseMemoryOrderKind>, bool> getAtomicMemoryOrder(semantics::SemanticsContext &semaCtx, const omp::List<omp::Clause> &clauses, const semantics::Scope &scope) { for (const omp::Clause &clause : clauses) { if (auto maybeKind = getMemoryOrderKind(clause.id)) - return 
*maybeKind; + return std::make_pair(*maybeKind, /*canOverride=*/false); } if (auto maybeKind = getMemoryOrderFromRequires(scope)) - return *maybeKind; + return std::make_pair(*maybeKind, /*canOverride=*/true); - return getDefaultAtomicMemOrder(semaCtx); + return std::make_pair(getDefaultAtomicMemOrder(semaCtx), + /*canOverride=*/false); +} + +static std::optional<mlir::omp::ClauseMemoryOrderKind> +makeValidForAction(std::optional<mlir::omp::ClauseMemoryOrderKind> memOrder, + int action0, int action1, unsigned version) { + // When the atomic default memory order specified on a REQUIRES directive is + // disallowed on a given ATOMIC operation, and it's not ACQ_REL, the order + // reverts to RELAXED. ACQ_REL decays to either ACQUIRE or RELEASE, depending + // on the operation. + + if (!memOrder) { + return memOrder; + } + + using Analysis = parser::OpenMPAtomicConstruct::Analysis; + // Figure out the main action (i.e. disregard a potential capture operation) + int action = action0; + if (action1 != Analysis::None) + action = action0 == Analysis::Read ? 
action1 : action0; + + // Available orderings: acquire, acq_rel, relaxed, release, seq_cst + + if (action == Analysis::Read) { + // "acq_rel" decays to "acquire" + if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acq_rel) + return mlir::omp::ClauseMemoryOrderKind::Acquire; + } else if (action == Analysis::Write) { + // "acq_rel" decays to "release" + if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acq_rel) + return mlir::omp::ClauseMemoryOrderKind::Release; + } + + if (version > 50) { + if (action == Analysis::Read) { + // "release" prohibited + if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Release) + return mlir::omp::ClauseMemoryOrderKind::Relaxed; + } + if (action == Analysis::Write) { + // "acquire" prohibited + if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acquire) + return mlir::omp::ClauseMemoryOrderKind::Relaxed; + } + } else { + if (action == Analysis::Read) { + // "release" prohibited + if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Release) + return mlir::omp::ClauseMemoryOrderKind::Relaxed; + } else { + if (action & Analysis::Write) { // include "update" + // "acquire" prohibited + if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acquire) + return mlir::omp::ClauseMemoryOrderKind::Relaxed; + if (action == Analysis::Update) { + // "acq_rel" prohibited + if (*memOrder == mlir::omp::ClauseMemoryOrderKind::Acq_rel) + return mlir::omp::ClauseMemoryOrderKind::Relaxed; + } + } + } + } + + return memOrder; } static mlir::omp::ClauseMemoryOrderKindAttr @@ -449,16 +511,19 @@ void Fortran::lower::omp::lowerAtomic( mlir::Value atomAddr = fir::getBase(converter.genExprAddr(atom, stmtCtx, &loc)); mlir::IntegerAttr hint = getAtomicHint(converter, clauses); - std::optional<mlir::omp::ClauseMemoryOrderKind> memOrder = - getAtomicMemoryOrder(semaCtx, clauses, - semaCtx.FindScope(construct.source)); + auto [memOrder, canOverride] = getAtomicMemoryOrder( + semaCtx, clauses, semaCtx.FindScope(construct.source)); + + unsigned version = 
semaCtx.langOptions().OpenMPVersion; + int action0 = analysis.op0.what & analysis.Action; + int action1 = analysis.op1.what & analysis.Action; + if (canOverride) + memOrder = makeValidForAction(memOrder, action0, action1, version); if (auto *cond = get(analysis.cond)) { (void)cond; TODO(loc, "OpenMP ATOMIC COMPARE"); } else { - int action0 = analysis.op0.what & analysis.Action; - int action1 = analysis.op1.what & analysis.Action; mlir::Operation *captureOp = nullptr; fir::FirOpBuilder::InsertPoint preAt = builder.saveInsertionPoint(); fir::FirOpBuilder::InsertPoint atomicAt, postAt; diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp index 55eda7e..85398be 100644 --- a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp +++ b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp @@ -1343,8 +1343,10 @@ bool ClauseProcessor::processMap( const parser::CharBlock &source) { using Map = omp::clause::Map; mlir::Location clauseLocation = converter.genLocation(source); - const auto &[mapType, typeMods, refMod, mappers, iterator, objects] = - clause.t; + const auto &[mapType, typeMods, attachMod, refMod, mappers, iterator, + objects] = clause.t; + if (attachMod) + TODO(currentLocation, "ATTACH modifier is not implemented yet"); llvm::omp::OpenMPOffloadMappingFlags mapTypeBits = llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_NONE; std::string mapperIdName = "__implicit_mapper"; diff --git a/flang/lib/Lower/OpenMP/Clauses.cpp b/flang/lib/Lower/OpenMP/Clauses.cpp index fac37a3..ba34212 100644 --- a/flang/lib/Lower/OpenMP/Clauses.cpp +++ b/flang/lib/Lower/OpenMP/Clauses.cpp @@ -219,7 +219,6 @@ MAKE_EMPTY_CLASS(AcqRel, AcqRel); MAKE_EMPTY_CLASS(Acquire, Acquire); MAKE_EMPTY_CLASS(Capture, Capture); MAKE_EMPTY_CLASS(Compare, Compare); -MAKE_EMPTY_CLASS(DynamicAllocators, DynamicAllocators); MAKE_EMPTY_CLASS(Full, Full); MAKE_EMPTY_CLASS(Inbranch, Inbranch); MAKE_EMPTY_CLASS(Mergeable, Mergeable); @@ -235,13 +234,9 @@ MAKE_EMPTY_CLASS(OmpxBare, 
OmpxBare); MAKE_EMPTY_CLASS(Read, Read); MAKE_EMPTY_CLASS(Relaxed, Relaxed); MAKE_EMPTY_CLASS(Release, Release); -MAKE_EMPTY_CLASS(ReverseOffload, ReverseOffload); MAKE_EMPTY_CLASS(SeqCst, SeqCst); -MAKE_EMPTY_CLASS(SelfMaps, SelfMaps); MAKE_EMPTY_CLASS(Simd, Simd); MAKE_EMPTY_CLASS(Threads, Threads); -MAKE_EMPTY_CLASS(UnifiedAddress, UnifiedAddress); -MAKE_EMPTY_CLASS(UnifiedSharedMemory, UnifiedSharedMemory); MAKE_EMPTY_CLASS(Unknown, Unknown); MAKE_EMPTY_CLASS(Untied, Untied); MAKE_EMPTY_CLASS(Weak, Weak); @@ -775,7 +770,18 @@ Doacross make(const parser::OmpClause::Doacross &inp, return makeDoacross(inp.v.v, semaCtx); } -// DynamicAllocators: empty +DynamicAllocators make(const parser::OmpClause::DynamicAllocators &inp, + semantics::SemanticsContext &semaCtx) { + // inp.v -> std::optional<parser::OmpDynamicAllocatorsClause> + auto &&maybeRequired = maybeApply( + [&](const parser::OmpDynamicAllocatorsClause &c) { + return makeExpr(c.v, semaCtx); + }, + inp.v); + + return DynamicAllocators{/*Required=*/std::move(maybeRequired)}; +} + DynGroupprivate make(const parser::OmpClause::DynGroupprivate &inp, semantics::SemanticsContext &semaCtx) { @@ -1069,6 +1075,15 @@ Map make(const parser::OmpClause::Map &inp, ); CLAUSET_ENUM_CONVERT( // + convertAttachMod, parser::OmpAttachModifier::Value, Map::AttachModifier, + // clang-format off + MS(Always, Always) + MS(Auto, Auto) + MS(Never, Never) + // clang-format on + ); + + CLAUSET_ENUM_CONVERT( // convertRefMod, parser::OmpRefModifier::Value, Map::RefModifier, // clang-format off MS(Ref_Ptee, RefPtee) @@ -1115,6 +1130,13 @@ Map make(const parser::OmpClause::Map &inp, if (!modSet.empty()) maybeTypeMods = Map::MapTypeModifiers(modSet.begin(), modSet.end()); + auto attachMod = [&]() -> std::optional<Map::AttachModifier> { + if (auto *t = + semantics::OmpGetUniqueModifier<parser::OmpAttachModifier>(mods)) + return convertAttachMod(t->v); + return std::nullopt; + }(); + + auto refMod = [&]() -> std::optional<Map::RefModifier> { if 
(auto *t = semantics::OmpGetUniqueModifier<parser::OmpRefModifier>(mods)) return convertRefMod(t->v); @@ -1135,6 +1157,7 @@ Map make(const parser::OmpClause::Map &inp, return Map{{/*MapType=*/std::move(type), /*MapTypeModifiers=*/std::move(maybeTypeMods), + /*AttachModifier=*/std::move(attachMod), /*RefModifier=*/std::move(refMod), /*Mapper=*/std::move(mappers), /*Iterator=*/std::move(iterator), /*LocatorList=*/makeObjects(t2, semaCtx)}}; @@ -1321,7 +1344,18 @@ Reduction make(const parser::OmpClause::Reduction &inp, // Relaxed: empty // Release: empty -// ReverseOffload: empty + +ReverseOffload make(const parser::OmpClause::ReverseOffload &inp, + semantics::SemanticsContext &semaCtx) { + // inp.v -> std::optional<parser::OmpReverseOffloadClause> + auto &&maybeRequired = maybeApply( + [&](const parser::OmpReverseOffloadClause &c) { + return makeExpr(c.v, semaCtx); + }, + inp.v); + + return ReverseOffload{/*Required=*/std::move(maybeRequired)}; +} Safelen make(const parser::OmpClause::Safelen &inp, semantics::SemanticsContext &semaCtx) { @@ -1374,6 +1408,18 @@ Schedule make(const parser::OmpClause::Schedule &inp, // SeqCst: empty +SelfMaps make(const parser::OmpClause::SelfMaps &inp, + semantics::SemanticsContext &semaCtx) { + // inp.v -> std::optional<parser::OmpSelfMapsClause> + auto &&maybeRequired = maybeApply( + [&](const parser::OmpSelfMapsClause &c) { + return makeExpr(c.v, semaCtx); + }, + inp.v); + + return SelfMaps{/*Required=*/std::move(maybeRequired)}; +} + Severity make(const parser::OmpClause::Severity &inp, semantics::SemanticsContext &semaCtx) { // inp -> empty @@ -1463,8 +1509,29 @@ To make(const parser::OmpClause::To &inp, /*LocatorList=*/makeObjects(t3, semaCtx)}}; } -// UnifiedAddress: empty -// UnifiedSharedMemory: empty +UnifiedAddress make(const parser::OmpClause::UnifiedAddress &inp, + semantics::SemanticsContext &semaCtx) { + // inp.v -> std::optional<parser::OmpUnifiedAddressClause> + auto &&maybeRequired = maybeApply( + [&](const 
parser::OmpUnifiedAddressClause &c) { + return makeExpr(c.v, semaCtx); + }, + inp.v); + + return UnifiedAddress{/*Required=*/std::move(maybeRequired)}; +} + +UnifiedSharedMemory make(const parser::OmpClause::UnifiedSharedMemory &inp, + semantics::SemanticsContext &semaCtx) { + // inp.v -> std::optional<parser::OmpUnifiedSharedMemoryClause> + auto &&maybeRequired = maybeApply( + [&](const parser::OmpUnifiedSharedMemoryClause &c) { + return makeExpr(c.v, semaCtx); + }, + inp.v); + + return UnifiedSharedMemory{/*Required=*/std::move(maybeRequired)}; +} Uniform make(const parser::OmpClause::Uniform &inp, semantics::SemanticsContext &semaCtx) { diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp index 444f274..f86ee01 100644 --- a/flang/lib/Lower/OpenMP/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP/OpenMP.cpp @@ -4208,18 +4208,17 @@ bool Fortran::lower::markOpenMPDeferredDeclareTargetFunctions( void Fortran::lower::genOpenMPRequires(mlir::Operation *mod, const semantics::Symbol *symbol) { using MlirRequires = mlir::omp::ClauseRequires; - using SemaRequires = semantics::WithOmpDeclarative::RequiresFlag; if (auto offloadMod = llvm::dyn_cast<mlir::omp::OffloadModuleInterface>(mod)) { - semantics::WithOmpDeclarative::RequiresFlags semaFlags; + semantics::WithOmpDeclarative::RequiresClauses reqs; if (symbol) { common::visit( [&](const auto &details) { if constexpr (std::is_base_of_v<semantics::WithOmpDeclarative, std::decay_t<decltype(details)>>) { if (details.has_ompRequires()) - semaFlags = *details.ompRequires(); + reqs = *details.ompRequires(); } }, symbol->details()); @@ -4228,14 +4227,14 @@ void Fortran::lower::genOpenMPRequires(mlir::Operation *mod, // Use pre-populated omp.requires module attribute if it was set, so that // the "-fopenmp-force-usm" compiler option is honored. 
MlirRequires mlirFlags = offloadMod.getRequires(); - if (semaFlags.test(SemaRequires::ReverseOffload)) + if (reqs.test(llvm::omp::Clause::OMPC_dynamic_allocators)) + mlirFlags = mlirFlags | MlirRequires::dynamic_allocators; + if (reqs.test(llvm::omp::Clause::OMPC_reverse_offload)) mlirFlags = mlirFlags | MlirRequires::reverse_offload; - if (semaFlags.test(SemaRequires::UnifiedAddress)) + if (reqs.test(llvm::omp::Clause::OMPC_unified_address)) mlirFlags = mlirFlags | MlirRequires::unified_address; - if (semaFlags.test(SemaRequires::UnifiedSharedMemory)) + if (reqs.test(llvm::omp::Clause::OMPC_unified_shared_memory)) mlirFlags = mlirFlags | MlirRequires::unified_shared_memory; - if (semaFlags.test(SemaRequires::DynamicAllocators)) - mlirFlags = mlirFlags | MlirRequires::dynamic_allocators; offloadMod.setRequires(mlirFlags); } |