diff options
author | Joel E. Denny <jdenny.ornl@gmail.com> | 2025-01-29 12:40:19 -0500 |
---|---|---|
committer | GitHub <noreply@github.com> | 2025-01-29 12:40:19 -0500 |
commit | 18f8106f310ee702046a11f360af47947c030d2e (patch) | |
tree | a04d3ede228437f82f541890ae0ebe59dc18977d /llvm/lib | |
parent | 15412d735a4f3e85b1c68025ca28d5671fde7b47 (diff) | |
download | llvm-18f8106f310ee702046a11f360af47947c030d2e.zip llvm-18f8106f310ee702046a11f360af47947c030d2e.tar.gz llvm-18f8106f310ee702046a11f360af47947c030d2e.tar.bz2 |
[KernelInfo] Implement new LLVM IR pass for GPU code analysis (#102944)
This patch implements an LLVM IR pass, named kernel-info, that reports
various statistics for codes compiled for GPUs. The ultimate goal of
these statistics is to help identify bad code patterns and ways to mitigate
them. The pass operates at the LLVM IR level so that it can, in theory,
support any LLVM-based compiler for programming languages supporting
GPUs. It has been tested so far with LLVM IR generated by Clang for
OpenMP offload codes targeting NVIDIA GPUs and AMD GPUs.
By default, the pass runs at the end of LTO, and options like
``-Rpass=kernel-info`` enable its remarks. Example `opt` and `clang`
command lines appear in `llvm/docs/KernelInfo.rst`. Remarks include
summary statistics (e.g., total size of static allocas) and individual
occurrences (e.g., source location of each alloca). Examples of its
output appear in tests in `llvm/test/Analysis/KernelInfo`.
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Analysis/CMakeLists.txt | 1 | ||||
-rw-r--r-- | llvm/lib/Analysis/KernelInfo.cpp | 326 | ||||
-rw-r--r-- | llvm/lib/Analysis/TargetTransformInfo.cpp | 6 | ||||
-rw-r--r-- | llvm/lib/Passes/PassBuilder.cpp | 1 | ||||
-rw-r--r-- | llvm/lib/Passes/PassRegistry.def | 1 | ||||
-rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 6 | ||||
-rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp | 16 | ||||
-rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h | 3 | ||||
-rw-r--r-- | llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp | 10 | ||||
-rw-r--r-- | llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp | 16 | ||||
-rw-r--r-- | llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h | 4 | ||||
-rw-r--r-- | llvm/lib/Target/TargetMachine.cpp | 5 | ||||
-rw-r--r-- | llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 13 |
13 files changed, 395 insertions, 13 deletions
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt index 0db5b80..a44f6c6 100644 --- a/llvm/lib/Analysis/CMakeLists.txt +++ b/llvm/lib/Analysis/CMakeLists.txt @@ -79,6 +79,7 @@ add_llvm_component_library(LLVMAnalysis InstructionPrecedenceTracking.cpp InstructionSimplify.cpp InteractiveModelRunner.cpp + KernelInfo.cpp LastRunTrackingAnalysis.cpp LazyBranchProbabilityInfo.cpp LazyBlockFrequencyInfo.cpp diff --git a/llvm/lib/Analysis/KernelInfo.cpp b/llvm/lib/Analysis/KernelInfo.cpp new file mode 100644 index 0000000..4a06fd5 --- /dev/null +++ b/llvm/lib/Analysis/KernelInfo.cpp @@ -0,0 +1,326 @@ +//===- KernelInfo.cpp - Kernel Analysis -----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the KernelInfoPrinter class used to emit remarks about +// function properties from a GPU kernel. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Analysis/KernelInfo.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/IR/DebugInfo.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/PassManager.h" +#include "llvm/Passes/PassBuilder.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "kernel-info" + +namespace { + +/// Data structure holding function info for kernels. 
+class KernelInfo { + void updateForBB(const BasicBlock &BB, OptimizationRemarkEmitter &ORE); + +public: + static void emitKernelInfo(Function &F, FunctionAnalysisManager &FAM, + TargetMachine *TM); + + /// Whether the function has external linkage and is not a kernel function. + bool ExternalNotKernel = false; + + /// Launch bounds. + SmallVector<std::pair<StringRef, int64_t>> LaunchBounds; + + /// The number of alloca instructions inside the function, the number of those + /// with allocation sizes that cannot be determined at compile time, and the + /// sum of the sizes that can be. + /// + /// With the current implementation for at least some GPU archs, + /// AllocasDyn > 0 might not be possible, but we report AllocasDyn anyway in + /// case the implementation changes. + int64_t Allocas = 0; + int64_t AllocasDyn = 0; + int64_t AllocasStaticSizeSum = 0; + + /// Number of direct/indirect calls (anything derived from CallBase). + int64_t DirectCalls = 0; + int64_t IndirectCalls = 0; + + /// Number of direct calls made from this function to other functions + /// defined in this module. + int64_t DirectCallsToDefinedFunctions = 0; + + /// Number of direct calls to inline assembly. + int64_t InlineAssemblyCalls = 0; + + /// Number of calls of type InvokeInst. + int64_t Invokes = 0; + + /// Target-specific flat address space. + unsigned FlatAddrspace; + + /// Number of flat address space memory accesses (via load, store, etc.). 
+ int64_t FlatAddrspaceAccesses = 0; +}; + +} // end anonymous namespace + +static void identifyCallee(OptimizationRemark &R, const Module *M, + const Value *V, StringRef Kind = "") { + SmallString<100> Name; // might be function name or asm expression + if (const Function *F = dyn_cast<Function>(V)) { + if (auto *SubProgram = F->getSubprogram()) { + if (SubProgram->isArtificial()) + R << "artificial "; + Name = SubProgram->getName(); + } + } + if (Name.empty()) { + raw_svector_ostream OS(Name); + V->printAsOperand(OS, /*PrintType=*/false, M); + } + if (!Kind.empty()) + R << Kind << " "; + R << "'" << Name << "'"; +} + +static void identifyFunction(OptimizationRemark &R, const Function &F) { + identifyCallee(R, F.getParent(), &F, "function"); +} + +static void remarkAlloca(OptimizationRemarkEmitter &ORE, const Function &Caller, + const AllocaInst &Alloca, + TypeSize::ScalarTy StaticSize) { + ORE.emit([&] { + StringRef DbgName; + DebugLoc Loc; + bool Artificial = false; + auto DVRs = findDVRDeclares(&const_cast<AllocaInst &>(Alloca)); + if (!DVRs.empty()) { + const DbgVariableRecord &DVR = **DVRs.begin(); + DbgName = DVR.getVariable()->getName(); + Loc = DVR.getDebugLoc(); + Artificial = DVR.Variable->isArtificial(); + } + OptimizationRemark R(DEBUG_TYPE, "Alloca", DiagnosticLocation(Loc), + Alloca.getParent()); + R << "in "; + identifyFunction(R, Caller); + R << ", "; + if (Artificial) + R << "artificial "; + SmallString<20> ValName; + raw_svector_ostream OS(ValName); + Alloca.printAsOperand(OS, /*PrintType=*/false, Caller.getParent()); + R << "alloca ('" << ValName << "') "; + if (!DbgName.empty()) + R << "for '" << DbgName << "' "; + else + R << "without debug info "; + R << "with "; + if (StaticSize) + R << "static size of " << itostr(StaticSize) << " bytes"; + else + R << "dynamic size"; + return R; + }); +} + +static void remarkCall(OptimizationRemarkEmitter &ORE, const Function &Caller, + const CallBase &Call, StringRef CallKind, + StringRef RemarkKind) { + 
ORE.emit([&] { + OptimizationRemark R(DEBUG_TYPE, RemarkKind, &Call); + R << "in "; + identifyFunction(R, Caller); + R << ", " << CallKind << ", callee is "; + identifyCallee(R, Caller.getParent(), Call.getCalledOperand()); + return R; + }); +} + +static void remarkFlatAddrspaceAccess(OptimizationRemarkEmitter &ORE, + const Function &Caller, + const Instruction &Inst) { + ORE.emit([&] { + OptimizationRemark R(DEBUG_TYPE, "FlatAddrspaceAccess", &Inst); + R << "in "; + identifyFunction(R, Caller); + if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Inst)) { + R << ", '" << II->getCalledFunction()->getName() << "' call"; + } else { + R << ", '" << Inst.getOpcodeName() << "' instruction"; + } + if (!Inst.getType()->isVoidTy()) { + SmallString<20> Name; + raw_svector_ostream OS(Name); + Inst.printAsOperand(OS, /*PrintType=*/false, Caller.getParent()); + R << " ('" << Name << "')"; + } + R << " accesses memory in flat address space"; + return R; + }); +} + +void KernelInfo::updateForBB(const BasicBlock &BB, + OptimizationRemarkEmitter &ORE) { + const Function &F = *BB.getParent(); + const Module &M = *F.getParent(); + const DataLayout &DL = M.getDataLayout(); + for (const Instruction &I : BB.instructionsWithoutDebug()) { + if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(&I)) { + ++Allocas; + TypeSize::ScalarTy StaticSize = 0; + if (std::optional<TypeSize> Size = Alloca->getAllocationSize(DL)) { + StaticSize = Size->getFixedValue(); + assert(StaticSize <= std::numeric_limits<int64_t>::max()); + AllocasStaticSizeSum += StaticSize; + } else { + ++AllocasDyn; + } + remarkAlloca(ORE, F, *Alloca, StaticSize); + } else if (const CallBase *Call = dyn_cast<CallBase>(&I)) { + SmallString<40> CallKind; + SmallString<40> RemarkKind; + if (Call->isIndirectCall()) { + ++IndirectCalls; + CallKind += "indirect"; + RemarkKind += "Indirect"; + } else { + ++DirectCalls; + CallKind += "direct"; + RemarkKind += "Direct"; + } + if (isa<InvokeInst>(Call)) { + ++Invokes; + CallKind += 
" invoke"; + RemarkKind += "Invoke"; + } else { + CallKind += " call"; + RemarkKind += "Call"; + } + if (!Call->isIndirectCall()) { + if (const Function *Callee = Call->getCalledFunction()) { + if (!Callee->isIntrinsic() && !Callee->isDeclaration()) { + ++DirectCallsToDefinedFunctions; + CallKind += " to defined function"; + RemarkKind += "ToDefinedFunction"; + } + } else if (Call->isInlineAsm()) { + ++InlineAssemblyCalls; + CallKind += " to inline assembly"; + RemarkKind += "ToInlineAssembly"; + } + } + remarkCall(ORE, F, *Call, CallKind, RemarkKind); + if (const AnyMemIntrinsic *MI = dyn_cast<AnyMemIntrinsic>(Call)) { + if (MI->getDestAddressSpace() == FlatAddrspace) { + ++FlatAddrspaceAccesses; + remarkFlatAddrspaceAccess(ORE, F, I); + } else if (const AnyMemTransferInst *MT = + dyn_cast<AnyMemTransferInst>(MI)) { + if (MT->getSourceAddressSpace() == FlatAddrspace) { + ++FlatAddrspaceAccesses; + remarkFlatAddrspaceAccess(ORE, F, I); + } + } + } + } else if (const LoadInst *Load = dyn_cast<LoadInst>(&I)) { + if (Load->getPointerAddressSpace() == FlatAddrspace) { + ++FlatAddrspaceAccesses; + remarkFlatAddrspaceAccess(ORE, F, I); + } + } else if (const StoreInst *Store = dyn_cast<StoreInst>(&I)) { + if (Store->getPointerAddressSpace() == FlatAddrspace) { + ++FlatAddrspaceAccesses; + remarkFlatAddrspaceAccess(ORE, F, I); + } + } else if (const AtomicRMWInst *At = dyn_cast<AtomicRMWInst>(&I)) { + if (At->getPointerAddressSpace() == FlatAddrspace) { + ++FlatAddrspaceAccesses; + remarkFlatAddrspaceAccess(ORE, F, I); + } + } else if (const AtomicCmpXchgInst *At = dyn_cast<AtomicCmpXchgInst>(&I)) { + if (At->getPointerAddressSpace() == FlatAddrspace) { + ++FlatAddrspaceAccesses; + remarkFlatAddrspaceAccess(ORE, F, I); + } + } + } +} + +static void remarkProperty(OptimizationRemarkEmitter &ORE, const Function &F, + StringRef Name, int64_t Value) { + ORE.emit([&] { + OptimizationRemark R(DEBUG_TYPE, Name, &F); + R << "in "; + identifyFunction(R, F); + R << ", " << Name << 
" = " << itostr(Value); + return R; + }); +} + +static std::optional<int64_t> parseFnAttrAsInteger(Function &F, + StringRef Name) { + if (!F.hasFnAttribute(Name)) + return std::nullopt; + return F.getFnAttributeAsParsedInteger(Name); +} + +void KernelInfo::emitKernelInfo(Function &F, FunctionAnalysisManager &FAM, + TargetMachine *TM) { + KernelInfo KI; + TargetTransformInfo &TheTTI = FAM.getResult<TargetIRAnalysis>(F); + KI.FlatAddrspace = TheTTI.getFlatAddressSpace(); + + // Record function properties. + KI.ExternalNotKernel = F.hasExternalLinkage() && !F.hasKernelCallingConv(); + for (StringRef Name : {"omp_target_num_teams", "omp_target_thread_limit"}) { + if (auto Val = parseFnAttrAsInteger(F, Name)) + KI.LaunchBounds.push_back({Name, *Val}); + } + TheTTI.collectKernelLaunchBounds(F, KI.LaunchBounds); + + auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F); + for (const auto &BB : F) + KI.updateForBB(BB, ORE); + +#define REMARK_PROPERTY(PROP_NAME) \ + remarkProperty(ORE, F, #PROP_NAME, KI.PROP_NAME) + REMARK_PROPERTY(ExternalNotKernel); + for (auto LB : KI.LaunchBounds) + remarkProperty(ORE, F, LB.first, LB.second); + REMARK_PROPERTY(Allocas); + REMARK_PROPERTY(AllocasStaticSizeSum); + REMARK_PROPERTY(AllocasDyn); + REMARK_PROPERTY(DirectCalls); + REMARK_PROPERTY(IndirectCalls); + REMARK_PROPERTY(DirectCallsToDefinedFunctions); + REMARK_PROPERTY(InlineAssemblyCalls); + REMARK_PROPERTY(Invokes); + REMARK_PROPERTY(FlatAddrspaceAccesses); +#undef REMARK_PROPERTY + + return; +} + +PreservedAnalyses KernelInfoPrinter::run(Function &F, + FunctionAnalysisManager &AM) { + // Skip it if remarks are not enabled as it will do nothing useful. 
+ if (F.getContext().getDiagHandlerPtr()->isPassedOptRemarkEnabled(DEBUG_TYPE)) + KernelInfo::emitKernelInfo(F, AM, TM); + return PreservedAnalyses::all(); +} diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp index 8b9722d..424bb7b 100644 --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -1441,6 +1441,12 @@ TargetTransformInfo::getNumBytesToPadGlobalArray(unsigned Size, return TTIImpl->getNumBytesToPadGlobalArray(Size, ArrayType); } +void TargetTransformInfo::collectKernelLaunchBounds( + const Function &F, + SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const { + return TTIImpl->collectKernelLaunchBounds(F, LB); +} + TargetTransformInfo::Concept::~Concept() = default; TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {} diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index 0918b1e..9b93ebc 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -46,6 +46,7 @@ #include "llvm/Analysis/InlineAdvisor.h" #include "llvm/Analysis/InlineSizeEstimatorAnalysis.h" #include "llvm/Analysis/InstCount.h" +#include "llvm/Analysis/KernelInfo.h" #include "llvm/Analysis/LastRunTrackingAnalysis.h" #include "llvm/Analysis/LazyCallGraph.h" #include "llvm/Analysis/LazyValueInfo.h" diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def index 0eb050c..9300a3d 100644 --- a/llvm/lib/Passes/PassRegistry.def +++ b/llvm/lib/Passes/PassRegistry.def @@ -391,6 +391,7 @@ FUNCTION_PASS("irce", IRCEPass()) FUNCTION_PASS("jump-threading", JumpThreadingPass()) FUNCTION_PASS("jump-table-to-switch", JumpTableToSwitchPass()); FUNCTION_PASS("kcfi", KCFIPass()) +FUNCTION_PASS("kernel-info", KernelInfoPrinter(TM)) FUNCTION_PASS("lcssa", LCSSAPass()) FUNCTION_PASS("libcalls-shrinkwrap", LibCallsShrinkWrapPass()) FUNCTION_PASS("lint", LintPass()) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index 1f29589..5b2081c 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -53,6 +53,7 @@ #include "Utils/AMDGPUBaseInfo.h" #include "llvm/Analysis/CGSCCPassManager.h" #include "llvm/Analysis/CallGraphSCCPass.h" +#include "llvm/Analysis/KernelInfo.h" #include "llvm/Analysis/UniformityAnalysis.h" #include "llvm/CodeGen/AtomicExpand.h" #include "llvm/CodeGen/DeadMachineInstructionElim.h" @@ -879,6 +880,11 @@ void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { PM.addPass(AMDGPUAttributorPass(*this, Opt)); } } + if (!NoKernelInfoEndLTO) { + FunctionPassManager FPM; + FPM.addPass(KernelInfoPrinter(this)); + PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + } }); PB.registerRegClassFilterParsingCallback( diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp index 5160851f..5bfd891 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp @@ -1430,3 +1430,19 @@ unsigned GCNTTIImpl::getPrefetchDistance() const { bool GCNTTIImpl::shouldPrefetchAddressSpace(unsigned AS) const { return AMDGPU::isFlatGlobalAddrSpace(AS); } + +void GCNTTIImpl::collectKernelLaunchBounds( + const Function &F, + SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const { + SmallVector<unsigned> MaxNumWorkgroups = ST->getMaxNumWorkGroups(F); + LB.push_back({"amdgpu-max-num-workgroups[0]", MaxNumWorkgroups[0]}); + LB.push_back({"amdgpu-max-num-workgroups[1]", MaxNumWorkgroups[1]}); + LB.push_back({"amdgpu-max-num-workgroups[2]", MaxNumWorkgroups[2]}); + std::pair<unsigned, unsigned> FlatWorkGroupSize = + ST->getFlatWorkGroupSizes(F); + LB.push_back({"amdgpu-flat-work-group-size[0]", FlatWorkGroupSize.first}); + LB.push_back({"amdgpu-flat-work-group-size[1]", FlatWorkGroupSize.second}); + std::pair<unsigned, unsigned> 
WavesPerEU = ST->getWavesPerEU(F); + LB.push_back({"amdgpu-waves-per-eu[0]", WavesPerEU.first}); + LB.push_back({"amdgpu-waves-per-eu[1]", WavesPerEU.second}); +} diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h index 585f38f..a0d6200 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h @@ -273,6 +273,9 @@ public: /// \return if target want to issue a prefetch in address space \p AS. bool shouldPrefetchAddressSpace(unsigned AS) const override; + void collectKernelLaunchBounds( + const Function &F, + SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const; }; } // end namespace llvm diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp index 6d4b82a..e88027f 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp @@ -21,6 +21,7 @@ #include "NVPTXTargetObjectFile.h" #include "NVPTXTargetTransformInfo.h" #include "TargetInfo/NVPTXTargetInfo.h" +#include "llvm/Analysis/KernelInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/TargetPassConfig.h" @@ -266,6 +267,15 @@ void NVPTXTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { FPM.addPass(NVPTXCopyByValArgsPass()); PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); }); + + if (!NoKernelInfoEndLTO) { + PB.registerFullLinkTimeOptimizationLastEPCallback( + [this](ModulePassManager &PM, OptimizationLevel Level) { + FunctionPassManager FPM; + FPM.addPass(KernelInfoPrinter(this)); + PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + }); + } } TargetTransformInfo diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp index 4ec2ec1..85e99d7 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp +++ 
b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp @@ -562,4 +562,18 @@ Value *NVPTXTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, } } return nullptr; -}
\ No newline at end of file +} + +void NVPTXTTIImpl::collectKernelLaunchBounds( + const Function &F, + SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const { + std::optional<unsigned> Val; + if ((Val = getMaxClusterRank(F))) + LB.push_back({"maxclusterrank", *Val}); + if ((Val = getMaxNTIDx(F))) + LB.push_back({"maxntidx", *Val}); + if ((Val = getMaxNTIDy(F))) + LB.push_back({"maxntidy", *Val}); + if ((Val = getMaxNTIDz(F))) + LB.push_back({"maxntidz", *Val}); +} diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h index 0f4fb28..b0a846a 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h +++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h @@ -129,6 +129,10 @@ public: Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const; + + void collectKernelLaunchBounds( + const Function &F, + SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const; }; } // end namespace llvm diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp index d5365f3..027ae62 100644 --- a/llvm/lib/Target/TargetMachine.cpp +++ b/llvm/lib/Target/TargetMachine.cpp @@ -26,6 +26,11 @@ #include "llvm/Target/TargetLoweringObjectFile.h" using namespace llvm; +cl::opt<bool> NoKernelInfoEndLTO( + "no-kernel-info-end-lto", + cl::desc("remove the kernel-info pass at the end of the full LTO pipeline"), + cl::init(false), cl::Hidden); + //--------------------------------------------------------------------------- // TargetMachine Class // diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp index 1000813..6822279 100644 --- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp +++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp @@ -5905,17 +5905,6 @@ bool llvm::omp::isOpenMPKernel(Function &Fn) { return Fn.hasFnAttribute("kernel"); } -static bool isKernelCC(Function &F) { - switch (F.getCallingConv()) { - default: - return false; - case 
CallingConv::PTX_Kernel: - case CallingConv::AMDGPU_KERNEL: - case CallingConv::SPIR_KERNEL: - return true; - } -} - KernelSet llvm::omp::getDeviceKernels(Module &M) { // TODO: Create a more cross-platform way of determining device kernels. KernelSet Kernels; @@ -5948,7 +5937,7 @@ KernelSet llvm::omp::getDeviceKernels(Module &M) { } for (Function &F : M) - if (isKernelCC(F)) + if (F.hasKernelCallingConv()) ProcessKernel(F); return Kernels; |