author     Saiyedul Islam <Saiyedul.Islam@amd.com>   2020-04-10 07:55:11 +0000
committer  Saiyedul Islam <Saiyedul.Islam@amd.com>   2020-05-12 00:33:00 +0000
commit     117e5609e98b43f925c678b72f816ad3a1c3eee7 (patch)
tree       95ac87a08d7b4a43deb3a855f698df8440be4de7 /llvm/lib
parent     a8874c76e8ae9ca67f6806f4c27ac8ba94232a21 (diff)
[AMDGPU] Reserving VGPR for future SGPR Spill
Summary: One VGPR register is allocated to handle a future SGPR spill when the "--amdgpu-reserve-vgpr-for-sgpr-spill" option is used
Reviewers: arsenm, rampitec, msearles, cdevadas
Reviewed By: arsenm
Subscribers: madhur13490, qcolombet, kerbowa, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #amdgpu, #llvm
Differential Revision: https://reviews.llvm.org/D70379
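The reservation is gated by a boolean cl::opt that defaults to true (cl::init(true) in the diff below), and per the finalizeLowering() change it only triggers for non-entry functions whose frames have stack objects. Since the option is registered through cl::opt, it can be toggled from any tool that links the AMDGPU backend; a hypothetical llc invocation that disables it might look like the following (the triple, CPU, and input file are placeholders, not part of this change):

    llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-reserve-vgpr-for-sgpr-spill=0 input.ll -o output.s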
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp         11
-rw-r--r--  llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp      51
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp  37
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h    12
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp         30
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.h            3
6 files changed, 131 insertions, 13 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a2abc45..9f15512 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -95,6 +95,10 @@ static cl::opt<bool> DisableLoopAlignment(
   cl::desc("Do not align and prefetch loops"),
   cl::init(false));
 
+static cl::opt<bool> VGPRReserveforSGPRSpill(
+    "amdgpu-reserve-vgpr-for-sgpr-spill",
+    cl::desc("Allocates one VGPR for future SGPR Spill"), cl::init(true));
+
 static bool hasFP32Denormals(const MachineFunction &MF) {
   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
   return Info->getMode().allFP32Denormals();
@@ -10858,6 +10862,13 @@ void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
   }
 
   TargetLoweringBase::finalizeLowering(MF);
+
+  // Allocate a VGPR for future SGPR Spill if
+  // "amdgpu-reserve-vgpr-for-sgpr-spill" option is used
+  // FIXME: We won't need this hack if we split SGPR allocation from VGPR
+  if (VGPRReserveforSGPRSpill && !Info->VGPRReservedForSGPRSpill &&
+      !Info->isEntryFunction() && MF.getFrameInfo().hasStackObjects())
+    Info->reserveVGPRforSGPRSpills(MF);
 }
 
 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index 2cf3054..7631a0e 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -231,6 +231,52 @@ bool SILowerSGPRSpills::spillCalleeSavedRegs(MachineFunction &MF) {
   return false;
 }
 
+static ArrayRef<MCPhysReg> getAllVGPR32(const GCNSubtarget &ST,
+                                        const MachineFunction &MF) {
+  return makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), ST.getMaxNumVGPRs(MF));
+}
+
+// Find lowest available VGPR and use it as VGPR reserved for SGPR spills.
+static bool lowerShiftReservedVGPR(MachineFunction &MF,
+                                   const GCNSubtarget &ST) {
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
+  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
+  Register LowestAvailableVGPR, ReservedVGPR;
+  ArrayRef<MCPhysReg> AllVGPR32s = getAllVGPR32(ST, MF);
+  for (MCPhysReg Reg : AllVGPR32s) {
+    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg)) {
+      LowestAvailableVGPR = Reg;
+      break;
+    }
+  }
+
+  if (!LowestAvailableVGPR)
+    return false;
+
+  ReservedVGPR = FuncInfo->VGPRReservedForSGPRSpill;
+  const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
+  int i = 0;
+
+  for (MachineBasicBlock &MBB : MF) {
+    for (auto Reg : FuncInfo->getSGPRSpillVGPRs()) {
+      if (Reg.VGPR == ReservedVGPR) {
+        MBB.removeLiveIn(ReservedVGPR);
+        MBB.addLiveIn(LowestAvailableVGPR);
+        Optional<int> FI;
+        if (FuncInfo->isCalleeSavedReg(CSRegs, LowestAvailableVGPR))
+          FI = FrameInfo.CreateSpillStackObject(4, Align(4));
+
+        FuncInfo->setSGPRSpillVGPRs(LowestAvailableVGPR, FI, i);
+      }
+      ++i;
+    }
+    MBB.sortUniqueLiveIns();
+  }
+
+  return true;
+}
+
 bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
   TII = ST.getInstrInfo();
@@ -270,6 +316,9 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
     //
     // This operates under the assumption that only other SGPR spills are users
     // of the frame index.
+
+    lowerShiftReservedVGPR(MF, ST);
+
     for (MachineBasicBlock &MBB : MF) {
       MachineBasicBlock::iterator Next;
       for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
@@ -318,6 +367,8 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
     }
 
     MadeChange = true;
+  } else if (FuncInfo->VGPRReservedForSGPRSpill) {
+    FuncInfo->removeVGPRForSGPRSpill(FuncInfo->VGPRReservedForSGPRSpill, MF);
   }
 
   SaveBlocks.clear();
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 104138e..8f25ebd 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -261,7 +261,8 @@ Register SIMachineFunctionInfo::addImplicitBufferPtr(const SIRegisterInfo &TRI)
   return ArgInfo.ImplicitBufferPtr.getRegister();
 }
 
-static bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg) {
+bool SIMachineFunctionInfo::isCalleeSavedReg(const MCPhysReg *CSRegs,
+                                             MCPhysReg Reg) {
   for (unsigned I = 0; CSRegs[I]; ++I) {
     if (CSRegs[I] == Reg)
       return true;
@@ -295,6 +296,7 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
   MachineFrameInfo &FrameInfo = MF.getFrameInfo();
   MachineRegisterInfo &MRI = MF.getRegInfo();
   unsigned WaveSize = ST.getWavefrontSize();
+  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
 
   unsigned Size = FrameInfo.getObjectSize(FI);
   assert(Size >= 4 && Size <= 64 && "invalid sgpr spill size");
@@ -310,7 +312,7 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
     Register LaneVGPR;
     unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);
 
-    if (VGPRIndex == 0) {
+    if (VGPRIndex == 0 && !FuncInfo->VGPRReservedForSGPRSpill) {
       LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
       if (LaneVGPR == AMDGPU::NoRegister) {
         // We have no VGPRs left for spilling SGPRs. Reset because we will not
@@ -342,6 +344,19 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
   return true;
 }
 
+/// Reserve a VGPR for spilling of SGPRs
+bool SIMachineFunctionInfo::reserveVGPRforSGPRSpills(MachineFunction &MF) {
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
+  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
+
+  Register LaneVGPR = TRI->findUnusedRegister(
+      MF.getRegInfo(), &AMDGPU::VGPR_32RegClass, MF, true);
+  SpillVGPRs.push_back(SGPRSpillVGPRCSR(LaneVGPR, None));
+  FuncInfo->VGPRReservedForSGPRSpill = LaneVGPR;
+  return true;
+}
+
 /// Reserve AGPRs or VGPRs to support spilling for FrameIndex \p FI.
 /// Either AGPR is spilled to VGPR to vice versa.
 /// Returns true if a \p FI can be eliminated completely.
@@ -554,3 +569,21 @@ bool SIMachineFunctionInfo::initializeBaseYamlFields(
   WaveLimiter = YamlMFI.WaveLimiter;
   return false;
 }
+
+// Remove VGPR which was reserved for SGPR spills if there are no spilled SGPRs
+bool SIMachineFunctionInfo::removeVGPRForSGPRSpill(Register ReservedVGPR,
+                                                   MachineFunction &MF) {
+  for (auto *i = SpillVGPRs.begin(); i < SpillVGPRs.end(); i++) {
+    if (i->VGPR == ReservedVGPR) {
+      SpillVGPRs.erase(i);
+
+      for (MachineBasicBlock &MBB : MF) {
+        MBB.removeLiveIn(ReservedVGPR);
+        MBB.sortUniqueLiveIns();
+      }
+      this->VGPRReservedForSGPRSpill = AMDGPU::NoRegister;
+      return true;
+    }
+  }
+  return false;
+}
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index ae04896..0d85136 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -485,6 +485,9 @@ public: // FIXME
   Register SGPRForFPSaveRestoreCopy;
   Optional<int> FramePointerSaveIndex;
+  Register VGPRReservedForSGPRSpill;
+  bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg);
+
 public:
   SIMachineFunctionInfo(const MachineFunction &MF);
 
@@ -500,6 +503,14 @@ public:
     return SpillVGPRs;
   }
 
+  void setSGPRSpillVGPRs(Register NewVGPR, Optional<int> newFI, int Index) {
+    SpillVGPRs[Index].VGPR = NewVGPR;
+    SpillVGPRs[Index].FI = newFI;
+    VGPRReservedForSGPRSpill = NewVGPR;
+  }
+
+  bool removeVGPRForSGPRSpill(Register ReservedVGPR, MachineFunction &MF);
+
   ArrayRef<MCPhysReg> getAGPRSpillVGPRs() const {
     return SpillAGPR;
   }
@@ -517,6 +528,7 @@ public:
   bool haveFreeLanesForSGPRSpill(const MachineFunction &MF,
                                  unsigned NumLane) const;
   bool allocateSGPRSpillToVGPR(MachineFunction &MF, int FI);
+  bool reserveVGPRforSGPRSpills(MachineFunction &MF);
   bool allocateVGPRSpillToAGPR(MachineFunction &MF, int FI, bool isAGPRtoVGPR);
   void removeDeadFrameIndices(MachineFrameInfo &MFI);
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 07d5b62..08d6c97 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -320,6 +320,10 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
     reserveRegisterTuples(Reserved, Reg);
 
+  if (MFI->VGPRReservedForSGPRSpill)
+    for (auto SSpill : MFI->getSGPRSpillVGPRs())
+      reserveRegisterTuples(Reserved, SSpill.VGPR);
+
   return Reserved;
 }
 
@@ -1529,17 +1533,23 @@ bool SIRegisterInfo::shouldRewriteCopySrc(
   return getCommonSubClass(DefRC, SrcRC) != nullptr;
 }
 
-/// Returns a register that is not used at any point in the function.
+/// Returns a lowest register that is not used at any point in the function.
 /// If all registers are used, then this function will return
-// AMDGPU::NoRegister.
-MCRegister
-SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
-                                   const TargetRegisterClass *RC,
-                                   const MachineFunction &MF) const {
-
-  for (MCRegister Reg : *RC)
-    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
-      return Reg;
+/// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then return
+/// highest unused register.
+MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
+                                              const TargetRegisterClass *RC,
+                                              const MachineFunction &MF,
+                                              bool ReserveHighestVGPR) const {
+  if (ReserveHighestVGPR) {
+    for (MCRegister Reg : reverse(*RC))
+      if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
+        return Reg;
+  } else {
+    for (MCRegister Reg : *RC)
+      if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
+        return Reg;
+  }
 
   return MCRegister();
 }
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
index 2f328e7..bafd147 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -196,7 +196,8 @@ public:
   MCRegister findUnusedRegister(const MachineRegisterInfo &MRI,
                                 const TargetRegisterClass *RC,
-                                const MachineFunction &MF) const;
+                                const MachineFunction &MF,
+                                bool ReserveHighestVGPR = false) const;
 
   const TargetRegisterClass *getRegClassForReg(const MachineRegisterInfo &MRI,
                                                Register Reg) const;