Diffstat (limited to 'llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp')
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp  63
1 file changed, 3 insertions, 60 deletions
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index fab78a9..bdc0810 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -29,6 +29,7 @@
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/ErrorHandling.h"
@@ -1633,64 +1634,6 @@ void GCNSchedStage::revertScheduling() {
DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd);
}
-bool PreRARematStage::allUsesAvailableAt(const MachineInstr *InstToRemat,
- SlotIndex OriginalIdx,
- SlotIndex RematIdx) const {
-
- LiveIntervals *LIS = DAG.LIS;
- MachineRegisterInfo &MRI = DAG.MRI;
- OriginalIdx = OriginalIdx.getRegSlot(true);
- RematIdx = std::max(RematIdx, RematIdx.getRegSlot(true));
- for (const MachineOperand &MO : InstToRemat->operands()) {
- if (!MO.isReg() || !MO.getReg() || !MO.readsReg())
- continue;
-
- if (!MO.getReg().isVirtual()) {
- // Do not attempt to reason about PhysRegs
- // TODO: better analysis of PhysReg liveness
- if (!DAG.MRI.isConstantPhysReg(MO.getReg()) &&
- !DAG.TII->isIgnorableUse(MO))
- return false;
-
- // Constant PhysRegs and IgnorableUses are okay
- continue;
- }
-
- LiveInterval &LI = LIS->getInterval(MO.getReg());
- const VNInfo *OVNI = LI.getVNInfoAt(OriginalIdx);
- assert(OVNI);
-
- // Don't allow rematerialization immediately after the original def.
- // It would be incorrect if InstToRemat redefines the register.
- // See PR14098.
- if (SlotIndex::isSameInstr(OriginalIdx, RematIdx))
- return false;
-
- if (OVNI != LI.getVNInfoAt(RematIdx))
- return false;
-
- // Check that subrange is live at RematIdx.
- if (LI.hasSubRanges()) {
- const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
- unsigned SubReg = MO.getSubReg();
- LaneBitmask LM = SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
- : MRI.getMaxLaneMaskForVReg(MO.getReg());
- for (LiveInterval::SubRange &SR : LI.subranges()) {
- if ((SR.LaneMask & LM).none())
- continue;
- if (!SR.liveAt(RematIdx))
- return false;
-
- // Early exit if all used lanes are checked. No need to continue.
- LM &= ~SR.LaneMask;
- if (LM.none())
- break;
- }
- }
- }
- return true;
-}
-
bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() {
const Function &F = MF.getFunction();
@@ -1812,9 +1755,9 @@ bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() {
// Do not rematerialize an instruction if it uses registers that aren't
// available at its use. This ensures that we are not extending any live
// range while rematerializing.
- SlotIndex DefIdx = DAG.LIS->getInstructionIndex(DefMI);
SlotIndex UseIdx = DAG.LIS->getInstructionIndex(*UseMI).getRegSlot(true);
- if (!allUsesAvailableAt(&DefMI, DefIdx, UseIdx))
+ if (!VirtRegAuxInfo::allUsesAvailableAt(&DefMI, UseIdx, *DAG.LIS, DAG.MRI,
+ *DAG.TII))
continue;
REMAT_DEBUG(dbgs() << "Region " << I << ": remat instruction " << DefMI);
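
For context, the only piece kept at the call site is the availability check itself; below is a minimal sketch of the per-candidate gate after this change, assuming the DAG, DefMI and UseMI names from the surrounding rematerialization loop (the shared helper is declared in llvm/CodeGen/CalcSpillWeights.h, newly included at the top of the file):

    // Sketch only: mirrors the new call shape in the hunk above, inside the
    // loop over rematerialization candidates.
    // Every register DefMI reads must carry the same value at the use point;
    // otherwise rematerializing there would extend a live range, so we skip.
    SlotIndex UseIdx = DAG.LIS->getInstructionIndex(*UseMI).getRegSlot(true);
    if (!VirtRegAuxInfo::allUsesAvailableAt(&DefMI, UseIdx, *DAG.LIS, DAG.MRI,
                                            *DAG.TII))
      continue; // not all uses available; try the next candidate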