Diffstat (limited to 'llvm/lib/CodeGen/RegAllocFast.cpp')
-rw-r--r-- llvm/lib/CodeGen/RegAllocFast.cpp | 1272
1 file changed, 547 insertions(+), 725 deletions(-)
diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp
index cfee1a77..68308c6 100644
--- a/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -56,10 +56,6 @@ STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads , "Number of loads added");
STATISTIC(NumCoalesced, "Number of copies coalesced");
-// FIXME: Remove this switch when all testcases are fixed!
-static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs",
- cl::Hidden);
-
static RegisterRegAlloc
fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator);
@@ -89,9 +85,8 @@ namespace {
MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
Register VirtReg; ///< Virtual register number.
MCPhysReg PhysReg = 0; ///< Currently held here.
- bool LiveOut = false; ///< Register is possibly live out.
- bool Reloaded = false; ///< Register was reloaded.
- bool Error = false; ///< Could not allocate.
+ unsigned short LastOpNum = 0; ///< OpNum on LastUse.
+ bool Dirty = false; ///< Register needs spill.
explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
@@ -106,9 +101,6 @@ namespace {
LiveRegMap LiveVirtRegs;
DenseMap<unsigned, SmallVector<MachineInstr *, 2>> LiveDbgValueMap;
- /// List of DBG_VALUE that we encountered without the vreg being assigned
- /// because they were placed after the last use of the vreg.
- DenseMap<unsigned, SmallVector<MachineInstr *, 1>> DanglingDbgValues;
/// Has a bit set for every virtual register for which it was determined
/// that it is alive across blocks.
@@ -120,13 +112,9 @@ namespace {
/// immediately without checking aliases.
regFree,
- /// A pre-assigned register has been assigned before register allocation
- /// (e.g., setting up a call parameter).
- regPreAssigned,
-
- /// Used temporarily in reloadAtBegin() to mark register units that are
- /// live-in to the basic block.
- regLiveIn,
+ /// A reserved register has been assigned explicitly (e.g., setting up a
+ /// call parameter), and it remains reserved until it is used.
+ regReserved
/// A register state may also be a virtual register number, indicating
/// that the physical register is currently allocated to a virtual
@@ -136,17 +124,15 @@ namespace {
/// Maps each physical register to a RegUnitState enum or virtual register.
std::vector<unsigned> RegUnitStates;
+ SmallVector<Register, 16> VirtDead;
SmallVector<MachineInstr *, 32> Coalesced;
using RegUnitSet = SparseSet<uint16_t, identity<uint16_t>>;
/// Set of register units that are used in the current instruction, and so
/// cannot be allocated.
RegUnitSet UsedInInstr;
- RegUnitSet PhysRegUses;
- SmallVector<uint16_t, 8> DefOperandIndexes;
void setPhysRegState(MCPhysReg PhysReg, unsigned NewState);
- bool isPhysRegFree(MCPhysReg PhysReg) const;
/// Mark a physreg as used in this instruction.
void markRegUsedInInstr(MCPhysReg PhysReg) {
@@ -155,29 +141,13 @@ namespace {
}
/// Check if a physreg or any of its aliases are used in this instruction.
- bool isRegUsedInInstr(MCPhysReg PhysReg, bool LookAtPhysRegUses) const {
- for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ bool isRegUsedInInstr(MCPhysReg PhysReg) const {
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
if (UsedInInstr.count(*Units))
return true;
- if (LookAtPhysRegUses && PhysRegUses.count(*Units))
- return true;
- }
return false;
}
- /// Mark physical register as being used in a register use operand.
- /// This is only used by the special livethrough handling code.
- void markPhysRegUsedInInstr(MCPhysReg PhysReg) {
- for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
- PhysRegUses.insert(*Units);
- }
-
- /// Remove mark of physical register being used in the instruction.
- void unmarkRegUsedInInstr(MCPhysReg PhysReg) {
- for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
- UsedInInstr.erase(*Units);
- }
-
enum : unsigned {
spillClean = 50,
spillDirty = 100,
@@ -207,21 +177,27 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override;
void allocateBasicBlock(MachineBasicBlock &MBB);
-
- void addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts,
- Register Reg) const;
-
void allocateInstruction(MachineInstr &MI);
void handleDebugValue(MachineInstr &MI);
+ void handleThroughOperands(MachineInstr &MI,
+ SmallVectorImpl<Register> &VirtDead);
+ bool isLastUseOfLocalReg(const MachineOperand &MO) const;
+
+ void addKillFlag(const LiveReg &LRI);
#ifndef NDEBUG
bool verifyRegStateMapping(const LiveReg &LR) const;
#endif
- bool usePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
- bool definePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
- bool displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
- void freePhysReg(MCPhysReg PhysReg);
+ void killVirtReg(LiveReg &LR);
+ void killVirtReg(Register VirtReg);
+ void spillVirtReg(MachineBasicBlock::iterator MI, LiveReg &LR);
+ void spillVirtReg(MachineBasicBlock::iterator MI, Register VirtReg);
+
+ void usePhysReg(MachineOperand &MO);
+ void definePhysReg(MachineBasicBlock::iterator MI, MCPhysReg PhysReg,
+ unsigned NewState);
unsigned calcSpillCost(MCPhysReg PhysReg) const;
+ void assignVirtToPhysReg(LiveReg &, MCPhysReg PhysReg);
LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
@@ -231,24 +207,14 @@ namespace {
return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
}
- void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCPhysReg PhysReg);
- void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint,
- bool LookAtPhysRegUses = false);
+ void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint);
void allocVirtRegUndef(MachineOperand &MO);
- void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg,
- MCPhysReg Reg);
- void defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
- Register VirtReg);
- void defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
- bool LookAtPhysRegUses = false);
- void useVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg);
-
- MachineBasicBlock::iterator
- getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
- SmallSet<Register, 2> &PrologLiveIns) const;
-
- void reloadAtBegin(MachineBasicBlock &MBB);
- void setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);
+ MCPhysReg defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
+ Register Hint);
+ LiveReg &reloadVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
+ Register Hint);
+ void spillAll(MachineBasicBlock::iterator MI, bool OnlyLiveOut);
+ bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);
Register traceCopies(Register VirtReg) const;
Register traceCopyChain(Register Reg) const;
@@ -277,14 +243,6 @@ void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) {
RegUnitStates[*UI] = NewState;
}
-bool RegAllocFast::isPhysRegFree(MCPhysReg PhysReg) const {
- for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
- if (RegUnitStates[*UI] != regFree)
- return false;
- }
- return true;
-}
-
/// This allocates space for the specified virtual register to be held on the
/// stack.
int RegAllocFast::getStackSpaceFor(Register VirtReg) {
@@ -342,7 +300,7 @@ bool RegAllocFast::mayLiveOut(Register VirtReg) {
// block.
static const unsigned Limit = 8;
unsigned C = 0;
- for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
+ for (const MachineInstr &UseInst : MRI->reg_nodbg_instructions(VirtReg)) {
if (UseInst.getParent() != MBB || ++C >= Limit) {
MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
// Cannot be live-out if there are no successors.
@@ -394,19 +352,15 @@ void RegAllocFast::spill(MachineBasicBlock::iterator Before, Register VirtReg,
TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI);
++NumStores;
- // When we spill a virtual register, we will have spill instructions behind
- // every definition of it, meaning we can switch all the DBG_VALUEs over
- // to just reference the stack slot.
+ // If this register is used by a DBG_VALUE then insert a new DBG_VALUE to
+ // identify the spilled location as the place to find the corresponding
+ // variable's value.
SmallVectorImpl<MachineInstr *> &LRIDbgValues = LiveDbgValueMap[VirtReg];
for (MachineInstr *DBG : LRIDbgValues) {
MachineInstr *NewDV = buildDbgValueForSpill(*MBB, Before, *DBG, FI);
assert(NewDV->getParent() == MBB && "dangling parent pointer");
(void)NewDV;
LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);
- // Rewrite unassigned dbg_values to use the stack slot.
- MachineOperand &MO = DBG->getOperand(0);
- if (MO.isReg() && MO.getReg() == 0)
- updateDbgValueForSpill(*DBG, FI);
}
// Now that this register is spilled there should not be any DBG_VALUE
// pointing to this register because they are all pointing to the spilled value
@@ -425,75 +379,113 @@ void RegAllocFast::reload(MachineBasicBlock::iterator Before, Register VirtReg,
++NumLoads;
}
-/// Get basic block begin insertion point.
-/// This is not just MBB.begin() because surprisingly we have EH_LABEL
-/// instructions marking the begin of a basic block. This means we must insert
-/// new instructions after such labels...
-MachineBasicBlock::iterator
-RegAllocFast::getMBBBeginInsertionPoint(
- MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const {
- MachineBasicBlock::iterator I = MBB.begin();
- while (I != MBB.end()) {
- if (I->isLabel()) {
- ++I;
- continue;
- }
-
- // Most reloads should be inserted after prolog instructions.
- if (!TII->isBasicBlockPrologue(*I))
- break;
+/// Return true if MO is the only remaining reference to its virtual register,
+/// and it is guaranteed to be a block-local register.
+bool RegAllocFast::isLastUseOfLocalReg(const MachineOperand &MO) const {
+ // If the register has ever been spilled or reloaded, we conservatively assume
+ // it is a global register used in multiple blocks.
+ if (StackSlotForVirtReg[MO.getReg()] != -1)
+ return false;
+
+ // Check that the use/def chain has exactly one operand - MO.
+ MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(MO.getReg());
+ if (&*I != &MO)
+ return false;
+ return ++I == MRI->reg_nodbg_end();
+}
- // However if a prolog instruction reads a register that needs to be
- // reloaded, the reload should be inserted before the prolog.
- for (MachineOperand &MO : I->operands()) {
- if (MO.isReg())
- PrologLiveIns.insert(MO.getReg());
- }
+/// Set kill flags on last use of a virtual register.
+void RegAllocFast::addKillFlag(const LiveReg &LR) {
+ if (!LR.LastUse) return;
+ MachineOperand &MO = LR.LastUse->getOperand(LR.LastOpNum);
+ if (MO.isUse() && !LR.LastUse->isRegTiedToDefOperand(LR.LastOpNum)) {
+ if (MO.getReg() == LR.PhysReg)
+ MO.setIsKill();
+ // else, don't do anything; we are probably redefining a
+ // subreg of this register and given we don't track which
+ // lanes are actually dead, we cannot insert a kill flag here.
+ // Otherwise we may end up in a situation like this:
+ // ... = (MO) physreg:sub1, implicit killed physreg
+ // ... <== Here we would allow later pass to reuse physreg:sub1
+ // which is potentially wrong.
+ // LR:sub0 = ...
+ // ... = LR.sub1 <== This is going to use physreg:sub1
+ }
+}
- ++I;
+#ifndef NDEBUG
+bool RegAllocFast::verifyRegStateMapping(const LiveReg &LR) const {
+ for (MCRegUnitIterator UI(LR.PhysReg, TRI); UI.isValid(); ++UI) {
+ if (RegUnitStates[*UI] != LR.VirtReg)
+ return false;
}
- return I;
+ return true;
}
+#endif
-/// Reload all currently assigned virtual registers.
-void RegAllocFast::reloadAtBegin(MachineBasicBlock &MBB) {
- if (LiveVirtRegs.empty())
- return;
+/// Mark virtreg as no longer available.
+void RegAllocFast::killVirtReg(LiveReg &LR) {
+ assert(verifyRegStateMapping(LR) && "Broken RegState mapping");
+ addKillFlag(LR);
+ MCPhysReg PhysReg = LR.PhysReg;
+ setPhysRegState(PhysReg, regFree);
+ LR.PhysReg = 0;
+}
- for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
- MCPhysReg Reg = P.PhysReg;
- // Set state to live-in. This possibly overrides mappings to virtual
- // registers but we don't care anymore at this point.
- setPhysRegState(Reg, regLiveIn);
- }
+/// Mark virtreg as no longer available.
+void RegAllocFast::killVirtReg(Register VirtReg) {
+ assert(Register::isVirtualRegister(VirtReg) &&
+ "killVirtReg needs a virtual register");
+ LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
+ if (LRI != LiveVirtRegs.end() && LRI->PhysReg)
+ killVirtReg(*LRI);
+}
+/// This method spills the value specified by VirtReg into the corresponding
+/// stack slot if needed.
+void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI,
+ Register VirtReg) {
+ assert(Register::isVirtualRegister(VirtReg) &&
+ "Spilling a physical register is illegal!");
+ LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
+ assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
+ "Spilling unmapped virtual register");
+ spillVirtReg(MI, *LRI);
+}
+
+/// Do the actual work of spilling.
+void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI, LiveReg &LR) {
+ assert(verifyRegStateMapping(LR) && "Broken RegState mapping");
+
+ MCPhysReg PhysReg = LR.PhysReg;
+
+ if (LR.Dirty) {
+ // If this physreg is used by the instruction, we want to kill it on the
+ // instruction, not on the spill.
+ bool SpillKill = MachineBasicBlock::iterator(LR.LastUse) != MI;
+ LR.Dirty = false;
- SmallSet<Register, 2> PrologLiveIns;
+ spill(MI, LR.VirtReg, PhysReg, SpillKill);
+ if (SpillKill)
+ LR.LastUse = nullptr; // Don't kill register again
+ }
+ killVirtReg(LR);
+}
+
+/// Spill all dirty virtregs without killing them.
+void RegAllocFast::spillAll(MachineBasicBlock::iterator MI, bool OnlyLiveOut) {
+ if (LiveVirtRegs.empty())
+ return;
// The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
// of spilling here is deterministic, if arbitrary.
- MachineBasicBlock::iterator InsertBefore
- = getMBBBeginInsertionPoint(MBB, PrologLiveIns);
- for (const LiveReg &LR : LiveVirtRegs) {
- MCPhysReg PhysReg = LR.PhysReg;
- if (PhysReg == 0)
+ for (LiveReg &LR : LiveVirtRegs) {
+ if (!LR.PhysReg)
continue;
-
- unsigned FirstUnit = *MCRegUnitIterator(PhysReg, TRI);
- if (RegUnitStates[FirstUnit] == regLiveIn)
+ if (OnlyLiveOut && !mayLiveOut(LR.VirtReg))
continue;
-
- assert((&MBB != &MBB.getParent()->front() || IgnoreMissingDefs) &&
- "no reload in start block. Missing vreg def?");
-
- if (PrologLiveIns.count(PhysReg)) {
- // FIXME: Theoretically this should use an insert point skipping labels
- // but I'm not sure how labels should interact with prolog instruction
- // that need reloads.
- reload(MBB.begin(), LR.VirtReg, PhysReg);
- } else
- reload(InsertBefore, LR.VirtReg, PhysReg);
+ spillVirtReg(MI, LR);
}
LiveVirtRegs.clear();
}
@@ -501,74 +493,51 @@ void RegAllocFast::reloadAtBegin(MachineBasicBlock &MBB) {
/// Handle the direct use of a physical register. Check that the register is
/// not used by a virtreg. Kill the physreg, marking it free. This may add
/// implicit kills to MO->getParent() and invalidate MO.
-bool RegAllocFast::usePhysReg(MachineInstr &MI, MCPhysReg Reg) {
- assert(Register::isPhysicalRegister(Reg) && "expected physreg");
- bool displacedAny = displacePhysReg(MI, Reg);
- setPhysRegState(Reg, regPreAssigned);
- markRegUsedInInstr(Reg);
- return displacedAny;
-}
+void RegAllocFast::usePhysReg(MachineOperand &MO) {
+ // Ignore undef uses.
+ if (MO.isUndef())
+ return;
-bool RegAllocFast::definePhysReg(MachineInstr &MI, MCPhysReg Reg) {
- bool displacedAny = displacePhysReg(MI, Reg);
- setPhysRegState(Reg, regPreAssigned);
- return displacedAny;
+ Register PhysReg = MO.getReg();
+ assert(PhysReg.isPhysical() && "Bad usePhysReg operand");
+
+ markRegUsedInInstr(PhysReg);
+
+ for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+ switch (RegUnitStates[*UI]) {
+ case regReserved:
+ RegUnitStates[*UI] = regFree;
+ LLVM_FALLTHROUGH;
+ case regFree:
+ break;
+ default:
+ llvm_unreachable("Unexpected reg unit state");
+ }
+ }
+
+ // All aliases are disabled, bring register into working set.
+ setPhysRegState(PhysReg, regFree);
+ MO.setIsKill();
}
/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
/// similar to defineVirtReg except the physreg is reserved instead of
/// allocated.
-bool RegAllocFast::displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg) {
- bool displacedAny = false;
-
+void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
+ MCPhysReg PhysReg, unsigned NewState) {
for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
- unsigned Unit = *UI;
- switch (unsigned VirtReg = RegUnitStates[Unit]) {
- default: {
- LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
- assert(LRI != LiveVirtRegs.end() && "datastructures in sync");
- MachineBasicBlock::iterator ReloadBefore =
- std::next((MachineBasicBlock::iterator)MI.getIterator());
- reload(ReloadBefore, VirtReg, LRI->PhysReg);
-
- setPhysRegState(LRI->PhysReg, regFree);
- LRI->PhysReg = 0;
- LRI->Reloaded = true;
- displacedAny = true;
- break;
- }
- case regPreAssigned:
- RegUnitStates[Unit] = regFree;
- displacedAny = true;
+ switch (unsigned VirtReg = RegUnitStates[*UI]) {
+ default:
+ spillVirtReg(MI, VirtReg);
break;
case regFree:
+ case regReserved:
break;
}
}
- return displacedAny;
-}
-
-void RegAllocFast::freePhysReg(MCPhysReg PhysReg) {
- LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':');
- unsigned FirstUnit = *MCRegUnitIterator(PhysReg, TRI);
- switch (unsigned VirtReg = RegUnitStates[FirstUnit]) {
- case regFree:
- LLVM_DEBUG(dbgs() << '\n');
- return;
- case regPreAssigned:
- LLVM_DEBUG(dbgs() << '\n');
- setPhysRegState(PhysReg, regFree);
- return;
- default: {
- LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
- assert(LRI != LiveVirtRegs.end());
- LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n');
- setPhysRegState(LRI->PhysReg, regFree);
- LRI->PhysReg = 0;
- }
- return;
- }
+ markRegUsedInInstr(PhysReg);
+ setPhysRegState(PhysReg, NewState);
}
/// Return the cost of spilling clearing out PhysReg and aliases so it is free
@@ -576,61 +545,35 @@ void RegAllocFast::freePhysReg(MCPhysReg PhysReg) {
/// disabled - it can be allocated directly.
/// \returns spillImpossible when PhysReg or an alias can't be spilled.
unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
+ if (isRegUsedInInstr(PhysReg)) {
+ LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI)
+ << " is already used in instr.\n");
+ return spillImpossible;
+ }
+
for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
switch (unsigned VirtReg = RegUnitStates[*UI]) {
case regFree:
break;
- case regPreAssigned:
- LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned "
- << printReg(PhysReg, TRI) << '\n');
+ case regReserved:
+ LLVM_DEBUG(dbgs() << printReg(VirtReg, TRI) << " corresponding "
+ << printReg(PhysReg, TRI) << " is reserved already.\n");
return spillImpossible;
default: {
- bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
- findLiveVirtReg(VirtReg)->LiveOut;
- return SureSpill ? spillClean : spillDirty;
+ LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
+ assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
+ "Missing VirtReg entry");
+ return LRI->Dirty ? spillDirty : spillClean;
}
}
}
return 0;
}
-void RegAllocFast::assignDanglingDebugValues(MachineInstr &Definition,
- Register VirtReg, MCPhysReg Reg) {
- auto UDBGValIter = DanglingDbgValues.find(VirtReg);
- if (UDBGValIter == DanglingDbgValues.end())
- return;
-
- SmallVectorImpl<MachineInstr*> &Dangling = UDBGValIter->second;
- for (MachineInstr *DbgValue : Dangling) {
- assert(DbgValue->isDebugValue());
- MachineOperand &MO = DbgValue->getOperand(0);
- if (!MO.isReg())
- continue;
-
- // Test whether the physreg survives from the definition to the DBG_VALUE.
- MCPhysReg SetToReg = Reg;
- unsigned Limit = 20;
- for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()),
- E = DbgValue->getIterator(); I != E; ++I) {
- if (I->modifiesRegister(Reg, TRI) || --Limit == 0) {
- LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
- << '\n');
- SetToReg = 0;
- break;
- }
- }
- MO.setReg(SetToReg);
- if (SetToReg != 0)
- MO.setIsRenamable();
- }
- Dangling.clear();
-}
-
/// This method updates local state so that we know that PhysReg is the
/// proper container for VirtReg now. The physical register must not be used
/// for anything else when this is called.
-void RegAllocFast::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
- MCPhysReg PhysReg) {
+void RegAllocFast::assignVirtToPhysReg(LiveReg &LR, MCPhysReg PhysReg) {
Register VirtReg = LR.VirtReg;
LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
<< printReg(PhysReg, TRI) << '\n');
@@ -638,8 +581,6 @@ void RegAllocFast::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
assert(PhysReg != 0 && "Trying to assign no register");
LR.PhysReg = PhysReg;
setPhysRegState(PhysReg, VirtReg);
-
- assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
}
static bool isCoalescable(const MachineInstr &MI) {
@@ -683,10 +624,11 @@ Register RegAllocFast::traceCopies(Register VirtReg) const {
}
/// Allocates a physical register for VirtReg.
-void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR,
- Register Hint0, bool LookAtPhysRegUses) {
+void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint0) {
const Register VirtReg = LR.VirtReg;
- assert(LR.PhysReg == 0);
+
+ assert(Register::isVirtualRegister(VirtReg) &&
+ "Can only allocate virtual registers");
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
@@ -694,36 +636,41 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR,
<< " with hint " << printReg(Hint0, TRI) << '\n');
// Take hint when possible.
- if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) && RC.contains(Hint0) &&
- !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
- // Take hint if the register is currently free.
- if (isPhysRegFree(Hint0)) {
+ if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) &&
+ RC.contains(Hint0)) {
+ // Ignore the hint if we would have to spill a dirty register.
+ unsigned Cost = calcSpillCost(Hint0);
+ if (Cost < spillDirty) {
LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
<< '\n');
- assignVirtToPhysReg(MI, LR, Hint0);
+ if (Cost)
+ definePhysReg(MI, Hint0, regFree);
+ assignVirtToPhysReg(LR, Hint0);
return;
} else {
- LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI)
- << " occupied\n");
+ LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
+ << "occupied\n");
}
} else {
Hint0 = Register();
}
-
// Try other hint.
Register Hint1 = traceCopies(VirtReg);
- if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
- !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
- // Take hint if the register is currently free.
- if (isPhysRegFree(Hint1)) {
+ if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) &&
+ RC.contains(Hint1) && !isRegUsedInInstr(Hint1)) {
+ // Ignore the hint if we would have to spill a dirty register.
+ unsigned Cost = calcSpillCost(Hint1);
+ if (Cost < spillDirty) {
LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
- << '\n');
- assignVirtToPhysReg(MI, LR, Hint1);
+ << '\n');
+ if (Cost)
+ definePhysReg(MI, Hint1, regFree);
+ assignVirtToPhysReg(LR, Hint1);
return;
} else {
- LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI)
- << " occupied\n");
+ LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
+ << "occupied\n");
}
} else {
Hint1 = Register();
@@ -734,20 +681,15 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR,
ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
for (MCPhysReg PhysReg : AllocationOrder) {
LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
- if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
- LLVM_DEBUG(dbgs() << "already used in instr.\n");
- continue;
- }
-
unsigned Cost = calcSpillCost(PhysReg);
LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
// Immediately take a register with cost 0.
if (Cost == 0) {
- assignVirtToPhysReg(MI, LR, PhysReg);
+ assignVirtToPhysReg(LR, PhysReg);
return;
}
- if (PhysReg == Hint0 || PhysReg == Hint1)
+ if (PhysReg == Hint1 || PhysReg == Hint0)
Cost -= spillPrefBonus;
if (Cost < BestCost) {
@@ -763,14 +705,13 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR,
MI.emitError("inline assembly requires more registers than available");
else
MI.emitError("ran out of registers during register allocation");
-
- LR.Error = true;
- LR.PhysReg = 0;
+ definePhysReg(MI, *AllocationOrder.begin(), regFree);
+ assignVirtToPhysReg(LR, *AllocationOrder.begin());
return;
}
- displacePhysReg(MI, BestReg);
- assignVirtToPhysReg(MI, LR, BestReg);
+ definePhysReg(MI, BestReg, regFree);
+ assignVirtToPhysReg(LR, BestReg);
}
void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
@@ -798,166 +739,212 @@ void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
MO.setIsRenamable(true);
}
-/// Variation of defineVirtReg() with special handling for livethrough regs
-/// (tied or earlyclobber) that may interfere with preassigned uses.
-void RegAllocFast::defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
- Register VirtReg) {
- LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
- if (LRI != LiveVirtRegs.end()) {
- MCPhysReg PrevReg = LRI->PhysReg;
- if (PrevReg != 0 && isRegUsedInInstr(PrevReg, true)) {
- LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI)
- << " (tied/earlyclobber resolution)\n");
- freePhysReg(PrevReg);
- LRI->PhysReg = 0;
- allocVirtReg(MI, *LRI, 0, true);
- MachineBasicBlock::iterator InsertBefore =
- std::next((MachineBasicBlock::iterator)MI.getIterator());
- LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to "
- << printReg(PrevReg, TRI) << '\n');
- BuildMI(*MBB, InsertBefore, MI.getDebugLoc(),
- TII->get(TargetOpcode::COPY), PrevReg)
- .addReg(LRI->PhysReg, llvm::RegState::Kill);
- }
- MachineOperand &MO = MI.getOperand(OpNum);
- if (MO.getSubReg() && !MO.isUndef()) {
- LRI->LastUse = &MI;
- }
- }
- return defineVirtReg(MI, OpNum, VirtReg, true);
-}
-
-/// Allocates a register for VirtReg definition. Typically the register is
-/// already assigned from a use of the virtreg, however we still need to
-/// perform an allocation if:
-/// - It is a dead definition without any uses.
-/// - The value is live out and all uses are in different basic blocks.
-void RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
- Register VirtReg, bool LookAtPhysRegUses) {
- assert(VirtReg.isVirtual() && "Not a virtual register");
- MachineOperand &MO = MI.getOperand(OpNum);
+/// Allocates a register for VirtReg and mark it as dirty.
+MCPhysReg RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
+ Register VirtReg, Register Hint) {
+ assert(Register::isVirtualRegister(VirtReg) && "Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
- if (New) {
- if (!MO.isDead()) {
- if (mayLiveOut(VirtReg)) {
- LRI->LiveOut = true;
- } else {
- // It is a dead def without the dead flag; add the flag now.
- MO.setIsDead(true);
- }
- }
- }
- if (LRI->PhysReg == 0)
- allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
- else {
- assert(!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) &&
- "TODO: preassign mismatch");
- LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
- << " use existing assignment to "
- << printReg(LRI->PhysReg, TRI) << '\n');
- }
-
- MCPhysReg PhysReg = LRI->PhysReg;
- assert(PhysReg != 0 && "Register not assigned");
- if (LRI->Reloaded || LRI->LiveOut) {
- if (!MI.isImplicitDef()) {
- MachineBasicBlock::iterator SpillBefore =
- std::next((MachineBasicBlock::iterator)MI.getIterator());
- LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut << " RL: "
- << LRI->Reloaded << '\n');
- bool Kill = LRI->LastUse == nullptr;
- spill(SpillBefore, VirtReg, PhysReg, Kill);
- LRI->LastUse = nullptr;
+ if (!LRI->PhysReg) {
+ // If there is no hint, peek at the only use of this register.
+ if ((!Hint || !Hint.isPhysical()) &&
+ MRI->hasOneNonDBGUse(VirtReg)) {
+ const MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(VirtReg);
+ // It's a copy, use the destination register as a hint.
+ if (UseMI.isCopyLike())
+ Hint = UseMI.getOperand(0).getReg();
}
- LRI->LiveOut = false;
- LRI->Reloaded = false;
+ allocVirtReg(MI, *LRI, Hint);
+ } else if (LRI->LastUse) {
+ // Redefining a live register - kill at the last use, unless it is this
+ // instruction defining VirtReg multiple times.
+ if (LRI->LastUse != &MI || LRI->LastUse->getOperand(LRI->LastOpNum).isUse())
+ addKillFlag(*LRI);
}
- markRegUsedInInstr(PhysReg);
- setPhysReg(MI, MO, PhysReg);
+ assert(LRI->PhysReg && "Register not assigned");
+ LRI->LastUse = &MI;
+ LRI->LastOpNum = OpNum;
+ LRI->Dirty = true;
+ markRegUsedInInstr(LRI->PhysReg);
+ return LRI->PhysReg;
}
-/// Allocates a register for a VirtReg use.
-void RegAllocFast::useVirtReg(MachineInstr &MI, unsigned OpNum,
- Register VirtReg) {
- assert(VirtReg.isVirtual() && "Not a virtual register");
- MachineOperand &MO = MI.getOperand(OpNum);
+/// Make sure VirtReg is available in a physreg and return it.
+RegAllocFast::LiveReg &RegAllocFast::reloadVirtReg(MachineInstr &MI,
+ unsigned OpNum,
+ Register VirtReg,
+ Register Hint) {
+ assert(Register::isVirtualRegister(VirtReg) && "Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
- if (New) {
- MachineOperand &MO = MI.getOperand(OpNum);
- if (!MO.isKill()) {
- if (mayLiveOut(VirtReg)) {
- LRI->LiveOut = true;
- } else {
- // It is a last (killing) use without the kill flag; add the flag now.
- MO.setIsKill(true);
- }
- }
- } else {
- assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag");
- }
-
- // If necessary allocate a register.
- if (LRI->PhysReg == 0) {
- assert(!MO.isTied() && "tied op should be allocated");
- Register Hint;
- if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
- Hint = MI.getOperand(0).getReg();
- assert(Hint.isPhysical() &&
- "Copy destination should already be assigned");
- }
- allocVirtReg(MI, *LRI, Hint, false);
- if (LRI->Error) {
- const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
- ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
- setPhysReg(MI, MO, *AllocationOrder.begin());
- return;
+ MachineOperand &MO = MI.getOperand(OpNum);
+ if (!LRI->PhysReg) {
+ allocVirtReg(MI, *LRI, Hint);
+ reload(MI, VirtReg, LRI->PhysReg);
+ } else if (LRI->Dirty) {
+ if (isLastUseOfLocalReg(MO)) {
+ LLVM_DEBUG(dbgs() << "Killing last use: " << MO << '\n');
+ if (MO.isUse())
+ MO.setIsKill();
+ else
+ MO.setIsDead();
+ } else if (MO.isKill()) {
+ LLVM_DEBUG(dbgs() << "Clearing dubious kill: " << MO << '\n');
+ MO.setIsKill(false);
+ } else if (MO.isDead()) {
+ LLVM_DEBUG(dbgs() << "Clearing dubious dead: " << MO << '\n');
+ MO.setIsDead(false);
}
+ } else if (MO.isKill()) {
+ // We must remove kill flags from uses of reloaded registers because the
+ // register would be killed immediately, and there might be a second use:
+ // %foo = OR killed %x, %x
+ // This would cause a second reload of %x into a different register.
+ LLVM_DEBUG(dbgs() << "Clearing clean kill: " << MO << '\n');
+ MO.setIsKill(false);
+ } else if (MO.isDead()) {
+ LLVM_DEBUG(dbgs() << "Clearing clean dead: " << MO << '\n');
+ MO.setIsDead(false);
}
-
+ assert(LRI->PhysReg && "Register not assigned");
LRI->LastUse = &MI;
+ LRI->LastOpNum = OpNum;
markRegUsedInInstr(LRI->PhysReg);
- setPhysReg(MI, MO, LRI->PhysReg);
+ return *LRI;
}
/// Changes operand OpNum in MI to refer to PhysReg, considering subregs. This
/// may invalidate any operand pointers. Return true if the operand kills its
/// register.
-void RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
+bool RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
MCPhysReg PhysReg) {
+ bool Dead = MO.isDead();
if (!MO.getSubReg()) {
MO.setReg(PhysReg);
MO.setIsRenamable(true);
- return;
+ return MO.isKill() || Dead;
}
// Handle subregister index.
MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : Register());
MO.setIsRenamable(true);
- // Note: We leave the subreg number around a little longer in case of defs.
- // This is so that the register freeing logic in allocateInstruction can still
- // recognize this as subregister defs. The code there will clear the number.
- if (!MO.isDef())
- MO.setSubReg(0);
+ MO.setSubReg(0);
// A kill flag implies killing the full register. Add corresponding super
// register kill.
if (MO.isKill()) {
MI.addRegisterKilled(PhysReg, TRI, true);
- return;
+ return true;
}
// A <def,read-undef> of a sub-register requires an implicit def of the full
// register.
- if (MO.isDef() && MO.isUndef()) {
- if (MO.isDead())
- MI.addRegisterDead(PhysReg, TRI, true);
- else
- MI.addRegisterDefined(PhysReg, TRI);
+ if (MO.isDef() && MO.isUndef())
+ MI.addRegisterDefined(PhysReg, TRI);
+
+ return Dead;
+}
+
+// Handles special instruction operands like early clobbers and tied ops when
+// there are additional physreg defines.
+void RegAllocFast::handleThroughOperands(MachineInstr &MI,
+ SmallVectorImpl<Register> &VirtDead) {
+ LLVM_DEBUG(dbgs() << "Scanning for through registers:");
+ SmallSet<Register, 8> ThroughRegs;
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg()) continue;
+ Register Reg = MO.getReg();
+ if (!Reg.isVirtual())
+ continue;
+ if (MO.isEarlyClobber() || (MO.isUse() && MO.isTied()) ||
+ (MO.getSubReg() && MI.readsVirtualRegister(Reg))) {
+ if (ThroughRegs.insert(Reg).second)
+ LLVM_DEBUG(dbgs() << ' ' << printReg(Reg));
+ }
+ }
+
+ // If any physreg defines collide with preallocated through registers,
+ // we must spill and reallocate.
+ LLVM_DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg() || !MO.isDef()) continue;
+ Register Reg = MO.getReg();
+ if (!Reg || !Reg.isPhysical())
+ continue;
+ markRegUsedInInstr(Reg);
+
+ for (MCRegUnitIterator UI(Reg, TRI); UI.isValid(); ++UI) {
+ if (!ThroughRegs.count(RegUnitStates[*UI]))
+ continue;
+
+ // Need to spill any aliasing registers.
+ for (MCRegUnitRootIterator RI(*UI, TRI); RI.isValid(); ++RI) {
+ for (MCSuperRegIterator SI(*RI, TRI, true); SI.isValid(); ++SI) {
+ definePhysReg(MI, *SI, regFree);
+ }
+ }
+ }
}
+
+ SmallVector<Register, 8> PartialDefs;
+ LLVM_DEBUG(dbgs() << "Allocating tied uses.\n");
+ for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+ MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg()) continue;
+ Register Reg = MO.getReg();
+ if (!Register::isVirtualRegister(Reg))
+ continue;
+ if (MO.isUse()) {
+ if (!MO.isTied()) continue;
+ LLVM_DEBUG(dbgs() << "Operand " << I << "(" << MO
+ << ") is tied to operand " << MI.findTiedOperandIdx(I)
+ << ".\n");
+ LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
+ MCPhysReg PhysReg = LR.PhysReg;
+ setPhysReg(MI, MO, PhysReg);
+ // Note: we don't update the def operand yet. That would cause the normal
+ // def-scan to attempt spilling.
+ } else if (MO.getSubReg() && MI.readsVirtualRegister(Reg)) {
+ LLVM_DEBUG(dbgs() << "Partial redefine: " << MO << '\n');
+ // Reload the register, but don't assign to the operand just yet.
+ // That would confuse the later phys-def processing pass.
+ LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
+ PartialDefs.push_back(LR.PhysReg);
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << "Allocating early clobbers.\n");
+ for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg()) continue;
+ Register Reg = MO.getReg();
+ if (!Register::isVirtualRegister(Reg))
+ continue;
+ if (!MO.isEarlyClobber())
+ continue;
+ // Note: defineVirtReg may invalidate MO.
+ MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, 0);
+ if (setPhysReg(MI, MI.getOperand(I), PhysReg))
+ VirtDead.push_back(Reg);
+ }
+
+ // Restore UsedInInstr to a state usable for allocating normal virtual uses.
+ UsedInInstr.clear();
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
+ Register Reg = MO.getReg();
+ if (!Reg || !Reg.isPhysical())
+ continue;
+ LLVM_DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI)
+ << " as used in instr\n");
+ markRegUsedInInstr(Reg);
+ }
+
+ // Also mark PartialDefs as used to avoid reallocation.
+ for (Register PartialDef : PartialDefs)
+ markRegUsedInInstr(PartialDef);
}
#ifndef NDEBUG
@@ -968,21 +955,15 @@ void RegAllocFast::dumpState() const {
switch (unsigned VirtReg = RegUnitStates[Unit]) {
case regFree:
break;
- case regPreAssigned:
+ case regReserved:
dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
break;
- case regLiveIn:
- llvm_unreachable("Should not have regLiveIn in map");
default: {
dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
- if (I->LiveOut || I->Reloaded) {
- dbgs() << '[';
- if (I->LiveOut) dbgs() << 'O';
- if (I->Reloaded) dbgs() << 'R';
- dbgs() << ']';
- }
+ if (I->Dirty)
+ dbgs() << "[D]";
assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
break;
}
@@ -1005,277 +986,111 @@ void RegAllocFast::dumpState() const {
}
#endif
-/// Count number of defs consumed from each register class by \p Reg
-void RegAllocFast::addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts,
- Register Reg) const {
- assert(RegClassDefCounts.size() == TRI->getNumRegClasses());
-
- if (Reg.isVirtual()) {
- const TargetRegisterClass *OpRC = MRI->getRegClass(Reg);
- for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
- RCIdx != RCIdxEnd; ++RCIdx) {
- const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
- // FIXME: Consider aliasing sub/super registers.
- if (OpRC->hasSubClassEq(IdxRC))
- ++RegClassDefCounts[RCIdx];
- }
-
- return;
- }
-
- for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
- RCIdx != RCIdxEnd; ++RCIdx) {
- const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
- for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
- if (IdxRC->contains(*Alias)) {
- ++RegClassDefCounts[RCIdx];
- break;
- }
- }
- }
-}
-
void RegAllocFast::allocateInstruction(MachineInstr &MI) {
- // The basic algorithm here is:
- // 1. Mark registers of def operands as free
- // 2. Allocate registers to use operands and place reload instructions for
- // registers displaced by the allocation.
- //
- // However we need to handle some corner cases:
- // - pre-assigned defs and uses need to be handled before the other def/use
- // operands are processed to avoid the allocation heuristics clashing with
- // the pre-assignment.
- // - The "free def operands" step has to come last instead of first for tied
- // operands and early-clobbers.
+ const MCInstrDesc &MCID = MI.getDesc();
+
+ // If this is a copy, we may be able to coalesce.
+ Register CopySrcReg;
+ Register CopyDstReg;
+ unsigned CopySrcSub = 0;
+ unsigned CopyDstSub = 0;
+ if (MI.isCopy()) {
+ CopyDstReg = MI.getOperand(0).getReg();
+ CopySrcReg = MI.getOperand(1).getReg();
+ CopyDstSub = MI.getOperand(0).getSubReg();
+ CopySrcSub = MI.getOperand(1).getSubReg();
+ }
+ // Track registers used by instruction.
UsedInInstr.clear();
- // Scan for special cases; Apply pre-assigned register defs to state.
- bool HasPhysRegUse = false;
- bool HasRegMask = false;
- bool HasVRegDef = false;
- bool HasDef = false;
- bool HasEarlyClobber = false;
- bool NeedToAssignLiveThroughs = false;
- for (MachineOperand &MO : MI.operands()) {
- if (MO.isReg()) {
- Register Reg = MO.getReg();
- if (Reg.isVirtual()) {
- if (MO.isDef()) {
- HasDef = true;
- HasVRegDef = true;
- if (MO.isEarlyClobber()) {
- HasEarlyClobber = true;
- NeedToAssignLiveThroughs = true;
- }
- if (MO.isTied() || (MO.getSubReg() != 0 && !MO.isUndef()))
- NeedToAssignLiveThroughs = true;
- }
- } else if (Reg.isPhysical()) {
- if (!MRI->isReserved(Reg)) {
- if (MO.isDef()) {
- HasDef = true;
- bool displacedAny = definePhysReg(MI, Reg);
- if (MO.isEarlyClobber())
- HasEarlyClobber = true;
- if (!displacedAny)
- MO.setIsDead(true);
- }
- if (MO.readsReg())
- HasPhysRegUse = true;
- }
- }
- } else if (MO.isRegMask()) {
- HasRegMask = true;
+ // First scan.
+ // Mark physreg uses and early clobbers as used.
+ // Find the end of the virtreg operands
+ unsigned VirtOpEnd = 0;
+ bool hasTiedOps = false;
+ bool hasEarlyClobbers = false;
+ bool hasPartialRedefs = false;
+ bool hasPhysDefs = false;
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ // Make sure MRI knows about registers clobbered by regmasks.
+ if (MO.isRegMask()) {
+ MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
+ continue;
}
- }
-
- // Allocate virtreg defs.
- if (HasDef) {
- if (HasVRegDef) {
- // Special handling for early clobbers, tied operands or subregister defs:
- // Compared to "normal" defs these:
- // - Must not use a register that is pre-assigned for a use operand.
- // - In order to solve tricky inline assembly constraints we change the
- // heuristic to figure out a good operand order before doing
- // assignments.
- if (NeedToAssignLiveThroughs) {
- DefOperandIndexes.clear();
- PhysRegUses.clear();
-
- // Track number of defs which may consume a register from the class.
- std::vector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0);
- assert(RegClassDefCounts[0] == 0);
-
- LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n");
- for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
- const MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg())
- continue;
- Register Reg = MO.getReg();
- if (MO.readsReg()) {
- if (Reg.isPhysical()) {
- LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI)
- << '\n');
- markPhysRegUsedInInstr(Reg);
- }
- }
-
- if (MO.isDef()) {
- if (Reg.isVirtual())
- DefOperandIndexes.push_back(I);
-
- addRegClassDefCounts(RegClassDefCounts, Reg);
- }
- }
-
- llvm::sort(DefOperandIndexes.begin(), DefOperandIndexes.end(),
- [&](uint16_t I0, uint16_t I1) {
- const MachineOperand &MO0 = MI.getOperand(I0);
- const MachineOperand &MO1 = MI.getOperand(I1);
- Register Reg0 = MO0.getReg();
- Register Reg1 = MO1.getReg();
- const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg0);
- const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg1);
-
- // Identify regclass that are easy to use up completely just in this
- // instruction.
- unsigned ClassSize0 = RegClassInfo.getOrder(&RC0).size();
- unsigned ClassSize1 = RegClassInfo.getOrder(&RC1).size();
-
- bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()];
- bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()];
- if (SmallClass0 > SmallClass1)
- return true;
- if (SmallClass0 < SmallClass1)
- return false;
-
- // Allocate early clobbers and livethrough operands first.
- bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() ||
- (MO0.getSubReg() == 0 && !MO0.isUndef());
- bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() ||
- (MO1.getSubReg() == 0 && !MO1.isUndef());
- if (Livethrough0 > Livethrough1)
- return true;
- if (Livethrough0 < Livethrough1)
- return false;
-
- // Tie-break rule: operand index.
- return I0 < I1;
- });
-
- for (uint16_t OpIdx : DefOperandIndexes) {
- MachineOperand &MO = MI.getOperand(OpIdx);
- LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
- unsigned Reg = MO.getReg();
- if (MO.isEarlyClobber() || MO.isTied() ||
- (MO.getSubReg() && !MO.isUndef())) {
- defineLiveThroughVirtReg(MI, OpIdx, Reg);
- } else {
- defineVirtReg(MI, OpIdx, Reg);
- }
- }
+ if (!MO.isReg()) continue;
+ Register Reg = MO.getReg();
+ if (!Reg) continue;
+ if (Register::isVirtualRegister(Reg)) {
+ VirtOpEnd = i+1;
+ if (MO.isUse()) {
+ hasTiedOps = hasTiedOps ||
+ MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1;
} else {
- // Assign virtual register defs.
- for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
- MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg() || !MO.isDef())
- continue;
- Register Reg = MO.getReg();
- if (Reg.isVirtual())
- defineVirtReg(MI, I, Reg);
- }
- }
- }
-
- // Free registers occupied by defs.
- // Iterate operands in reverse order, so we see the implicit super register
- // defs first (we added them earlier in case of <def,read-undef>).
- for (unsigned I = MI.getNumOperands(); I-- > 0;) {
- MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg() || !MO.isDef())
- continue;
-
- // subreg defs don't free the full register. We left the subreg number
- // around as a marker in setPhysReg() to recognize this case here.
- if (MO.getSubReg() != 0) {
- MO.setSubReg(0);
- continue;
- }
-
- // Do not free tied operands and early clobbers.
- if (MO.isTied() || MO.isEarlyClobber())
- continue;
- Register Reg = MO.getReg();
- if (!Reg)
- continue;
- assert(Reg.isPhysical());
- if (MRI->isReserved(Reg))
- continue;
- freePhysReg(Reg);
- unmarkRegUsedInInstr(Reg);
- }
- }
-
- // Displace clobbered registers.
- if (HasRegMask) {
- for (const MachineOperand &MO : MI.operands()) {
- if (MO.isRegMask()) {
- // MRI bookkeeping.
- MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
-
- // Displace clobbered registers.
- const uint32_t *Mask = MO.getRegMask();
- for (LiveRegMap::iterator LRI = LiveVirtRegs.begin(),
- LRIE = LiveVirtRegs.end(); LRI != LRIE; ++LRI) {
- MCPhysReg PhysReg = LRI->PhysReg;
- if (PhysReg != 0 && MachineOperand::clobbersPhysReg(Mask, PhysReg))
- displacePhysReg(MI, PhysReg);
- }
+ if (MO.isEarlyClobber())
+ hasEarlyClobbers = true;
+ if (MO.getSubReg() && MI.readsVirtualRegister(Reg))
+ hasPartialRedefs = true;
}
+ continue;
}
+ if (!MRI->isAllocatable(Reg)) continue;
+ if (MO.isUse()) {
+ usePhysReg(MO);
+ } else if (MO.isEarlyClobber()) {
+ definePhysReg(MI, Reg,
+ (MO.isImplicit() || MO.isDead()) ? regFree : regReserved);
+ hasEarlyClobbers = true;
+ } else
+ hasPhysDefs = true;
}
- // Apply pre-assigned register uses to state.
- if (HasPhysRegUse) {
- for (MachineOperand &MO : MI.operands()) {
- if (!MO.isReg() || !MO.readsReg())
- continue;
- Register Reg = MO.getReg();
- if (!Reg.isPhysical())
- continue;
- if (MRI->isReserved(Reg))
- continue;
- bool displacedAny = usePhysReg(MI, Reg);
- if (!displacedAny && !MRI->isReserved(Reg))
- MO.setIsKill(true);
- }
+ // The instruction may have virtual register operands that must be allocated
+ // the same register at use-time and def-time: early clobbers and tied
+ // operands. If there are also physical defs, these registers must avoid
+ // both physical defs and uses, making them more constrained than normal
+ // operands.
+ // Similarly, if there are multiple defs and tied operands, we must make
+ // sure the same register is allocated to uses and defs.
+ // We didn't detect inline asm tied operands above, so just make this extra
+ // pass for all inline asm.
+ if (MI.isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
+ (hasTiedOps && (hasPhysDefs || MCID.getNumDefs() > 1))) {
+ handleThroughOperands(MI, VirtDead);
+ // Don't attempt coalescing when we have funny stuff going on.
+ CopyDstReg = Register();
+ // Pretend we have early clobbers so the use operands get marked below.
+ // This is not necessary for the common case of a single tied use.
+ hasEarlyClobbers = true;
}
- // Allocate virtreg uses and insert reloads as necessary.
+ // Second scan.
+ // Allocate virtreg uses.
bool HasUndefUse = false;
- for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
+ for (unsigned I = 0; I != VirtOpEnd; ++I) {
MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg() || !MO.isUse())
- continue;
+ if (!MO.isReg()) continue;
Register Reg = MO.getReg();
if (!Reg.isVirtual())
continue;
+ if (MO.isUse()) {
+ if (MO.isUndef()) {
+ HasUndefUse = true;
+ // There is no need to allocate a register for an undef use.
+ continue;
+ }
- if (MO.isUndef()) {
- HasUndefUse = true;
- continue;
- }
-
-
- // Populate MayLiveAcrossBlocks in case the use block is allocated before
- // the def block (removing the vreg uses).
- mayLiveIn(Reg);
-
+ // Populate MayLiveAcrossBlocks in case the use block is allocated before
+ // the def block (removing the vreg uses).
+ mayLiveIn(Reg);
- assert(!MO.isInternalRead() && "Bundles not supported");
- assert(MO.readsReg() && "reading use");
- useVirtReg(MI, I, Reg);
+ LiveReg &LR = reloadVirtReg(MI, I, Reg, CopyDstReg);
+ MCPhysReg PhysReg = LR.PhysReg;
+ CopySrcReg = (CopySrcReg == Reg || CopySrcReg == PhysReg) ? PhysReg : 0;
+ if (setPhysReg(MI, MO, PhysReg))
+ killVirtReg(LR);
+ }
}
// Allocate undef operands. This is a separate step because in a situation
@@ -1294,40 +1109,76 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
}
}
- // Free early clobbers.
- if (HasEarlyClobber) {
- for (unsigned I = MI.getNumOperands(); I-- > 0; ) {
- MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg() || !MO.isDef() || !MO.isEarlyClobber())
- continue;
- // subreg defs don't free the full register. We left the subreg number
- // around as a marker in setPhysReg() to recognize this case here.
- if (MO.getSubReg() != 0) {
- MO.setSubReg(0);
- continue;
- }
-
+ // Track registers defined by instruction - early clobbers and tied uses at
+ // this point.
+ UsedInInstr.clear();
+ if (hasEarlyClobbers) {
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg()) continue;
Register Reg = MO.getReg();
- if (!Reg)
- continue;
- assert(Reg.isPhysical() && "should have register assigned");
-
- // We sometimes get odd situations like:
- // early-clobber %x0 = INSTRUCTION %x0
- // which is semantically questionable as the early-clobber should
- // apply before the use. But in practice we consider the use to
- // happen before the early clobber now. Don't free the early clobber
- // register in this case.
- if (MI.readsRegister(Reg, TRI))
+ if (!Reg || !Reg.isPhysical())
continue;
-
- freePhysReg(Reg);
+ // Look for physreg defs and tied uses.
+ if (!MO.isDef() && !MO.isTied()) continue;
+ markRegUsedInInstr(Reg);
}
}
+ unsigned DefOpEnd = MI.getNumOperands();
+ if (MI.isCall()) {
+ // Spill all virtregs before a call. This serves one purpose: If an
+ // exception is thrown, the landing pad is going to expect to find
+ // registers in their spill slots.
+ // Note: although it is appealing to just consider all definitions
+ // as call-clobbered, this is not correct because some of those
+ // definitions may be used later on and we do not want to reuse
+ // those for virtual registers in between.
+ LLVM_DEBUG(dbgs() << " Spilling remaining registers before call.\n");
+ spillAll(MI, /*OnlyLiveOut*/ false);
+ }
+
+ // Third scan.
+ // Mark all physreg defs as used before allocating virtreg defs.
+ for (unsigned I = 0; I != DefOpEnd; ++I) {
+ const MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
+ continue;
+ Register Reg = MO.getReg();
+
+ if (!Reg || !Reg.isPhysical() || !MRI->isAllocatable(Reg))
+ continue;
+ definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved);
+ }
+
+ // Fourth scan.
+ // Allocate defs and collect dead defs.
+ for (unsigned I = 0; I != DefOpEnd; ++I) {
+ const MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
+ continue;
+ Register Reg = MO.getReg();
+
+ // We have already dealt with phys regs in the previous scan.
+ if (Reg.isPhysical())
+ continue;
+ MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, CopySrcReg);
+ if (setPhysReg(MI, MI.getOperand(I), PhysReg)) {
+ VirtDead.push_back(Reg);
+ CopyDstReg = Register(); // cancel coalescing;
+ } else
+ CopyDstReg = (CopyDstReg == Reg || CopyDstReg == PhysReg) ? PhysReg : 0;
+ }
+
+ // Kill dead defs after the scan to ensure that multiple defs of the same
+ // register are allocated identically. We didn't need to do this for uses
+ // because we are creating our own kill flags, and they are always at the last
+ // use.
+ for (Register VirtReg : VirtDead)
+ killVirtReg(VirtReg);
+ VirtDead.clear();
+
LLVM_DEBUG(dbgs() << "<< " << MI);
- if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
- MI.getNumOperands() == 2) {
+ if (CopyDstReg && CopyDstReg == CopySrcReg && CopyDstSub == CopySrcSub) {
LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
Coalesced.push_back(&MI);
}
@@ -1344,22 +1195,23 @@ void RegAllocFast::handleDebugValue(MachineInstr &MI) {
if (!Register::isVirtualRegister(Reg))
return;
- // Already spilled to a stackslot?
- int SS = StackSlotForVirtReg[Reg];
- if (SS != -1) {
- // Modify DBG_VALUE now that the value is in a spill slot.
- updateDbgValueForSpill(MI, SS);
- LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
- return;
- }
-
// See if this virtual register has already been allocated to a physical
// register or spilled to a stack slot.
LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
setPhysReg(MI, MO, LRI->PhysReg);
} else {
- DanglingDbgValues[Reg].push_back(&MI);
+ int SS = StackSlotForVirtReg[Reg];
+ if (SS != -1) {
+ // Modify DBG_VALUE now that the value is in a spill slot.
+ updateDbgValueForSpill(MI, SS);
+ LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << MI);
+ return;
+ }
+
+ // We can't allocate a physreg for a DebugValue, sorry!
+ LLVM_DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
+ MO.setReg(Register());
}
// If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
@@ -1367,17 +1219,6 @@ void RegAllocFast::handleDebugValue(MachineInstr &MI) {
LiveDbgValueMap[Reg].push_back(&MI);
}
-#ifndef NDEBUG
-bool RegAllocFast::verifyRegStateMapping(const LiveReg &LR) const {
- for (MCRegUnitIterator UI(LR.PhysReg, TRI); UI.isValid(); ++UI) {
- if (RegUnitStates[*UI] != LR.VirtReg)
- return false;
- }
-
- return true;
-}
-#endif
-
void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
this->MBB = &MBB;
LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);
@@ -1385,15 +1226,18 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
- for (MachineBasicBlock *Succ : MBB.successors()) {
- for (const MachineBasicBlock::RegisterMaskPair &LI : Succ->liveins())
- setPhysRegState(LI.PhysReg, regPreAssigned);
- }
+ MachineBasicBlock::iterator MII = MBB.begin();
+ // Add live-in registers as live.
+ for (const MachineBasicBlock::RegisterMaskPair &LI : MBB.liveins())
+ if (MRI->isAllocatable(LI.PhysReg))
+ definePhysReg(MII, LI.PhysReg, regReserved);
+
+ VirtDead.clear();
Coalesced.clear();
- // Traverse block in reverse order allocating instructions one by one.
- for (MachineInstr &MI : reverse(MBB)) {
+ // Otherwise, sequentially allocate each instruction in the MBB.
+ for (MachineInstr &MI : MBB) {
LLVM_DEBUG(
dbgs() << "\n>> " << MI << "Regs:";
dumpState()
@@ -1409,14 +1253,9 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
allocateInstruction(MI);
}
- LLVM_DEBUG(
- dbgs() << "Begin Regs:";
- dumpState()
- );
-
// Spill all physical registers holding virtual registers now.
- LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
- reloadAtBegin(MBB);
+ LLVM_DEBUG(dbgs() << "Spilling live registers at end of block.\n");
+ spillAll(MBB.getFirstTerminator(), /*OnlyLiveOut*/ true);
// Erase all the coalesced copies. We are delaying it until now because
// LiveVirtRegs might refer to the instrs.
@@ -1424,20 +1263,6 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
MBB.erase(MI);
NumCoalesced += Coalesced.size();
- for (auto &UDBGPair : DanglingDbgValues) {
- for (MachineInstr *DbgValue : UDBGPair.second) {
- assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
- MachineOperand &MO = DbgValue->getOperand(0);
- // Nothing to do if the vreg was spilled in the meantime.
- if (!MO.isReg())
- continue;
- LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
- << '\n');
- MO.setReg(0);
- }
- }
- DanglingDbgValues.clear();
-
LLVM_DEBUG(MBB.dump());
}
@@ -1451,11 +1276,8 @@ bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) {
MFI = &MF.getFrameInfo();
MRI->freezeReservedRegs(MF);
RegClassInfo.runOnMachineFunction(MF);
- unsigned NumRegUnits = TRI->getNumRegUnits();
UsedInInstr.clear();
- UsedInInstr.setUniverse(NumRegUnits);
- PhysRegUses.clear();
- PhysRegUses.setUniverse(NumRegUnits);
+ UsedInInstr.setUniverse(TRI->getNumRegUnits());
// initialize the virtual->physical register map to have a 'null'
// mapping for all virtual registers