aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/CodeGen/SplitKit.cpp
diff options
context:
space:
mode:
authorYashwant Singh <Yashwant.Singh@amd.com>2023-07-07 22:29:30 +0530
committerYashwant Singh <Yashwant.Singh@amd.com>2023-07-07 22:29:50 +0530
commitb7836d856206ec39509d42529f958c920368166b (patch)
treeb7e4e80dfe83c5c260375dfff19bd3640fdf40ef /llvm/lib/CodeGen/SplitKit.cpp
parenteb98abab2c83c9c101c4749c93836108657d6164 (diff)
downloadllvm-b7836d856206ec39509d42529f958c920368166b.zip
llvm-b7836d856206ec39509d42529f958c920368166b.tar.gz
llvm-b7836d856206ec39509d42529f958c920368166b.tar.bz2
[CodeGen] Allow targets to use target specific COPY instructions for live range splitting
Replacing D143754. Right now the LiveRangeSplitting during register allocation uses TargetOpcode::COPY instruction for splitting. For AMDGPU target that creates a problem as we have both vector and scalar copies. Vector copies perform a copy over a vector register but only on the lanes (threads) that are active. This is mostly sufficient; however, we do run into cases when we have to copy the entire vector register and not just the active-lane data. One major place where we need that is live range splitting. Allowing targets to use their own copy instructions (if defined) will provide a lot of flexibility and ease in lowering these pseudo instructions to correct MIR. - Introduce a getLiveRangeSplitOpcode() virtual function and use it to generate the copy in live range splitting. - Replace necessary MI.isCopy() checks with TII.isCopyInstr() in the register allocator pipeline. Reviewed By: arsenm, cdevadas, kparzysz Differential Revision: https://reviews.llvm.org/D150388
Diffstat (limited to 'llvm/lib/CodeGen/SplitKit.cpp')
-rw-r--r--llvm/lib/CodeGen/SplitKit.cpp17
1 file changed, 10 insertions, 7 deletions
diff --git a/llvm/lib/CodeGen/SplitKit.cpp b/llvm/lib/CodeGen/SplitKit.cpp
index eee54f0..83964ec 100644
--- a/llvm/lib/CodeGen/SplitKit.cpp
+++ b/llvm/lib/CodeGen/SplitKit.cpp
@@ -514,10 +514,10 @@ void SplitEditor::forceRecompute(unsigned RegIdx, const VNInfo &ParentVNI) {
VFP = ValueForcePair(nullptr, true);
}
-SlotIndex SplitEditor::buildSingleSubRegCopy(Register FromReg, Register ToReg,
- MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
- unsigned SubIdx, LiveInterval &DestLI, bool Late, SlotIndex Def) {
- const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);
+SlotIndex SplitEditor::buildSingleSubRegCopy(
+ Register FromReg, Register ToReg, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator InsertBefore, unsigned SubIdx,
+ LiveInterval &DestLI, bool Late, SlotIndex Def, const MCInstrDesc &Desc) {
bool FirstCopy = !Def.isValid();
MachineInstr *CopyMI = BuildMI(MBB, InsertBefore, DebugLoc(), Desc)
.addReg(ToReg, RegState::Define | getUndefRegState(FirstCopy)
@@ -536,7 +536,8 @@ SlotIndex SplitEditor::buildSingleSubRegCopy(Register FromReg, Register ToReg,
SlotIndex SplitEditor::buildCopy(Register FromReg, Register ToReg,
LaneBitmask LaneMask, MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, bool Late, unsigned RegIdx) {
- const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);
+ const MCInstrDesc &Desc =
+ TII.get(TII.getLiveRangeSplitOpcode(FromReg, *MBB.getParent()));
SlotIndexes &Indexes = *LIS.getSlotIndexes();
if (LaneMask.all() || LaneMask == MRI.getMaxLaneMaskForVReg(FromReg)) {
// The full vreg is copied.
@@ -564,7 +565,7 @@ SlotIndex SplitEditor::buildCopy(Register FromReg, Register ToReg,
SlotIndex Def;
for (unsigned BestIdx : SubIndexes) {
Def = buildSingleSubRegCopy(FromReg, ToReg, MBB, InsertBefore, BestIdx,
- DestLI, Late, Def);
+ DestLI, Late, Def, Desc);
}
BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
@@ -1584,7 +1585,9 @@ bool SplitAnalysis::shouldSplitSingleBlock(const BlockInfo &BI,
if (BI.LiveIn && BI.LiveOut)
return true;
// No point in isolating a copy. It has no register class constraints.
- if (LIS.getInstructionFromIndex(BI.FirstInstr)->isCopyLike())
+ MachineInstr *MI = LIS.getInstructionFromIndex(BI.FirstInstr);
+ bool copyLike = TII.isCopyInstr(*MI) || MI->isSubregToReg();
+ if (copyLike)
return false;
// Finally, don't isolate an end point that was created by earlier splits.
return isOriginalEndpoint(BI.FirstInstr);