aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/CodeGen/MachineScheduler.cpp
diff options
context:
space:
mode:
authorRuiling, Song <ruiling.song@amd.com>2025-06-05 15:28:04 +0800
committerGitHub <noreply@github.com>2025-06-05 15:28:04 +0800
commit0487db1f130913d4fad18483e305b843636ec4ce (patch)
treed2e538dd11075b85534de53e36fdb06ff9cf19d4 /llvm/lib/CodeGen/MachineScheduler.cpp
parentdba418816731bc1cc677519fdbb77caca812ddda (diff)
downloadllvm-0487db1f130913d4fad18483e305b843636ec4ce.zip
llvm-0487db1f130913d4fad18483e305b843636ec4ce.tar.gz
llvm-0487db1f130913d4fad18483e305b843636ec4ce.tar.bz2
MachineScheduler: Improve instruction clustering (#137784)
The existing way of managing clustered nodes was to add weak edges between neighbouring cluster nodes, forming a sort of ordered queue, which was later recorded as `NextClusterPred` or `NextClusterSucc` in `ScheduleDAGMI`. But the instructions may not actually be picked in the exact order of that queue. For example, suppose we have a queue of cluster nodes A B C. During scheduling, node B might be picked first; then it is very likely that we only cluster B and C for top-down scheduling (leaving A alone). Another issue is that ``` if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum) std::swap(SUa, SUb); if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) ``` may break the cluster queue. For example, say we want to cluster nodes (in `MemOpRecords` order): 1 3 2. Normally 1 (SUa) becomes a pred of 3 (SUb). But when it comes to (3, 2), since 3 (SUa) > 2 (SUb), we reorder the two nodes, which makes 2 a pred of 3. This makes both 1 and 2 preds of 3 with no edge between 1 and 2, so we get a broken cluster chain. To fix both issues, this change introduces an unordered set, which can help improve clustering in some hard cases. One key reason the change causes so many test check changes is that, since the cluster candidates are no longer ordered, the candidates may be picked in a different order than before. The most affected targets are AMDGPU, AArch64, and RISCV. For RISCV, most changes appear to be minor instruction reorderings; I don't see obvious regressions. For AArch64, some combining of ldr into ldp was affected, with two cases regressed and two improved. The deeper reason is that the machine scheduler could not cluster them well either before or after the change, and the load-combine algorithm that runs later is also not smart enough. For AMDGPU, some cases use more v_dual instructions while others regress; it seems less critical. Test `v_vselect_v32bf16` appears to get more buffer_load instructions claused.
Diffstat (limited to 'llvm/lib/CodeGen/MachineScheduler.cpp')
-rw-r--r--llvm/lib/CodeGen/MachineScheduler.cpp82
1 files changed, 52 insertions, 30 deletions
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 2cc7f54..7b4f29e 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -15,6 +15,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
@@ -943,8 +944,6 @@ void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
if (SuccEdge->isWeak()) {
--SuccSU->WeakPredsLeft;
- if (SuccEdge->isCluster())
- NextClusterSucc = SuccSU;
return;
}
#ifndef NDEBUG
@@ -967,12 +966,6 @@ void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
- // Reset the next successor, For example, we want to cluster A B C.
- // After A is picked, we will set B as next cluster succ, but if we pick
- // D instead of B after A, then we need to reset the next cluster succ because
- // we have decided to not pick the cluster candidate B during pickNode().
- // Leaving B as the NextClusterSucc just make things messy.
- NextClusterSucc = nullptr;
for (SDep &Succ : SU->Succs)
releaseSucc(SU, &Succ);
}
@@ -986,8 +979,6 @@ void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
if (PredEdge->isWeak()) {
--PredSU->WeakSuccsLeft;
- if (PredEdge->isCluster())
- NextClusterPred = PredSU;
return;
}
#ifndef NDEBUG
@@ -1010,7 +1001,6 @@ void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
- NextClusterPred = nullptr;
for (SDep &Pred : SU->Preds)
releasePred(SU, &Pred);
}
@@ -1184,11 +1174,8 @@ findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
}
/// Identify DAG roots and setup scheduler queues.
-void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
- ArrayRef<SUnit*> BotRoots) {
- NextClusterSucc = nullptr;
- NextClusterPred = nullptr;
-
+void ScheduleDAGMI::initQueues(ArrayRef<SUnit *> TopRoots,
+ ArrayRef<SUnit *> BotRoots) {
// Release all DAG roots for scheduling, not including EntrySU/ExitSU.
//
// Nodes with unreleased weak edges can still be roots.
@@ -2116,6 +2103,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
ScheduleDAGInstrs *DAG) {
// Keep track of the current cluster length and bytes for each SUnit.
DenseMap<unsigned, std::pair<unsigned, unsigned>> SUnit2ClusterInfo;
+ EquivalenceClasses<SUnit *> Clusters;
// At this point, `MemOpRecords` array must hold atleast two mem ops. Try to
// cluster mem ops collected within `MemOpRecords` array.
@@ -2155,6 +2143,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
SUnit *SUa = MemOpa.SU;
SUnit *SUb = MemOpb.SU;
+
if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
std::swap(SUa, SUb);
@@ -2162,6 +2151,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
continue;
+ Clusters.unionSets(SUa, SUb);
LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
<< SUb->NodeNum << ")\n");
++NumClustered;
@@ -2201,6 +2191,21 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
<< ", Curr cluster bytes: " << CurrentClusterBytes
<< "\n");
}
+
+ // Add cluster group information.
+ // Iterate over all of the equivalence sets.
+ auto &AllClusters = DAG->getClusters();
+ for (const EquivalenceClasses<SUnit *>::ECValue *I : Clusters) {
+ if (!I->isLeader())
+ continue;
+ ClusterInfo Group;
+ unsigned ClusterIdx = AllClusters.size();
+ for (SUnit *MemberI : Clusters.members(*I)) {
+ MemberI->ParentClusterIdx = ClusterIdx;
+ Group.insert(MemberI);
+ }
+ AllClusters.push_back(Group);
+ }
}
void BaseMemOpClusterMutation::collectMemOpRecords(
@@ -3688,6 +3693,9 @@ void GenericScheduler::initialize(ScheduleDAGMI *dag) {
}
TopCand.SU = nullptr;
BotCand.SU = nullptr;
+
+ TopCluster = nullptr;
+ BotCluster = nullptr;
}
/// Initialize the per-region scheduling policy.
@@ -3997,13 +4005,11 @@ bool GenericScheduler::tryCandidate(SchedCandidate &Cand,
// This is a best effort to set things up for a post-RA pass. Optimizations
// like generating loads of multiple registers should ideally be done within
// the scheduler pass by combining the loads during DAG postprocessing.
- const SUnit *CandNextClusterSU =
- Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
- const SUnit *TryCandNextClusterSU =
- TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
- if (tryGreater(TryCand.SU == TryCandNextClusterSU,
- Cand.SU == CandNextClusterSU,
- TryCand, Cand, Cluster))
+ const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
+ const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
+ if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
+ CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+ Cluster))
return TryCand.Reason != NoCand;
if (SameBoundary) {
@@ -4262,11 +4268,25 @@ void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
if (IsTopNode) {
SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
+ TopCluster = DAG->getCluster(SU->ParentClusterIdx);
+ LLVM_DEBUG(if (TopCluster) {
+ dbgs() << " Top Cluster: ";
+ for (auto *N : *TopCluster)
+ dbgs() << N->NodeNum << '\t';
+ dbgs() << '\n';
+ });
Top.bumpNode(SU);
if (SU->hasPhysRegUses)
reschedulePhysReg(SU, true);
} else {
SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
+ BotCluster = DAG->getCluster(SU->ParentClusterIdx);
+ LLVM_DEBUG(if (BotCluster) {
+ dbgs() << " Bot Cluster: ";
+ for (auto *N : *BotCluster)
+ dbgs() << N->NodeNum << '\t';
+ dbgs() << '\n';
+ });
Bot.bumpNode(SU);
if (SU->hasPhysRegDefs)
reschedulePhysReg(SU, false);
@@ -4303,6 +4323,8 @@ void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
if (!Bot.HazardRec) {
Bot.HazardRec = DAG->TII->CreateTargetMIHazardRecognizer(Itin, DAG);
}
+ TopCluster = nullptr;
+ BotCluster = nullptr;
}
void PostGenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
@@ -4367,14 +4389,12 @@ bool PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
return TryCand.Reason != NoCand;
// Keep clustered nodes together.
- const SUnit *CandNextClusterSU =
- Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
- const SUnit *TryCandNextClusterSU =
- TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
- if (tryGreater(TryCand.SU == TryCandNextClusterSU,
- Cand.SU == CandNextClusterSU, TryCand, Cand, Cluster))
+ const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
+ const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
+ if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
+ CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+ Cluster))
return TryCand.Reason != NoCand;
-
// Avoid critical resource consumption and balance the schedule.
if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
TryCand, Cand, ResourceReduce))
@@ -4571,9 +4591,11 @@ SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
if (IsTopNode) {
SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
+ TopCluster = DAG->getCluster(SU->ParentClusterIdx);
Top.bumpNode(SU);
} else {
SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
+ BotCluster = DAG->getCluster(SU->ParentClusterIdx);
Bot.bumpNode(SU);
}
}