author    David Green <david.green@arm.com>  2024-03-06 17:40:13 +0000
committer GitHub <noreply@github.com>        2024-03-06 17:40:13 +0000
commit    44be5a7fdc20a7f90d63dc18699a470e900bd3ba (patch)
tree      f99af6c6d0a725e730d0dc5708e2fe4348f4ed60 /llvm/lib/CodeGen/MachineScheduler.cpp
parent    5dc5bfbb70f96cc001dc1173cbecfadab2e48fbf (diff)
[Codegen] Make Width in getMemOperandsWithOffsetWidth a LocationSize. (#83875)
This is another part of #70452, making getMemOperandsWithOffsetWidth use a LocationSize for Width rather than the unsigned it currently uses. The advantage on its own is modest as long as getMemOperandsWithOffsetWidth reports known sizes, but when the values come from an MMO it lets us be accurate in case they are unknown (and, in the future, scalable).
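
A minimal sketch of what the new type buys, assuming the LocationSize API from llvm/Analysis/MemoryLocation.h (clusterBytes is a made-up helper for illustration, not from this patch):

    #include "llvm/Analysis/MemoryLocation.h"
    using namespace llvm;

    // A raw unsigned forces an unknown width into some magic byte count;
    // LocationSize lets the caller test for "unknown" directly.
    static unsigned clusterBytes(LocationSize A, LocationSize B) {
      if (!A.hasValue() || !B.hasValue())
        return 0; // unknown width: refuse to cluster in this sketch
      // getValue() yields a TypeSize; getKnownMinValue() also covers
      // the future scalable case mentioned above.
      return A.getValue().getKnownMinValue() +
             B.getValue().getKnownMinValue();
    }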
Diffstat (limited to 'llvm/lib/CodeGen/MachineScheduler.cpp')
-rw-r--r--  llvm/lib/CodeGen/MachineScheduler.cpp | 13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 3bbd126..0d5bf32 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1729,11 +1729,11 @@ class BaseMemOpClusterMutation : public ScheduleDAGMutation {
     SUnit *SU;
     SmallVector<const MachineOperand *, 4> BaseOps;
     int64_t Offset;
-    unsigned Width;
+    LocationSize Width;
     bool OffsetIsScalable;
 
     MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
-              int64_t Offset, bool OffsetIsScalable, unsigned Width)
+              int64_t Offset, bool OffsetIsScalable, LocationSize Width)
         : SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
           Width(Width), OffsetIsScalable(OffsetIsScalable) {}
@@ -1866,11 +1866,12 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
     auto MemOpb = MemOpRecords[NextIdx];
     unsigned ClusterLength = 2;
-    unsigned CurrentClusterBytes = MemOpa.Width + MemOpb.Width;
+    unsigned CurrentClusterBytes = MemOpa.Width.getValue().getKnownMinValue() +
+                                   MemOpb.Width.getValue().getKnownMinValue();
     if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
       ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
-      CurrentClusterBytes =
-          SUnit2ClusterInfo[MemOpa.SU->NodeNum].second + MemOpb.Width;
+      CurrentClusterBytes = SUnit2ClusterInfo[MemOpa.SU->NodeNum].second +
+                            MemOpb.Width.getValue().getKnownMinValue();
     }
 
     if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpa.Offset,
@@ -1940,7 +1941,7 @@ void BaseMemOpClusterMutation::collectMemOpRecords(
     SmallVector<const MachineOperand *, 4> BaseOps;
     int64_t Offset;
     bool OffsetIsScalable;
-    unsigned Width;
+    LocationSize Width = 0;
     if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
                                            OffsetIsScalable, Width, TRI)) {
       MemOpRecords.push_back(
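
For context, a sketch (illustrative, not part of this diff) of what the new out-parameter type lets a target's getMemOperandsWithOffsetWidth override report; LocationSize::precise and LocationSize::beforeOrAfterPointer are the LLVM constructors for known and unknown sizes:

    // Inside a hypothetical target hook:
    Width = LocationSize::precise(4);             // a known 4-byte access
    // ...or, when the size cannot be determined:
    Width = LocationSize::beforeOrAfterPointer(); // explicitly unknown

Previously an unknown width had to be smuggled through the unsigned as a sentinel byte count; callers such as the clustering code above can now ask Width.hasValue() before summing sizes.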