author     Harvin Iriawan <25712785+harviniriawan@users.noreply.github.com>   2024-03-23 12:56:25 +0000
committer  GitHub <noreply@github.com>                                        2024-03-23 12:56:25 +0000
commit     57146daeaaf366050dc913db910fcc2995a3e06d (patch)
tree       70fa80bc762ff3b997b1898ece11d920b313fbb3 /llvm/lib/CodeGen/MachineInstr.cpp
parent     f886dfed3ae6cf70827cedc8d8aefde6250a239b (diff)
[CodeGen] Update for scalable MemoryType in MMO (#70452)
Remove getSizeOrUnknown call when MachineMemOperand is created. For
Scalable TypeSize, the MemoryType created becomes a scalable_vector.

2 MMOs that have scalable memory access can then use the updated BasicAA
that understands scalable LocationSize.

Original Patch by Harvin Iriawan
Co-authored-by: David Green <david.green@arm.com>
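For readers unfamiliar with the overlap test this patch touches, the following is a minimal standalone sketch of the same interval logic for the fixed-width case, using plain integers instead of LLVM's LocationSize/TypeSize types. The function and variable names below are illustrative only, not LLVM API.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Simplified model of the known-width check in MemOperandsHaveAlias: two
// accesses to the same base value alias iff their byte ranges intersect.
// WidthA/WidthB stand in for LocationSize values that are known and
// non-scalable (the scalable case is handled conservatively by the patch).
static bool fixedWidthAccessesOverlap(int64_t OffsetA, int64_t WidthA,
                                      int64_t OffsetB, int64_t WidthB) {
  int64_t MinOffset = std::min(OffsetA, OffsetB);
  int64_t MaxOffset = std::max(OffsetA, OffsetB);
  int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
  // The lower access overlaps the higher one iff it reaches past its start.
  return MinOffset + LowWidth > MaxOffset;
}

int main() {
  assert(fixedWidthAccessesOverlap(0, 8, 4, 4));  // [0,8) overlaps [4,8)
  assert(!fixedWidthAccessesOverlap(0, 4, 4, 4)); // [0,4) and [4,8) are disjoint
  return 0;
}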
Diffstat (limited to 'llvm/lib/CodeGen/MachineInstr.cpp')
-rw-r--r--   llvm/lib/CodeGen/MachineInstr.cpp   37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index fe2f9cc..8102bb9 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1306,6 +1306,7 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
LocationSize WidthB = MMOb->getSize();
bool KnownWidthA = WidthA.hasValue();
bool KnownWidthB = WidthB.hasValue();
+ bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();
const Value *ValA = MMOa->getValue();
const Value *ValB = MMOb->getValue();
@@ -1321,12 +1322,14 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
SameVal = true;
}
- if (SameVal) {
+ if (SameVal && BothMMONonScalable) {
if (!KnownWidthA || !KnownWidthB)
return true;
int64_t MaxOffset = std::max(OffsetA, OffsetB);
- LocationSize LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
- return (MinOffset + (int)LowWidth.getValue() > MaxOffset);
+ int64_t LowWidth = (MinOffset == OffsetA)
+ ? WidthA.getValue().getKnownMinValue()
+ : WidthB.getValue().getKnownMinValue();
+ return (MinOffset + LowWidth > MaxOffset);
}
if (!AA)
@@ -1338,15 +1341,29 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
- int64_t OverlapA = KnownWidthA ? WidthA.getValue() + OffsetA - MinOffset
- : MemoryLocation::UnknownSize;
- int64_t OverlapB = KnownWidthB ? WidthB.getValue() + OffsetB - MinOffset
- : MemoryLocation::UnknownSize;
+ // If Scalable Location Size has non-zero offset, Width + Offset does not work
+ // at the moment
+ if ((WidthA.isScalable() && OffsetA > 0) ||
+ (WidthB.isScalable() && OffsetB > 0))
+ return true;
+
+ int64_t OverlapA =
+ KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
+ : MemoryLocation::UnknownSize;
+ int64_t OverlapB =
+ KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
+ : MemoryLocation::UnknownSize;
+
+ LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
+ ? WidthA
+ : LocationSize::precise(OverlapA);
+ LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
+ ? WidthB
+ : LocationSize::precise(OverlapB);
return !AA->isNoAlias(
- MemoryLocation(ValA, OverlapA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
- MemoryLocation(ValB, OverlapB,
- UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
+ MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
+ MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
}
bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
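One detail of the new hunk worth spelling out: the patch answers "may alias" whenever a scalable width sits at a non-zero offset, because the true access size is a runtime multiple (vscale) of the known minimum, so no fixed overlap extent can be formed. A rough standalone model of that reasoning follows; the types and names are illustrative stand-ins, not LLVM's classes.

#include <cstdint>
#include <iostream>
#include <optional>

// Illustrative stand-in for a scalable memory size: the real byte count is
// vscale * KnownMinBytes, where vscale is only known at run time.
struct StandInSize {
  int64_t KnownMinBytes;
  bool Scalable;
};

// Try to form a fixed overlap extent (bytes covered past MinOffset). For a
// scalable width at a non-zero offset there is no single fixed answer, so
// return "unknown" and let the caller conservatively report "may alias".
std::optional<int64_t> overlapExtent(StandInSize Width, int64_t Offset,
                                     int64_t MinOffset) {
  if (Width.Scalable && Offset > 0)
    return std::nullopt; // vscale * KnownMinBytes + Offset has no fixed value
  return Width.KnownMinBytes + Offset - MinOffset;
}

int main() {
  StandInSize Fixed{16, false}, Scalable{16, true};
  std::cout << *overlapExtent(Fixed, 8, 0) << "\n";               // prints 24
  std::cout << overlapExtent(Scalable, 8, 0).has_value() << "\n"; // prints 0: unknown, treat as may-alias
  return 0;
}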