author     David Green <david.green@arm.com>    2024-03-17 18:15:56 +0000
committer  GitHub <noreply@github.com>          2024-03-17 18:15:56 +0000
commit     601e102bdb55e12a2f791e0d68fd6f81ffc21e21 (patch)
tree       e4f0e1c41496c6bbdc98059460c884790a2bfd87 /llvm/lib/CodeGen
parent     5143a1241362616840af826d18c067025dae1111 (diff)
[CodeGen] Use LocationSize for MMO getSize (#84751)
This is part of #70452, which changes the type used for the external interface of MMO from uint64_t to LocationSize. The constructors now take a LocationSize, converting ~UINT64_C(0) to LocationSize::beforeOrAfter(), and the getSize methods return a LocationSize.

This allows us to be more precise with unknown sizes, avoiding accidentally treating them as unsigned values, and in the future should allow us to add proper scalable vector support, though none of that is included in this patch. It should mostly be an NFC.

Global ISel is still expected to use the underlying LLT as it needs, and is not expected to see unknown sizes for generic operations. Most of the changes are hopefully fairly mechanical, adding a lot of getValue() calls and protecting them with hasValue() where needed.
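To illustrate the calling-convention change this patch applies throughout CodeGen, here is a minimal standalone sketch (not LLVM code; LocSize and the mayOverlap helpers below are hypothetical stand-ins for LocationSize and the MMO-based alias checks). Where a raw uint64_t with the ~UINT64_C(0) sentinel used to mean "unknown", callers now check hasValue() before unwrapping the size with getValue():

```cpp
// Standalone sketch of the before/after pattern; compile with -std=c++17.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <optional>

// Simplified stand-in for llvm::LocationSize.
struct LocSize {
  std::optional<uint64_t> Bytes; // empty means "unknown" (before/after pointer)
  static LocSize precise(uint64_t N) { return {N}; }
  static LocSize beforeOrAfterPointer() { return {std::nullopt}; }
  bool hasValue() const { return Bytes.has_value(); }
  uint64_t getValue() const { assert(hasValue()); return *Bytes; }
};

// Old style: a sentinel value encodes "unknown" and is easy to misuse as a
// huge unsigned width.
bool mayOverlapOld(uint64_t SizeA, int64_t OffA, uint64_t SizeB, int64_t OffB) {
  if (SizeA == ~UINT64_C(0) || SizeB == ~UINT64_C(0))
    return true; // conservatively treat as aliasing
  return OffA < OffB + (int64_t)SizeB && OffB < OffA + (int64_t)SizeA;
}

// New style: unknown sizes are explicit, and getValue() is only reached once
// hasValue() has been checked.
bool mayOverlapNew(LocSize SizeA, int64_t OffA, LocSize SizeB, int64_t OffB) {
  if (!SizeA.hasValue() || !SizeB.hasValue())
    return true; // conservatively treat as aliasing
  return OffA < OffB + (int64_t)SizeB.getValue() &&
         OffB < OffA + (int64_t)SizeA.getValue();
}

int main() {
  // Unknown width forces a conservative answer in both schemes.
  std::cout << mayOverlapOld(4, 0, ~UINT64_C(0), 16) << '\n'; // 1
  std::cout << mayOverlapNew(LocSize::precise(4), 0,
                             LocSize::beforeOrAfterPointer(), 16) << '\n'; // 1
  // Known, disjoint accesses do not overlap.
  std::cout << mayOverlapNew(LocSize::precise(4), 0,
                             LocSize::precise(4), 16) << '\n'; // 0
}
```

This mirrors the shape of the changes in, for example, DFAPacketizer.cpp and MachineInstr.cpp below, where alias queries now bail out conservatively when either memory operand reports an unknown size.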
Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r--  llvm/lib/CodeGen/DFAPacketizer.cpp                        7
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp           12
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp            7
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp           8
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp             19
-rw-r--r--  llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp    5
-rw-r--r--  llvm/lib/CodeGen/MIRVRegNamerUtils.cpp                    2
-rw-r--r--  llvm/lib/CodeGen/MachineFunction.cpp                     22
-rw-r--r--  llvm/lib/CodeGen/MachineInstr.cpp                        41
-rw-r--r--  llvm/lib/CodeGen/MachineOperand.cpp                      19
-rw-r--r--  llvm/lib/CodeGen/MachinePipeliner.cpp                    13
-rw-r--r--  llvm/lib/CodeGen/MachineStableHash.cpp                    2
-rw-r--r--  llvm/lib/CodeGen/MachineVerifier.cpp                      9
-rw-r--r--  llvm/lib/CodeGen/ModuloSchedule.cpp                       4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp             7
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp             4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp    45
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp           21
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp    46
-rw-r--r--  llvm/lib/CodeGen/TargetInstrInfo.cpp                      3
20 files changed, 160 insertions, 136 deletions
diff --git a/llvm/lib/CodeGen/DFAPacketizer.cpp b/llvm/lib/CodeGen/DFAPacketizer.cpp
index 48bb4a0..c16166a 100644
--- a/llvm/lib/CodeGen/DFAPacketizer.cpp
+++ b/llvm/lib/CodeGen/DFAPacketizer.cpp
@@ -252,12 +252,13 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
bool VLIWPacketizerList::alias(const MachineMemOperand &Op1,
const MachineMemOperand &Op2,
bool UseTBAA) const {
- if (!Op1.getValue() || !Op2.getValue())
+ if (!Op1.getValue() || !Op2.getValue() || !Op1.getSize().hasValue() ||
+ !Op2.getSize().hasValue())
return true;
int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
- int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset;
- int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset;
+ int64_t Overlapa = Op1.getSize().getValue() + Op1.getOffset() - MinOffset;
+ int64_t Overlapb = Op2.getSize().getValue() + Op2.getOffset() - MinOffset;
AliasResult AAResult =
AA->alias(MemoryLocation(Op1.getValue(), Overlapa,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index bee49db..d3f86af 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -770,12 +770,12 @@ bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
LLT RegTy = MRI.getType(LoadReg);
Register PtrReg = LoadMI->getPointerReg();
unsigned RegSize = RegTy.getSizeInBits();
- uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
+ LocationSize LoadSizeBits = LoadMI->getMemSizeInBits();
unsigned MaskSizeBits = MaskVal.countr_one();
// The mask may not be larger than the in-memory type, as it might cover sign
// extended bits
- if (MaskSizeBits > LoadSizeBits)
+ if (MaskSizeBits > LoadSizeBits.getValue())
return false;
// If the mask covers the whole destination register, there's nothing to
@@ -795,7 +795,8 @@ bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
// still adjust the opcode to indicate the high bit behavior.
if (LoadMI->isSimple())
MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
- else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
+ else if (LoadSizeBits.getValue() > MaskSizeBits ||
+ LoadSizeBits.getValue() == RegSize)
return false;
// TODO: Could check if it's legal with the reduced or original memory size.
@@ -860,7 +861,8 @@ bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
// If truncating more than the original extended value, abort.
auto LoadSizeBits = LoadMI->getMemSizeInBits();
- if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
+ if (TruncSrc &&
+ MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits.getValue())
return false;
if (LoadSizeBits == SizeInBits)
return true;
@@ -891,7 +893,7 @@ bool CombinerHelper::matchSextInRegOfLoad(
if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
return false;
- uint64_t MemBits = LoadDef->getMemSizeInBits();
+ uint64_t MemBits = LoadDef->getMemSizeInBits().getValue();
// If the sign extend extends from a narrower width than the load's width,
// then we can narrow the load width when we combine to a G_SEXTLOAD.
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
index 099bf45..2e2cc9a 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -415,7 +415,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
if (DstTy.isVector())
break;
// Everything above the retrieved bits is zero
- Known.Zero.setBitsFrom((*MI.memoperands_begin())->getSizeInBits());
+ Known.Zero.setBitsFrom(
+ (*MI.memoperands_begin())->getSizeInBits().getValue());
break;
}
case TargetOpcode::G_ASHR: {
@@ -666,7 +667,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
// e.g. i16->i32 = '17' bits known.
const MachineMemOperand *MMO = *MI.memoperands_begin();
- return TyBits - MMO->getSizeInBits() + 1;
+ return TyBits - MMO->getSizeInBits().getValue() + 1;
}
case TargetOpcode::G_ZEXTLOAD: {
// FIXME: We need an in-memory type representation.
@@ -675,7 +676,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
// e.g. i16->i32 = '16' bits known.
const MachineMemOperand *MMO = *MI.memoperands_begin();
- return TyBits - MMO->getSizeInBits();
+ return TyBits - MMO->getSizeInBits().getValue();
}
case TargetOpcode::G_TRUNC: {
Register Src = MI.getOperand(1).getReg();
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index bd3ff72..bc06204 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1317,7 +1317,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (DstTy.isVector())
return UnableToLegalize;
- if (8 * LoadMI.getMemSize() != DstTy.getSizeInBits()) {
+ if (8 * LoadMI.getMemSize().getValue() != DstTy.getSizeInBits()) {
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.buildLoad(TmpReg, LoadMI.getPointerReg(), LoadMI.getMMO());
MIRBuilder.buildAnyExt(DstReg, TmpReg);
@@ -1335,7 +1335,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
auto &MMO = LoadMI.getMMO();
- unsigned MemSize = MMO.getSizeInBits();
+ unsigned MemSize = MMO.getSizeInBits().getValue();
if (MemSize == NarrowSize) {
MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
@@ -1368,7 +1368,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (SrcTy.isVector() && LeftoverBits != 0)
return UnableToLegalize;
- if (8 * StoreMI.getMemSize() != SrcTy.getSizeInBits()) {
+ if (8 * StoreMI.getMemSize().getValue() != SrcTy.getSizeInBits()) {
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.buildTrunc(TmpReg, SrcReg);
MIRBuilder.buildStore(TmpReg, StoreMI.getPointerReg(), StoreMI.getMMO());
@@ -4456,7 +4456,7 @@ LegalizerHelper::reduceLoadStoreWidth(GLoadStore &LdStMI, unsigned TypeIdx,
LLT ValTy = MRI.getType(ValReg);
// FIXME: Do we need a distinct NarrowMemory legalize action?
- if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize()) {
+ if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize().getValue()) {
LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n");
return UnableToLegalize;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
index b5c9d3e..9fc8ecd 100644
--- a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
@@ -117,12 +117,8 @@ bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
if (!BasePtr0.BaseReg.isValid() || !BasePtr1.BaseReg.isValid())
return false;
- LocationSize Size1 = LdSt1->getMemSize() != MemoryLocation::UnknownSize
- ? LdSt1->getMemSize()
- : LocationSize::beforeOrAfterPointer();
- LocationSize Size2 = LdSt2->getMemSize() != MemoryLocation::UnknownSize
- ? LdSt2->getMemSize()
- : LocationSize::beforeOrAfterPointer();
+ LocationSize Size1 = LdSt1->getMemSize();
+ LocationSize Size2 = LdSt2->getMemSize();
int64_t PtrDiff;
if (BasePtr0.BaseReg == BasePtr1.BaseReg) {
@@ -214,14 +210,9 @@ bool GISelAddressing::instMayAlias(const MachineInstr &MI,
Offset = 0;
}
- TypeSize Size = LS->getMMO().getMemoryType().getSizeInBytes();
- return {LS->isVolatile(),
- LS->isAtomic(),
- BaseReg,
- Offset /*base offset*/,
- Size.isScalable() ? LocationSize::beforeOrAfterPointer()
- : LocationSize::precise(Size),
- &LS->getMMO()};
+ LocationSize Size = LS->getMMO().getSize();
+ return {LS->isVolatile(), LS->isAtomic(), BaseReg,
+ Offset /*base offset*/, Size, &LS->getMMO()};
}
// FIXME: support recognizing lifetime instructions.
// Default.
diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
index cfc8c28..481d9e3 100644
--- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
+++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -1356,10 +1356,11 @@ InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) {
// from the stack at some point. Happily the memory operand will tell us
// the size written to the stack.
auto *MemOperand = *MI.memoperands_begin();
- unsigned SizeInBits = MemOperand->getSizeInBits();
+ LocationSize SizeInBits = MemOperand->getSizeInBits();
+ assert(SizeInBits.hasValue() && "Expected to find a valid size!");
// Find that position in the stack indexes we're tracking.
- auto IdxIt = MTracker->StackSlotIdxes.find({SizeInBits, 0});
+ auto IdxIt = MTracker->StackSlotIdxes.find({SizeInBits.getValue(), 0});
if (IdxIt == MTracker->StackSlotIdxes.end())
// That index is not tracked. This is suprising, and unlikely to ever
// occur, but the safe action is to indicate the variable is optimised out.
diff --git a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
index 812d579..ccfc456 100644
--- a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
+++ b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
@@ -123,7 +123,7 @@ std::string VRegRenamer::getInstructionOpcodeHash(MachineInstr &MI) {
llvm::transform(MI.uses(), std::back_inserter(MIOperands), GetHashableMO);
for (const auto *Op : MI.memoperands()) {
- MIOperands.push_back((unsigned)Op->getSize());
+ MIOperands.push_back((unsigned)Op->getSize().getValue());
MIOperands.push_back((unsigned)Op->getFlags());
MIOperands.push_back((unsigned)Op->getOffset());
MIOperands.push_back((unsigned)Op->getSuccessOrdering());
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index 323f1a6..ad53214 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -484,13 +484,17 @@ void MachineFunction::deleteMachineBasicBlock(MachineBasicBlock *MBB) {
}
MachineMemOperand *MachineFunction::getMachineMemOperand(
- MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
- Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
+ MachinePointerInfo PtrInfo, MachineMemOperand::Flags F, LocationSize Size,
+ Align BaseAlignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
SyncScope::ID SSID, AtomicOrdering Ordering,
AtomicOrdering FailureOrdering) {
+ assert((!Size.hasValue() ||
+ Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
+ "Unexpected an unknown size to be represented using "
+ "LocationSize::beforeOrAfter()");
return new (Allocator)
- MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
- SSID, Ordering, FailureOrdering);
+ MachineMemOperand(PtrInfo, F, Size, BaseAlignment, AAInfo, Ranges, SSID,
+ Ordering, FailureOrdering);
}
MachineMemOperand *MachineFunction::getMachineMemOperand(
@@ -503,8 +507,14 @@ MachineMemOperand *MachineFunction::getMachineMemOperand(
Ordering, FailureOrdering);
}
-MachineMemOperand *MachineFunction::getMachineMemOperand(
- const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, uint64_t Size) {
+MachineMemOperand *
+MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
+ const MachinePointerInfo &PtrInfo,
+ LocationSize Size) {
+ assert((!Size.hasValue() ||
+ Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
+ "Unexpected an unknown size to be represented using "
+ "LocationSize::beforeOrAfter()");
return new (Allocator)
MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
AAMDNodes(), nullptr, MMO->getSyncScopeID(),
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 6654e1d6..fe2f9cc 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1302,10 +1302,10 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
int64_t OffsetB = MMOb->getOffset();
int64_t MinOffset = std::min(OffsetA, OffsetB);
- uint64_t WidthA = MMOa->getSize();
- uint64_t WidthB = MMOb->getSize();
- bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
- bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;
+ LocationSize WidthA = MMOa->getSize();
+ LocationSize WidthB = MMOb->getSize();
+ bool KnownWidthA = WidthA.hasValue();
+ bool KnownWidthB = WidthB.hasValue();
const Value *ValA = MMOa->getValue();
const Value *ValB = MMOb->getValue();
@@ -1325,8 +1325,8 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
if (!KnownWidthA || !KnownWidthB)
return true;
int64_t MaxOffset = std::max(OffsetA, OffsetB);
- int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
- return (MinOffset + LowWidth > MaxOffset);
+ LocationSize LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
+ return (MinOffset + (int)LowWidth.getValue() > MaxOffset);
}
if (!AA)
@@ -1338,10 +1338,10 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
- int64_t OverlapA =
- KnownWidthA ? WidthA + OffsetA - MinOffset : MemoryLocation::UnknownSize;
- int64_t OverlapB =
- KnownWidthB ? WidthB + OffsetB - MinOffset : MemoryLocation::UnknownSize;
+ int64_t OverlapA = KnownWidthA ? WidthA.getValue() + OffsetA - MinOffset
+ : MemoryLocation::UnknownSize;
+ int64_t OverlapB = KnownWidthB ? WidthB.getValue() + OffsetB - MinOffset
+ : MemoryLocation::UnknownSize;
return !AA->isNoAlias(
MemoryLocation(ValA, OverlapA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
@@ -2357,15 +2357,16 @@ using MMOList = SmallVector<const MachineMemOperand *, 2>;
static LocationSize getSpillSlotSize(const MMOList &Accesses,
const MachineFrameInfo &MFI) {
uint64_t Size = 0;
- for (const auto *A : Accesses)
+ for (const auto *A : Accesses) {
if (MFI.isSpillSlotObjectIndex(
cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
->getFrameIndex())) {
- uint64_t S = A->getSize();
- if (S == ~UINT64_C(0))
+ LocationSize S = A->getSize();
+ if (!S.hasValue())
return LocationSize::beforeOrAfterPointer();
- Size += S;
+ Size += S.getValue();
}
+ }
return Size;
}
@@ -2374,10 +2375,8 @@ MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
int FI;
if (TII->isStoreToStackSlotPostFE(*this, FI)) {
const MachineFrameInfo &MFI = getMF()->getFrameInfo();
- if (MFI.isSpillSlotObjectIndex(FI)) {
- uint64_t Size = (*memoperands_begin())->getSize();
- return Size == ~UINT64_C(0) ? LocationSize::beforeOrAfterPointer() : Size;
- }
+ if (MFI.isSpillSlotObjectIndex(FI))
+ return (*memoperands_begin())->getSize();
}
return std::nullopt;
}
@@ -2395,10 +2394,8 @@ MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
int FI;
if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
const MachineFrameInfo &MFI = getMF()->getFrameInfo();
- if (MFI.isSpillSlotObjectIndex(FI)) {
- uint64_t Size = (*memoperands_begin())->getSize();
- return Size == ~UINT64_C(0) ? LocationSize::beforeOrAfterPointer() : Size;
- }
+ if (MFI.isSpillSlotObjectIndex(FI))
+ return (*memoperands_begin())->getSize();
}
return std::nullopt;
}
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index c7c0a1c..937ca53 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1101,24 +1101,26 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
assert(getFailureOrdering() == FailureOrdering && "Value truncated");
}
-MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
- uint64_t s, Align a,
+MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags F,
+ LocationSize TS, Align BaseAlignment,
const AAMDNodes &AAInfo,
const MDNode *Ranges, SyncScope::ID SSID,
AtomicOrdering Ordering,
AtomicOrdering FailureOrdering)
- : MachineMemOperand(ptrinfo, f,
- s == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * s), a,
- AAInfo, Ranges, SSID, Ordering, FailureOrdering) {}
+ : MachineMemOperand(ptrinfo, F,
+ !TS.hasValue() || TS.isScalable()
+ ? LLT()
+ : LLT::scalar(8 * TS.getValue().getKnownMinValue()),
+ BaseAlignment, AAInfo, Ranges, SSID, Ordering,
+ FailureOrdering) {}
void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
// The Value and Offset may differ due to CSE. But the flags and size
// should be the same.
assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
- assert((MMO->getSize() == ~UINT64_C(0) || getSize() == ~UINT64_C(0) ||
+ assert((!MMO->getSize().hasValue() || !getSize().hasValue() ||
MMO->getSize() == getSize()) &&
"Size mismatch!");
-
if (MMO->getBaseAlign() >= getBaseAlign()) {
// Update the alignment value.
BaseAlign = MMO->getBaseAlign();
@@ -1240,7 +1242,8 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
<< "unknown-address";
}
MachineOperand::printOperandOffset(OS, getOffset());
- if (getSize() > 0 && getAlign() != getSize())
+ if (!getSize().hasValue() ||
+ getAlign() != getSize().getValue().getKnownMinValue())
OS << ", align " << getAlign().value();
if (getAlign() != getBaseAlign())
OS << ", basealign " << getBaseAlign().value();
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index d8cb681..eb42a78 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -2732,19 +2732,20 @@ bool SwingSchedulerDAG::isLoopCarriedDep(SUnit *Source, const SDep &Dep,
if (!LoopDefS || !TII->getIncrementValue(*LoopDefS, D))
return true;
- uint64_t AccessSizeS = (*SI->memoperands_begin())->getSize();
- uint64_t AccessSizeD = (*DI->memoperands_begin())->getSize();
+ LocationSize AccessSizeS = (*SI->memoperands_begin())->getSize();
+ LocationSize AccessSizeD = (*DI->memoperands_begin())->getSize();
// This is the main test, which checks the offset values and the loop
// increment value to determine if the accesses may be loop carried.
- if (AccessSizeS == MemoryLocation::UnknownSize ||
- AccessSizeD == MemoryLocation::UnknownSize)
+ if (!AccessSizeS.hasValue() || !AccessSizeD.hasValue())
return true;
- if (DeltaS != DeltaD || DeltaS < AccessSizeS || DeltaD < AccessSizeD)
+ if (DeltaS != DeltaD || DeltaS < AccessSizeS.getValue() ||
+ DeltaD < AccessSizeD.getValue())
return true;
- return (OffsetS + (int64_t)AccessSizeS < OffsetD + (int64_t)AccessSizeD);
+ return (OffsetS + (int64_t)AccessSizeS.getValue() <
+ OffsetD + (int64_t)AccessSizeD.getValue());
}
void SwingSchedulerDAG::postProcessDAG() {
diff --git a/llvm/lib/CodeGen/MachineStableHash.cpp b/llvm/lib/CodeGen/MachineStableHash.cpp
index 1cd9047..5abfbd5 100644
--- a/llvm/lib/CodeGen/MachineStableHash.cpp
+++ b/llvm/lib/CodeGen/MachineStableHash.cpp
@@ -200,7 +200,7 @@ stable_hash llvm::stableHashValue(const MachineInstr &MI, bool HashVRegs,
for (const auto *Op : MI.memoperands()) {
if (!HashMemOperands)
break;
- HashComponents.push_back(static_cast<unsigned>(Op->getSize()));
+ HashComponents.push_back(static_cast<unsigned>(Op->getSize().getValue()));
HashComponents.push_back(static_cast<unsigned>(Op->getFlags()));
HashComponents.push_back(static_cast<unsigned>(Op->getOffset()));
HashComponents.push_back(static_cast<unsigned>(Op->getSuccessOrdering()));
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index c2d6dd3..c69d36f 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1195,13 +1195,16 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
const MachineMemOperand &MMO = **MI->memoperands_begin();
if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
- if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
+ if (TypeSize::isKnownGE(MMO.getSizeInBits().getValue(),
+ ValTy.getSizeInBits()))
report("Generic extload must have a narrower memory type", MI);
} else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
- if (MMO.getSize() > ValTy.getSizeInBytes())
+ if (TypeSize::isKnownGT(MMO.getSize().getValue(),
+ ValTy.getSizeInBytes()))
report("load memory size cannot exceed result size", MI);
} else if (MI->getOpcode() == TargetOpcode::G_STORE) {
- if (ValTy.getSizeInBytes() < MMO.getSize())
+ if (TypeSize::isKnownLT(ValTy.getSizeInBytes(),
+ MMO.getSize().getValue()))
report("store memory size cannot exceed value size", MI);
}
diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp
index 0bef513..bdae94c 100644
--- a/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -979,8 +979,8 @@ void ModuloScheduleExpander::updateMemOperands(MachineInstr &NewMI,
NewMMOs.push_back(
MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize()));
} else {
- NewMMOs.push_back(
- MF.getMachineMemOperand(MMO, 0, MemoryLocation::UnknownSize));
+ NewMMOs.push_back(MF.getMachineMemOperand(
+ MMO, 0, LocationSize::beforeOrAfterPointer()));
}
}
NewMI.setMemRefs(MF, NewMMOs);
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b6a5925..5eb53d5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -24160,7 +24160,7 @@ static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
// TODO: Use "BaseIndexOffset" to make this more effective.
SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(), Offset, DL);
- uint64_t StoreSize = MemoryLocation::getSizeOrUnknown(VT.getStoreSize());
+ LocationSize StoreSize = MemoryLocation::getSizeOrUnknown(VT.getStoreSize());
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO;
if (Offset.isScalable()) {
@@ -27805,14 +27805,13 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
: (LSN->getAddressingMode() == ISD::PRE_DEC)
? -1 * C->getSExtValue()
: 0;
- uint64_t Size =
+ LocationSize Size =
MemoryLocation::getSizeOrUnknown(LSN->getMemoryVT().getStoreSize());
return {LSN->isVolatile(),
LSN->isAtomic(),
LSN->getBasePtr(),
Offset /*base offset*/,
- Size != ~UINT64_C(0) ? LocationSize::precise(Size)
- : LocationSize::beforeOrAfterPointer(),
+ Size,
LSN->getMemOperand()};
}
if (const auto *LN = cast<LifetimeSDNode>(N))
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 2dccc45..808e3c6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -267,7 +267,9 @@ static MachineMemOperand *getStackAlignedMMO(SDValue StackPtr,
auto &MFI = MF.getFrameInfo();
int FI = cast<FrameIndexSDNode>(StackPtr)->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
- uint64_t ObjectSize = isObjectScalable ? ~UINT64_C(0) : MFI.getObjectSize(FI);
+ LocationSize ObjectSize = isObjectScalable
+ ? LocationSize::beforeOrAfterPointer()
+ : LocationSize::precise(MFI.getObjectSize(FI));
return MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
ObjectSize, MFI.getObjectAlign(FI));
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 5fb9d8d..1f6e009 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1967,7 +1967,8 @@ void DAGTypeLegalizer::SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo,
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
LD->getPointerInfo(), MachineMemOperand::MOLoad,
- MemoryLocation::UnknownSize, Alignment, LD->getAAInfo(), LD->getRanges());
+ LocationSize::beforeOrAfterPointer(), Alignment, LD->getAAInfo(),
+ LD->getRanges());
Lo =
DAG.getLoadVP(LD->getAddressingMode(), ExtType, LoVT, dl, Ch, Ptr, Offset,
@@ -1990,8 +1991,8 @@ void DAGTypeLegalizer::SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo,
LoMemVT.getStoreSize().getFixedValue());
MMO = DAG.getMachineFunction().getMachineMemOperand(
- MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize, Alignment,
- LD->getAAInfo(), LD->getRanges());
+ MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
+ Alignment, LD->getAAInfo(), LD->getRanges());
Hi = DAG.getLoadVP(LD->getAddressingMode(), ExtType, HiVT, dl, Ch, Ptr,
Offset, MaskHi, EVLHi, HiMemVT, MMO,
@@ -2070,8 +2071,8 @@ void DAGTypeLegalizer::SplitVecRes_VP_STRIDED_LOAD(VPStridedLoadSDNode *SLD,
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(SLD->getPointerInfo().getAddrSpace()),
- MachineMemOperand::MOLoad, MemoryLocation::UnknownSize, Alignment,
- SLD->getAAInfo(), SLD->getRanges());
+ MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
+ Alignment, SLD->getAAInfo(), SLD->getRanges());
Hi = DAG.getStridedLoadVP(SLD->getAddressingMode(), SLD->getExtensionType(),
HiVT, DL, SLD->getChain(), Ptr, SLD->getOffset(),
@@ -2130,7 +2131,7 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MLD->getPointerInfo(), MachineMemOperand::MOLoad,
- MemoryLocation::UnknownSize, Alignment, MLD->getAAInfo(),
+ LocationSize::beforeOrAfterPointer(), Alignment, MLD->getAAInfo(),
MLD->getRanges());
Lo = DAG.getMaskedLoad(LoVT, dl, Ch, Ptr, Offset, MaskLo, PassThruLo, LoMemVT,
@@ -2154,8 +2155,8 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
LoMemVT.getStoreSize().getFixedValue());
MMO = DAG.getMachineFunction().getMachineMemOperand(
- MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize, Alignment,
- MLD->getAAInfo(), MLD->getRanges());
+ MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
+ Alignment, MLD->getAAInfo(), MLD->getRanges());
Hi = DAG.getMaskedLoad(HiVT, dl, Ch, Ptr, Offset, MaskHi, PassThruHi,
HiMemVT, MMO, MLD->getAddressingMode(), ExtType,
@@ -2217,7 +2218,8 @@ void DAGTypeLegalizer::SplitVecRes_Gather(MemSDNode *N, SDValue &Lo,
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
N->getPointerInfo(), MachineMemOperand::MOLoad,
- MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+ LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
+ N->getRanges());
if (auto *MGT = dyn_cast<MaskedGatherSDNode>(N)) {
SDValue PassThru = MGT->getPassThru();
@@ -2884,10 +2886,10 @@ void DAGTypeLegalizer::SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo,
auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
- PtrInfo, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
+ PtrInfo, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
Alignment);
MachineMemOperand *LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
- PtrInfo, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
+ PtrInfo, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
Alignment);
unsigned EltWidth = VT.getScalarSizeInBits() / 8;
@@ -3478,7 +3480,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
SDValue Lo, Hi;
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
N->getPointerInfo(), MachineMemOperand::MOStore,
- MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+ LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
+ N->getRanges());
Lo = DAG.getStoreVP(Ch, DL, DataLo, Ptr, Offset, MaskLo, EVLLo, LoMemVT, MMO,
N->getAddressingMode(), N->isTruncatingStore(),
@@ -3501,8 +3504,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
LoMemVT.getStoreSize().getFixedValue());
MMO = DAG.getMachineFunction().getMachineMemOperand(
- MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize, Alignment,
- N->getAAInfo(), N->getRanges());
+ MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
+ Alignment, N->getAAInfo(), N->getRanges());
Hi = DAG.getStoreVP(Ch, DL, DataHi, Ptr, Offset, MaskHi, EVLHi, HiMemVT, MMO,
N->getAddressingMode(), N->isTruncatingStore(),
@@ -3574,8 +3577,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STRIDED_STORE(VPStridedStoreSDNode *N,
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(N->getPointerInfo().getAddrSpace()),
- MachineMemOperand::MOStore, MemoryLocation::UnknownSize, Alignment,
- N->getAAInfo(), N->getRanges());
+ MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
+ Alignment, N->getAAInfo(), N->getRanges());
SDValue Hi = DAG.getStridedStoreVP(
N->getChain(), DL, HiData, Ptr, N->getOffset(), N->getStride(), HiMask,
@@ -3626,7 +3629,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
SDValue Lo, Hi, Res;
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
N->getPointerInfo(), MachineMemOperand::MOStore,
- MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+ LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
+ N->getRanges());
Lo = DAG.getMaskedStore(Ch, DL, DataLo, Ptr, Offset, MaskLo, LoMemVT, MMO,
N->getAddressingMode(), N->isTruncatingStore(),
@@ -3651,8 +3655,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
LoMemVT.getStoreSize().getFixedValue());
MMO = DAG.getMachineFunction().getMachineMemOperand(
- MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize, Alignment,
- N->getAAInfo(), N->getRanges());
+ MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
+ Alignment, N->getAAInfo(), N->getRanges());
Hi = DAG.getMaskedStore(Ch, DL, DataHi, Ptr, Offset, MaskHi, HiMemVT, MMO,
N->getAddressingMode(), N->isTruncatingStore(),
@@ -3716,7 +3720,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_Scatter(MemSDNode *N, unsigned OpNo) {
SDValue Lo;
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
N->getPointerInfo(), MachineMemOperand::MOStore,
- MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+ LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
+ N->getRanges());
if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Ops.Scale};
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 19f9354..2670f48 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8392,11 +8392,12 @@ SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
SDValue SelectionDAG::getMemIntrinsicNode(
unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
- MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
- if (!Size && MemVT.isScalableVector())
+ MachineMemOperand::Flags Flags, LocationSize Size,
+ const AAMDNodes &AAInfo) {
+ if (Size.hasValue() && MemVT.isScalableVector())
Size = MemoryLocation::UnknownSize;
- else if (!Size)
- Size = MemVT.getStoreSize();
+ else if (Size.hasValue() && !Size.getValue())
+ Size = LocationSize::precise(MemVT.getStoreSize());
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO =
@@ -8558,7 +8559,7 @@ SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
- uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
+ LocationSize Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
Alignment, AAInfo, Ranges);
@@ -8679,7 +8680,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
MachineFunction &MF = getMachineFunction();
- uint64_t Size =
+ LocationSize Size =
MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
@@ -8828,7 +8829,7 @@ SDValue SelectionDAG::getLoadVP(
if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
- uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
+ LocationSize Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
Alignment, AAInfo, Ranges);
@@ -11718,8 +11719,10 @@ MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
// the MMO. This is because the MMO might indicate only a possible address
// range instead of specifying the affected memory addresses precisely.
// TODO: Make MachineMemOperands aware of scalable vectors.
- assert(memvt.getStoreSize().getKnownMinValue() <= MMO->getSize() &&
- "Size mismatch!");
+ assert(
+ (!MMO->getType().isValid() ||
+ memvt.getStoreSize().getKnownMinValue() <= MMO->getSize().getValue()) &&
+ "Size mismatch!");
}
/// Profile - Gather unique data for the node.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index b6a35f7..f1923a6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3037,7 +3037,8 @@ static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
MachineMemOperand::MODereferenceable;
MachineMemOperand *MemRef = MF.getMachineMemOperand(
- MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
+ MPInfo, Flags, LocationSize::precise(PtrTy.getSizeInBits() / 8),
+ DAG.getEVTAlign(PtrTy));
DAG.setNodeMemRefs(Node, {MemRef});
}
if (PtrTy != PtrMemTy)
@@ -4753,7 +4754,7 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
- MemoryLocation::UnknownSize, Alignment, I.getAAMetadata());
+ LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
SDValue StoreNode =
DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
ISD::UNINDEXED, false /* Truncating */, IsCompressing);
@@ -4925,7 +4926,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
- MemoryLocation::UnknownSize, Alignment, AAInfo, Ranges);
+ LocationSize::beforeOrAfterPointer(), Alignment, AAInfo, Ranges);
SDValue Load =
DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
@@ -5003,9 +5004,9 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(
- MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
- DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
- FailureOrdering);
+ MachinePointerInfo(I.getPointerOperand()), Flags,
+ LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
+ AAMDNodes(), nullptr, SSID, SuccessOrdering, FailureOrdering);
SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
dl, MemVT, VTs, InChain,
@@ -5057,8 +5058,9 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(
- MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
- DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
+ MachinePointerInfo(I.getPointerOperand()), Flags,
+ LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
+ AAMDNodes(), nullptr, SSID, Ordering);
SDValue L =
DAG.getAtomic(NT, dl, MemVT, InChain,
@@ -5103,8 +5105,9 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
- MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
- I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
+ MachinePointerInfo(I.getPointerOperand()), Flags,
+ LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
+ nullptr, SSID, Order);
InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
@@ -5140,8 +5143,9 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(
- MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
- I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
+ MachinePointerInfo(I.getPointerOperand()), Flags,
+ LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
+ nullptr, SSID, Ordering);
SDValue Val = getValue(I.getValueOperand());
if (Val.getValueType() != MemVT)
@@ -6904,7 +6908,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
auto MPI =
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
- MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
+ MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
TempAlign);
Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
@@ -6933,7 +6937,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
MachineMemOperand::MOStore);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
- MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
+ MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
TempAlign);
Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
}
@@ -8087,7 +8091,7 @@ void SelectionDAGBuilder::visitVPLoad(
SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
- MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
+ LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
MMO, false /*IsExpanding */);
if (AddToChain)
@@ -8110,8 +8114,8 @@ void SelectionDAGBuilder::visitVPGather(
unsigned AS =
PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
- MachinePointerInfo(AS), MachineMemOperand::MOLoad,
- MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
+ MachinePointerInfo(AS), MachineMemOperand::MOLoad,
+ LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
SDValue Base, Index, Scale;
ISD::MemIndexType IndexType;
bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
@@ -8151,7 +8155,7 @@ void SelectionDAGBuilder::visitVPStore(
SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
- MemoryLocation::UnknownSize, *Alignment, AAInfo);
+ LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
/* IsTruncating */ false, /*IsCompressing*/ false);
@@ -8174,7 +8178,7 @@ void SelectionDAGBuilder::visitVPScatter(
PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(AS), MachineMemOperand::MOStore,
- MemoryLocation::UnknownSize, *Alignment, AAInfo);
+ LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
SDValue Base, Index, Scale;
ISD::MemIndexType IndexType;
bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
@@ -8217,7 +8221,7 @@ void SelectionDAGBuilder::visitVPStridedLoad(
unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(AS), MachineMemOperand::MOLoad,
- MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
+ LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
OpValues[2], OpValues[3], MMO,
@@ -8240,7 +8244,7 @@ void SelectionDAGBuilder::visitVPStridedStore(
unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(AS), MachineMemOperand::MOStore,
- MemoryLocation::UnknownSize, *Alignment, AAInfo);
+ LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
SDValue ST = DAG.getStridedStoreVP(
getMemoryRoot(), DL, OpValues[0], OpValues[1],
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 5b02c1b..9fbd516 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1554,7 +1554,8 @@ TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
SmallVector<uint64_t, 8> Ops;
DIExpression::appendOffset(Ops, Offset);
Ops.push_back(dwarf::DW_OP_deref_size);
- Ops.push_back(MMO->getSize());
+ Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
+ : ~UINT64_C(0));
Expr = DIExpression::prependOpcodes(Expr, Ops);
return ParamLoadedValue(*BaseOp, Expr);
}