aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/CodeGen/MachineInstr.cpp
diff options
context:
space:
mode:
authorJean-Michel Gorius <jean-michel.gorius@ens-rennes.fr>2020-05-22 21:26:46 +0200
committerJean-Michel Gorius <jean-michel.gorius@ens-rennes.fr>2020-05-22 21:26:46 +0200
commit65cd2c7a8015577fea15c861f41d2e4b5768961f (patch)
treec47d40f748bf23419c6ab754606e989ca43a6197 /llvm/lib/CodeGen/MachineInstr.cpp
parent8cb75745412e4bc9592d2409cc6cfa4a2940d9e7 (diff)
downloadllvm-65cd2c7a8015577fea15c861f41d2e4b5768961f.zip
llvm-65cd2c7a8015577fea15c861f41d2e4b5768961f.tar.gz
llvm-65cd2c7a8015577fea15c861f41d2e4b5768961f.tar.bz2
Revert "[CodeGen] Add support for multiple memory operands in MachineInstr::mayAlias"
This temporarily reverts commit 7019cea26dfef5882c96f278c32d0f9c49a5e516. It seems that, for some targets, there are instructions with a lot of memory operands (probably more than would be expected). This causes a lot of buildbots to time out and notify failed builds. While investigations are ongoing to find out why this happens, revert the changes.
Diffstat (limited to 'llvm/lib/CodeGen/MachineInstr.cpp')
-rw-r--r--llvm/lib/CodeGen/MachineInstr.cpp137
1 file changed, 65 insertions, 72 deletions
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 67c2438..7afa61f 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1228,88 +1228,81 @@ bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
return false;
- if (memoperands_empty() || Other.memoperands_empty())
+ // FIXME: Need to handle multiple memory operands to support all targets.
+ if (!hasOneMemOperand() || !Other.hasOneMemOperand())
return true;
- auto HasAlias = [&](const MachineMemOperand &MMOa,
- const MachineMemOperand &MMOb) {
- // The following interface to AA is fashioned after DAGCombiner::isAlias
- // and operates with MachineMemOperand offset with some important
- // assumptions:
- // - LLVM fundamentally assumes flat address spaces.
- // - MachineOperand offset can *only* result from legalization and
- // cannot affect queries other than the trivial case of overlap
- // checking.
- // - These offsets never wrap and never step outside
- // of allocated objects.
- // - There should never be any negative offsets here.
- //
- // FIXME: Modify API to hide this math from "user"
- // Even before we go to AA we can reason locally about some
- // memory objects. It can save compile time, and possibly catch some
- // corner cases not currently covered.
-
- int64_t OffsetA = MMOa.getOffset();
- int64_t OffsetB = MMOb.getOffset();
- int64_t MinOffset = std::min(OffsetA, OffsetB);
-
- uint64_t WidthA = MMOa.getSize();
- uint64_t WidthB = MMOb.getSize();
- bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
- bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;
-
- const Value *ValA = MMOa.getValue();
- const Value *ValB = MMOb.getValue();
- bool SameVal = (ValA && ValB && (ValA == ValB));
- if (!SameVal) {
- const PseudoSourceValue *PSVa = MMOa.getPseudoValue();
- const PseudoSourceValue *PSVb = MMOb.getPseudoValue();
- if (PSVa && ValB && !PSVa->mayAlias(&MFI))
- return false;
- if (PSVb && ValA && !PSVb->mayAlias(&MFI))
- return false;
- if (PSVa && PSVb && (PSVa == PSVb))
- SameVal = true;
- }
-
- if (SameVal) {
- if (!KnownWidthA || !KnownWidthB)
- return true;
- int64_t MaxOffset = std::max(OffsetA, OffsetB);
- int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
- return (MinOffset + LowWidth > MaxOffset);
- }
+ MachineMemOperand *MMOa = *memoperands_begin();
+ MachineMemOperand *MMOb = *Other.memoperands_begin();
+
+ // The following interface to AA is fashioned after DAGCombiner::isAlias
+ // and operates with MachineMemOperand offset with some important
+ // assumptions:
+ // - LLVM fundamentally assumes flat address spaces.
+ // - MachineOperand offset can *only* result from legalization and
+ // cannot affect queries other than the trivial case of overlap
+ // checking.
+ // - These offsets never wrap and never step outside
+ // of allocated objects.
+ // - There should never be any negative offsets here.
+ //
+ // FIXME: Modify API to hide this math from "user"
+ // Even before we go to AA we can reason locally about some
+ // memory objects. It can save compile time, and possibly catch some
+ // corner cases not currently covered.
+
+ int64_t OffsetA = MMOa->getOffset();
+ int64_t OffsetB = MMOb->getOffset();
+ int64_t MinOffset = std::min(OffsetA, OffsetB);
+
+ uint64_t WidthA = MMOa->getSize();
+ uint64_t WidthB = MMOb->getSize();
+ bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
+ bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;
+
+ const Value *ValA = MMOa->getValue();
+ const Value *ValB = MMOb->getValue();
+ bool SameVal = (ValA && ValB && (ValA == ValB));
+ if (!SameVal) {
+ const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
+ const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
+ if (PSVa && ValB && !PSVa->mayAlias(&MFI))
+ return false;
+ if (PSVb && ValA && !PSVb->mayAlias(&MFI))
+ return false;
+ if (PSVa && PSVb && (PSVa == PSVb))
+ SameVal = true;
+ }
- if (!AA)
+ if (SameVal) {
+ if (!KnownWidthA || !KnownWidthB)
return true;
+ int64_t MaxOffset = std::max(OffsetA, OffsetB);
+ int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
+ return (MinOffset + LowWidth > MaxOffset);
+ }
- if (!ValA || !ValB)
- return true;
+ if (!AA)
+ return true;
- assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
- assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
+ if (!ValA || !ValB)
+ return true;
- int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
- : MemoryLocation::UnknownSize;
- int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
- : MemoryLocation::UnknownSize;
+ assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
+ assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
- AliasResult AAResult =
- AA->alias(MemoryLocation(ValA, OverlapA,
- UseTBAA ? MMOa.getAAInfo() : AAMDNodes()),
- MemoryLocation(ValB, OverlapB,
- UseTBAA ? MMOb.getAAInfo() : AAMDNodes()));
+ int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
+ : MemoryLocation::UnknownSize;
+ int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
+ : MemoryLocation::UnknownSize;
- return (AAResult != NoAlias);
- };
+ AliasResult AAResult = AA->alias(
+ MemoryLocation(ValA, OverlapA,
+ UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
+ MemoryLocation(ValB, OverlapB,
+ UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
- for (auto &&MMOa : memoperands()) {
- for (auto &&MMOb : Other.memoperands()) {
- if (HasAlias(*MMOa, *MMOb))
- return true;
- }
- }
- return false;
+ return (AAResult != NoAlias);
}
/// hasOrderedMemoryRef - Return true if this instruction may have an ordered