author    | Nikita Popov <nikita.ppv@gmail.com> | 2021-09-12 17:30:40 +0200
committer | Nikita Popov <nikita.ppv@gmail.com> | 2021-09-12 17:43:37 +0200
commit    | 4189e5fe12b6f9b9036d8faffeab15ad6acd7d99 (patch)
tree      | 4b3b4d9a19d6cefacb69ba61ffd368c3f0087c4b /llvm/lib/CodeGen/CodeGenPrepare.cpp
parent    | 8e86c0e4f49bcfebd966fd5893d00b093df7f8ba (diff)
[CGP] Support opaque pointers in address mode fold
Rather than inspecting the pointer element type, use the access
type of the load/store/atomicrmw/cmpxchg instruction.
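To illustrate what "access type" means for each instruction kind, here is a minimal sketch; `getAccessType` is a hypothetical helper name, but every call inside it is the same LLVM API the patch uses in the diff below:

```cpp
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper (not part of the patch): returns the type actually
// read or written through the pointer operand of a memory instruction, or
// nullptr if the instruction is not one of the handled kinds.
static Type *getAccessType(Instruction *UserI) {
  if (auto *LI = dyn_cast<LoadInst>(UserI))
    return LI->getType();                        // Type of the loaded value.
  if (auto *SI = dyn_cast<StoreInst>(UserI))
    return SI->getValueOperand()->getType();     // Type of the stored value.
  if (auto *RMW = dyn_cast<AtomicRMWInst>(UserI))
    return RMW->getValOperand()->getType();      // Type of the RMW operand.
  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI))
    return CmpX->getCompareOperand()->getType(); // Type of the compared value.
  return nullptr;
}
```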
In the process of doing this, simplify the logic by storing the
address and access type in MemoryUses, rather than an (Instruction,
operand index) pair (which was previously used to re-fetch the address).
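A hedged sketch of that data-shape change for the load case (the `recordLoadUse` wrapper is hypothetical; the pair layout and calls mirror the diff below):

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Old shape: {instruction, operand index}, with the address re-fetched later
// via User->getOperand(OpNo). New shape: {address, accessed type}, recorded
// once at the point where the use is discovered.
static void recordLoadUse(Use &U, LoadInst *LI,
                          SmallVectorImpl<std::pair<Value *, Type *>> &Out) {
  // U.get() is the pointer being used as an address; LI->getType() is the
  // type the load actually reads through that pointer.
  Out.push_back({U.get(), LI->getType()});
}
```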
Diffstat (limited to 'llvm/lib/CodeGen/CodeGenPrepare.cpp')
-rw-r--r-- | llvm/lib/CodeGen/CodeGenPrepare.cpp | 40
1 file changed, 14 insertions(+), 26 deletions(-)
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index fba8f6d..46b60dc 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -4859,10 +4859,9 @@ static constexpr int MaxMemoryUsesToScan = 20;
 
 /// Recursively walk all the uses of I until we find a memory use.
 /// If we find an obviously non-foldable instruction, return true.
-/// Add the ultimately found memory instructions to MemoryUses.
+/// Add accessed addresses and types to MemoryUses.
 static bool FindAllMemoryUses(
-    Instruction *I,
-    SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
+    Instruction *I, SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses,
     SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
     const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
     BlockFrequencyInfo *BFI, int SeenInsts = 0) {
@@ -4883,31 +4882,28 @@ static bool FindAllMemoryUses(
     Instruction *UserI = cast<Instruction>(U.getUser());
 
     if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
-      MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
+      MemoryUses.push_back({U.get(), LI->getType()});
       continue;
     }
 
     if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
-      unsigned opNo = U.getOperandNo();
-      if (opNo != StoreInst::getPointerOperandIndex())
+      if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
         return true; // Storing addr, not into addr.
-      MemoryUses.push_back(std::make_pair(SI, opNo));
+      MemoryUses.push_back({U.get(), SI->getValueOperand()->getType()});
       continue;
     }
 
     if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
-      unsigned opNo = U.getOperandNo();
-      if (opNo != AtomicRMWInst::getPointerOperandIndex())
+      if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
         return true; // Storing addr, not into addr.
-      MemoryUses.push_back(std::make_pair(RMW, opNo));
+      MemoryUses.push_back({U.get(), RMW->getValOperand()->getType()});
       continue;
     }
 
     if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
-      unsigned opNo = U.getOperandNo();
-      if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
+      if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
         return true; // Storing addr, not into addr.
-      MemoryUses.push_back(std::make_pair(CmpX, opNo));
+      MemoryUses.push_back({U.get(), CmpX->getCompareOperand()->getType()});
       continue;
     }
 
@@ -5017,7 +5013,7 @@ isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
   // we can remove the addressing mode and effectively trade one live register
   // for another (at worst.)  In this context, folding an addressing mode into
   // the use is just a particularly nice way of sinking it.
-  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
+  SmallVector<std::pair<Value *, Type *>, 16> MemoryUses;
   SmallPtrSet<Instruction*, 16> ConsideredInsts;
   if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
                         PSI, BFI))
@@ -5033,18 +5029,10 @@ isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
   // growth since most architectures have some reasonable small and fast way to
   // compute an effective address.  (i.e LEA on x86)
   SmallVector<Instruction*, 32> MatchedAddrModeInsts;
-  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
-    Instruction *User = MemoryUses[i].first;
-    unsigned OpNo = MemoryUses[i].second;
-
-    // Get the access type of this use.  If the use isn't a pointer, we don't
-    // know what it accesses.
-    Value *Address = User->getOperand(OpNo);
-    PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
-    if (!AddrTy)
-      return false;
-    Type *AddressAccessTy = AddrTy->getElementType();
-    unsigned AS = AddrTy->getAddressSpace();
+  for (const std::pair<Value *, Type *> &Pair : MemoryUses) {
+    Value *Address = Pair.first;
+    Type *AddressAccessTy = Pair.second;
+    unsigned AS = Address->getType()->getPointerAddressSpace();
 
     // Do a match against the root of this address, ignoring profitability. This
     // will tell us if the addressing mode for the memory operation will
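For why the rewritten loop can drop the PointerType inspection entirely: the address space is part of the pointer type even when the pointee type is opaque, and the access type now arrives alongside the address. A self-contained sketch of that shape under stated assumptions (`matchAddressMode` and `allUsesFold` are hypothetical stand-ins, not the real matcher):

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Hypothetical stand-in for the real addressing-mode matcher; assumed to
// report whether Address folds into the access of AccessTy in space AS.
static bool matchAddressMode(Value *Address, Type *AccessTy, unsigned AS);

// Sketch of the profitability loop's new shape: no pointee-type inspection,
// just the recorded access type plus the pointer type's address space.
static bool allUsesFold(
    const SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses) {
  for (const std::pair<Value *, Type *> &Pair : MemoryUses) {
    Value *Address = Pair.first;         // Pointer operand of the access.
    Type *AddressAccessTy = Pair.second; // Type read or written through it.
    // getPointerAddressSpace() stays valid under opaque pointers, since the
    // address space survives on the pointer type when the pointee does not.
    unsigned AS = Address->getType()->getPointerAddressSpace();
    if (!matchAddressMode(Address, AddressAccessTy, AS))
      return false;
  }
  return true;
}
```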