| author | Michael Kuperstein <mkuper@google.com> | 2016-11-23 18:33:49 +0000 |
|---|---|---|
| committer | Michael Kuperstein <mkuper@google.com> | 2016-11-23 18:33:49 +0000 |
| commit | 47eb85a0033fd21764fe100e736c3ec54d4f741f (patch) | |
| tree | 87b3a0867fc7cfd6773183ffa1082de972f1bbc8 /llvm/lib/CodeGen/TargetInstrInfo.cpp | |
| parent | 3c3fe5d885b8772634a7571907400580873ab611 (diff) | |
[X86] Allow folding of stack reloads when loading a subreg of the spilled reg
We did not support subregs in InlineSpiller::foldMemoryOperand() because targets
may not deal with them correctly.
This adds a target hook to let the spiller know that a target can handle
subregs, and actually enables it for x86 for the case of stack slot reloads.
This fixes PR30832.
Differential Revision: https://reviews.llvm.org/D26521
llvm-svn: 287792
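The hook itself is declared in TargetInstrInfo.h and overridden for x86, so it does not appear in this file's diff below. As a minimal sketch of its shape, assuming the hook is named isSubregFoldable() (the name is not shown in this diff), with reduced stand-in classes rather than the real LLVM headers:

```cpp
// Minimal sketch, not the real LLVM headers: the hook name is an
// assumption, and the class bodies are reduced stand-ins.
class TargetInstrInfo {
public:
  virtual ~TargetInstrInfo() = default;
  // Conservative default: the spiller must not fold a spill slot into
  // an instruction operand that carries a subregister index.
  virtual bool isSubregFoldable() const { return false; }
};

class X86InstrInfo : public TargetInstrInfo {
public:
  // x86 stack-slot reloads can load just the subreg's bytes, so opt in.
  bool isSubregFoldable() const override { return true; }
};
```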
Diffstat (limited to 'llvm/lib/CodeGen/TargetInstrInfo.cpp')
-rw-r--r-- | llvm/lib/CodeGen/TargetInstrInfo.cpp | 29 |
1 file changed, 26 insertions(+), 3 deletions(-)
```diff
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 265b4bfa..01f91b9 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -515,6 +515,31 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
   assert(MBB && "foldMemoryOperand needs an inserted instruction");
   MachineFunction &MF = *MBB->getParent();
 
+  // If we're not folding a load into a subreg, the size of the load is the
+  // size of the spill slot. But if we are, we need to figure out what the
+  // actual load size is.
+  int64_t MemSize = 0;
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+  if (Flags & MachineMemOperand::MOStore) {
+    MemSize = MFI.getObjectSize(FI);
+  } else {
+    for (unsigned Idx : Ops) {
+      int64_t OpSize = MFI.getObjectSize(FI);
+
+      if (auto SubReg = MI.getOperand(Idx).getSubReg()) {
+        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
+        if (SubRegSize > 0 && !(SubRegSize % 8))
+          OpSize = SubRegSize / 8;
+      }
+
+      MemSize = std::max(MemSize, OpSize);
+    }
+  }
+
+  assert(MemSize && "Did not expect a zero-sized stack slot");
+
   MachineInstr *NewMI = nullptr;
 
   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
@@ -538,10 +563,9 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
     assert((!(Flags & MachineMemOperand::MOLoad) || NewMI->mayLoad()) &&
            "Folded a use to a non-load!");
 
-    const MachineFrameInfo &MFI = MF.getFrameInfo();
     assert(MFI.getObjectOffset(FI) != -1);
     MachineMemOperand *MMO = MF.getMachineMemOperand(
-        MachinePointerInfo::getFixedStack(MF, FI), Flags, MFI.getObjectSize(FI),
+        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
         MFI.getObjectAlignment(FI));
     NewMI->addMemOperand(MF, MMO);
 
@@ -558,7 +582,6 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
   const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
   MachineBasicBlock::iterator Pos = MI;
 
-  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
   if (Flags == MachineMemOperand::MOStore)
     storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
```
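To make the size computation concrete: getSubRegIdxSize() reports a subreg index's width in bits, and the new code narrows the memory operand to that width in bytes only when it is a whole number of bytes, otherwise keeping the full spill-slot size. A standalone sketch of that arithmetic (computeFoldedLoadSize is a hypothetical helper, not an LLVM API):

```cpp
#include <cstdint>

// Hypothetical stand-in for the per-operand logic above: SlotSizeBytes
// plays the role of MFI.getObjectSize(FI), and SubRegSizeBits the role
// of TRI->getSubRegIdxSize(SubReg) (0 meaning "no subreg index").
int64_t computeFoldedLoadSize(int64_t SlotSizeBytes, unsigned SubRegSizeBits) {
  int64_t OpSize = SlotSizeBytes;
  if (SubRegSizeBits > 0 && SubRegSizeBits % 8 == 0)
    OpSize = SubRegSizeBits / 8; // narrow the load to the subreg's bytes
  return OpSize;
}

int main() {
  // Reloading the 32-bit subreg of a value spilled to an 8-byte GR64
  // slot should record a 4-byte memory operand, not an 8-byte one.
  return computeFoldedLoadSize(8, 32) == 4 ? 0 : 1;
}
```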