path: root/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
author    David Green <david.green@arm.com>    2021-02-24 08:46:15 +0000
committer David Green <david.green@arm.com>    2021-02-24 08:46:15 +0000
commit    03892a27d6b89df92def3239338cafbbfa541dbd (patch)
tree      6c14b59068e5107f2f57f6bcfe2416b006dad849 /llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
parent    11a53f47fb3448c747e18519f3799c919ec65aa1 (diff)
[ARM] Expand the range of allowed post-incs in load/store optimizer
Currently the load/store optimizer will only fold in increments of the same size as the load/store. This patch expands that to any legal immediate for the post-inc instruction.

This is a recommit of 3b34b06fc5908b with correctness fixes and extra tests.

Differential Revision: https://reviews.llvm.org/D95885
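
As an illustration only (not part of the commit), here is a minimal C++ sketch of the new post-indexed opcode selection this patch introduces in MergeBaseUpdateLoadStore. The selectPostIncOpcode wrapper name is hypothetical; the helpers it calls (getPostIndexedLoadStoreOpcode, isLegalAddressImm) are the ones used in the diff below, so the snippet only makes sense inside ARMLoadStoreOptimizer.cpp:

  // Hypothetical wrapper summarising the new selection logic: try the adding
  // post-indexed form first, then fall back to the subtracting form. AM5
  // (VLDR/VSTR) still requires the increment to equal the access size; other
  // addressing modes only need the offset to be a legal immediate for the
  // chosen post-indexed opcode.
  static unsigned selectPostIncOpcode(unsigned Opcode, int Offset, int Bytes,
                                      bool isAM5, const TargetInstrInfo *TII) {
    unsigned NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::add);
    if ((isAM5 && Offset != Bytes) ||
        (!isAM5 && !isLegalAddressImm(NewOpc, Offset, TII))) {
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
      if (isAM5 || !isLegalAddressImm(NewOpc, Offset, TII))
        return 0; // no legal post-indexed form; the caller bails out
    }
    return NewOpc;
  }

With this in place, an increment such as +8 after a 4-byte load can be folded so long as +8 is encodable in the post-indexed instruction, where previously only an increment matching the access size (+4 or -4) would have been accepted.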
Diffstat (limited to 'llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp')
-rw-r--r-- llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 25
1 file changed, 15 insertions, 10 deletions
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 5fe6180..28fe01c 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1502,12 +1502,16 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineInstr *MI) {
NewOpc = getPreIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
} else {
MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset, TRI);
- if (Offset == Bytes) {
- NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::add);
- } else if (!isAM5 && Offset == -Bytes) {
- NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
- } else
+ if (MergeInstr == MBB.end())
return false;
+
+ NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::add);
+ if ((isAM5 && Offset != Bytes) ||
+ (!isAM5 && !isLegalAddressImm(NewOpc, Offset, TII))) {
+ NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
+ if (isAM5 || !isLegalAddressImm(NewOpc, Offset, TII))
+ return false;
+ }
}
LLVM_DEBUG(dbgs() << " Erasing old increment: " << *MergeInstr);
MBB.erase(MergeInstr);
@@ -1546,7 +1550,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineInstr *MI) {
(void)MIB;
LLVM_DEBUG(dbgs() << " Added new instruction: " << *MIB);
} else {
- int Imm = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
+ int Imm = ARM_AM::getAM2Opc(AddSub, abs(Offset), ARM_AM::no_shift);
auto MIB =
BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
.addReg(Base, RegState::Define)
@@ -1576,7 +1580,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineInstr *MI) {
// the vestigal zero-reg offset register. When that's fixed, this clause
// can be removed entirely.
if (isAM2 && NewOpc == ARM::STR_POST_IMM) {
- int Imm = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
+ int Imm = ARM_AM::getAM2Opc(AddSub, abs(Offset), ARM_AM::no_shift);
// STR_PRE, STR_POST
auto MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc), Base)
.addReg(MO.getReg(), getKillRegState(MO.isKill()))
@@ -1633,9 +1637,10 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(MachineInstr &MI) const {
NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_PRE : ARM::t2STRD_PRE;
} else {
MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset, TRI);
- if (Offset == 8 || Offset == -8) {
- NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_POST : ARM::t2STRD_POST;
- } else
+ if (MergeInstr == MBB.end())
+ return false;
+ NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_POST : ARM::t2STRD_POST;
+ if (!isLegalAddressImm(NewOpc, Offset, TII))
return false;
}
LLVM_DEBUG(dbgs() << " Erasing old increment: " << *MergeInstr);