author    Amara Emerson <aemerson@apple.com>  2020-06-09 15:14:04 -0700
committer Amara Emerson <aemerson@apple.com>  2020-06-09 16:47:58 -0700
commit    938cc573ee1ae7fd628da0c2a9a0bcbc36c451e7 (patch)
tree      60efd6f335dad0989a83ece1bff2f3bfebf5709f /llvm/lib
parent    641d5ac4d1965990fcf981f153369b038816cd16 (diff)
[AArch64][GlobalISel] Select G_ADD_LOW into a MOVaddr pseudo.
This ensures that we match SelectionDAG behaviour by waiting until the expand pseudos pass to generate ADRP + ADD pairs. Doing this at selection time for the G_ADD_LOW is fine because, by the time we get to selecting the G_ADD_LOW, previous attempts to fold it into loads/stores must have failed.

Differential Revision: https://reviews.llvm.org/D81512
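For context, here is a minimal C++ reproducer (not part of this commit, purely an illustrative sketch): taking the address of a global under the small code model is what gives rise to the ADRP + G_ADD_LOW pair in GlobalISel, and once the MOVaddr pseudo is expanded after scheduling, the result is the familiar adrp/add sequence.

    // Hypothetical example, not from the commit: compiling this for AArch64
    // (small code model, GlobalISel) materializes the address of 'g' as an
    // ADRP page address plus a low-12-bit offset.
    int g;

    int *addr_of_g() {
      // Expected to lower to roughly:
      //   adrp x0, g
      //   add  x0, x0, :lo12:g
      return &g;
    }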
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp | 25
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index e8137a8..49a7914 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -1919,9 +1919,28 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return selectBrJT(I, MRI);
case AArch64::G_ADD_LOW: {
- I.setDesc(TII.get(AArch64::ADDXri));
- I.addOperand(MachineOperand::CreateImm(0));
- return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+ // This op may have been separated from its ADRP companion by the localizer
+ // or some other code motion pass. Given that many CPUs will try to
+ // macro fuse these operations anyway, select this into a MOVaddr pseudo
+ // which will later be expanded into an ADRP+ADD pair after scheduling.
+ MachineInstr *BaseMI = MRI.getVRegDef(I.getOperand(1).getReg());
+ if (BaseMI->getOpcode() != AArch64::ADRP) {
+ I.setDesc(TII.get(AArch64::ADDXri));
+ I.addOperand(MachineOperand::CreateImm(0));
+ return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+ }
+ assert(TM.getCodeModel() == CodeModel::Small &&
+ "Expected small code model");
+ MachineIRBuilder MIB(I);
+ auto Op1 = BaseMI->getOperand(1);
+ auto Op2 = I.getOperand(2);
+ auto MovAddr = MIB.buildInstr(AArch64::MOVaddr, {I.getOperand(0)}, {})
+ .addGlobalAddress(Op1.getGlobal(), Op1.getOffset(),
+ Op1.getTargetFlags())
+ .addGlobalAddress(Op2.getGlobal(), Op2.getOffset(),
+ Op2.getTargetFlags());
+ I.eraseFromParent();
+ return constrainSelectedInstRegOperands(*MovAddr, TII, TRI, RBI);
}
case TargetOpcode::G_BSWAP: {