diff options
author | John Brawn <john.brawn@arm.com> | 2023-07-25 17:10:12 +0100 |
---|---|---|
committer | John Brawn <john.brawn@arm.com> | 2023-07-28 10:37:40 +0100 |
commit | 8336d38be92d253582feadb728ac3691a6f3c39c (patch) | |
tree | 8f41b234cf5fbc6f264fb5df8edc739e6e0a9952 /llvm/lib | |
parent | bf2ad26b4ff856aab9a62ad168e6bdefeedc374f (diff) | |
download | llvm-8336d38be92d253582feadb728ac3691a6f3c39c.zip llvm-8336d38be92d253582feadb728ac3691a6f3c39c.tar.gz llvm-8336d38be92d253582feadb728ac3691a6f3c39c.tar.bz2 |
[ARM] Correctly handle combining segmented stacks with execute-only
Using segmented stacks with execute-only mostly works, but we need to
use the correct movi32 opcode in 6-M, and there's one place where for
Thumb1 (i.e. 6-M and 8-M.base) where a constant pool was unconditionally
used, which needed to be fixed.
Differential Revision: https://reviews.llvm.org/D156339
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Target/ARM/ARMFrameLowering.cpp | 38 |
1 file changed, 22 insertions, 16 deletions
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp index 4496d49..7bcf95e 100644 --- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp +++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp @@ -2966,6 +2966,7 @@ void ARMFrameLowering::adjustForSegmentedStacks( // We save R4 and R5 before use and restore them before leaving the function. unsigned ScratchReg0 = ARM::R4; unsigned ScratchReg1 = ARM::R5; + unsigned MovOp = ST->useMovt() ? ARM::t2MOVi32imm : ARM::tMOVi32imm; uint64_t AlignedStackSize; MachineBasicBlock *PrevStackMBB = MF.CreateMachineBasicBlock(); @@ -3083,8 +3084,8 @@ void ARMFrameLowering::adjustForSegmentedStacks( .addImm(AlignedStackSize) .add(predOps(ARMCC::AL)); } else { - if (Thumb2) { - BuildMI(McrMBB, DL, TII.get(ARM::t2MOVi32imm), ScratchReg0) + if (Thumb2 || ST->genExecuteOnly()) { + BuildMI(McrMBB, DL, TII.get(MovOp), ScratchReg0) .addImm(AlignedStackSize); } else { auto MBBI = McrMBB->end(); @@ -3119,16 +3120,21 @@ void ARMFrameLowering::adjustForSegmentedStacks( } if (Thumb && ST->isThumb1Only()) { - unsigned PCLabelId = ARMFI->createPICLabelUId(); - ARMConstantPoolValue *NewCPV = ARMConstantPoolSymbol::Create( - MF.getFunction().getContext(), "__STACK_LIMIT", PCLabelId, 0); - MachineConstantPool *MCP = MF.getConstantPool(); - unsigned CPI = MCP->getConstantPoolIndex(NewCPV, Align(4)); - - // ldr SR0, [pc, offset(STACK_LIMIT)] - BuildMI(GetMBB, DL, TII.get(ARM::tLDRpci), ScratchReg0) - .addConstantPoolIndex(CPI) - .add(predOps(ARMCC::AL)); + if (ST->genExecuteOnly()) { + BuildMI(GetMBB, DL, TII.get(MovOp), ScratchReg0) + .addExternalSymbol("__STACK_LIMIT"); + } else { + unsigned PCLabelId = ARMFI->createPICLabelUId(); + ARMConstantPoolValue *NewCPV = ARMConstantPoolSymbol::Create( + MF.getFunction().getContext(), "__STACK_LIMIT", PCLabelId, 0); + MachineConstantPool *MCP = MF.getConstantPool(); + unsigned CPI = MCP->getConstantPoolIndex(NewCPV, Align(4)); + + // ldr SR0, [pc, offset(STACK_LIMIT)] + 
BuildMI(GetMBB, DL, TII.get(ARM::tLDRpci), ScratchReg0) + .addConstantPoolIndex(CPI) + .add(predOps(ARMCC::AL)); + } // ldr SR0, [SR0] BuildMI(GetMBB, DL, TII.get(ARM::tLDRi), ScratchReg0) @@ -3188,8 +3194,8 @@ void ARMFrameLowering::adjustForSegmentedStacks( .addImm(AlignedStackSize) .add(predOps(ARMCC::AL)); } else { - if (Thumb2) { - BuildMI(AllocMBB, DL, TII.get(ARM::t2MOVi32imm), ScratchReg0) + if (Thumb2 || ST->genExecuteOnly()) { + BuildMI(AllocMBB, DL, TII.get(MovOp), ScratchReg0) .addImm(AlignedStackSize); } else { auto MBBI = AllocMBB->end(); @@ -3221,8 +3227,8 @@ void ARMFrameLowering::adjustForSegmentedStacks( .addImm(alignToARMConstant(ARMFI->getArgumentStackSize())) .add(predOps(ARMCC::AL)); } else { - if (Thumb2) { - BuildMI(AllocMBB, DL, TII.get(ARM::t2MOVi32imm), ScratchReg1) + if (Thumb2 || ST->genExecuteOnly()) { + BuildMI(AllocMBB, DL, TII.get(MovOp), ScratchReg1) .addImm(alignToARMConstant(ARMFI->getArgumentStackSize())); } else { auto MBBI = AllocMBB->end(); |