author:    Matthias Braun <matze@braunis.de>  2016-03-02 19:20:00 +0000
committer: Matthias Braun <matze@braunis.de>  2016-03-02 19:20:00 +0000
commit:    f290912d2248687e688779d1d89999df56c14a09 (patch)
tree:      3fcf0e6854ae16ba357b814c0a0a16f1a88f313c /llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
parent:    578787ad3055d0a874bb76b148a9d5dc7bd18db5 (diff)
ARM: Introduce conservative load/store optimization mode
Most of the time ARM has the CCR.UNALIGN_TRP bit set to false, which
means that unaligned loads/stores do not trap, so even extensive testing
will not catch these bugs. However, the multi/double variants are not
affected by this bit and will still trap. In effect, a more aggressive
load/store optimization will break existing (bad) code.
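
For illustration, a minimal hypothetical C++ fragment (not part of this commit) shows the pattern in question: with CCR.UNALIGN_TRP clear, the two plain word loads tolerate a misaligned pointer, but they become a trapping access once the load/store optimizer merges them into an ldrd/ldm.

```cpp
#include <cstdint>

// Hypothetical user code with an alignment bug: casting an arbitrary byte
// offset to uint32_t* is undefined behaviour when the result is not 4-byte
// aligned.
uint32_t sum_two_words(const char *buf, unsigned byte_offset) {
  const uint32_t *p = reinterpret_cast<const uint32_t *>(buf + byte_offset);
  // Two adjacent 32-bit loads: as two plain ldr instructions they execute
  // fine with CCR.UNALIGN_TRP clear, but merged into ldrd/ldm they trap on
  // a misaligned address.
  return p[0] + p[1];
}
```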
These bugs do not necessarily manifest in the broken code where the
misaligned pointer is formed, but often later, in perfectly legal code
where it is accessed. This means that recompiling system libraries
(which have no alignment bugs) with a newer compiler will break existing
applications (with alignment bugs) that worked before.
So (under protest) I implemented this safe mode, which limits the
formation of multi/double operations to cases that are not affected by
user code (stack operations like spills/reloads) or to cases where the
normal operations trap anyway (floating-point loads/stores). It is
disabled by default.
Differential Revision: http://reviews.llvm.org/D17015
llvm-svn: 262504
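
Because the new switch is registered as a hidden cl::opt, it can be exercised for testing with, for example, `llc -arm-assume-misaligned-load-store` or, when compiling through clang, `-mllvm -arm-assume-misaligned-load-store`.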
Diffstat (limited to 'llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp')
-rw-r--r--  llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 34
1 file changed, 34 insertions(+), 0 deletions(-)
```diff
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 5ee6641..cc49f9d 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -60,6 +60,15 @@ STATISTIC(NumSTRD2STM, "Number of strd instructions turned back into stm");
 STATISTIC(NumLDRD2LDR, "Number of ldrd instructions turned back into ldr's");
 STATISTIC(NumSTRD2STR, "Number of strd instructions turned back into str's");
 
+/// This switch disables formation of double/multi instructions that could
+/// potentially lead to (new) alignment traps even with CCR.UNALIGN_TRP
+/// disabled. This can be used to create libraries that are robust even when
+/// users provoke undefined behaviour by supplying misaligned pointers.
+/// \see mayCombineMisaligned()
+static cl::opt<bool>
+AssumeMisalignedLoadStores("arm-assume-misaligned-load-store", cl::Hidden,
+    cl::init(false), cl::desc("Be more conservative in ARM load/store opt"));
+
 namespace llvm {
 void initializeARMLoadStoreOptPass(PassRegistry &);
 }
@@ -916,6 +925,24 @@ static bool isValidLSDoubleOffset(int Offset) {
   return (Value % 4) == 0 && Value < 1024;
 }
 
+/// Return true for loads/stores that can be combined to a double/multi
+/// operation without increasing the requirements for alignment.
+static bool mayCombineMisaligned(const TargetSubtargetInfo &STI,
+                                 const MachineInstr &MI) {
+  // vldr/vstr trap on misaligned pointers anyway, forming vldm makes no
+  // difference.
+  unsigned Opcode = MI.getOpcode();
+  if (!isi32Load(Opcode) && !isi32Store(Opcode))
+    return true;
+
+  // Stack pointer alignment is out of the programmers control so we can trust
+  // SP-relative loads/stores.
+  if (getLoadStoreBaseOp(MI).getReg() == ARM::SP &&
+      STI.getFrameLowering()->getTransientStackAlignment() >= 4)
+    return true;
+  return false;
+}
+
 /// Find candidates for load/store multiple merge in list of MemOpQueueEntries.
 void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) {
   const MachineInstr *FirstMI = MemOps[0].MI;
@@ -954,6 +981,10 @@ void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) {
     if (PReg == ARM::SP || PReg == ARM::PC)
       CanMergeToLSMulti = CanMergeToLSDouble = false;
 
+    // Should we be conservative?
+    if (AssumeMisalignedLoadStores && !mayCombineMisaligned(*STI, *MI))
+      CanMergeToLSMulti = CanMergeToLSDouble = false;
+
     // Merge following instructions where possible.
     for (unsigned I = SIndex+1; I < EIndex; ++I, ++Count) {
       int NewOffset = MemOps[I].Offset;
@@ -1926,6 +1957,9 @@ INITIALIZE_PASS(ARMPreAllocLoadStoreOpt, "arm-prera-load-store-opt",
                 ARM_PREALLOC_LOAD_STORE_OPT_NAME, false, false)
 
 bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
+  if (AssumeMisalignedLoadStores)
+    return false;
+
   TD = &Fn.getDataLayout();
   STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
   TII = STI->getInstrInfo();
```