Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r--  llvm/lib/CodeGen/BranchFolding.cpp             | 7
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp            | 9
-rw-r--r--  llvm/lib/CodeGen/ExpandMemCmp.cpp              | 3
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/Utils.cpp          | 4
-rw-r--r--  llvm/lib/CodeGen/LiveIntervals.cpp             | 3
-rw-r--r--  llvm/lib/CodeGen/MachineBlockPlacement.cpp     | 5
-rw-r--r--  llvm/lib/CodeGen/MachineCombiner.cpp           | 6
-rw-r--r--  llvm/lib/CodeGen/MachineSizeOpts.cpp           | 8
-rw-r--r--  llvm/lib/CodeGen/SelectOptimize.cpp            | 4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 3
-rw-r--r--  llvm/lib/CodeGen/TailDuplicator.cpp            | 4
-rw-r--r--  llvm/lib/CodeGen/TargetLoweringBase.cpp        | 1
12 files changed, 21 insertions, 36 deletions
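
This change sinks the hasOptSize() attribute check into the llvm::shouldOptimizeForSize() overloads in MachineSizeOpts.cpp, so the call sites below no longer need to pair the profile-guided (PGSO) query with a manual attribute test. A minimal sketch of the consolidated predicate, simplified from the first overload in the diff (the real functions thread a PGSOQueryType through to the shared impl helpers):

    // Sketch of the centralized check. Function::hasOptSize() already
    // returns true for minsize functions as well as optsize ones.
    bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI,
                               const MachineBlockFrequencyInfo *MBFI,
                               PGSOQueryType QueryType) {
      if (MF->getFunction().hasOptSize())
        return true; // the attribute wins unconditionally
      // Otherwise defer to PGSO, which bails out when no profile data is available.
      return shouldFuncOptimizeForSizeImpl(MF, PSI, MBFI, QueryType);
    }
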
diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp
index 1dc2785..f8de136 100644
--- a/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/llvm/lib/CodeGen/BranchFolding.cpp
@@ -645,11 +645,8 @@ ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2,
   // we don't have to split a block. At worst we will be introducing 1 new
   // branch instruction, which is likely to be smaller than the 2
   // instructions that would be deleted in the merge.
-  MachineFunction *MF = MBB1->getParent();
-  bool OptForSize =
-      MF->getFunction().hasOptSize() ||
-      (llvm::shouldOptimizeForSize(MBB1, PSI, &MBBFreqInfo) &&
-       llvm::shouldOptimizeForSize(MBB2, PSI, &MBBFreqInfo));
+  bool OptForSize = llvm::shouldOptimizeForSize(MBB1, PSI, &MBBFreqInfo) &&
+                    llvm::shouldOptimizeForSize(MBB2, PSI, &MBBFreqInfo);
   return EffectiveTailLen >= 2 && OptForSize &&
          (FullBlockTail1 || FullBlockTail2);
 }
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 67a3590..5224a6c 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -612,7 +612,6 @@ bool CodeGenPrepare::_run(Function &F) {
       // bypassSlowDivision may create new BBs, but we don't want to reapply the
       // optimization to those blocks.
       BasicBlock *Next = BB->getNextNode();
-      // F.hasOptSize is already checked in the outer if statement.
       if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
         EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
       BB = Next;
@@ -2608,7 +2607,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
   // cold block. This interacts with our handling for loads and stores to
   // ensure that we can fold all uses of a potential addressing computation
   // into their uses. TODO: generalize this to work over profiling data
-  if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
+  if (CI->hasFnAttr(Attribute::Cold) &&
       !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
     for (auto &Arg : CI->args()) {
       if (!Arg->getType()->isPointerTy())
@@ -5505,9 +5504,7 @@ static bool FindAllMemoryUses(
     if (CI->hasFnAttr(Attribute::Cold)) {
       // If this is a cold call, we can sink the addressing calculation into
       // the cold path. See optimizeCallInst
-      bool OptForSize =
-          OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
-      if (!OptForSize)
+      if (!llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI))
         continue;
     }
 
@@ -7402,7 +7399,7 @@ bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
     SelectKind = TargetLowering::ScalarValSelect;
 
   if (TLI->isSelectSupported(SelectKind) &&
-      (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
+      (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) ||
       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
     return false;
 
diff --git a/llvm/lib/CodeGen/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp
index 6d626de..1de01e4 100644
--- a/llvm/lib/CodeGen/ExpandMemCmp.cpp
+++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp
@@ -852,8 +852,7 @@ static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
   // available load sizes.
   const bool IsUsedForZeroCmp =
       IsBCmp || isOnlyUsedInZeroEqualityComparison(CI);
-  bool OptForSize = CI->getFunction()->hasOptSize() ||
-                    llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
+  bool OptForSize = llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
   auto Options = TTI->enableMemCmpExpansion(OptForSize, IsUsedForZeroCmp);
   if (!Options)
     return false;
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 722ceea..513a49b 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1621,9 +1621,7 @@ int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
 
 bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                             ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
-  const auto &F = MBB.getParent()->getFunction();
-  return F.hasOptSize() || F.hasMinSize() ||
-         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
+  return llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
 }
 
 void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
diff --git a/llvm/lib/CodeGen/LiveIntervals.cpp b/llvm/lib/CodeGen/LiveIntervals.cpp
index 21a316c..a0b6bf4 100644
--- a/llvm/lib/CodeGen/LiveIntervals.cpp
+++ b/llvm/lib/CodeGen/LiveIntervals.cpp
@@ -890,8 +890,7 @@ float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
   const auto *MF = MBB->getParent();
   // When optimizing for size we only consider the codesize impact of spilling
   // the register, not the runtime impact.
-  if (PSI && (MF->getFunction().hasOptSize() ||
-              llvm::shouldOptimizeForSize(MF, PSI, MBFI)))
+  if (PSI && llvm::shouldOptimizeForSize(MF, PSI, MBFI))
     return Weight;
   return Weight * MBFI->getBlockFreqRelativeToEntryBlock(MBB);
 }
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index dd5220b..d1dced9 100644
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -2189,9 +2189,7 @@ MachineBlockPlacement::findBestLoopTop(const MachineLoop &L,
   // i.e. when the layout predecessor does not fallthrough to the loop header.
   // In practice this never happens though: there always seems to be a preheader
   // that can fallthrough and that is also placed before the header.
-  bool OptForSize = F->getFunction().hasOptSize() ||
-                    llvm::shouldOptimizeForSize(L.getHeader(), PSI, MBFI.get());
-  if (OptForSize)
+  if (llvm::shouldOptimizeForSize(L.getHeader(), PSI, MBFI.get()))
     return L.getHeader();
 
   MachineBasicBlock *OldTop = nullptr;
@@ -3511,7 +3509,6 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
   initTailDupThreshold();
 
   const bool OptForSize =
-      MF.getFunction().hasOptSize() ||
       llvm::shouldOptimizeForSize(&MF, PSI, &MBFI->getMBFI());
   // Determine whether to use ext-tsp for perf/size optimization. The method
   // is beneficial only for instances with at least 3 basic blocks and it can be
diff --git a/llvm/lib/CodeGen/MachineCombiner.cpp b/llvm/lib/CodeGen/MachineCombiner.cpp
index 5bfc1d6..141cc1f 100644
--- a/llvm/lib/CodeGen/MachineCombiner.cpp
+++ b/llvm/lib/CodeGen/MachineCombiner.cpp
@@ -77,9 +77,6 @@ class MachineCombiner : public MachineFunctionPass {
 
   TargetSchedModel TSchedModel;
 
-  /// True if optimizing for code size.
-  bool OptSize = false;
-
 public:
   static char ID;
   MachineCombiner() : MachineFunctionPass(ID) {
@@ -571,7 +568,7 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
   SparseSet<LiveRegUnit> RegUnits;
   RegUnits.setUniverse(TRI->getNumRegUnits());
 
-  bool OptForSize = OptSize || llvm::shouldOptimizeForSize(MBB, PSI, MBFI);
+  bool OptForSize = llvm::shouldOptimizeForSize(MBB, PSI, MBFI);
 
   bool DoRegPressureReduce =
       TII->shouldReduceRegisterPressure(MBB, &RegClassInfo);
@@ -733,7 +730,6 @@ bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
                &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI() :
                nullptr;
   TraceEnsemble = nullptr;
-  OptSize = MF.getFunction().hasOptSize();
   RegClassInfo.runOnMachineFunction(MF);
 
   LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
diff --git a/llvm/lib/CodeGen/MachineSizeOpts.cpp b/llvm/lib/CodeGen/MachineSizeOpts.cpp
index 53bed73..4d458f2 100644
--- a/llvm/lib/CodeGen/MachineSizeOpts.cpp
+++ b/llvm/lib/CodeGen/MachineSizeOpts.cpp
@@ -28,6 +28,8 @@ bool llvm::shouldOptimizeForSize(const MachineFunction *MF,
                                  ProfileSummaryInfo *PSI,
                                  const MachineBlockFrequencyInfo *MBFI,
                                  PGSOQueryType QueryType) {
+  if (MF->getFunction().hasOptSize())
+    return true;
   return shouldFuncOptimizeForSizeImpl(MF, PSI, MBFI, QueryType);
 }
 
@@ -36,6 +38,8 @@ bool llvm::shouldOptimizeForSize(const MachineBasicBlock *MBB,
                                  const MachineBlockFrequencyInfo *MBFI,
                                  PGSOQueryType QueryType) {
   assert(MBB);
+  if (MBB->getParent()->getFunction().hasOptSize())
+    return true;
   return shouldOptimizeForSizeImpl(MBB, PSI, MBFI, QueryType);
 }
 
@@ -44,7 +48,9 @@ bool llvm::shouldOptimizeForSize(const MachineBasicBlock *MBB,
                                  MBFIWrapper *MBFIW,
                                  PGSOQueryType QueryType) {
   assert(MBB);
-  if (!PSI || !MBFIW)
+  if (MBB->getParent()->getFunction().hasOptSize())
+    return true;
+  if (!MBFIW)
     return false;
   BlockFrequency BlockFreq = MBFIW->getBlockFreq(MBB);
   return shouldOptimizeForSizeImpl(BlockFreq, PSI, &MBFIW->getMBFI(),
diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp
index 61341e1..55b0eb7 100644
--- a/llvm/lib/CodeGen/SelectOptimize.cpp
+++ b/llvm/lib/CodeGen/SelectOptimize.cpp
@@ -431,7 +431,7 @@ PreservedAnalyses SelectOptimizeImpl::run(Function &F,
   BFI = &FAM.getResult<BlockFrequencyAnalysis>(F);
 
   // When optimizing for size, selects are preferable over branches.
-  if (F.hasOptSize() || llvm::shouldOptimizeForSize(&F, PSI, BFI))
+  if (llvm::shouldOptimizeForSize(&F, PSI, BFI))
     return PreservedAnalyses::all();
 
   LI = &FAM.getResult<LoopAnalysis>(F);
@@ -467,7 +467,7 @@ bool SelectOptimizeImpl::runOnFunction(Function &F, Pass &P) {
   TSchedModel.init(TSI);
 
   // When optimizing for size, selects are preferable over branches.
-  if (F.hasOptSize() || llvm::shouldOptimizeForSize(&F, PSI, BFI))
+  if (llvm::shouldOptimizeForSize(&F, PSI, BFI))
     return false;
 
   return optimizeSelects(F);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 0770355..1a86b3b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1370,8 +1370,7 @@ SelectionDAG::~SelectionDAG() {
 }
 
 bool SelectionDAG::shouldOptForSize() const {
-  return MF->getFunction().hasOptSize() ||
-         llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
+  return llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
 }
 
 void SelectionDAG::allnodes_clear() {
diff --git a/llvm/lib/CodeGen/TailDuplicator.cpp b/llvm/lib/CodeGen/TailDuplicator.cpp
index c5fa4e6..3f2e151 100644
--- a/llvm/lib/CodeGen/TailDuplicator.cpp
+++ b/llvm/lib/CodeGen/TailDuplicator.cpp
@@ -586,13 +586,11 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple,
   // duplicate only one, because one branch instruction can be eliminated to
   // compensate for the duplication.
   unsigned MaxDuplicateCount;
-  bool OptForSize = MF->getFunction().hasOptSize() ||
-                    llvm::shouldOptimizeForSize(&TailBB, PSI, MBFI);
   if (TailDupSize == 0)
     MaxDuplicateCount = TailDuplicateSize;
   else
     MaxDuplicateCount = TailDupSize;
-  if (OptForSize)
+  if (llvm::shouldOptimizeForSize(&TailBB, PSI, MBFI))
     MaxDuplicateCount = 1;
 
   // If the block to be duplicated ends in an unanalyzable fallthrough, don't
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 7a28f78..cab0ed2 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1633,7 +1633,6 @@ bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
   // performed in findJumpTable() in SelectionDAGBuiler and
   // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
   const bool OptForSize =
-      SI->getParent()->getParent()->hasOptSize() ||
       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
   const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
   const unsigned MaxJumpTableSize = getMaximumJumpTableSize();
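
Two behavioral notes on the consolidation, since the call sites above lose their explicit checks: Function::hasOptSize() subsumes hasMinSize(), so dropping the separate minsize test in GlobalISel's shouldOptForSize() does not change the result; and the MBFIWrapper overload previously returned false whenever PSI was null, whereas it now reports optsize functions as size-optimized even without profile data. With the check centralized, a hypothetical call site (illustrative names, not from this patch) reduces to a single predicate:

    // Before: MF.getFunction().hasOptSize() ||
    //         llvm::shouldOptimizeForSize(MBB, PSI, MBFI)
    // After: the query alone is authoritative.
    static bool preferSmallCode(const MachineBasicBlock *MBB,
                                ProfileSummaryInfo *PSI,
                                const MachineBlockFrequencyInfo *MBFI) {
      return llvm::shouldOptimizeForSize(MBB, PSI, MBFI);
    }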