about summary refs log tree commit diff
path: root/llvm/lib/CodeGen/CodeGenPrepare.cpp
diff options
context:
space:
mode:
author   Ellis Hoag <ellis.sparky.hoag@gmail.com> 2024-10-28 09:45:03 -0700
committer GitHub <noreply@github.com> 2024-10-28 09:45:03 -0700
commit   6ab26eab4f1e06f2da7b3183c55666ad57f8866e (patch)
tree     8d58cecc95cfea7a335f12e325e952263cb3db3e /llvm/lib/CodeGen/CodeGenPrepare.cpp
parent   92412c106f5275b4b385f7c2d882008181de2854 (diff)
download llvm-6ab26eab4f1e06f2da7b3183c55666ad57f8866e.zip
download llvm-6ab26eab4f1e06f2da7b3183c55666ad57f8866e.tar.gz
download llvm-6ab26eab4f1e06f2da7b3183c55666ad57f8866e.tar.bz2
Check hasOptSize() in shouldOptimizeForSize() (#112626)
Diffstat (limited to 'llvm/lib/CodeGen/CodeGenPrepare.cpp')
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp | 9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 67a3590..5224a6c 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -612,7 +612,6 @@ bool CodeGenPrepare::_run(Function &F) {
// bypassSlowDivision may create new BBs, but we don't want to reapply the
// optimization to those blocks.
BasicBlock *Next = BB->getNextNode();
- // F.hasOptSize is already checked in the outer if statement.
if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
BB = Next;
@@ -2608,7 +2607,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
// cold block. This interacts with our handling for loads and stores to
// ensure that we can fold all uses of a potential addressing computation
// into their uses. TODO: generalize this to work over profiling data
- if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
+ if (CI->hasFnAttr(Attribute::Cold) &&
!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
for (auto &Arg : CI->args()) {
if (!Arg->getType()->isPointerTy())
@@ -5505,9 +5504,7 @@ static bool FindAllMemoryUses(
if (CI->hasFnAttr(Attribute::Cold)) {
// If this is a cold call, we can sink the addressing calculation into
// the cold path. See optimizeCallInst
- bool OptForSize =
- OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
- if (!OptForSize)
+ if (!llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI))
continue;
}
@@ -7402,7 +7399,7 @@ bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
SelectKind = TargetLowering::ScalarValSelect;
if (TLI->isSelectSupported(SelectKind) &&
- (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
+ (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) ||
llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
return false;