author     Nikita Popov <npopov@redhat.com>  2023-06-09 16:21:39 +0200
committer  Nikita Popov <npopov@redhat.com>  2023-06-15 14:17:58 +0200
commit     03de1cb715c9a106f3ea4139042a33afcd7ca6ae (patch)
tree       31650fa065d2c83fca4e6e6a9195c34af59578e1 /llvm/lib/CodeGen/CodeGenPrepare.cpp
parent     e96bec9cd8e14ee2174490c0ce09cedfcd6be79e (diff)
[InstCombine][CGP] Move swapMayExposeCSEOpportunities() fold
InstCombine tries to swap compare operands to match sub instructions in
order to expose "CSE opportunities". However, it doesn't really make sense
to perform this transform in the middle-end, as we cannot actually CSE the
instructions there.

The backend already performs this fold in
https://github.com/llvm/llvm-project/blob/18f5446a45da5a61dbfb1b7667d27fb441ac62db/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp#L4236
on the SDAG level, however this only works within a single basic block.
To handle cross-BB cases, we do need to handle this in the IR layer.

This patch moves the fold from InstCombine to CGP in the backend, while
keeping the same (somewhat dubious) heuristic.

Differential Revision: https://reviews.llvm.org/D152541
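To illustrate the motivation, here is a hypothetical C++ sketch (not part of
the patch; the function and names are made up) in which both the difference
of two values and a comparison of the same values are live:

// Hypothetical illustration: on targets where cmp is a flag-setting
// subtract (e.g. AArch64's subs), the compare "a < b" computes a - b,
// while the explicit subtraction computes b - a. Swapping the compare
// operands ("a < b" into the equivalent "b > a") makes both operations
// compute b - a, so the backend can emit one subtract for both.
int clampedDiff(int a, int b) {
  int d = b - a; // sub: b - a
  if (a < b)     // cmp: a - b; after the swap, effectively b - a
    return d;
  return 0;
}

In IR terms, the code added below rewrites icmp slt i32 %a, %b into the
equivalent icmp sgt i32 %b, %a when more users compute sub %b, %a than
sub %a, %b; that is the GoodToSwap counting in the new function.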
Diffstat (limited to 'llvm/lib/CodeGen/CodeGenPrepare.cpp')
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp | 34
1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 06e9c2e..ee4c193 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1831,6 +1831,37 @@ static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
   return true;
 }
 
+/// Many architectures use the same instruction for both subtract and cmp. Try
+/// to swap cmp operands to match subtract operations to allow for CSE.
+static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
+  Value *Op0 = Cmp->getOperand(0);
+  Value *Op1 = Cmp->getOperand(1);
+  if (!Op0->getType()->isIntegerTy() || isa<Constant>(Op0) ||
+      isa<Constant>(Op1))
+    return false;
+
+  // If a subtract already has the same operands as a compare, swapping would be
+  // bad. If a subtract has the same operands as a compare but in reverse order,
+  // then swapping is good.
+  int GoodToSwap = 0;
+  unsigned NumInspected = 0;
+  for (const User *U : Op0->users()) {
+    // Avoid walking many users.
+    if (++NumInspected > 128)
+      return false;
+    if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
+      GoodToSwap++;
+    else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
+      GoodToSwap--;
+  }
+
+  if (GoodToSwap > 0) {
+    Cmp->swapOperands();
+    return true;
+  }
+  return false;
+}
+
 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
   if (sinkCmpExpression(Cmp, *TLI))
     return true;
@@ -1844,6 +1875,9 @@ bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
   if (foldICmpWithDominatingICmp(Cmp, *TLI))
     return true;
 
+  if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
+    return true;
+
   return false;
 }