about summary refs log tree commit diff
path: root/llvm/lib/CodeGen/PeepholeOptimizer.cpp
diff options
context:
space:
mode:
author	Nikita Popov <nikita.ppv@gmail.com>	2021-08-28 21:29:31 +0200
committer	Nikita Popov <nikita.ppv@gmail.com>	2021-08-30 19:46:04 +0200
commit	0529e2e01888129b21becd1fe3a61d9cb07c6fcd (patch)
tree	f440bf98933140d8b51678718022dc814b0d88c3 /llvm/lib/CodeGen/PeepholeOptimizer.cpp
parent	ed4946fe20964509b27b38e74331c36857d4be53 (diff)
download	llvm-0529e2e01888129b21becd1fe3a61d9cb07c6fcd.zip
	llvm-0529e2e01888129b21becd1fe3a61d9cb07c6fcd.tar.gz
	llvm-0529e2e01888129b21becd1fe3a61d9cb07c6fcd.tar.bz2
[InstrInfo] Use 64-bit immediates for analyzeCompare() (NFCI)
The backend generally uses 64-bit immediates (e.g. what MachineOperand::getImm() returns), so use that for analyzeCompare() and optimizeCompareInst() as well. This avoids truncation for targets that support immediates larger than 32 bits. In particular, we can avoid the bugprone value normalization hack in the AArch64 target. This is a followup to D108076. Differential Revision: https://reviews.llvm.org/D108875
Diffstat (limited to 'llvm/lib/CodeGen/PeepholeOptimizer.cpp')
-rw-r--r--	llvm/lib/CodeGen/PeepholeOptimizer.cpp	2	
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 49bdba5..f9b16d2 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -626,7 +626,7 @@ bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr &MI) {
// If this instruction is a comparison against zero and isn't comparing a
// physical register, we can try to optimize it.
Register SrcReg, SrcReg2;
- int CmpMask, CmpValue;
+ int64_t CmpMask, CmpValue;
if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
SrcReg.isPhysical() || SrcReg2.isPhysical())
return false;