author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2020-10-16 11:27:16 +0100
---|---|---
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2020-10-16 11:27:28 +0100
commit | 1cf347e48b588ea277cc550b3ac57ecfa540618c (patch) |
tree | a99700c61ec8bd68b991ee812877878b66d58f57 /llvm/lib |
parent | e8d9ee9c7cfe46d9b552111a27d866fce0498b0a (diff) |
[InstCombine] narrowRotate - minor refactoring for funnel shift support. NFC.
Prep work for PR35155 - renamed narrowRotate to narrowFunnelShift, rewrote some comments and adjusted the code to collect separate shift values, although we bail out if they don't match (for now, only rotations are actually folded).
I'm trying to mirror matchFunnelShift as closely as possible in case we finally get to merge these one day.
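For context, a minimal sketch (my own example, not taken from the commit; the function name rotl8 and the 8-bit width are illustrative) of the kind of source that produces the wide-type pattern this code narrows: C/C++ integer promotion evaluates both shifts of a narrow rotate in 'int', so InstCombine sees a trunc of an or of opposite 32-bit shifts of the same value.

```cpp
// Illustrative only. Integer promotion widens both shifts to 'int', so the IR
// becomes a trunc of an or of opposite i32 shifts of the same (zero-extended)
// value, with the shift amounts masked to Width-1 and negated - the second
// form recognised by matchShiftAmount. InstCombine can then narrow the whole
// expression to an 8-bit funnel shift (llvm.fshl.i8).
unsigned char rotl8(unsigned char X, unsigned ShAmt) {
  return static_cast<unsigned char>((X << (ShAmt & 7)) |
                                    (X >> ((0u - ShAmt) & 7)));
}
```

The same narrowing applies to the `Width - L` subtraction form handled first in matchShiftAmount.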
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp | 35
-rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineInternal.h | 2 |
2 files changed, 21 insertions, 16 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index d8eddb6..c42b240 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -515,9 +515,9 @@ static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
   return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
 }
 
-/// Rotate left/right may occur in a wider type than necessary because of type
-/// promotion rules. Try to narrow the inputs and convert to funnel shift.
-Instruction *InstCombinerImpl::narrowRotate(TruncInst &Trunc) {
+/// Funnel/Rotate left/right may occur in a wider type than necessary because of
+/// type promotion rules. Try to narrow the inputs and convert to funnel shift.
+Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) {
   assert((isa<VectorType>(Trunc.getSrcTy()) ||
           shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
          "Don't narrow to an illegal scalar type");
@@ -529,38 +529,38 @@ Instruction *InstCombinerImpl::narrowRotate(TruncInst &Trunc) {
   if (!isPowerOf2_32(NarrowWidth))
     return nullptr;
 
-  // First, find an or'd pair of opposite shifts with the same shifted operand:
-  // trunc (or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1))
+  // First, find an or'd pair of opposite shifts:
+  // trunc (or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1))
   BinaryOperator *Or0, *Or1;
   if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
     return nullptr;
 
-  Value *ShVal, *ShAmt0, *ShAmt1;
-  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
-      !match(Or1,
-             m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))) ||
+  Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
+  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
+      !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
       Or0->getOpcode() == Or1->getOpcode())
     return nullptr;
 
-  // Canonicalize to or(shl(ShVal, ShAmt0), lshr(ShVal, ShAmt1)).
+  // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
   if (Or0->getOpcode() == BinaryOperator::LShr) {
     std::swap(Or0, Or1);
+    std::swap(ShVal0, ShVal1);
     std::swap(ShAmt0, ShAmt1);
   }
   assert(Or0->getOpcode() == BinaryOperator::Shl &&
          Or1->getOpcode() == BinaryOperator::LShr &&
          "Illegal or(shift,shift) pair");
 
-  // Match the shift amount operands for a rotate pattern. This always matches
-  // a subtraction on the R operand.
+  // Match the shift amount operands for a funnel/rotate pattern. This always
+  // matches a subtraction on the R operand.
   auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
     // The shift amounts may add up to the narrow bit width:
-    // (shl ShVal, L) | (lshr ShVal, Width - L)
+    // (shl ShVal0, L) | (lshr ShVal1, Width - L)
     if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
       return L;
 
     // The shift amount may be masked with negation:
-    // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
+    // (shl ShVal0, (X & (Width - 1))) | (lshr ShVal1, ((-X) & (Width - 1)))
     Value *X;
     unsigned Mask = Width - 1;
     if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
@@ -575,6 +575,11 @@ Instruction *InstCombinerImpl::narrowRotate(TruncInst &Trunc) {
     return nullptr;
   };
 
+  // TODO: Add support for funnel shifts (ShVal0 != ShVal1).
+  if (ShVal0 != ShVal1)
+    return nullptr;
+  Value *ShVal = ShVal0;
+
   Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
   bool IsFshl = true; // Sub on LSHR.
   if (!ShAmt) {
@@ -654,7 +659,7 @@ Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
   default: break;
   }
 
-  if (Instruction *NarrowOr = narrowRotate(Trunc))
+  if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
     return NarrowOr;
 
   return nullptr;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 10b6769..2033bc2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -326,7 +326,7 @@ private:
   Instruction *narrowBinOp(TruncInst &Trunc);
   Instruction *narrowMaskedBinOp(BinaryOperator &And);
   Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
-  Instruction *narrowRotate(TruncInst &Trunc);
+  Instruction *narrowFunnelShift(TruncInst &Trunc);
   Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
   Instruction *matchSAddSubSat(SelectInst &MinMax1);
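The functional pivot in the diff above is replacing m_Specific(ShVal) with a second m_Value(ShVal1): both shifted operands are now captured independently and only compared afterwards, which is the hook the later funnel-shift support will use. The sketch below (my own illustration, not part of the commit; the driver, module setup and names like demo_fsh are invented) contrasts the two matching styles with LLVM's PatternMatch API on an or(shl, lshr) whose two shifted operands differ.

```cpp
// Standalone sketch, not from the commit: it builds or(shl(X, 3), lshr(Y, 29))
// with two *different* shifted operands (a funnel-shift shape, not a rotate)
// and shows that the rotate-style pattern rejects it while the funnel-style
// pattern captures both operands and leaves the decision to a later check.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::PatternMatch;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  IRBuilder<> B(Ctx);

  // Build a tiny function so the shifts have non-constant operands.
  FunctionType *FTy = FunctionType::get(
      B.getInt32Ty(), {B.getInt32Ty(), B.getInt32Ty()}, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "demo_fsh", M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  B.SetInsertPoint(BB);
  Value *X = F->getArg(0), *Y = F->getArg(1);
  Value *Or = B.CreateOr(B.CreateShl(X, 3), B.CreateLShr(Y, 29));
  B.CreateRet(Or);

  // Rotate-style match: the lshr operand must equal the value captured from
  // the shl (m_Specific), so it fails on the funnel-shift shape above.
  Value *ShVal = nullptr, *ShAmt0 = nullptr, *ShAmt1 = nullptr;
  bool IsRotate = match(Or, m_c_Or(m_Shl(m_Value(ShVal), m_Value(ShAmt0)),
                                   m_LShr(m_Specific(ShVal), m_Value(ShAmt1))));

  // Funnel-style match: capture both shifted operands, then compare them, as
  // narrowFunnelShift now does (and, for now, bails out when they differ).
  Value *ShVal0 = nullptr, *ShVal1 = nullptr;
  bool IsFunnel = match(Or, m_c_Or(m_Shl(m_Value(ShVal0), m_Value(ShAmt0)),
                                   m_LShr(m_Value(ShVal1), m_Value(ShAmt1))));

  outs() << "rotate-style match: " << (IsRotate ? "yes" : "no")
         << ", funnel-style match: " << (IsFunnel ? "yes" : "no")
         << ", same operand: " << (ShVal0 == ShVal1 ? "yes" : "no") << "\n";
  return 0;
}
```

With the old rotate-only pattern the match fails outright, so narrowRotate never even saw funnel-shift shapes; with the separate captures, narrowFunnelShift sees them, notices ShVal0 != ShVal1, and simply returns at the new TODO until PR35155 adds the real handling.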