diff options
Diffstat (limited to 'llvm/lib/Transforms/InstCombine')
4 files changed, 39 insertions, 9 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 8d9933b..92fca90 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3496,7 +3496,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
       if (isPowerOf2_64(AlignMask + 1)) {
         uint64_t Offset = 0;
         match(A, m_Add(m_Value(A), m_ConstantInt(Offset)));
-        if (match(A, m_PtrToInt(m_Value(A)))) {
+        if (match(A, m_PtrToIntOrAddr(m_Value(A)))) {
           /// Note: this doesn't preserve the offset information but merges
           /// offset and alignment.
           /// TODO: we can generate a GEP instead of merging the alignment with
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 9b9fe26..614c6eb 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1525,7 +1525,15 @@ Instruction *InstCombinerImpl::visitSExt(SExtInst &Sext) {
   }
 
   // Try to extend the entire expression tree to the wide destination type.
-  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
+  bool ShouldExtendExpression = true;
+  Value *TruncSrc = nullptr;
+  // It is not desirable to extend expression in the trunc + sext pattern when
+  // destination type is narrower than original (pre-trunc) type.
+  if (match(Src, m_Trunc(m_Value(TruncSrc))))
+    if (TruncSrc->getType()->getScalarSizeInBits() > DestBitSize)
+      ShouldExtendExpression = false;
+  if (ShouldExtendExpression && shouldChangeType(SrcTy, DestTy) &&
+      canEvaluateSExtd(Src, DestTy)) {
     // Okay, we can transform this!  Insert the new expression now.
     LLVM_DEBUG(
         dbgs() << "ICE: EvaluateInDifferentType converting expression type"
@@ -1545,13 +1553,18 @@ Instruction *InstCombinerImpl::visitSExt(SExtInst &Sext) {
                                       ShAmt);
   }
 
-  Value *X;
-  if (match(Src, m_Trunc(m_Value(X)))) {
+  Value *X = TruncSrc;
+  if (X) {
     // If the input has more sign bits than bits truncated, then convert
     // directly to final type.
     unsigned XBitSize = X->getType()->getScalarSizeInBits();
-    if (ComputeNumSignBits(X, &Sext) > XBitSize - SrcBitSize)
-      return CastInst::CreateIntegerCast(X, DestTy, /* isSigned */ true);
+    bool HasNSW = cast<TruncInst>(Src)->hasNoSignedWrap();
+    if (HasNSW || (ComputeNumSignBits(X, &Sext) > XBitSize - SrcBitSize)) {
+      auto *Res = CastInst::CreateIntegerCast(X, DestTy, /* isSigned */ true);
+      if (auto *ResTrunc = dyn_cast<TruncInst>(Res); ResTrunc && HasNSW)
+        ResTrunc->setHasNoSignedWrap(true);
+      return Res;
+    }
 
     // If input is a trunc from the destination type, then convert into shifts.
     if (Src->hasOneUse() && X->getType() == DestTy) {
@@ -2135,7 +2148,7 @@ Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) {
   return nullptr;
 }
 
-Value *InstCombinerImpl::foldPtrToIntOfGEP(Type *IntTy, Value *Ptr) {
+Value *InstCombinerImpl::foldPtrToIntOrAddrOfGEP(Type *IntTy, Value *Ptr) {
   // Look through chain of one-use GEPs.
   Type *PtrTy = Ptr->getType();
   SmallVector<GEPOperator *> GEPs;
@@ -2197,7 +2210,7 @@ Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
       Mask->getType() == Ty)
     return BinaryOperator::CreateAnd(Builder.CreatePtrToInt(Ptr, Ty), Mask);
 
-  if (Value *V = foldPtrToIntOfGEP(Ty, SrcOp))
+  if (Value *V = foldPtrToIntOrAddrOfGEP(Ty, SrcOp))
     return replaceInstUsesWith(CI, V);
 
   Value *Vec, *Scalar, *Index;
@@ -2215,6 +2228,21 @@ Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
 }
 
 Instruction *InstCombinerImpl::visitPtrToAddr(PtrToAddrInst &CI) {
+  Value *SrcOp = CI.getPointerOperand();
+  Type *Ty = CI.getType();
+
+  // (ptrtoaddr (ptrmask P, M))
+  //    -> (and (ptrtoaddr P), M)
+  // This is generally beneficial as `and` is better supported than `ptrmask`.
+  Value *Ptr, *Mask;
+  if (match(SrcOp, m_OneUse(m_Intrinsic<Intrinsic::ptrmask>(m_Value(Ptr),
+                                                            m_Value(Mask)))) &&
+      Mask->getType() == Ty)
+    return BinaryOperator::CreateAnd(Builder.CreatePtrToAddr(Ptr), Mask);
+
+  if (Value *V = foldPtrToIntOrAddrOfGEP(Ty, SrcOp))
+    return replaceInstUsesWith(CI, V);
+
   // FIXME: Implement variants of ptrtoint folds.
   return commonCastTransforms(CI);
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 9c75d9a..d85e4f7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -700,7 +700,7 @@ public:
   /// folded operation.
   void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);
 
-  Value *foldPtrToIntOfGEP(Type *IntTy, Value *Ptr);
+  Value *foldPtrToIntOrAddrOfGEP(Type *IntTy, Value *Ptr);
   Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond,
                            Instruction &I);
   Instruction *foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS,
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 651e305..550dfc5 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -105,6 +105,8 @@ static Value *simplifyShiftSelectingPackedElement(Instruction *I,
   if (~KnownShrBits.Zero != ShlAmt)
     return nullptr;
 
+  IRBuilderBase::InsertPointGuard Guard(IC.Builder);
+  IC.Builder.SetInsertPoint(I);
   Value *ShrAmtZ =
       IC.Builder.CreateICmpEQ(ShrAmt, Constant::getNullValue(ShrAmt->getType()),
                               ShrAmt->getName() + ".z");
