Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r-- | llvm/lib/Analysis/DependenceAnalysis.cpp | 57
-rw-r--r-- | llvm/lib/Analysis/IVDescriptors.cpp | 13
-rw-r--r-- | llvm/lib/Analysis/InstructionSimplify.cpp | 2
-rw-r--r-- | llvm/lib/Analysis/MemorySSA.cpp | 2
-rw-r--r-- | llvm/lib/Analysis/ScalarEvolution.cpp | 27
5 files changed, 65 insertions, 36 deletions
diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp
index 8d20b0e..805b682 100644
--- a/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -1180,32 +1180,41 @@ bool DependenceInfo::isKnownLessThan(const SCEV *S, const SCEV *Size) const {
   S = SE->getTruncateOrZeroExtend(S, MaxType);
   Size = SE->getTruncateOrZeroExtend(Size, MaxType);
 
-  // Special check for addrecs using BE taken count
-  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S))
-    if (AddRec->isAffine() && AddRec->hasNoSignedWrap()) {
-      const SCEV *BECount = SE->getBackedgeTakenCount(AddRec->getLoop());
-      const SCEV *Start = AddRec->getStart();
-      const SCEV *Step = AddRec->getStepRecurrence(*SE);
-      const SCEV *End = AddRec->evaluateAtIteration(BECount, *SE);
-      const SCEV *Diff0 = SE->getMinusSCEV(Start, Size);
-      const SCEV *Diff1 = SE->getMinusSCEV(End, Size);
-
-      // If the value of Step is non-negative and the AddRec is non-wrap, it
-      // reaches its maximum at the last iteration. So it's enouth to check
-      // whether End - Size is negative.
-      if (SE->isKnownNonNegative(Step) && SE->isKnownNegative(Diff1))
-        return true;
+  auto CheckAddRecBECount = [&]() {
+    const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S);
+    if (!AddRec || !AddRec->isAffine() || !AddRec->hasNoSignedWrap())
+      return false;
+    const SCEV *BECount = collectUpperBound(AddRec->getLoop(), MaxType);
+    // If the BTC cannot be computed, check the base case for S.
+    if (!BECount || isa<SCEVCouldNotCompute>(BECount))
+      return false;
+    const SCEV *Start = AddRec->getStart();
+    const SCEV *Step = AddRec->getStepRecurrence(*SE);
+    const SCEV *End = AddRec->evaluateAtIteration(BECount, *SE);
+    const SCEV *Diff0 = SE->getMinusSCEV(Start, Size);
+    const SCEV *Diff1 = SE->getMinusSCEV(End, Size);
+
+    // If the value of Step is non-negative and the AddRec is non-wrap, it
+    // reaches its maximum at the last iteration. So it's enouth to check
+    // whether End - Size is negative.
+    if (SE->isKnownNonNegative(Step) && SE->isKnownNegative(Diff1))
+      return true;
 
-      // If the value of Step is non-positive and the AddRec is non-wrap, the
-      // initial value is its maximum.
-      if (SE->isKnownNonPositive(Step) && SE->isKnownNegative(Diff0))
-        return true;
+    // If the value of Step is non-positive and the AddRec is non-wrap, the
+    // initial value is its maximum.
+    if (SE->isKnownNonPositive(Step) && SE->isKnownNegative(Diff0))
+      return true;
 
-      // Even if we don't know the sign of Step, either Start or End must be
-      // the maximum value of the AddRec since it is non-wrap.
-      if (SE->isKnownNegative(Diff0) && SE->isKnownNegative(Diff1))
-        return true;
-    }
+    // Even if we don't know the sign of Step, either Start or End must be
+    // the maximum value of the AddRec since it is non-wrap.
+    if (SE->isKnownNegative(Diff0) && SE->isKnownNegative(Diff1))
+      return true;
+
+    return false;
+  };
+
+  if (CheckAddRecBECount())
+    return true;
 
   // Check using normal isKnownNegative
   const SCEV *LimitedBound = SE->getMinusSCEV(S, Size);
diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index b8c540c..9f8ac6e 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -849,17 +849,12 @@ RecurrenceDescriptor::isMinMaxPattern(Instruction *I, RecurKind Kind,
 /// %sum.2 = select %cmp, %add, %sum.1
 RecurrenceDescriptor::InstDesc
 RecurrenceDescriptor::isConditionalRdxPattern(Instruction *I) {
-  SelectInst *SI = dyn_cast<SelectInst>(I);
-  if (!SI)
-    return InstDesc(false, I);
-
-  CmpInst *CI = dyn_cast<CmpInst>(SI->getCondition());
+  Value *TrueVal, *FalseVal;
   // Only handle single use cases for now.
-  if (!CI || !CI->hasOneUse())
+  if (!match(I,
+             m_Select(m_OneUse(m_Cmp()), m_Value(TrueVal), m_Value(FalseVal))))
     return InstDesc(false, I);
 
-  Value *TrueVal = SI->getTrueValue();
-  Value *FalseVal = SI->getFalseValue();
   // Handle only when either of operands of select instruction is a PHI
   // node for now.
   if ((isa<PHINode>(TrueVal) && isa<PHINode>(FalseVal)) ||
@@ -886,7 +881,7 @@ RecurrenceDescriptor::isConditionalRdxPattern(Instruction *I) {
   if (!IPhi || IPhi != FalseVal)
     return InstDesc(false, I);
 
-  return InstDesc(true, SI);
+  return InstDesc(true, I);
 }
 
 RecurrenceDescriptor::InstDesc RecurrenceDescriptor::isRecurrenceInstr(
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 4e38626..e08ef60 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -6644,7 +6644,7 @@ Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
            "Invalid mask width");
     // If index-width (mask size) is less than pointer-size then mask is
     // 1-extended.
-    if (match(Op1, m_PtrToInt(m_Specific(Op0))))
+    if (match(Op1, m_PtrToIntOrAddr(m_Specific(Op0))))
       return Op0;
 
     // NOTE: We may have attributes associated with the return value of the
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index ab37338..0b2e3fc 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -393,7 +393,7 @@ static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
 /// \param AA The AliasAnalysis we used for our search.
 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
-LLVM_ATTRIBUTE_UNUSED static void
+[[maybe_unused]] static void
 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                    const UpwardsMemoryQuery &Query, BatchAAResults &AA,
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 3fab6b0..a64b93d 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6417,8 +6417,18 @@ APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S,
   case scSequentialUMinExpr:
     return GetGCDMultiple(cast<SCEVNAryExpr>(S));
   case scUnknown: {
-    // ask ValueTracking for known bits
+    // Ask ValueTracking for known bits. SCEVUnknown only become available at
+    // the point their underlying IR instruction has been defined. If CtxI was
+    // not provided, use:
+    // * the first instruction in the entry block if it is an argument
+    // * the instruction itself otherwise.
     const SCEVUnknown *U = cast<SCEVUnknown>(S);
+    if (!CtxI) {
+      if (isa<Argument>(U->getValue()))
+        CtxI = &*F.getEntryBlock().begin();
+      else if (auto *I = dyn_cast<Instruction>(U->getValue()))
+        CtxI = I;
+    }
     unsigned Known =
         computeKnownBits(U->getValue(), getDataLayout(), &AC, CtxI, &DT)
             .countMinTrailingZeros();
@@ -15761,6 +15771,21 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
         const SCEV *OneAlignedUp =
             GetNextSCEVDividesByDivisor(One, DividesBy);
         To = SE.getUMaxExpr(FromRewritten, OneAlignedUp);
+      } else {
+        if (LHS->getType()->isPointerTy()) {
+          LHS = SE.getLosslessPtrToIntExpr(LHS);
+          RHS = SE.getLosslessPtrToIntExpr(RHS);
+          if (isa<SCEVCouldNotCompute>(LHS) || isa<SCEVCouldNotCompute>(RHS))
+            break;
+        }
+        auto AddSubRewrite = [&](const SCEV *A, const SCEV *B) {
+          const SCEV *Sub = SE.getMinusSCEV(A, B);
+          AddRewrite(Sub, Sub,
+                     SE.getUMaxExpr(Sub, SE.getOne(From->getType())));
+        };
+        AddSubRewrite(LHS, RHS);
+        AddSubRewrite(RHS, LHS);
+        continue;
       }
       break;
     default:
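
A note on the DependenceAnalysis change: CheckAddRecBECount relies on the fact that a non-wrapping affine recurrence {Start,+,Step} running for BECount backedges takes its maximum value at one of its two endpoints, so bounding Start and End against Size is enough. The sketch below is a plain-integer model of that endpoint argument, not the SCEV API; the name affineRecStaysBelow and the StepSign parameter are illustrative assumptions.

#include <cassert>
#include <cstdint>

// Plain-integer model of the endpoint reasoning in isKnownLessThan.
// StepSign encodes what the analysis can prove about Step: +1 known
// non-negative, -1 known non-positive, 0 unknown sign.
static bool affineRecStaysBelow(int64_t Start, int64_t Step, int64_t BTC,
                                int64_t Size, int StepSign) {
  int64_t End = Start + Step * BTC; // value after the last backedge (no wrap)
  if (StepSign > 0)
    return End - Size < 0;   // non-decreasing: End is the maximum
  if (StepSign < 0)
    return Start - Size < 0; // non-increasing: Start is the maximum
  // Unknown step sign: a non-wrapping affine recurrence still peaks at an
  // endpoint, so bounding both endpoints is sufficient.
  return Start - Size < 0 && End - Size < 0;
}

int main() {
  // {0,+,4} over 8 backedges stays below a 64-byte object.
  assert(affineRecStaysBelow(/*Start=*/0, /*Step=*/4, /*BTC=*/8,
                             /*Size=*/64, /*StepSign=*/+1));
  // {40,+,4} over 8 backedges reaches 72, which is not below 64.
  assert(!affineRecStaysBelow(40, 4, 8, 64, +1));
  return 0;
}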
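
Similarly, the LoopGuards hunk records that a dominating LHS != RHS guard makes both LHS - RHS and RHS - LHS nonzero, so each difference can be rewritten to umax(Sub, 1). A minimal sketch of that equivalence in plain unsigned arithmetic (wraparound behaves like SCEV's modular subtraction); rewriteSubUnderNeGuard is a hypothetical helper, not part of the patch.

#include <algorithm>
#include <cassert>
#include <cstdint>

// If a guard proves LHS != RHS, then LHS - RHS is a nonzero unsigned value,
// so it is unchanged by clamping with umax(Sub, 1).
static uint64_t rewriteSubUnderNeGuard(uint64_t LHS, uint64_t RHS) {
  uint64_t Sub = LHS - RHS;          // may wrap, but is nonzero when LHS != RHS
  return std::max<uint64_t>(Sub, 1); // umax(Sub, 1)
}

int main() {
  const uint64_t Vals[] = {0, 1, 7, UINT64_MAX};
  for (uint64_t L : Vals)
    for (uint64_t R : Vals)
      if (L != R) // the guard condition that justifies the rewrite
        assert(rewriteSubUnderNeGuard(L, R) == L - R);
  return 0;
}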