Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--   llvm/lib/Analysis/AssumeBundleQueries.cpp |  2
-rw-r--r--   llvm/lib/Analysis/AssumptionCache.cpp     | 22
-rw-r--r--   llvm/lib/Analysis/ScalarEvolution.cpp     | 51
-rw-r--r--   llvm/lib/Analysis/ValueTracking.cpp       | 14
4 files changed, 66 insertions, 23 deletions
diff --git a/llvm/lib/Analysis/AssumeBundleQueries.cpp b/llvm/lib/Analysis/AssumeBundleQueries.cpp
index 110cddb..7440dbd 100644
--- a/llvm/lib/Analysis/AssumeBundleQueries.cpp
+++ b/llvm/lib/Analysis/AssumeBundleQueries.cpp
@@ -162,7 +162,7 @@ llvm::getKnowledgeForValue(const Value *V,
     return RetainedKnowledge::none();
   if (AC) {
     for (AssumptionCache::ResultElem &Elem : AC->assumptionsFor(V)) {
-      auto *II = dyn_cast_or_null<AssumeInst>(Elem.Assume);
+      auto *II = cast_or_null<AssumeInst>(Elem.Assume);
       if (!II || Elem.Index == AssumptionCache::ExprResultIdx)
         continue;
       if (RetainedKnowledge RK = getKnowledgeFromBundle(
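The change above is safe only because, with this revert, the assumption cache holds nothing but @llvm.assume calls again. As a minimal illustration of the casting contract involved (the helper below is hypothetical, not code from the tree): cast_or_null<AssumeInst> still tolerates a null weak handle but asserts that any live value really is an assume, whereas dyn_cast_or_null would silently return null on a type mismatch and hide a broken cache invariant.

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Hypothetical helper: fetch a cached assumption, tolerating dead handles.
static AssumeInst *getCachedAssume(Value *CachedHandle) {
  // Null means the assumption was deleted or RAUW'd; anything non-null is
  // asserted (in debug builds) to be an @llvm.assume call.
  return cast_or_null<AssumeInst>(CachedHandle);
}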
diff --git a/llvm/lib/Analysis/AssumptionCache.cpp b/llvm/lib/Analysis/AssumptionCache.cpp
index 2d648cc..11796ef 100644
--- a/llvm/lib/Analysis/AssumptionCache.cpp
+++ b/llvm/lib/Analysis/AssumptionCache.cpp
@@ -6,8 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file contains a pass that keeps track of @llvm.assume and
-// @llvm.experimental.guard intrinsics in the functions of a module.
+// This file contains a pass that keeps track of @llvm.assume intrinsics in
+// the functions of a module.
 //
 //===----------------------------------------------------------------------===//
 
@@ -140,7 +140,7 @@ findAffectedValues(CallBase *CI, TargetTransformInfo *TTI,
   }
 }
 
-void AssumptionCache::updateAffectedValues(CondGuardInst *CI) {
+void AssumptionCache::updateAffectedValues(AssumeInst *CI) {
   SmallVector<AssumptionCache::ResultElem, 16> Affected;
   findAffectedValues(CI, TTI, Affected);
 
@@ -153,7 +153,7 @@ void AssumptionCache::updateAffectedValues(CondGuardInst *CI) {
   }
 }
 
-void AssumptionCache::unregisterAssumption(CondGuardInst *CI) {
+void AssumptionCache::unregisterAssumption(AssumeInst *CI) {
   SmallVector<AssumptionCache::ResultElem, 16> Affected;
   findAffectedValues(CI, TTI, Affected);
 
@@ -217,7 +217,7 @@ void AssumptionCache::scanFunction() {
   // to this cache.
   for (BasicBlock &B : F)
     for (Instruction &I : B)
-      if (isa<CondGuardInst>(&I))
+      if (isa<AssumeInst>(&I))
        AssumeHandles.push_back({&I, ExprResultIdx});
 
   // Mark the scan as complete.
@@ -225,10 +225,10 @@ void AssumptionCache::scanFunction() {
 
   // Update affected values.
   for (auto &A : AssumeHandles)
-    updateAffectedValues(cast<CondGuardInst>(A));
+    updateAffectedValues(cast<AssumeInst>(A));
 }
 
-void AssumptionCache::registerAssumption(CondGuardInst *CI) {
+void AssumptionCache::registerAssumption(AssumeInst *CI) {
   // If we haven't scanned the function yet, just drop this assumption. It will
   // be found when we scan later.
   if (!Scanned)
@@ -238,9 +238,9 @@ void AssumptionCache::registerAssumption(CondGuardInst *CI) {
 
 #ifndef NDEBUG
   assert(CI->getParent() &&
-         "Cannot a register CondGuardInst not in a basic block");
+         "Cannot register @llvm.assume call not in a basic block");
   assert(&F == CI->getParent()->getParent() &&
-         "Cannot a register CondGuardInst not in this function");
+         "Cannot register @llvm.assume call not in this function");
 
   // We expect the number of assumptions to be small, so in an asserts build
   // check that we don't accumulate duplicates and that all assumptions point
@@ -252,8 +252,8 @@ void AssumptionCache::registerAssumption(CondGuardInst *CI) {
 
     assert(&F == cast<Instruction>(VH)->getParent()->getParent() &&
            "Cached assumption not inside this function!");
-    assert(isa<CondGuardInst>(VH) &&
-           "Cached something other than CondGuardInst!");
+    assert(match(cast<CallInst>(VH), m_Intrinsic<Intrinsic::assume>()) &&
+           "Cached something other than a call to @llvm.assume!");
    assert(AssumptionSet.insert(VH).second &&
           "Cache contains multiple copies of a call!");
  }
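With CondGuardInst removed, AssumptionCache goes back to being an assume-only cache, and its registerAssumption/updateAffectedValues entry points take AssumeInst again. A small usage sketch under that assumption (illustrative helper, not part of the patch) of how a client typically walks the cache:

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Visit every cached @llvm.assume that affects V. Dead handles (deleted or
// RAUW'd assumptions) show up as null and are skipped.
static void forEachAssumptionOn(AssumptionCache &AC, const Value *V) {
  for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(V)) {
    auto *Assume = cast_or_null<AssumeInst>(Elem.Assume);
    if (!Assume)
      continue;
    // Elem.Index is ExprResultIdx when V feeds the assumed condition itself,
    // otherwise it is the index of the operand bundle that mentions V.
    (void)Assume;
  }
}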
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 923cd0f..93257e3 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1771,7 +1771,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
       // these to compute max backedge taken counts, but can still use
       // these to prove lack of overflow. Use this fact to avoid
       // doing extra work that may not pay off.
-      if (!isa<SCEVCouldNotCompute>(MaxBECount) || !AC.assumptions().empty()) {
+      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
+          !AC.assumptions().empty()) {
         auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
 
@@ -5147,7 +5148,8 @@ ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
   // these to prove lack of overflow. Use this fact to avoid
   // doing extra work that may not pay off.
 
-  if (isa<SCEVCouldNotCompute>(MaxBECount) && AC.assumptions().empty())
+  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
+      AC.assumptions().empty())
     return Result;
 
   // If the backedge is guarded by a comparison with the pre-inc value the
@@ -5200,7 +5202,8 @@ ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
   // these to prove lack of overflow. Use this fact to avoid
   // doing extra work that may not pay off.
 
-  if (isa<SCEVCouldNotCompute>(MaxBECount) && AC.assumptions().empty())
+  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
+      AC.assumptions().empty())
     return Result;
 
   // If the backedge is guarded by a comparison with the pre-inc value the
@@ -11388,7 +11391,7 @@ bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
                                         ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS) {
   // No need to even try if we know the module has no guards.
-  if (AC.assumptions().empty())
+  if (!HasGuards)
     return false;
 
   return any_of(*BB, [&](const Instruction &I) {
@@ -11598,6 +11601,15 @@ bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
       return true;
   }
 
+  // Check conditions due to any @llvm.experimental.guard intrinsics.
+  auto *GuardDecl = F.getParent()->getFunction(
+      Intrinsic::getName(Intrinsic::experimental_guard));
+  if (GuardDecl)
+    for (const auto *GU : GuardDecl->users())
+      if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
+        if (Guard->getFunction() == BB->getParent() && DT.dominates(Guard, BB))
+          if (ProveViaCond(Guard->getArgOperand(0), false))
+            return true;
   return false;
 }
 
@@ -13470,11 +13482,25 @@ ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                  LoopInfo &LI)
     : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
       CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
-      LoopDispositions(64), BlockDispositions(64) {}
+      LoopDispositions(64), BlockDispositions(64) {
+  // To use guards for proving predicates, we need to scan every instruction in
+  // relevant basic blocks, and not just terminators. Doing this is a waste of
+  // time if the IR does not actually contain any calls to
+  // @llvm.experimental.guard, so do a quick check and remember this beforehand.
+  //
+  // This pessimizes the case where a pass that preserves ScalarEvolution wants
+  // to _add_ guards to the module when there weren't any before, and wants
+  // ScalarEvolution to optimize based on those guards. For now we prefer to be
+  // efficient in lieu of being smart in that rather obscure case.
+
+  auto *GuardDecl = F.getParent()->getFunction(
+      Intrinsic::getName(Intrinsic::experimental_guard));
+  HasGuards = GuardDecl && !GuardDecl->use_empty();
+}
 
 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
-    : F(Arg.F), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), LI(Arg.LI),
-      CouldNotCompute(std::move(Arg.CouldNotCompute)),
+    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
+      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
       ValueExprMap(std::move(Arg.ValueExprMap)),
       PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
       PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
@@ -15166,7 +15192,16 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
       Terms.emplace_back(AssumeI->getOperand(0), true);
   }
 
-  // Second, collect conditions from dominating branches. Starting at the loop
+  // Second, collect information from llvm.experimental.guards dominating the loop.
+  auto *GuardDecl = F.getParent()->getFunction(
+      Intrinsic::getName(Intrinsic::experimental_guard));
+  if (GuardDecl)
+    for (const auto *GU : GuardDecl->users())
+      if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
+        if (Guard->getFunction() == Header->getParent() && DT.dominates(Guard, Header))
+          Terms.emplace_back(Guard->getArgOperand(0), true);
+
+  // Third, collect conditions from dominating branches. Starting at the loop
   // predecessor, climb up the predecessor chain, as long as there are
   // predecessors that can be found that have unique successors leading to the
   // original header.
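Most of these hunks re-gate guard-based reasoning behind the HasGuards flag that the constructor now computes once per ScalarEvolution instance. A minimal sketch of that check in isolation (hypothetical free function, assuming the usual LLVM headers) shows why it is cheap: it only asks whether the @llvm.experimental.guard declaration exists in the module and has any users, without scanning any basic block.

#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper mirroring the HasGuards computation: true if anything
// in F's module references @llvm.experimental.guard. An unused declaration
// counts as "no guards".
static bool moduleHasGuards(const Function &F) {
  Function *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  return GuardDecl && !GuardDecl->use_empty();
}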
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 5bd8cac..cfeb62d 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -616,7 +616,7 @@ static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
     if (!AssumeVH)
       continue;
-    CondGuardInst *I = cast<CondGuardInst>(AssumeVH);
+    CallInst *I = cast<CallInst>(AssumeVH);
     assert(I->getFunction() == Q.CxtI->getFunction() &&
            "Got assumption for the wrong function!");
 
@@ -624,6 +624,9 @@ static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
     // We're running this loop for once for each value queried resulting in a
     // runtime of ~O(#assumes * #values).
 
+    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
+           "must be an assume intrinsic");
+
     Value *RHS;
     CmpInst::Predicate Pred;
     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
@@ -661,7 +664,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
     if (!AssumeVH)
       continue;
-    CondGuardInst *I = cast<CondGuardInst>(AssumeVH);
+    CallInst *I = cast<CallInst>(AssumeVH);
     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
            "Got assumption for the wrong function!");
 
@@ -669,6 +672,9 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
     // We're running this loop for once for each value queried resulting in a
     // runtime of ~O(#assumes * #values).
 
+    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
+           "must be an assume intrinsic");
+
     Value *Arg = I->getArgOperand(0);
 
     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
@@ -7492,9 +7498,11 @@ ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
     for (auto &AssumeVH : AC->assumptionsFor(V)) {
       if (!AssumeVH)
         continue;
-      IntrinsicInst *I = cast<IntrinsicInst>(AssumeVH);
+      CallInst *I = cast<CallInst>(AssumeVH);
       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
              "Got assumption for the wrong function!");
+      assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
+             "must be an assume intrinsic");
       if (!isValidAssumeForContext(I, CtxI, DT))
         continue;
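Here the cached value is typed as a plain CallInst and the @llvm.assume invariant is re-checked with an assert instead of a CondGuardInst cast. As an illustrative sketch of the same pattern (hypothetical helper, not code from the tree), this is roughly how such an assumption is pattern-matched once the invariant is established:

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Returns true if the assumption asserts "V != 0", one of the simple forms
// isKnownNonZeroFromAssume recognizes.
static bool assumesNonZero(const CallInst *I, const Value *V) {
  assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
         "must be an assume intrinsic");
  CmpInst::Predicate Pred;
  return match(I->getArgOperand(0), m_ICmp(Pred, m_Specific(V), m_Zero())) &&
         Pred == ICmpInst::ICMP_NE;
}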