Diffstat (limited to 'llvm/lib/Analysis/MemoryDependenceAnalysis.cpp')
 llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 298
 1 file changed, 154 insertions(+), 144 deletions(-)
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 214f968..fe32a34f 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -45,8 +45,7 @@ STATISTIC(NumCacheNonLocalPtr,
           "Number of fully cached non-local ptr responses");
 STATISTIC(NumCacheDirtyNonLocalPtr,
           "Number of cached, but dirty, non-local ptr responses");
-STATISTIC(NumUncacheNonLocalPtr,
-          "Number of uncached non-local ptr responses");
+STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
 STATISTIC(NumCacheCompleteNonLocalPtr,
           "Number of block queries that were completely cached");
 
@@ -57,10 +56,10 @@ static cl::opt<unsigned> BlockScanLimit(
     cl::desc("The number of instructions to scan in a block in memory "
              "dependency analysis (default = 100)"));
 
-static cl::opt<unsigned> BlockNumberLimit(
-    "memdep-block-number-limit", cl::Hidden, cl::init(1000),
-    cl::desc("The number of blocks to scan during memory "
-             "dependency analysis (default = 1000)"));
+static cl::opt<unsigned>
+    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
+                     cl::desc("The number of blocks to scan during memory "
+                              "dependency analysis (default = 1000)"));
 
 // Limit on the number of memdep results to process.
 static const unsigned int NumResultsLimit = 100;
@@ -69,19 +68,17 @@ char MemoryDependenceAnalysis::ID = 0;
 
 // Register this pass...
 INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
-                      "Memory Dependence Analysis", false, true)
+                      "Memory Dependence Analysis", false, true)
 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
 INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
-                    "Memory Dependence Analysis", false, true)
+                    "Memory Dependence Analysis", false, true)
 
-MemoryDependenceAnalysis::MemoryDependenceAnalysis()
-    : FunctionPass(ID) {
+MemoryDependenceAnalysis::MemoryDependenceAnalysis() : FunctionPass(ID) {
   initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
 }
-MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
-}
+MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {}
 
 /// Clean up memory in between runs
 void MemoryDependenceAnalysis::releaseMemory() {
@@ -116,14 +113,15 @@ bool MemoryDependenceAnalysis::runOnFunction(Function &F) {
 /// RemoveFromReverseMap - This is a helper function that removes Val from
 /// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
 template <typename KeyTy>
-static void RemoveFromReverseMap(DenseMap<Instruction*,
-                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
-                                 Instruction *Inst, KeyTy Val) {
-  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
-    InstIt = ReverseMap.find(Inst);
+static void
+RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
+                     Instruction *Inst, KeyTy Val) {
+  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
+      ReverseMap.find(Inst);
   assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
   bool Found = InstIt->second.erase(Val);
-  assert(Found && "Invalid reverse map!"); (void)Found;
+  assert(Found && "Invalid reverse map!");
+  (void)Found;
   if (InstIt->second.empty())
     ReverseMap.erase(InstIt);
 }
@@ -208,9 +206,9 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
 
 /// getCallSiteDependencyFrom - Private helper for finding the local
 /// dependencies of a call site.
-MemDepResult MemoryDependenceAnalysis::
-getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
-                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
+MemDepResult MemoryDependenceAnalysis::getCallSiteDependencyFrom(
+    CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
+    BasicBlock *BB) {
   unsigned Limit = BlockScanLimit;
 
   // Walk backwards through the block, looking for dependencies
@@ -235,7 +233,8 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
 
     if (auto InstCS = CallSite(Inst)) {
       // Debug intrinsics don't cause dependences.
-      if (isa<DbgInfoIntrinsic>(Inst)) continue;
+      if (isa<DbgInfoIntrinsic>(Inst))
+        continue;
       // If these two calls do not interfere, look past it.
       switch (AA->getModRefInfo(CS, InstCS)) {
       case MRI_NoModRef:
@@ -297,7 +296,8 @@ unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
     const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
     const LoadInst *LI) {
   // We can only extend simple integer loads.
-  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
+  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
+    return 0;
 
   // Load widening is hostile to ThreadSanitizer: it may cause false positives
   // or make the reports more cryptic (access sizes are wrong).
@@ -313,7 +313,8 @@ unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
 
   // If the two pointers are not based on the same pointer, we can't tell that
   // they are related.
-  if (LIBase != MemLocBase) return 0;
+  if (LIBase != MemLocBase)
+    return 0;
 
   // Okay, the two values are based on the same pointer, but returned as
   // no-alias.  This happens when we have things like two byte loads at "P+1"
@@ -322,7 +323,8 @@ unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
   // the bits required by MemLoc.
 
   // If MemLoc is before LI, then no widening of LI will help us out.
-  if (MemLocOffs < LIOffs) return 0;
+  if (MemLocOffs < LIOffs)
+    return 0;
 
   // Get the alignment of the load in bytes.  We assume that it is safe to load
   // any legal integer up to this size without a problem.  For example, if we're
@@ -331,21 +333,22 @@ unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
   // to i16.
   unsigned LoadAlign = LI->getAlignment();
 
-  int64_t MemLocEnd = MemLocOffs+MemLocSize;
+  int64_t MemLocEnd = MemLocOffs + MemLocSize;
 
   // If no amount of rounding up will let MemLoc fit into LI, then bail out.
-  if (LIOffs+LoadAlign < MemLocEnd) return 0;
+  if (LIOffs + LoadAlign < MemLocEnd)
+    return 0;
 
   // This is the size of the load to try.  Start with the next larger power of
   // two.
-  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
+  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
   NewLoadByteSize = NextPowerOf2(NewLoadByteSize);
 
   while (1) {
     // If this load size is bigger than our known alignment or would not fit
     // into a native integer register, then we fail.
     if (NewLoadByteSize > LoadAlign ||
-        !DL.fitsInLegalInteger(NewLoadByteSize*8))
+        !DL.fitsInLegalInteger(NewLoadByteSize * 8))
       return 0;
 
     if (LIOffs + NewLoadByteSize > MemLocEnd &&
@@ -357,7 +360,7 @@ unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
       return 0;
 
     // If a load of this width would include all of MemLoc, then we succeed.
-    if (LIOffs+NewLoadByteSize >= MemLocEnd)
+    if (LIOffs + NewLoadByteSize >= MemLocEnd)
       return NewLoadByteSize;
 
     NewLoadByteSize <<= 1;
@@ -374,7 +377,6 @@ static bool isVolatile(Instruction *Inst) {
   return false;
 }
 
-
 /// getPointerDependencyFrom - Return the instruction on which a memory
 /// location depends.  If isLoad is true, this routine ignores may-aliases with
 /// read-only operations.  If isLoad is false, this routine ignores may-aliases
@@ -507,7 +509,7 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
 
   // Return "true" if and only if the instruction I is either a non-simple
   // load or a non-simple store.
-  auto isNonSimpleLoadOrStore = [] (Instruction *I) -> bool {
+  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
     if (auto *LI = dyn_cast<LoadInst>(I))
       return !LI->isSimple();
     if (auto *SI = dyn_cast<StoreInst>(I))
@@ -517,7 +519,7 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
 
   // Return "true" if I is not a load and not a store, but it does access
   // memory.
-  auto isOtherMemAccess = [] (Instruction *I) -> bool {
+  auto isOtherMemAccess = [](Instruction *I) -> bool {
     return !isa<LoadInst>(I) && !isa<StoreInst>(I) &&
            I->mayReadOrWriteMemory();
   };
@@ -527,7 +529,8 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
 
     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
       // Debug intrinsics don't (and can't) cause dependencies.
-      if (isa<DbgInfoIntrinsic>(II)) continue;
+      if (isa<DbgInfoIntrinsic>(II))
+        continue;
 
     // Limit the amount of scanning we do so we don't end up with quadratic
     // running time on extreme testcases.
@@ -549,11 +552,11 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
       }
     }
 
-    // Values depend on loads if the pointers are must aliased.  This means that
-    // a load depends on another must aliased load from the same value.
-    // One exception is atomic loads: a value can depend on an atomic load that it
-    // does not alias with when this atomic load indicates that another thread may
-    // be accessing the location.
+    // Values depend on loads if the pointers are must aliased.  This means
+    // that a load depends on another must aliased load from the same value.
+    // One exception is atomic loads: a value can depend on an atomic load that
+    // it does not alias with when this atomic load indicates that another
+    // thread may be accessing the location.
     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
 
       // While volatile access cannot be eliminated, they do not have to clobber
@@ -570,7 +573,8 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
       }
 
       // Atomic loads have complications involved.
-      // A Monotonic (or higher) load is OK if the query inst is itself not atomic.
+      // A Monotonic (or higher) load is OK if the query inst is itself not
+      // atomic.
       // FIXME: This is overly conservative.
       if (LI->isAtomic() && LI->getOrdering() > Unordered) {
         if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
@@ -673,7 +677,7 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
       if (R == MustAlias)
         return MemDepResult::getDef(Inst);
       if (isInvariantLoad)
-       continue;
+        continue;
       return MemDepResult::getClobber(Inst);
     }
 
@@ -703,7 +707,7 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
       }
 
       if (isInvariantLoad)
-       continue;
+        continue;
 
       // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
     ModRefInfo MR = AA->getModRefInfo(Inst, MemLoc);
@@ -798,7 +802,8 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
 /// cache arrays are properly kept sorted.
 static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                          int Count = -1) {
-  if (Count == -1) Count = Cache.size();
+  if (Count == -1)
+    Count = Cache.size();
   assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
          "Cache isn't sorted!");
 }
@@ -819,7 +824,8 @@ static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
 const MemoryDependenceAnalysis::NonLocalDepInfo &
 MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
   assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
-         "getNonLocalCallDependency should only be used on calls with non-local deps!");
+         "getNonLocalCallDependency should only be used on calls with "
+         "non-local deps!");
   PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
   NonLocalDepInfo &Cache = CacheP.first;
 
@@ -827,7 +833,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
   /// the cached case, this can happen due to instructions being deleted etc. In
   /// the uncached case, this starts out as the set of predecessors we care
   /// about.
-  SmallVector<BasicBlock*, 32> DirtyBlocks;
+  SmallVector<BasicBlock *, 32> DirtyBlocks;
 
   if (!Cache.empty()) {
     // Okay, we have a cache entry.  If we know it is not dirty, just return it
@@ -839,8 +845,8 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
 
     // If we already have a partially computed set of results, scan them to
     // determine what is dirty, seeding our initial DirtyBlocks worklist.
-    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
-         I != E; ++I)
+    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end(); I != E;
+         ++I)
       if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());
 
@@ -848,7 +854,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
     std::sort(Cache.begin(), Cache.end());
 
     ++NumCacheDirtyNonLocal;
-    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
+    // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
     //     << Cache.size() << " cached: " << *QueryInst;
   } else {
     // Seed DirtyBlocks with each of the preds of QueryInst's block.
@@ -861,7 +867,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
 
   // isReadonlyCall - If this is a read-only call, we can be more aggressive.
   bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);
-  SmallPtrSet<BasicBlock*, 32> Visited;
+  SmallPtrSet<BasicBlock *, 32> Visited;
 
   unsigned NumSortedEntries = Cache.size();
   DEBUG(AssertSorted(Cache));
@@ -879,13 +885,13 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
     // the cache set.  If so, find it.
     DEBUG(AssertSorted(Cache, NumSortedEntries));
     NonLocalDepInfo::iterator Entry =
-      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
-                       NonLocalDepEntry(DirtyBB));
+        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
+                         NonLocalDepEntry(DirtyBB));
     if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
       --Entry;
 
     NonLocalDepEntry *ExistingResult = nullptr;
-    if (Entry != Cache.begin()+NumSortedEntries &&
+    if (Entry != Cache.begin() + NumSortedEntries &&
         Entry->getBB() == DirtyBB) {
       // If we already have an entry, and if it isn't already dirty, the block
       // is done.
@@ -912,7 +918,8 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
     MemDepResult Dep;
 
     if (ScanPos != DirtyBB->begin()) {
-      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall,ScanPos, DirtyBB);
+      Dep =
+          getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
     } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
       // No dependence found.  If this is the entry block of the function, it is
       // a clobber, otherwise it is unknown.
@@ -954,9 +961,8 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
 /// This method assumes the pointer has a "NonLocal" dependency within its
 /// own block.
 ///
-void MemoryDependenceAnalysis::
-getNonLocalPointerDependency(Instruction *QueryInst,
-                             SmallVectorImpl<NonLocalDepResult> &Result) {
+void MemoryDependenceAnalysis::getNonLocalPointerDependency(
+    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
   const MemoryLocation Loc = MemoryLocation::get(QueryInst);
   bool isLoad = isa<LoadInst>(QueryInst);
   BasicBlock *FromBB = QueryInst->getParent();
@@ -983,8 +989,7 @@ getNonLocalPointerDependency(Instruction *QueryInst,
     return false;
   };
   if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
-    Result.push_back(NonLocalDepResult(FromBB,
-                                       MemDepResult::getUnknown(),
+    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                        const_cast<Value *>(Loc.Ptr)));
     return;
   }
@@ -995,13 +1000,12 @@ getNonLocalPointerDependency(Instruction *QueryInst,
   // each block.  Because of critical edges, we currently bail out if querying
   // a block with multiple different pointers.  This can happen during PHI
   // translation.
-  DenseMap<BasicBlock*, Value*> Visited;
+  DenseMap<BasicBlock *, Value *> Visited;
   if (!getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                    Result, Visited, true))
     return;
   Result.clear();
-  Result.push_back(NonLocalDepResult(FromBB,
-                                     MemDepResult::getUnknown(),
+  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                      const_cast<Value *>(Loc.Ptr)));
 }
 
@@ -1015,14 +1019,13 @@ MemDepResult MemoryDependenceAnalysis::GetNonLocalInfoForBlock(
 
   // Do a binary search to see if we already have an entry for this block in
   // the cache set.  If so, find it.
-  NonLocalDepInfo::iterator Entry =
-    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
-                     NonLocalDepEntry(BB));
-  if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
+  NonLocalDepInfo::iterator Entry = std::upper_bound(
+      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
+  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
     --Entry;
 
   NonLocalDepEntry *ExistingResult = nullptr;
-  if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
+  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
     ExistingResult = &*Entry;
 
   // If we have a cached entry, and it is non-dirty, use it as the value for
@@ -1050,8 +1053,8 @@ MemDepResult MemoryDependenceAnalysis::GetNonLocalInfoForBlock(
   }
 
   // Scan the block for the dependency.
-  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB,
-                                              QueryInst);
+  MemDepResult Dep =
+      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);
 
   // If we had a dirty entry for the block, update it.  Otherwise, just add
   // a new entry.
@@ -1090,7 +1093,7 @@ SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
     NonLocalDepEntry Val = Cache.back();
     Cache.pop_back();
     MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
-      std::upper_bound(Cache.begin(), Cache.end()-1, Val);
+        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
     Cache.insert(Entry, Val);
     // FALL THROUGH.
   }
@@ -1100,7 +1103,7 @@ SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
     NonLocalDepEntry Val = Cache.back();
     Cache.pop_back();
     MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
-      std::upper_bound(Cache.begin(), Cache.end(), Val);
+        std::upper_bound(Cache.begin(), Cache.end(), Val);
     Cache.insert(Entry, Val);
   }
   break;
@@ -1142,7 +1145,7 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
   // Get the NLPI for CacheKey, inserting one into the map if it doesn't
   // already have one.
   std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
-    NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
+      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
   NonLocalPointerInfo *CacheInfo = &Pair.first->second;
 
   // If we already have a cache entry for this CacheKey, we may need to do some
@@ -1154,17 +1157,17 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
       CacheInfo->Pair = BBSkipFirstBlockPair();
       CacheInfo->Size = Loc.Size;
       for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
-           DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
+                                     DE = CacheInfo->NonLocalDeps.end();
+           DI != DE; ++DI)
         if (Instruction *Inst = DI->getResult().getInst())
           RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
       CacheInfo->NonLocalDeps.clear();
     } else if (CacheInfo->Size > Loc.Size) {
       // This query's Size is less than the cached one. Conservatively restart
      // the query using the greater size.
-      return getNonLocalPointerDepFromBB(QueryInst, Pointer,
-                                         Loc.getWithNewSize(CacheInfo->Size),
-                                         isLoad, StartBB, Result, Visited,
-                                         SkipFirstBlock);
+      return getNonLocalPointerDepFromBB(
+          QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
+          StartBB, Result, Visited, SkipFirstBlock);
     }
 
     // If the query's AATags are inconsistent with the cached one,
@@ -1175,16 +1178,16 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
       CacheInfo->Pair = BBSkipFirstBlockPair();
       CacheInfo->AATags = AAMDNodes();
       for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
-           DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
+                                     DE = CacheInfo->NonLocalDeps.end();
+           DI != DE; ++DI)
        if (Instruction *Inst = DI->getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    }
    if (Loc.AATags)
-     return getNonLocalPointerDepFromBB(QueryInst,
-                                        Pointer, Loc.getWithoutAATags(),
-                                        isLoad, StartBB, Result, Visited,
-                                        SkipFirstBlock);
+     return getNonLocalPointerDepFromBB(
+         QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result,
+         Visited, SkipFirstBlock);
  }
 }
 
@@ -1201,7 +1204,7 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
 
   if (!Visited.empty()) {
     for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
         I != E; ++I) {
-      DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
+      DenseMap<BasicBlock *, Value *>::iterator VI = Visited.find(I->getBB());
       if (VI == Visited.end() || VI->second == Pointer.getAddr())
         continue;
@@ -1213,17 +1216,16 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
   }
 
   Value *Addr = Pointer.getAddr();
-  for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
-       I != E; ++I) {
+  for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end(); I != E;
+       ++I) {
     Visited.insert(std::make_pair(I->getBB(), Addr));
     if (I->getResult().isNonLocal()) {
      continue;
    }
 
    if (!DT) {
-     Result.push_back(NonLocalDepResult(I->getBB(),
-                                        MemDepResult::getUnknown(),
-                                        Addr));
+     Result.push_back(
+         NonLocalDepResult(I->getBB(), MemDepResult::getUnknown(), Addr));
    } else if (DT->isReachableFromEntry(I->getBB())) {
      Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
    }
@@ -1241,11 +1243,11 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
   else
     CacheInfo->Pair = BBSkipFirstBlockPair();
 
-  SmallVector<BasicBlock*, 32> Worklist;
+  SmallVector<BasicBlock *, 32> Worklist;
   Worklist.push_back(StartBB);
 
   // PredList used inside loop.
-  SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;
+  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;
 
   // Keep track of the entries that we know are sorted.  Previously cached
   // entries will all be sorted.  The entries we add we only sort on demand (we
@@ -1287,15 +1289,13 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
     // Get the dependency info for Pointer in BB.  If we have cached
     // information, we will use it, otherwise we compute it.
     DEBUG(AssertSorted(*Cache, NumSortedEntries));
-    MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst,
-                                               Loc, isLoad, BB, Cache,
-                                               NumSortedEntries);
+    MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
+                                               Cache, NumSortedEntries);
 
     // If we got a Def or Clobber, add this to the list of results.
     if (!Dep.isNonLocal()) {
       if (!DT) {
-        Result.push_back(NonLocalDepResult(BB,
-                                           MemDepResult::getUnknown(),
+        Result.push_back(NonLocalDepResult(BB, MemDepResult::getUnknown(),
                                            Pointer.getAddr()));
         continue;
       } else if (DT->isReachableFromEntry(BB)) {
@@ -1311,11 +1311,11 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
     // the same Pointer.
     if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
       SkipFirstBlock = false;
-      SmallVector<BasicBlock*, 16> NewBlocks;
+      SmallVector<BasicBlock *, 16> NewBlocks;
       for (BasicBlock *Pred : PredCache.get(BB)) {
         // Verify that we haven't looked at this block yet.
-        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
-          InsertRes = Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
+        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
+            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
         if (InsertRes.second) {
           // First time we've looked at *PI.
           NewBlocks.push_back(Pred);
@@ -1377,8 +1377,8 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
       // with PHI translation when a critical edge exists and the PHI node in
       // the successor translates to a pointer value different than the
       // pointer the block was first analyzed with.
-      std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
-        InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));
+      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
+          Visited.insert(std::make_pair(Pred, PredPtrVal));
 
       if (!InsertRes.second) {
         // We found the pred; take it off the list of preds to visit.
@@ -1430,9 +1430,8 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
       // assume it is unknown, but this also does not block PRE of the load.
       if (!CanTranslate ||
           getNonLocalPointerDepFromBB(QueryInst, PredPointer,
-                                      Loc.getWithNewPtr(PredPtrVal),
-                                      isLoad, Pred,
-                                      Result, Visited)) {
+                                      Loc.getWithNewPtr(PredPtrVal), isLoad,
+                                      Pred, Result, Visited)) {
         // Add the entry to the Result list.
         NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
         Result.push_back(Entry);
@@ -1488,17 +1487,17 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
       return true;
 
     bool foundBlock = false;
-    for (NonLocalDepEntry &I: llvm::reverse(*Cache)) {
+    for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
       if (I.getBB() != BB)
         continue;
 
-      assert((GotWorklistLimit || I.getResult().isNonLocal() || \
+      assert((GotWorklistLimit || I.getResult().isNonLocal() ||
              !DT->isReachableFromEntry(BB)) &&
             "Should only be here with transparent block");
      foundBlock = true;
      I.setResult(MemDepResult::getUnknown());
-     Result.push_back(NonLocalDepResult(I.getBB(), I.getResult(),
-                                        Pointer.getAddr()));
+     Result.push_back(
+         NonLocalDepResult(I.getBB(), I.getResult(), Pointer.getAddr()));
      break;
    }
    (void)foundBlock;
@@ -1513,11 +1512,11 @@ bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
 
 /// RemoveCachedNonLocalPointerDependencies - If P exists in
 /// CachedNonLocalPointerInfo, remove it.
-void MemoryDependenceAnalysis::
-RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
-  CachedNonLocalPointerInfo::iterator It =
-    NonLocalPointerDeps.find(P);
-  if (It == NonLocalPointerDeps.end()) return;
+void MemoryDependenceAnalysis::RemoveCachedNonLocalPointerDependencies(
+    ValueIsLoadPair P) {
+  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
+  if (It == NonLocalPointerDeps.end())
+    return;
 
   // Remove all of the entries in the BB->val map.  This involves removing
   // instructions from the reverse map.
@@ -1525,7 +1524,8 @@ RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
 
   for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
     Instruction *Target = PInfo[i].getResult().getInst();
-    if (!Target) continue;  // Ignore non-local dep results.
+    if (!Target)
+      continue; // Ignore non-local dep results.
     assert(Target->getParent() == PInfo[i].getBB());
 
     // Eliminating the dirty entry from 'Cache', so update the reverse info.
@@ -1536,7 +1536,6 @@ RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
   NonLocalPointerDeps.erase(It);
 }
 
-
 /// invalidateCachedPointerInfo - This method is used to invalidate cached
 /// information about the specified pointer, because it may be too
 /// conservative in memdep.  This is an optional call that can be used when
@@ -1545,7 +1544,8 @@ RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P)
 /// in more places that cached info does not necessarily keep.
 void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
   // If Ptr isn't really a pointer, just ignore it.
-  if (!Ptr->getType()->isPointerTy()) return;
+  if (!Ptr->getType()->isPointerTy())
+    return;
   // Flush store info for the pointer.
   RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
   // Flush load info for the pointer.
@@ -1600,7 +1600,7 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
 
   // Loop over all of the things that depend on the instruction we're removing.
   //
-  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;
+  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;
 
   // If we find RemInst as a clobber or Def in any of the maps for other values,
   // we need to replace its entry with a dirty version of the instruction after
@@ -1625,10 +1625,11 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
       LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
 
       // Make sure to remember that new things depend on NewDepInst.
-      assert(NewDirtyVal.getInst() && "There is no way something else can have "
+      assert(NewDirtyVal.getInst() &&
+             "There is no way something else can have "
              "a local dep on this if it is a terminator!");
-      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
-                                                InstDependingOnRemInst));
+      ReverseDepsToAdd.push_back(
+          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
     }
 
     ReverseLocalDeps.erase(ReverseDepIt);
@@ -1636,8 +1637,8 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
     // Add new reverse deps after scanning the set, to avoid invalidating the
     // 'ReverseDeps' reference.
     while (!ReverseDepsToAdd.empty()) {
-      ReverseLocalDeps[ReverseDepsToAdd.back().first]
-        .insert(ReverseDepsToAdd.back().second);
+      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
+          ReverseDepsToAdd.back().second);
       ReverseDepsToAdd.pop_back();
     }
   }
@@ -1652,8 +1653,10 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
     INLD.second = true;
 
     for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
-         DE = INLD.first.end(); DI != DE; ++DI) {
-      if (DI->getResult().getInst() != RemInst) continue;
+                                   DE = INLD.first.end();
+         DI != DE; ++DI) {
+      if (DI->getResult().getInst() != RemInst)
+        continue;
 
       // Convert to a dirty entry for the subsequent instruction.
       DI->setResult(NewDirtyVal);
@@ -1667,8 +1670,8 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
 
     // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
     while (!ReverseDepsToAdd.empty()) {
-      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
-        .insert(ReverseDepsToAdd.back().second);
+      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
+          ReverseDepsToAdd.back().second);
       ReverseDepsToAdd.pop_back();
     }
   }
@@ -1676,9 +1679,10 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
   // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
   // value in the NonLocalPointerDeps info.
   ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
-    ReverseNonLocalPtrDeps.find(RemInst);
+      ReverseNonLocalPtrDeps.find(RemInst);
   if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
-    SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;
+    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
+        ReversePtrDepsToAdd;
 
     for (ValueIsLoadPair P : ReversePtrDepIt->second) {
       assert(P.getPointer() != RemInst &&
@@ -1692,7 +1696,8 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
 
       // Update any entries for RemInst to use the instruction after it.
       for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
           DI != DE; ++DI) {
-        if (DI->getResult().getInst() != RemInst) continue;
+        if (DI->getResult().getInst() != RemInst)
+          continue;
 
         // Convert to a dirty entry for the subsequent instruction.
         DI->setResult(NewDirtyVal);
@@ -1709,13 +1714,12 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
 
     ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
 
     while (!ReversePtrDepsToAdd.empty()) {
-      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
-        .insert(ReversePtrDepsToAdd.back().second);
+      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
+          ReversePtrDepsToAdd.back().second);
       ReversePtrDepsToAdd.pop_back();
     }
   }
 
-
   assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
   DEBUG(verifyRemoved(RemInst));
 }
@@ -1725,14 +1729,16 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
 void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
 #ifndef NDEBUG
   for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
-       E = LocalDeps.end(); I != E; ++I) {
+                                       E = LocalDeps.end();
+       I != E; ++I) {
     assert(I->first != D && "Inst occurs in data structures");
-    assert(I->second.getInst() != D &&
-           "Inst occurs in data structures");
+    assert(I->second.getInst() != D && "Inst occurs in data structures");
   }
 
-  for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
-       E = NonLocalPointerDeps.end(); I != E; ++I) {
+  for (CachedNonLocalPointerInfo::const_iterator
+           I = NonLocalPointerDeps.begin(),
+           E = NonLocalPointerDeps.end();
+       I != E; ++I) {
     assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
     const NonLocalDepInfo &Val = I->second.NonLocalDeps;
     for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
@@ -1741,23 +1747,27 @@ void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
   }
 
   for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
-       E = NonLocalDeps.end(); I != E; ++I) {
+                                          E = NonLocalDeps.end();
+       I != E; ++I) {
     assert(I->first != D && "Inst occurs in data structures");
     const PerInstNLInfo &INLD = I->second;
     for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
-         EE = INLD.first.end(); II != EE; ++II)
-      assert(II->getResult().getInst() != D &&
"Inst occurs in data structures"); + EE = INLD.first.end(); + II != EE; ++II) + assert(II->getResult().getInst() != D && + "Inst occurs in data structures"); } for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(), - E = ReverseLocalDeps.end(); I != E; ++I) { + E = ReverseLocalDeps.end(); + I != E; ++I) { assert(I->first != D && "Inst occurs in data structures"); for (Instruction *Inst : I->second) assert(Inst != D && "Inst occurs in data structures"); } for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(), - E = ReverseNonLocalDeps.end(); + E = ReverseNonLocalDeps.end(); I != E; ++I) { assert(I->first != D && "Inst occurs in data structures"); for (Instruction *Inst : I->second) @@ -1765,13 +1775,13 @@ void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const { } for (ReverseNonLocalPtrDepTy::const_iterator - I = ReverseNonLocalPtrDeps.begin(), - E = ReverseNonLocalPtrDeps.end(); I != E; ++I) { + I = ReverseNonLocalPtrDeps.begin(), + E = ReverseNonLocalPtrDeps.end(); + I != E; ++I) { assert(I->first != D && "Inst occurs in rev NLPD map"); for (ValueIsLoadPair P : I->second) - assert(P != ValueIsLoadPair(D, false) && - P != ValueIsLoadPair(D, true) && + assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) && "Inst occurs in ReverseNonLocalPtrDeps map"); } #endif |