author    | Philip Reames <preames@rivosinc.com> | 2022-09-27 15:55:44 -0700
committer | Philip Reames <listmail@philipreames.com> | 2022-09-27 15:55:44 -0700
commit    | f6d110e26f1bdd3b4462f7fda620e07c425ccf76 (patch)
tree      | f13842a01e9a3b9b9e921e9f2ccac99a124f684e /llvm/lib/Analysis/LoopAccessAnalysis.cpp
parent    | 899ebd7e99fde149eea7fc378a8511f0e134b1a1 (diff)
download  | llvm-f6d110e26f1bdd3b4462f7fda620e07c425ccf76.zip, llvm-f6d110e26f1bdd3b4462f7fda620e07c425ccf76.tar.gz, llvm-f6d110e26f1bdd3b4462f7fda620e07c425ccf76.tar.bz2
[LAA] Make getPtrStride return Option instead of overloading zero as error value [nfc]
This is a purely NFC restructuring in advance of a change which actually exposes zero strides. It is motivated mostly by the fact that I find this interface confusing each time I look at it.
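To make the intent concrete, here is a rough, self-contained sketch of the pattern the change adopts. It uses std::optional in place of LLVM's Optional/None from the patch, and getStrideOld/getStrideNew are invented stand-ins for getPtrStride rather than LLVM code:

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

// Old style: 0 doubles as "could not compute the stride", so a genuine
// zero stride cannot be distinguished from failure.
int64_t getStrideOld(bool Known, int64_t Stride) {
  if (!Known)
    return 0; // error value overloaded onto a legal stride value
  return Stride;
}

// New style, mirroring the patch: failure is an empty optional
// (LLVM's `None`), and any integer, including 0, is a real stride.
std::optional<int64_t> getStrideNew(bool Known, int64_t Stride) {
  if (!Known)
    return std::nullopt;
  return Stride;
}

int main() {
  // With the old interface this 0 is ambiguous: unknown, or truly zero?
  std::cout << getStrideOld(false, 4) << "\n";             // 0
  // Call sites that only want the old behavior keep it with value_or(0),
  // as the updated callers in the diff below do.
  std::cout << getStrideNew(false, 4).value_or(0) << "\n"; // 0
  // A caller can now tell "unknown" apart from a genuine zero stride.
  if (auto S = getStrideNew(true, 0))
    std::cout << "known stride: " << *S << "\n";           // known stride: 0
  return 0;
}
```

Keeping value_or(0) at existing call sites preserves the old behavior, while a later change can start returning genuine zero strides without them being mistaken for analysis failure.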
Diffstat (limited to 'llvm/lib/Analysis/LoopAccessAnalysis.cpp')
-rw-r--r-- | llvm/lib/Analysis/LoopAccessAnalysis.cpp | 33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index f06e412..8dec053 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -758,7 +758,7 @@ static bool isNoWrap(PredicatedScalarEvolution &PSE,
   if (PSE.getSE()->isLoopInvariant(PtrScev, L))
     return true;
 
-  int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides);
+  int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
   if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
     return true;
 
@@ -1365,17 +1365,18 @@ static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
 }
 
 /// Check whether the access through \p Ptr has a constant stride.
-int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
-                           Value *Ptr, const Loop *Lp,
-                           const ValueToValueMap &StridesMap, bool Assume,
-                           bool ShouldCheckWrap) {
+Optional<int64_t>
+llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
+                   Value *Ptr, const Loop *Lp,
+                   const ValueToValueMap &StridesMap, bool Assume,
+                   bool ShouldCheckWrap) {
   Type *Ty = Ptr->getType();
   assert(Ty->isPointerTy() && "Unexpected non-ptr");
 
   if (isa<ScalableVectorType>(AccessTy)) {
     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
                       << "\n");
-    return 0;
+    return None;
   }
 
   const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
@@ -1387,14 +1388,14 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
   if (!AR) {
     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                       << " SCEV: " << *PtrScev << "\n");
-    return 0;
+    return None;
   }
 
   // The access function must stride over the innermost loop.
   if (Lp != AR->getLoop()) {
     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                       << *Ptr << " SCEV: " << *AR << "\n");
-    return 0;
+    return None;
   }
 
   // The address calculation must not wrap. Otherwise, a dependence could be
@@ -1422,7 +1423,7 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
       LLVM_DEBUG(
           dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                  << *Ptr << " SCEV: " << *AR << "\n");
-      return 0;
+      return None;
     }
   }
 
@@ -1434,7 +1435,7 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
   if (!C) {
     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                       << " SCEV: " << *AR << "\n");
-    return 0;
+    return None;
   }
 
   auto &DL = Lp->getHeader()->getModule()->getDataLayout();
@@ -1444,7 +1445,7 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
 
   // Huge step value - give up.
   if (APStepVal.getBitWidth() > 64)
-    return 0;
+    return None;
 
   int64_t StepVal = APStepVal.getSExtValue();
 
@@ -1452,7 +1453,7 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
   int64_t Stride = StepVal / Size;
   int64_t Rem = StepVal % Size;
   if (Rem)
-    return 0;
+    return None;
 
   // If the SCEV could wrap but we have an inbounds gep with a unit stride we
   // know we can't "wrap around the address space". In case of address space
@@ -1469,7 +1470,7 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
                         << "LAA: Added an overflow assumption\n");
       PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
     } else
-      return 0;
+      return None;
   }
 
   return Stride;
@@ -1846,9 +1847,9 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
     return Dependence::Unknown;
 
   int64_t StrideAPtr =
-      getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true);
+      getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
   int64_t StrideBPtr =
-      getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true);
+      getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
 
   const SCEV *Src = PSE.getSCEV(APtr);
   const SCEV *Sink = PSE.getSCEV(BPtr);
@@ -2350,7 +2351,7 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
     bool IsReadOnlyPtr = false;
     Type *AccessTy = getLoadStoreType(LD);
     if (Seen.insert({Ptr, AccessTy}).second ||
-        !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides)) {
+        !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
       ++NumReads;
       IsReadOnlyPtr = true;
     }