| author | Nikita Popov <npopov@redhat.com> | 2023-06-02 11:05:55 +0200 |
|---|---|---|
| committer | Nikita Popov <npopov@redhat.com> | 2023-06-02 11:17:46 +0200 |
| commit | 371835e82c0a1521d4400e117065b3493b68a7bb (patch) | |
| tree | 39799840fc70f24f59a92799d4c661ea51690742 /llvm/lib/Analysis/ValueTracking.cpp | |
| parent | fb447e7e7c8bbad71032e732b7436fbf7097dd90 (diff) | |
| download | llvm-371835e82c0a1521d4400e117065b3493b68a7bb.zip llvm-371835e82c0a1521d4400e117065b3493b68a7bb.tar.gz llvm-371835e82c0a1521d4400e117065b3493b68a7bb.tar.bz2 | |
[ValueTracking] Replace Query with SimplifyQuery (NFC)
Query and SimplifyQuery implement essentially the same thing, so
normalize ValueTracking to use SimplifyQuery. In the future, the
SimplifyQuery-based APIs can be exposed directly.
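For orientation, the two types carry the same state. The sketch below copies the Query fields from the struct this commit deletes (first hunk of the diff); the SimplifyQuery shape is inferred from how the converted call sites construct it, so treat it as an illustration rather than the full class definition:

```cpp
// The file-local struct that this commit deletes (see the first hunk below):
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;
  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ; // wraps the UseInstrInfo flag
};

// SimplifyQuery holds the same members plus a TargetLibraryInfo pointer,
// which is why the converted call sites pass an explicit /*TLI*/ nullptr:
//
//   Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo)
// becomes
//   SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC, safeCxtI(V, CxtI), UseInstrInfo)
```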
Diffstat (limited to 'llvm/lib/Analysis/ValueTracking.cpp')
-rw-r--r-- | llvm/lib/Analysis/ValueTracking.cpp | 287 |
1 file changed, 136 insertions(+), 151 deletions(-)
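Nearly all 287 changed lines repeat one mechanical pattern: a public wrapper that used to build a Query now builds a SimplifyQuery. A representative before/after, condensed from the hunks below:

```cpp
// Before:
bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

// After: same public signature, different internal query object.
bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                        safeCxtI(V, CxtI), UseInstrInfo));
}
```

Since the public signatures are untouched, the change is NFC for external callers.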
```diff
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index ecc97a0..fe0c414 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -94,28 +94,6 @@ static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
   return DL.getPointerTypeSizeInBits(Ty);
 }
 
-namespace {
-
-// Simplifying using an assume can only be done in a particular control-flow
-// context (the context instruction provides that context). If an assume and
-// the context instruction are not in the same block then the DT helps in
-// figuring out if we can use it.
-struct Query {
-  const DataLayout &DL;
-  AssumptionCache *AC;
-  const Instruction *CxtI;
-  const DominatorTree *DT;
-
-  /// If true, it is safe to use metadata during simplification.
-  InstrInfoQuery IIQ;
-
-  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
-        const DominatorTree *DT, bool UseInstrInfo)
-      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), IIQ(UseInstrInfo) {}
-};
-
-} // end anonymous namespace
-
 // Given the provided Value and, potentially, a context instruction, return
 // the preferred context instruction (if any).
 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
@@ -166,10 +144,11 @@ static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
 }
 
 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
-                             KnownBits &Known, unsigned Depth, const Query &Q);
+                             KnownBits &Known, unsigned Depth,
+                             const SimplifyQuery &Q);
 
 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
-                             const Query &Q) {
+                             const SimplifyQuery &Q) {
   // Since the number of lanes in a scalable vector is unknown at compile time,
   // we track one bit which is implicitly broadcast to all lanes. This means
   // that all lanes in a scalable vector are considered demanded.
@@ -184,7 +163,8 @@ void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
   ::computeKnownBits(V, Known, Depth,
-                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
+                     SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                   safeCxtI(V, CxtI), UseInstrInfo));
 }
 
 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
@@ -193,31 +173,32 @@ void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                             const Instruction *CxtI, const DominatorTree *DT,
                             bool UseInstrInfo) {
   ::computeKnownBits(V, DemandedElts, Known, Depth,
-                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
+                     SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                   safeCxtI(V, CxtI), UseInstrInfo));
 }
 
 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
-                                  unsigned Depth, const Query &Q);
+                                  unsigned Depth, const SimplifyQuery &Q);
 
 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
-                                  const Query &Q);
+                                  const SimplifyQuery &Q);
 
 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
-                                 const DominatorTree *DT,
-                                 bool UseInstrInfo) {
-  return ::computeKnownBits(
-      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
+                                 const DominatorTree *DT, bool UseInstrInfo) {
+  return ::computeKnownBits(V, Depth,
+                            SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                          safeCxtI(V, CxtI), UseInstrInfo));
 }
 
 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  const DataLayout &DL, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
-  return ::computeKnownBits(
-      V, DemandedElts, Depth,
-      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
+  return ::computeKnownBits(V, DemandedElts, Depth,
+                            SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                          safeCxtI(V, CxtI), UseInstrInfo));
 }
 
 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
@@ -287,26 +268,30 @@ bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
 }
 
 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
-                                   const Query &Q);
+                                   const SimplifyQuery &Q);
 
 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                   bool OrZero, unsigned Depth,
                                   AssumptionCache *AC, const Instruction *CxtI,
                                   const DominatorTree *DT, bool UseInstrInfo) {
-  return ::isKnownToBeAPowerOfTwo(
-      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
+  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
+                                  SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                                safeCxtI(V, CxtI),
+                                                UseInstrInfo));
 }
 
 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
-                           unsigned Depth, const Query &Q);
+                           unsigned Depth, const SimplifyQuery &Q);
 
-static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
+static bool isKnownNonZero(const Value *V, unsigned Depth,
+                           const SimplifyQuery &Q);
 
 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
   return ::isKnownNonZero(V, Depth,
-                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
+                          SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                        safeCxtI(V, CxtI), UseInstrInfo));
 }
 
 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
@@ -337,33 +322,34 @@ bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
 }
 
 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
-                            const Query &Q);
+                            const SimplifyQuery &Q);
 
 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                            const DataLayout &DL, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            bool UseInstrInfo) {
   return ::isKnownNonEqual(V1, V2, 0,
-                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
-                                 UseInstrInfo));
+                           SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                         safeCxtI(V2, V1, CxtI), UseInstrInfo));
 }
 
 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
-                              const Query &Q);
+                              const SimplifyQuery &Q);
 
 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                              const DataLayout &DL, unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT, bool UseInstrInfo) {
-  return ::MaskedValueIsZero(
-      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
+  return ::MaskedValueIsZero(V, Mask, Depth,
+                             SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                           safeCxtI(V, CxtI), UseInstrInfo));
 }
 
 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
-                                   unsigned Depth, const Query &Q);
+                                   unsigned Depth, const SimplifyQuery &Q);
 
 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
-                                   const Query &Q) {
+                                   const SimplifyQuery &Q) {
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
@@ -374,8 +360,9 @@ unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                   unsigned Depth, AssumptionCache *AC,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT, bool UseInstrInfo) {
-  return ::ComputeNumSignBits(
-      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
+  return ::ComputeNumSignBits(V, Depth,
+                              SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                            safeCxtI(V, CxtI), UseInstrInfo));
 }
 
 unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
@@ -389,7 +376,7 @@ unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                    bool NSW, const APInt &DemandedElts,
                                    KnownBits &KnownOut, KnownBits &Known2,
-                                   unsigned Depth, const Query &Q) {
+                                   unsigned Depth, const SimplifyQuery &Q) {
   computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
 
   // If one operand is unknown and we have no nowrap information,
@@ -404,7 +391,7 @@ static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                 const APInt &DemandedElts, KnownBits &Known,
                                 KnownBits &Known2, unsigned Depth,
-                                const Query &Q) {
+                                const SimplifyQuery &Q) {
   computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
 
@@ -592,7 +579,7 @@ static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
   return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
 }
 
-static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
+static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q) {
   // Use of assumptions is context-sensitive. If we don't have a context, we
   // cannot use them!
   if (!Q.AC || !Q.CxtI)
@@ -637,7 +624,7 @@ static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
 
 static void computeKnownBitsFromCmp(const Value *V, const ICmpInst *Cmp,
                                     KnownBits &Known, unsigned Depth,
-                                    const Query &Q) {
+                                    const SimplifyQuery &Q) {
   unsigned BitWidth = Known.getBitWidth();
   // We are attempting to compute known bits for the operands of an assume.
   // Do not try to use other assumptions for those recursive calls because
@@ -645,7 +632,7 @@ static void computeKnownBitsFromCmp(const Value *V, const ICmpInst *Cmp,
   // An example of the mutual recursion: computeKnownBits can call
   // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
   // and so on.
-  Query QueryNoAC = Q;
+  SimplifyQuery QueryNoAC = Q;
   QueryNoAC.AC = nullptr;
 
   // Note that ptrtoint may change the bitwidth.
@@ -878,7 +865,7 @@ static void computeKnownBitsFromCmp(const Value *V, const ICmpInst *Cmp,
 }
 
 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
-                                       unsigned Depth, const Query &Q) {
+                                       unsigned Depth, const SimplifyQuery &Q) {
   // Use of assumptions is context-sensitive. If we don't have a context, we
   // cannot use them!
   if (!Q.AC || !Q.CxtI)
@@ -958,7 +945,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
 /// combined for all permitted shift amounts.
 static void computeKnownBitsFromShiftOperator(
     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
-    KnownBits &Known2, unsigned Depth, const Query &Q,
+    KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q,
     function_ref<KnownBits(const KnownBits &, const KnownBits &, bool)> KF) {
   computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
   computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
@@ -971,11 +958,10 @@ static void computeKnownBitsFromShiftOperator(
   Known = KF(Known2, Known, ShAmtNonZero);
 }
 
-static KnownBits getKnownBitsFromAndXorOr(const Operator *I,
-                                          const APInt &DemandedElts,
-                                          const KnownBits &KnownLHS,
-                                          const KnownBits &KnownRHS,
-                                          unsigned Depth, const Query &Q) {
+static KnownBits
+getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts,
+                         const KnownBits &KnownLHS, const KnownBits &KnownRHS,
+                         unsigned Depth, const SimplifyQuery &Q) {
   unsigned BitWidth = KnownLHS.getBitWidth();
   KnownBits KnownOut(BitWidth);
   bool IsAnd = false;
@@ -1052,9 +1038,10 @@ KnownBits llvm::analyzeKnownBitsFromAndXorOr(
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
 
-  return getKnownBitsFromAndXorOr(
-      I, DemandedElts, KnownLHS, KnownRHS, Depth,
-      Query(DL, AC, safeCxtI(I, CxtI), DT, UseInstrInfo));
+  return getKnownBitsFromAndXorOr(I, DemandedElts, KnownLHS, KnownRHS, Depth,
+                                  SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
+                                                safeCxtI(I, CxtI),
+                                                UseInstrInfo));
 }
 
 ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
@@ -1079,7 +1066,7 @@ ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
 static void computeKnownBitsFromOperator(const Operator *I,
                                          const APInt &DemandedElts,
                                          KnownBits &Known, unsigned Depth,
-                                         const Query &Q) {
+                                         const SimplifyQuery &Q) {
   unsigned BitWidth = Known.getBitWidth();
 
   KnownBits Known2(BitWidth);
@@ -1436,7 +1423,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
         // inferred hold at original context instruction. TODO: It may be
         // correct to use the original context. IF warranted, explore and
        // add sufficient tests to cover.
-        Query RecQ = Q;
+        SimplifyQuery RecQ = Q;
         RecQ.CxtI = P;
         computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
         switch (Opcode) {
@@ -1469,7 +1456,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
         // phi. This is important because that is where the value is actually
         // "evaluated" even though it is used later somewhere else. (see also
         // D69571).
-        Query RecQ = Q;
+        SimplifyQuery RecQ = Q;
 
         unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
         Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
@@ -1547,7 +1534,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
           // phi. This is important because that is where the value is actually
           // "evaluated" even though it is used later somewhere else. (see also
           // D69571).
-          Query RecQ = Q;
+          SimplifyQuery RecQ = Q;
           RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
 
           Known2 = KnownBits(BitWidth);
@@ -1864,7 +1851,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
 /// Determine which bits of V are known to be either zero or one and return
 /// them.
 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
-                           unsigned Depth, const Query &Q) {
+                           unsigned Depth, const SimplifyQuery &Q) {
   KnownBits Known(getBitWidth(V->getType(), Q.DL));
   computeKnownBits(V, DemandedElts, Known, Depth, Q);
   return Known;
@@ -1872,7 +1859,8 @@ KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
 
 /// Determine which bits of V are known to be either zero or one and return
 /// them.
-KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
+KnownBits computeKnownBits(const Value *V, unsigned Depth,
+                           const SimplifyQuery &Q) {
   KnownBits Known(getBitWidth(V->getType(), Q.DL));
   computeKnownBits(V, Known, Depth, Q);
   return Known;
@@ -1894,7 +1882,8 @@ KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the demanded elements in the vector specified by DemandedElts.
 void computeKnownBits(const Value *V, const APInt &DemandedElts,
-                      KnownBits &Known, unsigned Depth, const Query &Q) {
+                      KnownBits &Known, unsigned Depth,
+                      const SimplifyQuery &Q) {
   if (!DemandedElts) {
     // No demanded elts, better to assume we don't know anything.
     Known.resetAll();
@@ -2023,7 +2012,7 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts,
 /// Try to detect a recurrence that the value of the induction variable is
 /// always a power of two (or zero).
 static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
-                                   unsigned Depth, Query &Q) {
+                                   unsigned Depth, SimplifyQuery &Q) {
   BinaryOperator *BO = nullptr;
   Value *Start = nullptr, *Step = nullptr;
   if (!matchSimpleRecurrence(PN, BO, Start, Step))
@@ -2082,7 +2071,7 @@ static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
 /// be a power of two when defined. Supports values with integer or pointer
 /// types and vectors of integers.
 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
-                            const Query &Q) {
+                            const SimplifyQuery &Q) {
   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
 
   // Attempt to match against constants.
@@ -2171,7 +2160,7 @@ bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
   // A PHI node is power of two if all incoming values are power of two, or if
   // it is an induction variable where in each step its value is a power of two.
   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
-    Query RecQ = Q;
+    SimplifyQuery RecQ = Q;
 
     // Check if it is an induction variable and always power of two.
     if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
@@ -2211,7 +2200,7 @@ bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
 ///
 /// Currently this routine does not support vector GEPs.
 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
-                              const Query &Q) {
+                              const SimplifyQuery &Q) {
   const Function *F = nullptr;
   if (const Instruction *I = dyn_cast<Instruction>(GEP))
     F = I->getFunction();
@@ -2409,8 +2398,8 @@ static bool isNonZeroRecurrence(const PHINode *PN) {
 }
 
 static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
-                         const Query &Q, unsigned BitWidth, Value *X, Value *Y,
-                         bool NSW) {
+                         const SimplifyQuery &Q, unsigned BitWidth, Value *X,
+                         Value *Y, bool NSW) {
   KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
   KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
 
@@ -2448,7 +2437,7 @@ static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
 }
 
 static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
-                         const Query &Q, unsigned BitWidth, Value *X,
+                         const SimplifyQuery &Q, unsigned BitWidth, Value *X,
                          Value *Y) {
   if (auto *C = dyn_cast<Constant>(X))
     if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Depth, Q))
@@ -2466,7 +2455,7 @@ static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
 }
 
 static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
-                           unsigned Depth, const Query &Q,
+                           unsigned Depth, const SimplifyQuery &Q,
                            const KnownBits &KnownVal) {
   auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
     switch (I->getOpcode()) {
@@ -2523,7 +2512,7 @@ static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
 /// pointer couldn't possibly be null at the specified instruction.
 /// Supports values with integer or pointer type and vectors of integers.
 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
-                    const Query &Q) {
+                    const SimplifyQuery &Q) {
 
 #ifndef NDEBUG
   Type *Ty = V->getType();
@@ -2831,7 +2820,7 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
       return true;
 
     // Check if all incoming values are non-zero using recursion.
-    Query RecQ = Q;
+    SimplifyQuery RecQ = Q;
     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
     return llvm::all_of(PN->operands(), [&](const Use &U) {
       if (U.get() == PN)
@@ -2928,7 +2917,7 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
   return Known.One != 0;
 }
 
-bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
+bool isKnownNonZero(const Value* V, unsigned Depth, const SimplifyQuery& Q) {
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
@@ -3044,7 +3033,7 @@ getInvertibleOperands(const Operator *Op1,
 
 /// Return true if V2 == V1 + X, where X is known non-zero.
 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
-                           const Query &Q) {
+                           const SimplifyQuery &Q) {
   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
   if (!BO || BO->getOpcode() != Instruction::Add)
     return false;
@@ -3061,7 +3050,7 @@ static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
 
 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
 /// the multiplication is nuw or nsw.
 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
-                          const Query &Q) {
+                          const SimplifyQuery &Q) {
   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
     const APInt *C;
     return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
@@ -3074,7 +3063,7 @@ static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
 /// the shift is nuw or nsw.
 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
-                          const Query &Q) {
+                          const SimplifyQuery &Q) {
   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
     const APInt *C;
     return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
@@ -3085,7 +3074,7 @@ static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
 }
 
 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
-                           unsigned Depth, const Query &Q) {
+                           unsigned Depth, const SimplifyQuery &Q) {
   // Check two PHIs are in same block.
   if (PN1->getParent() != PN2->getParent())
     return false;
@@ -3105,7 +3094,7 @@ static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
     if (UsedFullRecursion)
       return false;
 
-    Query RecQ = Q;
+    SimplifyQuery RecQ = Q;
     RecQ.CxtI = IncomBB->getTerminator();
     if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
      return false;
@@ -3116,7 +3105,7 @@ static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
 
 /// Return true if it is known that V1 != V2.
 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
-                            const Query &Q) {
+                            const SimplifyQuery &Q) {
   if (V1 == V2)
     return false;
   if (V1->getType() != V2->getType())
@@ -3176,7 +3165,7 @@ static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
-                       const Query &Q) {
+                       const SimplifyQuery &Q) {
   KnownBits Known(Mask.getBitWidth());
   computeKnownBits(V, Known, Depth, Q);
   return Mask.isSubsetOf(Known.Zero);
@@ -3260,10 +3249,10 @@ static unsigned computeNumSignBitsVectorConstant(const Value *V,
 
 static unsigned ComputeNumSignBitsImpl(const Value *V,
                                        const APInt &DemandedElts,
-                                       unsigned Depth, const Query &Q);
+                                       unsigned Depth, const SimplifyQuery &Q);
 
 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
-                                   unsigned Depth, const Query &Q) {
+                                   unsigned Depth, const SimplifyQuery &Q) {
   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
   assert(Result > 0 && "At least one sign bit needs to be present!");
   return Result;
@@ -3278,7 +3267,7 @@ static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
 /// elements in the vector specified by DemandedElts.
 static unsigned ComputeNumSignBitsImpl(const Value *V,
                                        const APInt &DemandedElts,
-                                       unsigned Depth, const Query &Q) {
+                                       unsigned Depth, const SimplifyQuery &Q) {
   Type *Ty = V->getType();
 #ifndef NDEBUG
   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
@@ -3498,7 +3487,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
       // Take the minimum of all incoming values. This can't infinitely loop
       // because of our depth threshold.
-      Query RecQ = Q;
+      SimplifyQuery RecQ = Q;
       Tmp = TyBits;
       for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
         if (Tmp == 1) return Tmp;
@@ -4197,7 +4186,7 @@ std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred,
 }
 
 static FPClassTest computeKnownFPClassFromAssumes(const Value *V,
-                                                  const Query &Q) {
+                                                  const SimplifyQuery &Q) {
   FPClassTest KnownFromAssume = fcAllFlags;
 
   // Try to restrict the floating-point classes based on information from
@@ -4245,31 +4234,29 @@ static FPClassTest computeKnownFPClassFromAssumes(const Value *V,
 
 void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                          FPClassTest InterestedClasses, KnownFPClass &Known,
-                         unsigned Depth, const Query &Q,
-                         const TargetLibraryInfo *TLI);
+                         unsigned Depth, const SimplifyQuery &Q);
 
 static void computeKnownFPClass(const Value *V, KnownFPClass &Known,
                                 FPClassTest InterestedClasses, unsigned Depth,
-                                const Query &Q, const TargetLibraryInfo *TLI) {
+                                const SimplifyQuery &Q) {
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
-  computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q, TLI);
+  computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q);
 }
 
 static void computeKnownFPClassForFPTrunc(const Operator *Op,
                                           const APInt &DemandedElts,
                                           FPClassTest InterestedClasses,
                                           KnownFPClass &Known, unsigned Depth,
-                                          const Query &Q,
-                                          const TargetLibraryInfo *TLI) {
+                                          const SimplifyQuery &Q) {
   if ((InterestedClasses &
        (KnownFPClass::OrderedLessThanZeroMask | fcNan)) == fcNone)
     return;
 
   KnownFPClass KnownSrc;
   computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
-                      KnownSrc, Depth + 1, Q, TLI);
+                      KnownSrc, Depth + 1, Q);
 
   // Sign should be preserved
   // TODO: Handle cannot be ordered greater than zero
@@ -4285,8 +4272,7 @@ static void computeKnownFPClassForFPTrunc(const Operator *Op,
 // cannotBeOrderedLessThanZero into here.
 void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                          FPClassTest InterestedClasses, KnownFPClass &Known,
-                         unsigned Depth, const Query &Q,
-                         const TargetLibraryInfo *TLI) {
+                         unsigned Depth, const SimplifyQuery &Q) {
   assert(Known.isUnknown() && "should not be called with known information");
 
   if (!DemandedElts) {
@@ -4370,16 +4356,16 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
   switch (Opc) {
   case Instruction::FNeg: {
     computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
-                        Known, Depth + 1, Q, TLI);
+                        Known, Depth + 1, Q);
     Known.fneg();
     break;
   }
   case Instruction::Select: {
     KnownFPClass Known2;
     computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedClasses,
-                        Known, Depth + 1, Q, TLI);
+                        Known, Depth + 1, Q);
     computeKnownFPClass(Op->getOperand(2), DemandedElts, InterestedClasses,
-                        Known2, Depth + 1, Q, TLI);
+                        Known2, Depth + 1, Q);
     Known |= Known2;
     break;
   }
@@ -4392,7 +4378,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
         // If we only care about the sign bit we don't need to inspect the
        // operand.
         computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                            InterestedClasses, Known, Depth + 1, Q, TLI);
+                            InterestedClasses, Known, Depth + 1, Q);
       }
 
       Known.fabs();
@@ -4402,9 +4388,9 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       KnownFPClass KnownSign;
 
      computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                          InterestedClasses, Known, Depth + 1, Q, TLI);
+                          InterestedClasses, Known, Depth + 1, Q);
      computeKnownFPClass(II->getArgOperand(1), DemandedElts,
-                          InterestedClasses, KnownSign, Depth + 1, Q, TLI);
+                          InterestedClasses, KnownSign, Depth + 1, Q);
      Known.copysign(KnownSign);
      break;
     }
@@ -4422,7 +4408,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
         // x * x + y is non-negative if y is non-negative.
         KnownFPClass KnownAddend;
         computeKnownFPClass(II->getArgOperand(2), DemandedElts,
-                            InterestedClasses, KnownAddend, Depth + 1, Q, TLI);
+                            InterestedClasses, KnownAddend, Depth + 1, Q);
 
         // TODO: Known sign bit with no nans
         if (KnownAddend.cannotBeOrderedLessThanZero())
@@ -4437,7 +4423,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
         InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
 
       computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                          InterestedSrcs, KnownSrc, Depth + 1, Q, TLI);
+                          InterestedSrcs, KnownSrc, Depth + 1, Q);
 
      if (KnownSrc.isKnownNeverPosInfinity())
        Known.knownNot(fcPosInf);
@@ -4468,7 +4454,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       // Return NaN on infinite inputs.
       KnownFPClass KnownSrc;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                          InterestedClasses, KnownSrc, Depth + 1, Q, TLI);
+                          InterestedClasses, KnownSrc, Depth + 1, Q);
       Known.knownNot(fcInf);
       if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
         Known.knownNot(fcNan);
@@ -4481,9 +4467,9 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     case Intrinsic::maximum: {
       KnownFPClass KnownLHS, KnownRHS;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                          InterestedClasses, KnownLHS, Depth + 1, Q, TLI);
+                          InterestedClasses, KnownLHS, Depth + 1, Q);
       computeKnownFPClass(II->getArgOperand(1), DemandedElts,
-                          InterestedClasses, KnownRHS, Depth + 1, Q, TLI);
+                          InterestedClasses, KnownRHS, Depth + 1, Q);
 
       bool NeverNaN = KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
@@ -4547,7 +4533,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     }
     case Intrinsic::canonicalize: {
       computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                          InterestedClasses, Known, Depth + 1, Q, TLI);
+                          InterestedClasses, Known, Depth + 1, Q);
 
       // Canonicalize is guaranteed to quiet signaling nans.
       Known.knownNot(fcSNan);
@@ -4584,7 +4570,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       if (InterestedSrcs & fcNegFinite)
        InterestedSrcs |= fcNegFinite;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                          InterestedSrcs, KnownSrc, Depth + 1, Q, TLI);
+                          InterestedSrcs, KnownSrc, Depth + 1, Q);
 
       // Integer results cannot be subnormal.
       Known.knownNot(fcSubnormal);
@@ -4616,7 +4602,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
       KnownFPClass KnownSrc;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                          InterestedClasses, KnownSrc, Depth + 1, Q, TLI);
+                          InterestedClasses, KnownSrc, Depth + 1, Q);
       if (KnownSrc.isKnownNeverNaN()) {
         Known.knownNot(fcNan);
         Known.SignBit = false;
@@ -4626,7 +4612,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     }
     case Intrinsic::fptrunc_round: {
       computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses,
-                                    Known, Depth, Q, TLI);
+                                    Known, Depth, Q);
       break;
     }
     case Intrinsic::log:
@@ -4650,7 +4636,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
       KnownFPClass KnownSrc;
      computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
-                          KnownSrc, Depth + 1, Q, TLI);
+                          KnownSrc, Depth + 1, Q);
 
      if (KnownSrc.isKnownNeverPosInfinity())
        Known.knownNot(fcPosInf);
@@ -4690,14 +4676,14 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       // pow(-inf, exp) --> -inf if exp is positive odd.
       KnownFPClass KnownSrc;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative,
-                          KnownSrc, Depth + 1, Q, TLI);
+                          KnownSrc, Depth + 1, Q);
       if (KnownSrc.isKnownNever(fcNegative))
         Known.knownNot(fcNegative);
       break;
     }
     case Intrinsic::arithmetic_fence: {
       computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                          InterestedClasses, Known, Depth + 1, Q, TLI);
+                          InterestedClasses, Known, Depth + 1, Q);
       break;
     }
     case Intrinsic::experimental_constrained_sitofp:
@@ -4727,14 +4713,14 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
   case Instruction::FSub: {
     KnownFPClass KnownLHS, KnownRHS;
     computeKnownFPClass(Op->getOperand(1), DemandedElts, fcNan | fcInf,
-                        KnownRHS, Depth + 1, Q, TLI);
+                        KnownRHS, Depth + 1, Q);
 
     if (KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNegZero() ||
        (Opc == Instruction::FSub && KnownRHS.isKnownNeverPosZero())) {
       // RHS is canonically cheaper to compute. Skip inspecting the LHS if
       // there's no point.
       computeKnownFPClass(Op->getOperand(0), DemandedElts, fcNan | fcInf,
-                          KnownLHS, Depth + 1, Q, TLI);
+                          KnownLHS, Depth + 1, Q);
 
       // Adding positive and negative infinity produces NaN.
      // TODO: Check sign of infinities.
      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
@@ -4776,11 +4762,11 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     KnownFPClass KnownLHS, KnownRHS;
     computeKnownFPClass(Op->getOperand(1), DemandedElts,
                         fcNan | fcInf | fcZero | fcSubnormal, KnownRHS,
-                        Depth + 1, Q, TLI);
+                        Depth + 1, Q);
     if (KnownRHS.isKnownNeverNaN() &&
         (KnownRHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverZero())) {
       computeKnownFPClass(Op->getOperand(0), DemandedElts,
-                          fcNan | fcInf | fcZero, KnownLHS, Depth + 1, Q, TLI);
+                          fcNan | fcInf | fcZero, KnownLHS, Depth + 1, Q);
       if (!KnownLHS.isKnownNeverNaN())
         break;
@@ -4824,7 +4810,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
     computeKnownFPClass(Op->getOperand(1), DemandedElts,
                         fcNan | fcInf | fcZero | fcNegative, KnownRHS,
-                        Depth + 1, Q, TLI);
+                        Depth + 1, Q);
 
     bool KnowSomethingUseful =
         KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);
@@ -4836,7 +4822,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
       computeKnownFPClass(Op->getOperand(0), DemandedElts,
                           InterestedClasses & InterestedLHS, KnownLHS,
-                          Depth + 1, Q, TLI);
+                          Depth + 1, Q);
     }
 
     const Function *F = cast<Instruction>(Op)->getFunction();
@@ -4881,7 +4867,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
   case Instruction::FPExt: {
     // Infinity, nan and zero propagate from source.
     computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
-                        Known, Depth + 1, Q, TLI);
+                        Known, Depth + 1, Q);
 
     const fltSemantics &DstTy =
         Op->getType()->getScalarType()->getFltSemantics();
@@ -4899,7 +4885,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
   }
   case Instruction::FPTrunc: {
     computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
-                                  Depth, Q, TLI);
+                                  Depth, Q);
     break;
   }
   case Instruction::SIToFP:
@@ -4945,7 +4931,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     if (CIdx && CIdx->getValue().ult(NumElts))
       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
     return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
-                               Depth + 1, Q, TLI);
+                               Depth + 1, Q);
   }
 
   break;
@@ -4965,7 +4951,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     unsigned EltIdx = CIdx->getZExtValue();
     // Do we demand the inserted element?
     if (DemandedElts[EltIdx]) {
-      computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q, TLI);
+      computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q);
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
@@ -4979,7 +4965,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     if (!!DemandedVecElts) {
       KnownFPClass Known2;
       computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
-                          Depth + 1, Q, TLI);
+                          Depth + 1, Q);
       Known |= Known2;
     }
 
@@ -4996,7 +4982,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     if (!!DemandedLHS) {
       const Value *LHS = Shuf->getOperand(0);
       computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
-                          Depth + 1, Q, TLI);
+                          Depth + 1, Q);
 
      // If we don't know any bits, early out.
      if (Known.isUnknown())
@@ -5009,7 +4995,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       KnownFPClass Known2;
       const Value *RHS = Shuf->getOperand(1);
       computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
-                          Depth + 1, Q, TLI);
+                          Depth + 1, Q);
       Known |= Known2;
     }
 
@@ -5017,7 +5003,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
   }
   case Instruction::ExtractValue: {
     computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
-                        Known, Depth + 1, Q, TLI);
+                        Known, Depth + 1, Q);
     break;
   }
   case Instruction::PHI: {
@@ -5047,7 +5033,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       // to waste time spinning around in loops. We need at least depth 2 to
      // detect known sign bits.
      computeKnownFPClass(IncValue, DemandedElts, InterestedClasses, KnownSrc,
-                          PhiRecursionLimit, Q, TLI);
+                          PhiRecursionLimit, Q);
 
      if (First) {
        Known = KnownSrc;
@@ -5074,22 +5060,20 @@ KnownFPClass llvm::computeKnownFPClass(
     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
     bool UseInstrInfo) {
   KnownFPClass KnownClasses;
-  ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth,
-                        Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo),
-                        TLI);
+  ::computeKnownFPClass(
+      V, DemandedElts, InterestedClasses, KnownClasses, Depth,
+      SimplifyQuery(DL, TLI, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
   return KnownClasses;
 }
 
-KnownFPClass
-llvm::computeKnownFPClass(const Value *V, const DataLayout &DL,
-                          FPClassTest InterestedClasses, unsigned Depth,
-                          const TargetLibraryInfo *TLI, AssumptionCache *AC,
-                          const Instruction *CxtI, const DominatorTree *DT,
-                          bool UseInstrInfo) {
+KnownFPClass llvm::computeKnownFPClass(
    const Value *V, const DataLayout &DL, FPClassTest InterestedClasses,
    unsigned Depth, const TargetLibraryInfo *TLI, AssumptionCache *AC,
    const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo) {
   KnownFPClass Known;
-  ::computeKnownFPClass(V, Known, InterestedClasses, Depth,
-                        Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo),
-                        TLI);
+  ::computeKnownFPClass(
+      V, Known, InterestedClasses, Depth,
+      SimplifyQuery(DL, TLI, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
   return Known;
 }
 
@@ -6165,7 +6149,8 @@ static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
     KnownBits AddKnown(LHSRange.getBitWidth());
     computeKnownBitsFromAssume(
-        Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
+        Add, AddKnown, /*Depth=*/0,
+        SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC, CxtI, DT));
     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
       return OverflowResult::NeverOverflows;
```
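A secondary simplification is visible in the floating-point hunks above: computeKnownFPClass and its helpers no longer thread TargetLibraryInfo as a separate parameter, because SimplifyQuery already carries a TLI member. A rough sketch of that recurring edit (declarations abbreviated from the diff):

```cpp
// Before: TLI traveled beside the query through every recursive call.
void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                         FPClassTest InterestedClasses, KnownFPClass &Known,
                         unsigned Depth, const Query &Q,
                         const TargetLibraryInfo *TLI);

// After: the query owns the TLI pointer, so the extra parameter disappears
// and the public entry points feed their TLI straight into the query:
//   SimplifyQuery(DL, TLI, DT, AC, safeCxtI(V, CxtI), UseInstrInfo)
void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                         FPClassTest InterestedClasses, KnownFPClass &Known,
                         unsigned Depth, const SimplifyQuery &Q);
```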