author    | Quentin Colombet <qcolombet@apple.com> | 2020-10-20 14:43:25 -0700
committer | Quentin Colombet <qcolombet@apple.com> | 2020-10-21 15:07:04 -0700
commit    | ee6abef5323d59b983129bf3514ef6775d1d6cd5 (patch)
tree      | 1015b4f90f192f16ecd1437372b34dfba53bedd9 /llvm/lib/Analysis/ValueTracking.cpp
parent    | e97e9851b227e98e39c27c4c8f5558e331cde8b4 (diff)
[ValueTracking] Interpret GEPs as a series of adds multiplied by the related scaling factor
Prior to this patch, computeKnownBits would only try to deduce trailing zero
bits for getelementptrs. This patch adds the logic to treat GEPs as a series
of adds, each multiplied by the related scaling factor.
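In other words (a summary of the new model, paraphrased rather than quoted
from the patch): for a base pointer B and array indices idx_1 ... idx_n over
indexed types T_1 ... T_n, the address is modeled as

    address = B + idx_1 * sizeof(T_1) + ... + idx_n * sizeof(T_n)

where struct field indices contribute their constant StructLayout offset
instead, and every add and mul is evaluated with KnownBits arithmetic.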
Thanks to this patch, using a GEP or performing the address computation
directly "by hand" (a ptrtoint followed by adds and muls, then an inttoptr)
yields the same computeKnownBits information.
Previously, the "by hand" approach could give strictly more information.
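To illustrate the equivalence (a hypothetical example written for this note,
not code from the commit; sizeof(uint64_t) plays the role of the scaling
factor):

    #include <cstdint>

    // GEP form: &p[i] lowers to a getelementptr of i64.
    uint64_t *via_gep(uint64_t *p, uint64_t i) { return &p[i]; }

    // "By hand" form: ptrtoint, mul by the scaling factor, add, inttoptr.
    // After this patch, both forms yield the same known bits.
    uint64_t *by_hand(uint64_t *p, uint64_t i) {
      uintptr_t base = (uintptr_t)p;        // ptrtoint
      uintptr_t off = i * sizeof(uint64_t); // mul i64 %i, 8
      return (uint64_t *)(base + off);      // add, then inttoptr
    }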
This is related to https://llvm.org/PR47241.
Differential Revision: https://reviews.llvm.org/D86364
Diffstat (limited to 'llvm/lib/Analysis/ValueTracking.cpp')
-rw-r--r-- | llvm/lib/Analysis/ValueTracking.cpp | 90
1 file changed, 65 insertions, 25 deletions
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 4dd536e..9ecc5ce 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1358,24 +1358,32 @@ static void computeKnownBitsFromOperator(const Operator *I,
   case Instruction::GetElementPtr: {
     // Analyze all of the subscripts of this getelementptr instruction
     // to determine if we can prove known low zero bits.
-    KnownBits LocalKnown(BitWidth);
-    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
-    unsigned TrailZ = LocalKnown.countMinTrailingZeros();
+    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
+    // Accumulate the constant indices in a separate variable
+    // to minimize the number of calls to computeForAddSub.
+    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

     gep_type_iterator GTI = gep_type_begin(I);
+    // If the inbounds keyword is not present, the offsets are added to the
+    // base address with silently-wrapping two's complement arithmetic.
+    bool IsInBounds = cast<GEPOperator>(I)->isInBounds();
     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
       // TrailZ can only become smaller, short-circuit if we hit zero.
-      if (TrailZ == 0)
+      if (Known.isUnknown())
         break;

       Value *Index = I->getOperand(i);
+
+      // Handle case when index is zero.
+      Constant *CIndex = dyn_cast<Constant>(Index);
+      if (CIndex && CIndex->isZeroValue())
+        continue;
+
       if (StructType *STy = GTI.getStructTypeOrNull()) {
         // Handle struct member offset arithmetic.

-        // Handle case when index is vector zeroinitializer
-        Constant *CIndex = cast<Constant>(Index);
-        if (CIndex->isZeroValue())
-          continue;
+        assert(CIndex &&
+               "Access to structure field must be known at compile time");

         if (CIndex->getType()->isVectorTy())
           Index = CIndex->getSplatValue();
@@ -1383,26 +1391,58 @@ static void computeKnownBitsFromOperator(const Operator *I,
         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
         const StructLayout *SL = Q.DL.getStructLayout(STy);
         uint64_t Offset = SL->getElementOffset(Idx);
-        TrailZ = std::min<unsigned>(TrailZ,
-                                    countTrailingZeros(Offset));
+        AccConstIndices += Offset;
+        continue;
+      }
+
+      // Handle array index arithmetic.
+      Type *IndexedTy = GTI.getIndexedType();
+      if (!IndexedTy->isSized()) {
+        Known.resetAll();
+        break;
+      }
+
+      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
+      KnownBits IndexBits(IndexBitWidth);
+      computeKnownBits(Index, IndexBits, Depth + 1, Q);
+      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
+      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
+      KnownBits ScalingFactor(IndexBitWidth);
+      // Multiply by current sizeof type.
+      // &A[i] == A + i * sizeof(*A[i]).
+      if (IndexTypeSize.isScalable()) {
+        // For scalable types the only thing we know about sizeof is
+        // that this is a multiple of the minimum size.
+        ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
+      } else if (IndexBits.isConstant()) {
+        APInt IndexConst = IndexBits.getConstant();
+        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
+        IndexConst *= ScalingFactor;
+        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
+        continue;
       } else {
-        // Handle array index arithmetic.
-        Type *IndexedTy = GTI.getIndexedType();
-        if (!IndexedTy->isSized()) {
-          TrailZ = 0;
-          break;
-        }
-        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
-        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy).getKnownMinSize();
-        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
-        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
-        TrailZ = std::min(TrailZ,
-                          unsigned(countTrailingZeros(TypeSize) +
-                                   LocalKnown.countMinTrailingZeros()));
+        ScalingFactor.Zero = ~TypeSizeInBytes;
+        ScalingFactor.One = TypeSizeInBytes;
       }
-    }
+      IndexBits = KnownBits::computeForMul(IndexBits, ScalingFactor);
+
+      // If the offsets have a different width from the pointer, according
+      // to the language reference we need to sign-extend or truncate them
+      // to the width of the pointer.
+      IndexBits = IndexBits.sextOrTrunc(BitWidth);

-    Known.Zero.setLowBits(TrailZ);
+      Known = KnownBits::computeForAddSub(
+          /*Add=*/true,
+          /*NSW=*/IsInBounds, Known, IndexBits);
+    }
+    if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
+      KnownBits Index(BitWidth);
+      Index.Zero = ~AccConstIndices;
+      Index.One = AccConstIndices;
+      Known = KnownBits::computeForAddSub(
+          /*Add=*/true,
+          /*NSW=*/IsInBounds, Known, Index);
+    }
     break;
   }
   case Instruction::PHI: {
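For readers skimming the hunks above, the per-index logic for a non-constant
array index boils down to the following condensed sketch (an illustrative
helper written for this note, not a function from the patch; it uses the
KnownBits API as it exists at this revision):

    #include "llvm/ADT/APInt.h"
    #include "llvm/Support/KnownBits.h"
    using namespace llvm;

    // BaseKnown: known bits of the base pointer, at pointer width.
    // IdxKnown:  known bits of one non-constant array index.
    // ElemSize:  sizeof the indexed element type, in bytes.
    KnownBits addScaledIndex(const KnownBits &BaseKnown,
                             const KnownBits &IdxKnown,
                             uint64_t ElemSize, bool IsInBounds) {
      unsigned BitWidth = BaseKnown.getBitWidth();
      // The scaling factor is a compile-time constant, so every bit of it
      // is known: One holds the value, Zero holds its complement.
      KnownBits ScalingFactor(IdxKnown.getBitWidth());
      ScalingFactor.Zero = ~ElemSize;
      ScalingFactor.One = ElemSize;
      // idx * sizeof(elem) ...
      KnownBits Scaled = KnownBits::computeForMul(IdxKnown, ScalingFactor);
      // ... sign-extended or truncated to pointer width, per the LangRef ...
      Scaled = Scaled.sextOrTrunc(BitWidth);
      // ... then added to the base; inbounds GEPs add with no signed wrap.
      return KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/IsInBounds,
                                         BaseKnown, Scaled);
    }

Constant indices never reach this path: the patch folds them into
AccConstIndices and applies them with a single computeForAddSub call after
the loop, which keeps the number of KnownBits additions low.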