| author | mingmingl <mingmingl@google.com> | 2025-02-04 11:11:14 -0800 |
| --- | --- | --- |
| committer | mingmingl <mingmingl@google.com> | 2025-02-04 11:11:14 -0800 |
| commit | e91747a92d27ecf799427bf563f9f64f7c4d2447 (patch) | |
| tree | 7aa5a8a9170deec293e152bdf2be804399dcd612 /llvm/lib/Analysis | |
| parent | 3a8d9337d816aef41c3ca1484be8b933a71a3c46 (diff) | |
| parent | 53d6e59b594639417cdbfcfa2d18cea64acb4009 (diff) | |
Merge branch 'main' into users/mingmingl-llvm/spr/sdpglobalvariable
Diffstat (limited to 'llvm/lib/Analysis')

-rw-r--r--  llvm/lib/Analysis/TargetTransformInfo.cpp  |  9
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp        | 66

2 files changed, 45 insertions, 30 deletions
```diff
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 424bb7b..dc06609 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1153,6 +1153,15 @@ InstructionCost TargetTransformInfo::getGatherScatterOpCost(
   return Cost;
 }
 
+InstructionCost TargetTransformInfo::getExpandCompressMemoryOpCost(
+    unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment,
+    TTI::TargetCostKind CostKind, const Instruction *I) const {
+  InstructionCost Cost = TTIImpl->getExpandCompressMemoryOpCost(
+      Opcode, DataTy, VariableMask, Alignment, CostKind, I);
+  assert(Cost >= 0 && "TTI should not produce negative costs!");
+  return Cost;
+}
+
 InstructionCost TargetTransformInfo::getStridedMemoryOpCost(
     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 6b61a35..55feb15 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1426,7 +1426,22 @@ static void computeKnownBitsFromOperator(const Operator *I,
     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
     // Accumulate the constant indices in a separate variable
     // to minimize the number of calls to computeForAddSub.
-    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
+    unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(I->getType());
+    APInt AccConstIndices(IndexWidth, 0);
+
+    auto AddIndexToKnown = [&](KnownBits IndexBits) {
+      if (IndexWidth == BitWidth) {
+        // Note that inbounds does *not* guarantee nsw for the addition, as only
+        // the offset is signed, while the base address is unsigned.
+        Known = KnownBits::add(Known, IndexBits);
+      } else {
+        // If the index width is smaller than the pointer width, only add the
+        // value to the low bits.
+        assert(IndexWidth < BitWidth &&
+               "Index width can't be larger than pointer width");
+        Known.insertBits(KnownBits::add(Known.trunc(IndexWidth), IndexBits), 0);
+      }
+    };
 
     gep_type_iterator GTI = gep_type_begin(I);
     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
@@ -1464,43 +1479,34 @@ static void computeKnownBitsFromOperator(const Operator *I,
         break;
       }
 
-      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
-      KnownBits IndexBits(IndexBitWidth);
-      computeKnownBits(Index, IndexBits, Depth + 1, Q);
-      TypeSize IndexTypeSize = GTI.getSequentialElementStride(Q.DL);
-      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
-      KnownBits ScalingFactor(IndexBitWidth);
+      TypeSize Stride = GTI.getSequentialElementStride(Q.DL);
+      uint64_t StrideInBytes = Stride.getKnownMinValue();
+      if (!Stride.isScalable()) {
+        // Fast path for constant offset.
+        if (auto *CI = dyn_cast<ConstantInt>(Index)) {
+          AccConstIndices +=
+              CI->getValue().sextOrTrunc(IndexWidth) * StrideInBytes;
+          continue;
+        }
+      }
+
+      KnownBits IndexBits =
+          computeKnownBits(Index, Depth + 1, Q).sextOrTrunc(IndexWidth);
+      KnownBits ScalingFactor(IndexWidth);
       // Multiply by current sizeof type.
       // &A[i] == A + i * sizeof(*A[i]).
-      if (IndexTypeSize.isScalable()) {
+      if (Stride.isScalable()) {
         // For scalable types the only thing we know about sizeof is
         // that this is a multiple of the minimum size.
-        ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
-      } else if (IndexBits.isConstant()) {
-        APInt IndexConst = IndexBits.getConstant();
-        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
-        IndexConst *= ScalingFactor;
-        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
-        continue;
+        ScalingFactor.Zero.setLowBits(llvm::countr_zero(StrideInBytes));
       } else {
         ScalingFactor =
-            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
+            KnownBits::makeConstant(APInt(IndexWidth, StrideInBytes));
       }
-      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
-
-      // If the offsets have a different width from the pointer, according
-      // to the language reference we need to sign-extend or truncate them
-      // to the width of the pointer.
-      IndexBits = IndexBits.sextOrTrunc(BitWidth);
-
-      // Note that inbounds does *not* guarantee nsw for the addition, as only
-      // the offset is signed, while the base address is unsigned.
-      Known = KnownBits::add(Known, IndexBits);
-    }
-    if (!Known.isUnknown() && !AccConstIndices.isZero()) {
-      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
-      Known = KnownBits::add(Known, Index);
+      AddIndexToKnown(KnownBits::mul(IndexBits, ScalingFactor));
     }
+    if (!Known.isUnknown() && !AccConstIndices.isZero())
+      AddIndexToKnown(KnownBits::makeConstant(AccConstIndices));
     break;
   }
   case Instruction::PHI: {
```
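The core of the ValueTracking change is the new `AddIndexToKnown` lambda: GEP offsets are now computed at the pointer's index width (`Q.DL.getIndexTypeSizeInBits`) rather than the full pointer width, and folded into only the low bits of the known base address. Below is a minimal standalone sketch of that step, assuming LLVM's `KnownBits` API from `llvm/Support/KnownBits.h`; the free function and its name are hypothetical, standing in for the commit's local lambda.

```cpp
#include "llvm/Support/KnownBits.h"
#include <cassert>

using namespace llvm;

// Hypothetical free-standing version of the commit's AddIndexToKnown lambda.
// Adds an IndexWidth-wide offset into a BitWidth-wide pointer's known bits.
static KnownBits addIndexToKnown(KnownBits Known, const KnownBits &IndexBits) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned IndexWidth = IndexBits.getBitWidth();
  if (IndexWidth == BitWidth) {
    // Full-width case: a plain known-bits addition. As the diff's comment
    // notes, inbounds does not guarantee nsw here, since only the offset is
    // signed while the base address is unsigned.
    return KnownBits::add(Known, IndexBits);
  }
  assert(IndexWidth < BitWidth && "Index width can't exceed pointer width");
  // Narrow-index case: the offset arithmetic only wraps within the low
  // IndexWidth bits, so add in the narrow domain and splice the result back
  // into the low bits, preserving whatever is known about the high bits.
  KnownBits Low = KnownBits::add(Known.trunc(IndexWidth), IndexBits);
  Known.insertBits(Low, 0);
  return Known;
}
```

For example, with 64-bit pointers in an address space whose index type is 32 bits, any known bits of the base above bit 31 survive the addition, matching the diff's comment that a narrower index should "only add the value to the low bits".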