diff options
author | Nikita Popov <npopov@redhat.com> | 2025-09-01 09:25:56 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2025-09-01 09:25:56 +0200 |
commit | a987022f33a27610732544b0c5f4475ce818c982 (patch) | |
tree | 41799ae38e4464583d3356ea5d5d4868f6f99c9b /llvm/lib/Analysis/MemoryBuiltins.cpp | |
parent | af41d0d7057d8365c7b48ce9f88d80b669057993 (diff) | |
download | llvm-a987022f33a27610732544b0c5f4475ce818c982.zip llvm-a987022f33a27610732544b0c5f4475ce818c982.tar.gz llvm-a987022f33a27610732544b0c5f4475ce818c982.tar.bz2 |
[MemoryBuiltins] Add getBaseObjectSize() (NFCI) (#155911)
getObjectSize() is based on ObjectSizeOffsetVisitor, which has become
very expensive over time. The implementation is geared towards computing
as-good-as-possible results for the objectsize intrinsics and similar.
However, we also use it in BasicAA, which is very hot, and really only
cares about the base cases like alloca/malloc/global, not any of the
analysis for GEPs, phis, or loads.
Add a new getBaseObjectSize() API for this use case, which only handles
the non-recursive cases. As a bonus, this API can easily return a
TypeSize and thus support scalable vectors. For now, I'm explicitly
discarding the scalable sizes in BasicAA just to avoid unnecessary
behavior changes during this refactor.
Diffstat (limited to 'llvm/lib/Analysis/MemoryBuiltins.cpp')
-rw-r--r-- | llvm/lib/Analysis/MemoryBuiltins.cpp | 53 |
1 file changed, 53 insertions, 0 deletions
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index e0b7f65d..1df4eda2 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -589,6 +589,59 @@ bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
   return true;
 }
// Compute the size of the base object that Ptr directly is, handling only the
// non-recursive cases: undef, null, global variables, arguments with a known
// in-memory pointee type, allocas, and allocation calls. Unlike
// getObjectSize(), no analysis of GEPs, phis, or loads is performed (per the
// commit message, this is intended as a cheap API for hot callers such as
// BasicAA). Returning TypeSize rather than uint64_t lets scalable-vector
// sizes be represented. Returns std::nullopt when the size is unknown.
+std::optional<TypeSize> llvm::getBaseObjectSize(const Value *Ptr,
+                                                const DataLayout &DL,
+                                                const TargetLibraryInfo *TLI,
+                                                ObjectSizeOpts Opts) {
// Only the exact-size-from-offset evaluation mode is implemented here.
+  assert(Opts.EvalMode == ObjectSizeOpts::Mode::ExactSizeFromOffset &&
+         "Other modes are currently not supported");
+
// Helper: round a fixed size up to the object's alignment when the caller
// asked for it (Opts.RoundToAlign); scalable sizes are returned unchanged
// since they cannot be rounded to a fixed alignment.
+  auto Align = [&](TypeSize Size, MaybeAlign Alignment) {
+    if (Opts.RoundToAlign && Alignment && !Size.isScalable())
+      return TypeSize::getFixed(alignTo(Size.getFixedValue(), *Alignment));
+    return Size;
+  };
+
// Undef is treated as a zero-size object.
+  if (isa<UndefValue>(Ptr))
+    return TypeSize::getZero();
+
+  if (isa<ConstantPointerNull>(Ptr)) {
// Null has known size zero only in address space 0, and only when the
// caller did not request that null be treated as an unknown-size object.
+    if (Opts.NullIsUnknownSize || Ptr->getType()->getPointerAddressSpace())
+      return std::nullopt;
+    return TypeSize::getZero();
+  }
+
+  if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
// The global's size is only reliable when its type is sized and its
// definition cannot be replaced at link time (no extern_weak linkage, has
// an initializer, not interposable).
+    if (!GV->getValueType()->isSized() || GV->hasExternalWeakLinkage() ||
+        !GV->hasInitializer() || GV->isInterposable())
+      return std::nullopt;
+    return Align(DL.getTypeAllocSize(GV->getValueType()), GV->getAlign());
+  }
+
+  if (auto *A = dyn_cast<Argument>(Ptr)) {
// Arguments only have a knowable size when an in-memory pointee type is
// attached (e.g. byval-style attributes — presumably; confirm against
// Argument::getPointeeInMemoryValueType).
+    Type *MemoryTy = A->getPointeeInMemoryValueType();
+    if (!MemoryTy || !MemoryTy->isSized())
+      return std::nullopt;
+    return Align(DL.getTypeAllocSize(MemoryTy), A->getParamAlign());
+  }
+
+  if (auto *AI = dyn_cast<AllocaInst>(Ptr)) {
// getAllocationSize already accounts for the array size operand; it yields
// nullopt when the size is not a compile-time constant.
+    if (std::optional<TypeSize> Size = AI->getAllocationSize(DL))
+      return Align(*Size, AI->getAlign());
+    return std::nullopt;
+  }
+
+  if (auto *CB = dyn_cast<CallBase>(Ptr)) {
// Allocation calls recognized via TLI (malloc and friends). The APInt size
// must fit a uint64_t zero-extension to be representable as a TypeSize.
+    if (std::optional<APInt> Size = getAllocSize(CB, TLI)) {
+      if (std::optional<uint64_t> ZExtSize = Size->tryZExtValue())
+        return TypeSize::getFixed(*ZExtSize);
+    }
+    return std::nullopt;
+  }
+
// Anything else (GEPs, phis, selects, loads, ...) is deliberately not
// analyzed — callers needing that should use getObjectSize() instead.
+  return std::nullopt;
+}
+
 Value
*llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, |