Diffstat (limited to 'llvm/lib')
 llvm/lib/Analysis/MemoryProfileInfo.cpp                  |  28
 llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp |   9
 llvm/lib/Transforms/Vectorize/LoopVectorize.cpp          |  16
 llvm/lib/Transforms/Vectorize/VPlan.cpp                  |   9
 llvm/lib/Transforms/Vectorize/VPlanHelpers.h             |  16
 llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp           | 122
 6 files changed, 63 insertions(+), 137 deletions(-)
diff --git a/llvm/lib/Analysis/MemoryProfileInfo.cpp b/llvm/lib/Analysis/MemoryProfileInfo.cpp
index 0c1f8db..92a5b6f 100644
--- a/llvm/lib/Analysis/MemoryProfileInfo.cpp
+++ b/llvm/lib/Analysis/MemoryProfileInfo.cpp
@@ -54,6 +54,10 @@ cl::opt<unsigned> MinPercentMaxColdSize(
"memprof-min-percent-max-cold-size", cl::init(100), cl::Hidden,
cl::desc("Min percent of max cold bytes for critical cold context"));
+LLVM_ABI cl::opt<bool> MemProfUseAmbiguousAttributes(
+ "memprof-ambiguous-attributes", cl::init(true), cl::Hidden,
+ cl::desc("Apply ambiguous memprof attribute to ambiguous allocations"));
+
} // end namespace llvm
bool llvm::memprof::metadataIncludesAllContextSizeInfo() {
@@ -125,6 +129,26 @@ bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) {
return NumAllocTypes == 1;
}
+void llvm::memprof::removeAnyExistingAmbiguousAttribute(CallBase *CB) {
+ if (!CB->hasFnAttr("memprof"))
+ return;
+ assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous");
+ CB->removeFnAttr("memprof");
+}
+
+void llvm::memprof::addAmbiguousAttribute(CallBase *CB) {
+ if (!MemProfUseAmbiguousAttributes)
+ return;
+ // We may have an existing ambiguous attribute if we are reanalyzing
+ // after inlining.
+ if (CB->hasFnAttr("memprof")) {
+ assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous");
+ } else {
+ auto A = llvm::Attribute::get(CB->getContext(), "memprof", "ambiguous");
+ CB->addFnAttr(A);
+ }
+}
+
void CallStackTrie::addCallStack(
AllocationType AllocType, ArrayRef<uint64_t> StackIds,
std::vector<ContextTotalSize> ContextSizeInfo) {
@@ -470,6 +494,9 @@ void CallStackTrie::addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
StringRef Descriptor) {
auto AllocTypeString = getAllocTypeAttributeString(AT);
auto A = llvm::Attribute::get(CI->getContext(), "memprof", AllocTypeString);
+ // After inlining we may be able to convert an existing ambiguous allocation
+ // to an unambiguous one.
+ removeAnyExistingAmbiguousAttribute(CI);
CI->addFnAttr(A);
if (MemProfReportHintedSizes) {
std::vector<ContextTotalSize> ContextSizeInfo;
@@ -529,6 +556,7 @@ bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
assert(MIBCallStack.size() == 1 &&
"Should only be left with Alloc's location in stack");
CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
+ addAmbiguousAttribute(CI);
return true;
}
  // If there exists a corner case where CallStackTrie has one chain to a leaf
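Taken together, the two new helpers bracket an allocation's lifetime: addAmbiguousAttribute marks a call whose profiled contexts still disagree, and removeAnyExistingAmbiguousAttribute strips that marker once a single allocation type is settled. A minimal caller sketch, assuming the declarations this patch adds to llvm/Analysis/MemoryProfileInfo.h (the resolveToCold wrapper itself is illustrative, not part of the patch):

  #include "llvm/Analysis/MemoryProfileInfo.h"
  #include "llvm/IR/InstrTypes.h"

  // Hypothetical caller: resolve a previously ambiguous allocation to cold.
  static void resolveToCold(llvm::CallBase *CB) {
    // Drops "memprof"="ambiguous" if a prior analysis attached it; the helper
    // asserts the existing attribute value is "ambiguous" before removing it.
    llvm::memprof::removeAnyExistingAmbiguousAttribute(CB);
    // Attach the definitive allocation-type attribute in its place.
    CB->addFnAttr(llvm::Attribute::get(CB->getContext(), "memprof", "cold"));
  }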
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index faeab95..cfdfd94 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -3986,6 +3986,7 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
void ModuleCallsiteContextGraph::updateAllocationCall(
CallInfo &Call, AllocationType AllocType) {
std::string AllocTypeString = getAllocTypeAttributeString(AllocType);
+ removeAnyExistingAmbiguousAttribute(cast<CallBase>(Call.call()));
auto A = llvm::Attribute::get(Call.call()->getFunction()->getContext(),
"memprof", AllocTypeString);
cast<CallBase>(Call.call())->addFnAttr(A);
@@ -5661,9 +5662,10 @@ bool MemProfContextDisambiguation::applyImport(Module &M) {
auto *MemProfMD = I.getMetadata(LLVMContext::MD_memprof);
// Include allocs that were already assigned a memprof function
- // attribute in the statistics.
- if (CB->getAttributes().hasFnAttr("memprof")) {
- assert(!MemProfMD);
+ // attribute in the statistics. Only do this for those that do not have
+ // memprof metadata, since we add an "ambiguous" memprof attribute by
+ // default.
+ if (CB->getAttributes().hasFnAttr("memprof") && !MemProfMD) {
CB->getAttributes().getFnAttr("memprof").getValueAsString() == "cold"
? AllocTypeColdThinBackend++
: AllocTypeNotColdThinBackend++;
@@ -5740,6 +5742,7 @@ bool MemProfContextDisambiguation::applyImport(Module &M) {
// clone J-1 (J==0 is the original clone and does not have a VMaps
// entry).
CBClone = cast<CallBase>((*VMaps[J - 1])[CB]);
+ removeAnyExistingAmbiguousAttribute(CBClone);
CBClone->addFnAttr(A);
ORE.emit(OptimizationRemark(DEBUG_TYPE, "MemprofAttribute", CBClone)
<< ore::NV("AllocationCall", CBClone) << " in clone "
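Because buildAndAttachMIBMetadata now attaches "memprof"="ambiguous" alongside !memprof metadata by default, a call can legitimately carry both, which is why the old assert(!MemProfMD) above becomes an explicit filter. A sketch of the resulting predicate (the countsAsAssigned helper name is hypothetical; the logic mirrors the hunk):

  #include "llvm/IR/InstrTypes.h"
  #include "llvm/IR/LLVMContext.h"

  // Count a call toward the already-disambiguated statistics only when it
  // carries a memprof fn attribute but no !memprof metadata left to resolve.
  static bool countsAsAssigned(const llvm::CallBase *CB) {
    return CB->getAttributes().hasFnAttr("memprof") &&
           !CB->getMetadata(llvm::LLVMContext::MD_memprof);
  }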
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index cb6bfb2..56a3d6d 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3903,8 +3903,7 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks(
if (VF.isScalar())
continue;
- VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
- *CM.PSE.getSE());
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
precomputeCosts(*Plan, VF, CostCtx);
auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
@@ -4161,8 +4160,7 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
// Add on other costs that are modelled in VPlan, but not in the legacy
// cost model.
- VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind,
- *CM.PSE.getSE());
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind);
VPRegionBlock *VectorRegion = P->getVectorLoopRegion();
assert(VectorRegion && "Expected to have a vector region!");
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
@@ -6854,7 +6852,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
ElementCount VF) const {
- VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, *PSE.getSE());
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind);
InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
// Now compute and add the VPlan-based cost.
@@ -7087,8 +7085,7 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
// simplifications not accounted for in the legacy cost model. If that's the
// case, don't trigger the assertion, as the extra simplifications may cause a
// different VF to be picked by the VPlan-based cost model.
- VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind,
- *CM.PSE.getSE());
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind);
precomputeCosts(BestPlan, BestFactor.Width, CostCtx);
// Verify that the VPlan-based and legacy cost models agree, except for VPlans
// with early exits and plans with additional VPlan simplifications. The
@@ -8624,8 +8621,7 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
// TODO: Enable following transform when the EVL-version of extended-reduction
// and mulacc-reduction are implemented.
if (!CM.foldTailWithEVL()) {
- VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
- *CM.PSE.getSE());
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan,
CostCtx, Range);
}
@@ -10079,7 +10075,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
bool ForceVectorization =
Hints.getForce() == LoopVectorizeHints::FK_Enabled;
VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM,
- CM.CostKind, *CM.PSE.getSE());
+ CM.CostKind);
if (!ForceVectorization &&
!isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
LVP.getPlanFor(VF.Width), SEL,
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 2555ebe..07b191a 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -1772,8 +1772,7 @@ VPCostContext::getOperandInfo(VPValue *V) const {
}
InstructionCost VPCostContext::getScalarizationOverhead(
- Type *ResultTy, ArrayRef<const VPValue *> Operands, ElementCount VF,
- bool AlwaysIncludeReplicatingR) {
+ Type *ResultTy, ArrayRef<const VPValue *> Operands, ElementCount VF) {
if (VF.isScalar())
return 0;
@@ -1793,11 +1792,7 @@ InstructionCost VPCostContext::getScalarizationOverhead(
SmallPtrSet<const VPValue *, 4> UniqueOperands;
SmallVector<Type *> Tys;
for (auto *Op : Operands) {
- if (Op->isLiveIn() ||
- (!AlwaysIncludeReplicatingR &&
- isa<VPReplicateRecipe, VPPredInstPHIRecipe>(Op)) ||
- (isa<VPReplicateRecipe>(Op) &&
- cast<VPReplicateRecipe>(Op)->getOpcode() == Instruction::Load) ||
+ if (Op->isLiveIn() || isa<VPReplicateRecipe, VPPredInstPHIRecipe>(Op) ||
!UniqueOperands.insert(Op).second)
continue;
Tys.push_back(toVectorizedTy(Types.inferScalarType(Op), VF));
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
index 1580a3b..fc1a09e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
@@ -349,14 +349,12 @@ struct VPCostContext {
LoopVectorizationCostModel &CM;
SmallPtrSet<Instruction *, 8> SkipCostComputation;
TargetTransformInfo::TargetCostKind CostKind;
- ScalarEvolution &SE;
VPCostContext(const TargetTransformInfo &TTI, const TargetLibraryInfo &TLI,
const VPlan &Plan, LoopVectorizationCostModel &CM,
- TargetTransformInfo::TargetCostKind CostKind,
- ScalarEvolution &SE)
+ TargetTransformInfo::TargetCostKind CostKind)
: TTI(TTI), TLI(TLI), Types(Plan), LLVMCtx(Plan.getContext()), CM(CM),
- CostKind(CostKind), SE(SE) {}
+ CostKind(CostKind) {}
/// Return the cost for \p UI with \p VF using the legacy cost model as
/// fallback until computing the cost of all recipes migrates to VPlan.
@@ -376,12 +374,10 @@ struct VPCostContext {
/// Estimate the overhead of scalarizing a recipe with result type \p ResultTy
/// and \p Operands with \p VF. This is a convenience wrapper for the
- /// type-based getScalarizationOverhead API. If \p AlwaysIncludeReplicatingR
- /// is true, always compute the cost of scalarizing replicating operands.
- InstructionCost
- getScalarizationOverhead(Type *ResultTy, ArrayRef<const VPValue *> Operands,
- ElementCount VF,
- bool AlwaysIncludeReplicatingR = false);
+ /// type-based getScalarizationOverhead API.
+ InstructionCost getScalarizationOverhead(Type *ResultTy,
+ ArrayRef<const VPValue *> Operands,
+ ElementCount VF);
};
/// This class can be used to assign names to VPValues. For VPValues without
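With ScalarEvolution no longer stored on the context, every construction site shrinks to the five-argument form. A minimal usage sketch (mirroring the LoopVectorize.cpp call sites above; Plan, VF, and the enclosing planner method are assumed to be in scope):

  VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
  InstructionCost Cost = precomputeCosts(*Plan, VF, CostCtx);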
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 43d61f2..67b9244 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -40,7 +40,6 @@
#include <cassert>
using namespace llvm;
-using namespace llvm::VPlanPatternMatch;
using VectorParts = SmallVector<Value *, 2>;
@@ -304,6 +303,7 @@ VPPartialReductionRecipe::computeCost(ElementCount VF,
VPRecipeBase *OpR = Op->getDefiningRecipe();
// If the partial reduction is predicated, a select will be operand 0
+ using namespace llvm::VPlanPatternMatch;
if (match(getOperand(1), m_Select(m_VPValue(), m_VPValue(Op), m_VPValue()))) {
OpR = Op->getDefiningRecipe();
}
@@ -1963,6 +1963,7 @@ InstructionCost VPWidenSelectRecipe::computeCost(ElementCount VF,
Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
VPValue *Op0, *Op1;
+ using namespace llvm::VPlanPatternMatch;
if (!ScalarCond && ScalarTy->getScalarSizeInBits() == 1 &&
(match(this, m_LogicalAnd(m_VPValue(Op0), m_VPValue(Op1))) ||
match(this, m_LogicalOr(m_VPValue(Op0), m_VPValue(Op1))))) {
@@ -3110,62 +3111,6 @@ bool VPReplicateRecipe::shouldPack() const {
});
}
-/// Returns true if \p Ptr is a pointer computation for which the legacy cost
-/// model computes a SCEV expression when computing the address cost.
-static bool shouldUseAddressAccessSCEV(const VPValue *Ptr) {
- auto *PtrR = Ptr->getDefiningRecipe();
- if (!PtrR || !((isa<VPReplicateRecipe>(PtrR) &&
- cast<VPReplicateRecipe>(PtrR)->getOpcode() ==
- Instruction::GetElementPtr) ||
- isa<VPWidenGEPRecipe>(PtrR) ||
- match(Ptr, m_GetElementPtr(m_VPValue(), m_VPValue()))))
- return false;
-
- // We are looking for a GEP where all indices are either loop invariant or
- // inductions.
- for (VPValue *Opd : drop_begin(PtrR->operands())) {
- if (!Opd->isDefinedOutsideLoopRegions() &&
- !isa<VPScalarIVStepsRecipe, VPWidenIntOrFpInductionRecipe>(Opd))
- return false;
- }
-
- return true;
-}
-
-/// Returns true if \p V is used as part of the address of another load or
-/// store.
-static bool isUsedByLoadStoreAddress(const VPUser *V) {
- SmallPtrSet<const VPUser *, 4> Seen;
- SmallVector<const VPUser *> WorkList = {V};
-
- while (!WorkList.empty()) {
- auto *Cur = dyn_cast<VPSingleDefRecipe>(WorkList.pop_back_val());
- if (!Cur || !Seen.insert(Cur).second)
- continue;
-
- for (VPUser *U : Cur->users()) {
- if (auto *InterleaveR = dyn_cast<VPInterleaveBase>(U))
- if (InterleaveR->getAddr() == Cur)
- return true;
- if (auto *RepR = dyn_cast<VPReplicateRecipe>(U)) {
- if (RepR->getOpcode() == Instruction::Load &&
- RepR->getOperand(0) == Cur)
- return true;
- if (RepR->getOpcode() == Instruction::Store &&
- RepR->getOperand(1) == Cur)
- return true;
- }
- if (auto *MemR = dyn_cast<VPWidenMemoryRecipe>(U)) {
- if (MemR->getAddr() == Cur && MemR->isConsecutive())
- return true;
- }
- }
-
- append_range(WorkList, cast<VPSingleDefRecipe>(Cur)->users());
- }
- return false;
-}
-
InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
Instruction *UI = cast<Instruction>(getUnderlyingValue());
@@ -3273,58 +3218,21 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
}
case Instruction::Load:
case Instruction::Store: {
- if (VF.isScalable() && !isSingleScalar())
- return InstructionCost::getInvalid();
-
+ if (isSingleScalar()) {
+ bool IsLoad = UI->getOpcode() == Instruction::Load;
+ Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0));
+ Type *ScalarPtrTy = Ctx.Types.inferScalarType(getOperand(IsLoad ? 0 : 1));
+ const Align Alignment = getLoadStoreAlignment(UI);
+ unsigned AS = getLoadStoreAddressSpace(UI);
+ TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0));
+ InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost(
+ UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo, UI);
+ return ScalarMemOpCost + Ctx.TTI.getAddressComputationCost(
+ ScalarPtrTy, nullptr, nullptr, Ctx.CostKind);
+ }
// TODO: See getMemInstScalarizationCost for how to handle replicating and
// predicated cases.
- const VPRegionBlock *ParentRegion = getParent()->getParent();
- if (ParentRegion && ParentRegion->isReplicator())
- break;
-
- bool IsLoad = UI->getOpcode() == Instruction::Load;
- const VPValue *PtrOp = getOperand(!IsLoad);
- // TODO: Handle cases where we need to pass a SCEV to
- // getAddressComputationCost.
- if (shouldUseAddressAccessSCEV(PtrOp))
- break;
-
- Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0));
- Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp);
- const Align Alignment = getLoadStoreAlignment(UI);
- unsigned AS = getLoadStoreAddressSpace(UI);
- TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0));
- InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost(
- UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo);
-
- Type *PtrTy = isSingleScalar() ? ScalarPtrTy : toVectorTy(ScalarPtrTy, VF);
-
- InstructionCost ScalarCost =
- ScalarMemOpCost + Ctx.TTI.getAddressComputationCost(
- PtrTy, &Ctx.SE, nullptr, Ctx.CostKind);
- if (isSingleScalar())
- return ScalarCost;
-
- SmallVector<const VPValue *> OpsToScalarize;
- Type *ResultTy = Type::getVoidTy(PtrTy->getContext());
- // Set ResultTy and OpsToScalarize, if scalarization is needed. Currently we
- // don't assign scalarization overhead in general, if the target prefers
- // vectorized addressing or the loaded value is used as part of an address
- // of another load or store.
- bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing();
- if (PreferVectorizedAddressing || !isUsedByLoadStoreAddress(this)) {
- bool EfficientVectorLoadStore =
- Ctx.TTI.supportsEfficientVectorElementLoadStore();
- if (!(IsLoad && !PreferVectorizedAddressing) &&
- !(!IsLoad && EfficientVectorLoadStore))
- append_range(OpsToScalarize, operands());
-
- if (!EfficientVectorLoadStore)
- ResultTy = Ctx.Types.inferScalarType(this);
- }
-
- return (ScalarCost * VF.getFixedValue()) +
- Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, true);
+ break;
}
}