aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/Analysis
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Analysis')
-rwxr-xr-xllvm/lib/Analysis/ConstantFolding.cpp4
-rw-r--r--llvm/lib/Analysis/InstructionSimplify.cpp11
-rw-r--r--llvm/lib/Analysis/MLInlineAdvisor.cpp58
-rw-r--r--llvm/lib/Analysis/MemoryLocation.cpp4
-rw-r--r--llvm/lib/Analysis/ScalarEvolution.cpp84
-rw-r--r--llvm/lib/Analysis/ValueTracking.cpp5
6 files changed, 95 insertions, 71 deletions
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index a5ba197..e9e2e7d 100755
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -4056,8 +4056,8 @@ static Constant *ConstantFoldFixedVectorCall(
switch (IntrinsicID) {
case Intrinsic::masked_load: {
auto *SrcPtr = Operands[0];
- auto *Mask = Operands[2];
- auto *Passthru = Operands[3];
+ auto *Mask = Operands[1];
+ auto *Passthru = Operands[2];
Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index e08ef60..dc813f6 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -5440,9 +5440,10 @@ static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
// ptrtoint (ptradd (Ptr, X - ptrtoint(Ptr))) -> X
Value *Ptr, *X;
- if (CastOpc == Instruction::PtrToInt &&
- match(Op, m_PtrAdd(m_Value(Ptr),
- m_Sub(m_Value(X), m_PtrToInt(m_Deferred(Ptr))))) &&
+ if ((CastOpc == Instruction::PtrToInt || CastOpc == Instruction::PtrToAddr) &&
+ match(Op,
+ m_PtrAdd(m_Value(Ptr),
+ m_Sub(m_Value(X), m_PtrToIntOrAddr(m_Deferred(Ptr))))) &&
X->getType() == Ty && Ty == Q.DL.getIndexType(Ptr->getType()))
return X;
@@ -6987,8 +6988,8 @@ static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
switch (IID) {
case Intrinsic::masked_load:
case Intrinsic::masked_gather: {
- Value *MaskArg = Args[2];
- Value *PassthruArg = Args[3];
+ Value *MaskArg = Args[1];
+ Value *PassthruArg = Args[2];
// If the mask is all zeros or undef, the "passthru" argument is the result.
if (maskIsAllZeroOrUndef(MaskArg))
return PassthruArg;
diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp
index 1d1a5560..9a5ae2a 100644
--- a/llvm/lib/Analysis/MLInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -324,32 +324,44 @@ void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
FAM.invalidate(*Caller, PA);
}
Advice.updateCachedCallerFPI(FAM);
- int64_t IRSizeAfter =
- getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
- CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
+ if (Caller == Callee) {
+ assert(!CalleeWasDeleted);
+ // We double-counted CallerAndCalleeEdges - since the caller and callee
+ // would be the same
+ assert(Advice.CallerAndCalleeEdges % 2 == 0);
+ CurrentIRSize += getIRSize(*Caller) - Advice.CallerIRSize;
+ EdgeCount += getCachedFPI(*Caller).DirectCallsToDefinedFunctions -
+ Advice.CallerAndCalleeEdges / 2;
+ // The NodeCount would stay the same.
+ } else {
+ int64_t IRSizeAfter =
+ getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
+ CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
+
+ // We can delta-update module-wide features. We know the inlining only
+ // changed the caller, and maybe the callee (by deleting the latter). Nodes
+ // are simple to update. For edges, we 'forget' the edges that the caller
+ // and callee used to have before inlining, and add back what they currently
+ // have together.
+ int64_t NewCallerAndCalleeEdges =
+ getCachedFPI(*Caller).DirectCallsToDefinedFunctions;
+
+ // A dead function's node is not actually removed from the call graph until
+ // the end of the call graph walk, but the node no longer belongs to any
+ // valid SCC.
+ if (CalleeWasDeleted) {
+ --NodeCount;
+ NodesInLastSCC.erase(CG.lookup(*Callee));
+ DeadFunctions.insert(Callee);
+ } else {
+ NewCallerAndCalleeEdges +=
+ getCachedFPI(*Callee).DirectCallsToDefinedFunctions;
+ }
+ EdgeCount += (NewCallerAndCalleeEdges - Advice.CallerAndCalleeEdges);
+ }
if (CurrentIRSize > SizeIncreaseThreshold * InitialIRSize)
ForceStop = true;
- // We can delta-update module-wide features. We know the inlining only changed
- // the caller, and maybe the callee (by deleting the latter).
- // Nodes are simple to update.
- // For edges, we 'forget' the edges that the caller and callee used to have
- // before inlining, and add back what they currently have together.
- int64_t NewCallerAndCalleeEdges =
- getCachedFPI(*Caller).DirectCallsToDefinedFunctions;
-
- // A dead function's node is not actually removed from the call graph until
- // the end of the call graph walk, but the node no longer belongs to any valid
- // SCC.
- if (CalleeWasDeleted) {
- --NodeCount;
- NodesInLastSCC.erase(CG.lookup(*Callee));
- DeadFunctions.insert(Callee);
- } else {
- NewCallerAndCalleeEdges +=
- getCachedFPI(*Callee).DirectCallsToDefinedFunctions;
- }
- EdgeCount += (NewCallerAndCalleeEdges - Advice.CallerAndCalleeEdges);
assert(CurrentIRSize >= 0 && EdgeCount >= 0 && NodeCount >= 0);
}
diff --git a/llvm/lib/Analysis/MemoryLocation.cpp b/llvm/lib/Analysis/MemoryLocation.cpp
index dcc5117..1c5f08e 100644
--- a/llvm/lib/Analysis/MemoryLocation.cpp
+++ b/llvm/lib/Analysis/MemoryLocation.cpp
@@ -245,7 +245,7 @@ MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
assert(ArgIdx == 0 && "Invalid argument index");
auto *Ty = cast<VectorType>(II->getType());
- if (auto KnownType = getKnownTypeFromMaskedOp(II->getOperand(2), Ty))
+ if (auto KnownType = getKnownTypeFromMaskedOp(II->getOperand(1), Ty))
return MemoryLocation(Arg, DL.getTypeStoreSize(*KnownType), AATags);
return MemoryLocation(
@@ -255,7 +255,7 @@ MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
assert(ArgIdx == 1 && "Invalid argument index");
auto *Ty = cast<VectorType>(II->getArgOperand(0)->getType());
- if (auto KnownType = getKnownTypeFromMaskedOp(II->getOperand(3), Ty))
+ if (auto KnownType = getKnownTypeFromMaskedOp(II->getOperand(2), Ty))
return MemoryLocation(Arg, DL.getTypeStoreSize(*KnownType), AATags);
return MemoryLocation(
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 425420f..6f7dd79 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -15473,6 +15473,38 @@ void ScalarEvolution::LoopGuards::collectFromPHI(
}
}
+// Return a new SCEV that rounds \p Expr down to the closest value divisible
+// by \p Divisor that is less than or equal to Expr. For now, only constant
+// Expr is handled.
+static const SCEV *getPreviousSCEVDivisibleByDivisor(const SCEV *Expr,
+ const APInt &DivisorVal,
+ ScalarEvolution &SE) {
+ const APInt *ExprVal;
+ if (!match(Expr, m_scev_APInt(ExprVal)) || ExprVal->isNegative() ||
+ DivisorVal.isNonPositive())
+ return Expr;
+ APInt Rem = ExprVal->urem(DivisorVal);
+ // return the SCEV: Expr - Expr % Divisor
+ return SE.getConstant(*ExprVal - Rem);
+}
+
+// Return a new SCEV that rounds \p Expr up to the closest value divisible
+// by \p Divisor that is greater than or equal to Expr. For now, only constant
+// Expr is handled.
+static const SCEV *getNextSCEVDivisibleByDivisor(const SCEV *Expr,
+ const APInt &DivisorVal,
+ ScalarEvolution &SE) {
+ const APInt *ExprVal;
+ if (!match(Expr, m_scev_APInt(ExprVal)) || ExprVal->isNegative() ||
+ DivisorVal.isNonPositive())
+ return Expr;
+ APInt Rem = ExprVal->urem(DivisorVal);
+ if (Rem.isZero())
+ return Expr;
+ // return the SCEV: Expr + Divisor - Expr % Divisor
+ return SE.getConstant(*ExprVal + DivisorVal - Rem);
+}
+
void ScalarEvolution::LoopGuards::collectFromBlock(
ScalarEvolution &SE, ScalarEvolution::LoopGuards &Guards,
const BasicBlock *Block, const BasicBlock *Pred,
@@ -15540,36 +15572,6 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
match(LHS, m_scev_APInt(C)) && C->isNonNegative();
};
- // Return a new SCEV that modifies \p Expr to the closest number divides by
- // \p Divisor and greater or equal than Expr. For now, only handle constant
- // Expr.
- auto GetNextSCEVDividesByDivisor = [&](const SCEV *Expr,
- const APInt &DivisorVal) {
- const APInt *ExprVal;
- if (!match(Expr, m_scev_APInt(ExprVal)) || ExprVal->isNegative() ||
- DivisorVal.isNonPositive())
- return Expr;
- APInt Rem = ExprVal->urem(DivisorVal);
- if (Rem.isZero())
- return Expr;
- // return the SCEV: Expr + Divisor - Expr % Divisor
- return SE.getConstant(*ExprVal + DivisorVal - Rem);
- };
-
- // Return a new SCEV that modifies \p Expr to the closest number divides by
- // \p Divisor and less or equal than Expr. For now, only handle constant
- // Expr.
- auto GetPreviousSCEVDividesByDivisor = [&](const SCEV *Expr,
- const APInt &DivisorVal) {
- const APInt *ExprVal;
- if (!match(Expr, m_scev_APInt(ExprVal)) || ExprVal->isNegative() ||
- DivisorVal.isNonPositive())
- return Expr;
- APInt Rem = ExprVal->urem(DivisorVal);
- // return the SCEV: Expr - Expr % Divisor
- return SE.getConstant(*ExprVal - Rem);
- };
-
// Apply divisibilty by \p Divisor on MinMaxExpr with constant values,
// recursively. This is done by aligning up/down the constant value to the
// Divisor.
@@ -15591,8 +15593,9 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
assert(SE.isKnownNonNegative(MinMaxLHS) &&
"Expected non-negative operand!");
auto *DivisibleExpr =
- IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, DivisorVal)
- : GetNextSCEVDividesByDivisor(MinMaxLHS, DivisorVal);
+ IsMin
+ ? getPreviousSCEVDivisibleByDivisor(MinMaxLHS, DivisorVal, SE)
+ : getNextSCEVDivisibleByDivisor(MinMaxLHS, DivisorVal, SE);
SmallVector<const SCEV *> Ops = {
ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
return SE.getMinMaxExpr(SCTy, Ops);
@@ -15669,21 +15672,21 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
[[fallthrough]];
case CmpInst::ICMP_SLT: {
RHS = SE.getMinusSCEV(RHS, One);
- RHS = GetPreviousSCEVDividesByDivisor(RHS, DividesBy);
+ RHS = getPreviousSCEVDivisibleByDivisor(RHS, DividesBy, SE);
break;
}
case CmpInst::ICMP_UGT:
case CmpInst::ICMP_SGT:
RHS = SE.getAddExpr(RHS, One);
- RHS = GetNextSCEVDividesByDivisor(RHS, DividesBy);
+ RHS = getNextSCEVDivisibleByDivisor(RHS, DividesBy, SE);
break;
case CmpInst::ICMP_ULE:
case CmpInst::ICMP_SLE:
- RHS = GetPreviousSCEVDividesByDivisor(RHS, DividesBy);
+ RHS = getPreviousSCEVDivisibleByDivisor(RHS, DividesBy, SE);
break;
case CmpInst::ICMP_UGE:
case CmpInst::ICMP_SGE:
- RHS = GetNextSCEVDividesByDivisor(RHS, DividesBy);
+ RHS = getNextSCEVDivisibleByDivisor(RHS, DividesBy, SE);
break;
default:
break;
@@ -15737,7 +15740,7 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
case CmpInst::ICMP_NE:
if (match(RHS, m_scev_Zero())) {
const SCEV *OneAlignedUp =
- GetNextSCEVDividesByDivisor(One, DividesBy);
+ getNextSCEVDivisibleByDivisor(One, DividesBy, SE);
To = SE.getUMaxExpr(FromRewritten, OneAlignedUp);
} else {
// LHS != RHS can be rewritten as (LHS - RHS) = UMax(1, LHS - RHS),
@@ -15963,8 +15966,11 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
if (MatchBinarySub(S, LHS, RHS)) {
if (LHS > RHS)
std::swap(LHS, RHS);
- if (NotEqual.contains({LHS, RHS}))
- return SE.getUMaxExpr(S, SE.getOne(S->getType()));
+ if (NotEqual.contains({LHS, RHS})) {
+ const SCEV *OneAlignedUp = getNextSCEVDivisibleByDivisor(
+ SE.getOne(S->getType()), SE.getConstantMultiple(S), SE);
+ return SE.getUMaxExpr(OneAlignedUp, S);
+ }
}
return nullptr;
};
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 9655c88..0a72076 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -7695,6 +7695,11 @@ static bool isGuaranteedNotToBeUndefOrPoison(
}
if (IsWellDefined)
return true;
+ } else if (auto *Splat = isa<ShuffleVectorInst>(Opr) ? getSplatValue(Opr)
+ : nullptr) {
+ // For splats we only need to check the value being splatted.
+ if (OpCheck(Splat))
+ return true;
} else if (all_of(Opr->operands(), OpCheck))
return true;
}