Diffstat (limited to 'llvm/lib')
-rw-r--r-- llvm/lib/Analysis/AliasAnalysis.cpp | 2
-rw-r--r-- llvm/lib/Analysis/ScalarEvolution.cpp | 198
-rw-r--r-- llvm/lib/Analysis/TargetTransformInfo.cpp | 6
-rw-r--r-- llvm/lib/BinaryFormat/Dwarf.cpp | 7
-rw-r--r-- llvm/lib/BinaryFormat/MsgPackDocumentYAML.cpp | 4
-rw-r--r-- llvm/lib/CGData/OutlinedHashTreeRecord.cpp | 4
-rw-r--r-- llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp | 3
-rw-r--r-- llvm/lib/CodeGen/CodeGenPrepare.cpp | 2
-rw-r--r-- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 4
-rw-r--r-- llvm/lib/CodeGen/MachineOperand.cpp | 8
-rw-r--r-- llvm/lib/CodeGen/MachineScheduler.cpp | 4
-rw-r--r-- llvm/lib/CodeGen/MachineStableHash.cpp | 3
-rw-r--r-- llvm/lib/CodeGen/RegAllocFast.cpp | 2
-rw-r--r-- llvm/lib/CodeGen/RegisterCoalescer.cpp | 2
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 20
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 8
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2
-rw-r--r-- llvm/lib/CodeGenTypes/LowLevelType.cpp | 6
-rw-r--r-- llvm/lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp | 3
-rw-r--r-- llvm/lib/ExecutionEngine/Orc/TargetProcess/CMakeLists.txt | 4
-rw-r--r-- llvm/lib/ExecutionEngine/Orc/TargetProcess/LibraryResolver.cpp | 370
-rw-r--r-- llvm/lib/ExecutionEngine/Orc/TargetProcess/LibraryScanner.cpp | 1161
-rw-r--r-- llvm/lib/IR/AutoUpgrade.cpp | 13
-rw-r--r-- llvm/lib/IR/ConstantsContext.h | 2
-rw-r--r-- llvm/lib/IR/ModuleSummaryIndex.cpp | 4
-rw-r--r-- llvm/lib/MC/MCParser/ELFAsmParser.cpp | 18
-rw-r--r-- llvm/lib/MC/MCParser/MasmParser.cpp | 6
-rw-r--r-- llvm/lib/Object/WindowsMachineFlag.cpp | 4
-rw-r--r-- llvm/lib/Remarks/RemarkFormat.cpp | 2
-rw-r--r-- llvm/lib/Support/AArch64BuildAttributes.cpp | 4
-rw-r--r-- llvm/lib/Support/APFloat.cpp | 6
-rw-r--r-- llvm/lib/Support/CommandLine.cpp | 4
-rw-r--r-- llvm/lib/Support/StringRef.cpp | 5
-rw-r--r-- llvm/lib/Support/Windows/Signals.inc | 7
-rw-r--r-- llvm/lib/Support/raw_ostream.cpp | 11
-rw-r--r-- llvm/lib/Support/raw_socket_stream.cpp | 2
-rw-r--r-- llvm/lib/TableGen/TGLexer.cpp | 6
-rw-r--r-- llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp | 8
-rw-r--r-- llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h | 2
-rw-r--r-- llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 2
-rw-r--r-- llvm/lib/Target/AMDGPU/MIMGInstructions.td | 4
-rw-r--r-- llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp | 28
-rw-r--r-- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 84
-rw-r--r-- llvm/lib/Target/CSKY/CSKYISelLowering.cpp | 74
-rw-r--r-- llvm/lib/Target/Hexagon/HexagonDepIICHVX.td | 132
-rw-r--r-- llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td | 391
-rw-r--r-- llvm/lib/Target/Hexagon/HexagonDepMapAsm2Intrin.td | 116
-rw-r--r-- llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp | 8
-rw-r--r-- llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td | 6
-rw-r--r-- llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td | 5
-rw-r--r-- llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp | 6
-rw-r--r-- llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp | 2
-rw-r--r-- llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp | 2
-rw-r--r-- llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 13
-rw-r--r-- llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp | 2
-rw-r--r-- llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp | 2
-rw-r--r-- llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp | 2
-rw-r--r-- llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h | 2
-rw-r--r-- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 66
-rw-r--r-- llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h | 4
-rw-r--r-- llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h | 3
-rw-r--r-- llvm/lib/Target/WebAssembly/WebAssemblySortRegion.h | 2
-rw-r--r-- llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp | 74
-rw-r--r-- llvm/lib/Target/X86/X86ISelLowering.cpp | 156
-rw-r--r-- llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp | 16
-rw-r--r-- llvm/lib/Target/X86/X86TargetTransformInfo.cpp | 2
-rw-r--r-- llvm/lib/Target/X86/X86TargetTransformInfo.h | 2
-rw-r--r-- llvm/lib/TargetParser/PPCTargetParser.cpp | 8
-rw-r--r-- llvm/lib/TextAPI/BinaryReader/DylibReader.cpp | 2
-rw-r--r-- llvm/lib/Transforms/Coroutines/CoroCloner.h | 2
-rw-r--r-- llvm/lib/Transforms/IPO/AttributorAttributes.cpp | 4
-rw-r--r-- llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp | 4
-rw-r--r-- llvm/lib/Transforms/Scalar/GVNSink.cpp | 2
-rw-r--r-- llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp | 30
-rw-r--r-- llvm/lib/Transforms/Scalar/StructurizeCFG.cpp | 4
-rw-r--r-- llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 2
-rw-r--r-- llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h | 27
-rw-r--r-- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 18
-rw-r--r-- llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 21
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlan.h | 59
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp | 7
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 19
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlanSLP.h | 3
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 68
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp | 19
86 files changed, 2802 insertions, 632 deletions
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index f2dc25f..26a5602 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -75,7 +75,7 @@ AAResults::AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}
AAResults::AAResults(AAResults &&Arg)
: TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {}
-AAResults::~AAResults() {}
+AAResults::~AAResults() = default;
bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv) {
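
This hunk is one of several in the commit (see also CodeGenPrepare, MachineScheduler, RegAllocFast, and RegisterCoalescer below) that replace an empty special-member body with `= default`. A minimal sketch of why the spelling matters, not taken from the patch:

    // Illustrative only: a defaulted destructor is not "user-provided", so
    // the type can remain trivially destructible when its members allow it;
    // an empty user-written body always disables that.
    struct WithDefault { ~WithDefault() = default; }; // may stay trivial
    struct WithBody    { ~WithBody() {} };            // never trivial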
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index c9baeda..a31f17b 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -2424,10 +2424,10 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
-static SCEV::NoWrapFlags
-StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
- const ArrayRef<const SCEV *> Ops,
- SCEV::NoWrapFlags Flags) {
+static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE,
+ SCEVTypes Type,
+ ArrayRef<const SCEV *> Ops,
+ SCEV::NoWrapFlags Flags) {
using namespace std::placeholders;
using OBO = OverflowingBinaryOperator;
@@ -2540,7 +2540,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
unsigned Idx = isa<SCEVConstant>(Ops[0]) ? 1 : 0;
// Delay expensive flag strengthening until necessary.
- auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
+ auto ComputeFlags = [this, OrigFlags](ArrayRef<const SCEV *> Ops) {
return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
};
@@ -3125,7 +3125,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
return Folded;
// Delay expensive flag strengthening until necessary.
- auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
+ auto ComputeFlags = [this, OrigFlags](ArrayRef<const SCEV *> Ops) {
return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
};
@@ -15510,6 +15510,78 @@ static const SCEV *getNextSCEVDivisibleByDivisor(const SCEV *Expr,
return SE.getConstant(*ExprVal + DivisorVal - Rem);
}
+static bool collectDivisibilityInformation(
+ ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS,
+ DenseMap<const SCEV *, const SCEV *> &DivInfo,
+ DenseMap<const SCEV *, APInt> &Multiples, ScalarEvolution &SE) {
+ // If we have LHS == 0, check if LHS is computing a property of some unknown
+ // SCEV %v which we can rewrite %v to express explicitly.
+ if (Predicate != CmpInst::ICMP_EQ || !match(RHS, m_scev_Zero()))
+ return false;
+ // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
+ // explicitly express that.
+ const SCEVUnknown *URemLHS = nullptr;
+ const SCEV *URemRHS = nullptr;
+ if (!match(LHS, m_scev_URem(m_SCEVUnknown(URemLHS), m_SCEV(URemRHS), SE)))
+ return false;
+
+ const SCEV *Multiple =
+ SE.getMulExpr(SE.getUDivExpr(URemLHS, URemRHS), URemRHS);
+ DivInfo[URemLHS] = Multiple;
+ if (auto *C = dyn_cast<SCEVConstant>(URemRHS))
+ Multiples[URemLHS] = C->getAPInt();
+ return true;
+}
+
+// Check if the condition is a divisibility guard (A % B == 0).
+static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS,
+ ScalarEvolution &SE) {
+ const SCEV *X, *Y;
+ return match(LHS, m_scev_URem(m_SCEV(X), m_SCEV(Y), SE)) && RHS->isZero();
+}
+
+// Apply divisibility by \p Divisor on MinMaxExpr with constant values,
+// recursively. This is done by aligning up/down the constant value to the
+// Divisor.
+static const SCEV *applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr,
+ APInt Divisor,
+ ScalarEvolution &SE) {
+ // Return true if \p Expr is a MinMax SCEV expression with a non-negative
+ // constant operand. If so, return in \p SCTy the SCEV type and in \p RHS
+ // the non-constant operand and in \p LHS the constant operand.
+ auto IsMinMaxSCEVWithNonNegativeConstant =
+ [&](const SCEV *Expr, SCEVTypes &SCTy, const SCEV *&LHS,
+ const SCEV *&RHS) {
+ if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr)) {
+ if (MinMax->getNumOperands() != 2)
+ return false;
+ if (auto *C = dyn_cast<SCEVConstant>(MinMax->getOperand(0))) {
+ if (C->getAPInt().isNegative())
+ return false;
+ SCTy = MinMax->getSCEVType();
+ LHS = MinMax->getOperand(0);
+ RHS = MinMax->getOperand(1);
+ return true;
+ }
+ }
+ return false;
+ };
+
+ const SCEV *MinMaxLHS = nullptr, *MinMaxRHS = nullptr;
+ SCEVTypes SCTy;
+ if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
+ MinMaxRHS))
+ return MinMaxExpr;
+ auto IsMin = isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
+ assert(SE.isKnownNonNegative(MinMaxLHS) && "Expected non-negative operand!");
+ auto *DivisibleExpr =
+ IsMin ? getPreviousSCEVDivisibleByDivisor(MinMaxLHS, Divisor, SE)
+ : getNextSCEVDivisibleByDivisor(MinMaxLHS, Divisor, SE);
+ SmallVector<const SCEV *> Ops = {
+ applyDivisibilityOnMinMaxExpr(MinMaxRHS, Divisor, SE), DivisibleExpr};
+ return SE.getMinMaxExpr(SCTy, Ops);
+}
+
void ScalarEvolution::LoopGuards::collectFromBlock(
ScalarEvolution &SE, ScalarEvolution::LoopGuards &Guards,
const BasicBlock *Block, const BasicBlock *Pred,
@@ -15520,19 +15592,13 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
SmallVector<const SCEV *> ExprsToRewrite;
auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
const SCEV *RHS,
- DenseMap<const SCEV *, const SCEV *>
- &RewriteMap) {
+ DenseMap<const SCEV *, const SCEV *> &RewriteMap,
+ const LoopGuards &DivGuards) {
// WARNING: It is generally unsound to apply any wrap flags to the proposed
// replacement SCEV which isn't directly implied by the structure of that
// SCEV. In particular, using contextual facts to imply flags is *NOT*
// legal. See the scoping rules for flags in the header to understand why.
- // If LHS is a constant, apply information to the other expression.
- if (isa<SCEVConstant>(LHS)) {
- std::swap(LHS, RHS);
- Predicate = CmpInst::getSwappedPredicate(Predicate);
- }
-
// Check for a condition of the form (-C1 + X < C2). InstCombine will
// create this form when combining two checks of the form (X u< C2 + C1) and
// (X >=u C1).
@@ -15565,67 +15631,6 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
if (MatchRangeCheckIdiom())
return;
- // Return true if \p Expr is a MinMax SCEV expression with a non-negative
- // constant operand. If so, return in \p SCTy the SCEV type and in \p RHS
- // the non-constant operand and in \p LHS the constant operand.
- auto IsMinMaxSCEVWithNonNegativeConstant =
- [&](const SCEV *Expr, SCEVTypes &SCTy, const SCEV *&LHS,
- const SCEV *&RHS) {
- const APInt *C;
- SCTy = Expr->getSCEVType();
- return match(Expr, m_scev_MinMax(m_SCEV(LHS), m_SCEV(RHS))) &&
- match(LHS, m_scev_APInt(C)) && C->isNonNegative();
- };
-
- // Apply divisibilty by \p Divisor on MinMaxExpr with constant values,
- // recursively. This is done by aligning up/down the constant value to the
- // Divisor.
- std::function<const SCEV *(const SCEV *, const SCEV *)>
- ApplyDivisibiltyOnMinMaxExpr = [&](const SCEV *MinMaxExpr,
- const SCEV *Divisor) {
- auto *ConstDivisor = dyn_cast<SCEVConstant>(Divisor);
- if (!ConstDivisor)
- return MinMaxExpr;
- const APInt &DivisorVal = ConstDivisor->getAPInt();
-
- const SCEV *MinMaxLHS = nullptr, *MinMaxRHS = nullptr;
- SCEVTypes SCTy;
- if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
- MinMaxRHS))
- return MinMaxExpr;
- auto IsMin =
- isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
- assert(SE.isKnownNonNegative(MinMaxLHS) &&
- "Expected non-negative operand!");
- auto *DivisibleExpr =
- IsMin
- ? getPreviousSCEVDivisibleByDivisor(MinMaxLHS, DivisorVal, SE)
- : getNextSCEVDivisibleByDivisor(MinMaxLHS, DivisorVal, SE);
- SmallVector<const SCEV *> Ops = {
- ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
- return SE.getMinMaxExpr(SCTy, Ops);
- };
-
- // If we have LHS == 0, check if LHS is computing a property of some unknown
- // SCEV %v which we can rewrite %v to express explicitly.
- if (Predicate == CmpInst::ICMP_EQ && match(RHS, m_scev_Zero())) {
- // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
- // explicitly express that.
- const SCEVUnknown *URemLHS = nullptr;
- const SCEV *URemRHS = nullptr;
- if (match(LHS,
- m_scev_URem(m_SCEVUnknown(URemLHS), m_SCEV(URemRHS), SE))) {
- auto I = RewriteMap.find(URemLHS);
- const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : URemLHS;
- RewrittenLHS = ApplyDivisibiltyOnMinMaxExpr(RewrittenLHS, URemRHS);
- const auto *Multiple =
- SE.getMulExpr(SE.getUDivExpr(RewrittenLHS, URemRHS), URemRHS);
- RewriteMap[URemLHS] = Multiple;
- ExprsToRewrite.push_back(URemLHS);
- return;
- }
- }
-
// Do not apply information for constants or if RHS contains an AddRec.
if (isa<SCEVConstant>(LHS) || SE.containsAddRecurrence(RHS))
return;
@@ -15655,7 +15660,9 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
};
const SCEV *RewrittenLHS = GetMaybeRewritten(LHS);
- const APInt &DividesBy = SE.getConstantMultiple(RewrittenLHS);
+ // Apply divisibility information when computing the constant multiple.
+ const APInt &DividesBy =
+ SE.getConstantMultiple(DivGuards.rewrite(RewrittenLHS));
// Collect rewrites for LHS and its transitive operands based on the
// condition.
@@ -15840,8 +15847,11 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
// Now apply the information from the collected conditions to
// Guards.RewriteMap. Conditions are processed in reverse order, so the
- // earliest conditions is processed first. This ensures the SCEVs with the
+ // earliest condition is processed first, except guards with divisibility
+ // information, which are moved to the back. This ensures the SCEVs with the
// shortest dependency chains are constructed first.
+ SmallVector<std::tuple<CmpInst::Predicate, const SCEV *, const SCEV *>>
+ GuardsToProcess;
for (auto [Term, EnterIfTrue] : reverse(Terms)) {
SmallVector<Value *, 8> Worklist;
SmallPtrSet<Value *, 8> Visited;
@@ -15856,7 +15866,14 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
const auto *LHS = SE.getSCEV(Cmp->getOperand(0));
const auto *RHS = SE.getSCEV(Cmp->getOperand(1));
- CollectCondition(Predicate, LHS, RHS, Guards.RewriteMap);
+ // If LHS is a constant, apply information to the other expression.
+ // TODO: If LHS is not a constant, check if using CompareSCEVComplexity
+ // can improve results.
+ if (isa<SCEVConstant>(LHS)) {
+ std::swap(LHS, RHS);
+ Predicate = CmpInst::getSwappedPredicate(Predicate);
+ }
+ GuardsToProcess.emplace_back(Predicate, LHS, RHS);
continue;
}
@@ -15869,6 +15886,31 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
}
}
+ // Process divisibility guards in reverse order to populate DivGuards early.
+ DenseMap<const SCEV *, APInt> Multiples;
+ LoopGuards DivGuards(SE);
+ for (const auto &[Predicate, LHS, RHS] : GuardsToProcess) {
+ if (!isDivisibilityGuard(LHS, RHS, SE))
+ continue;
+ collectDivisibilityInformation(Predicate, LHS, RHS, DivGuards.RewriteMap,
+ Multiples, SE);
+ }
+
+ for (const auto &[Predicate, LHS, RHS] : GuardsToProcess)
+ CollectCondition(Predicate, LHS, RHS, Guards.RewriteMap, DivGuards);
+
+ // Apply divisibility information last. This ensures it is applied to the
+ // outermost expression after other rewrites for the given value.
+ for (const auto &[K, Divisor] : Multiples) {
+ const SCEV *DivisorSCEV = SE.getConstant(Divisor);
+ Guards.RewriteMap[K] =
+ SE.getMulExpr(SE.getUDivExpr(applyDivisibilityOnMinMaxExpr(
+ Guards.rewrite(K), Divisor, SE),
+ DivisorSCEV),
+ DivisorSCEV);
+ ExprsToRewrite.push_back(K);
+ }
+
// Let the rewriter preserve NUW/NSW flags if the unsigned/signed ranges of
// the replacement expressions are contained in the ranges of the replaced
// expressions.
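
The new divisibility logic rewrites a value %v guarded by `%v % B == 0` to `(%v /u B) * B`, and applyDivisibilityOnMinMaxExpr rounds the constant operand of a min/max toward the divisor: down for min, up for max. A minimal sketch of that rounding using hypothetical helpers (the real code goes through getPreviousSCEVDivisibleByDivisor and getNextSCEVDivisibleByDivisor):

    // Illustrative only, assuming non-negative values and a non-zero divisor.
    llvm::APInt alignDown(const llvm::APInt &C, const llvm::APInt &D) {
      return C - C.urem(D);                    // 7 aligned down to 4 -> 4
    }
    llvm::APInt alignUp(const llvm::APInt &C, const llvm::APInt &D) {
      llvm::APInt Rem = C.urem(D);
      return Rem.isZero() ? C : C + (D - Rem); // 7 aligned up to 4 -> 8
    }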
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index c47a1c1..0426ac7 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1353,9 +1353,9 @@ TargetTransformInfo::getInlineCallPenalty(const Function *F,
return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);
}
-bool TargetTransformInfo::areTypesABICompatible(
- const Function *Caller, const Function *Callee,
- const ArrayRef<Type *> &Types) const {
+bool TargetTransformInfo::areTypesABICompatible(const Function *Caller,
+ const Function *Callee,
+ ArrayRef<Type *> Types) const {
return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
}
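
This is the same `const ArrayRef<T> &` to `ArrayRef<T>` cleanup applied earlier in ScalarEvolution and below in DbgEntityHistoryCalculator, CombinerHelper, and DAGCombiner. ArrayRef is a non-owning (pointer, length) view, so passing it by value costs two words and removes a needless indirection; a small sketch of the preferred signature:

    #include "llvm/ADT/ArrayRef.h"
    // Illustrative only: take the view by value, never by const reference.
    int sum(llvm::ArrayRef<int> Vals) {
      int S = 0;
      for (int V : Vals)
        S += V;
      return S;
    }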
diff --git a/llvm/lib/BinaryFormat/Dwarf.cpp b/llvm/lib/BinaryFormat/Dwarf.cpp
index 55fa2df..a6c7e6a 100644
--- a/llvm/lib/BinaryFormat/Dwarf.cpp
+++ b/llvm/lib/BinaryFormat/Dwarf.cpp
@@ -1076,10 +1076,3 @@ StringRef (*const llvm::dwarf::EnumTraits<LineNumberOps>::StringFn)(unsigned) =
LNStandardString;
StringRef (*const llvm::dwarf::EnumTraits<Index>::StringFn)(unsigned) =
IndexString;
-
-constexpr char llvm::dwarf::EnumTraits<Attribute>::Type[];
-constexpr char llvm::dwarf::EnumTraits<Form>::Type[];
-constexpr char llvm::dwarf::EnumTraits<Index>::Type[];
-constexpr char llvm::dwarf::EnumTraits<Tag>::Type[];
-constexpr char llvm::dwarf::EnumTraits<LineNumberOps>::Type[];
-constexpr char llvm::dwarf::EnumTraits<LocationAtom>::Type[];
diff --git a/llvm/lib/BinaryFormat/MsgPackDocumentYAML.cpp b/llvm/lib/BinaryFormat/MsgPackDocumentYAML.cpp
index 3de3dcc..80b421d 100644
--- a/llvm/lib/BinaryFormat/MsgPackDocumentYAML.cpp
+++ b/llvm/lib/BinaryFormat/MsgPackDocumentYAML.cpp
@@ -209,12 +209,12 @@ template <> struct CustomMappingTraits<MapDocNode> {
static void inputOne(IO &IO, StringRef Key, MapDocNode &M) {
ScalarDocNode KeyObj = M.getDocument()->getNode();
KeyObj.fromString(Key, "");
- IO.mapRequired(Key.str().c_str(), M.getMap()[KeyObj]);
+ IO.mapRequired(Key, M.getMap()[KeyObj]);
}
static void output(IO &IO, MapDocNode &M) {
for (auto I : M.getMap()) {
- IO.mapRequired(I.first.toString().c_str(), I.second);
+ IO.mapRequired(I.first.toString(), I.second);
}
}
};
diff --git a/llvm/lib/CGData/OutlinedHashTreeRecord.cpp b/llvm/lib/CGData/OutlinedHashTreeRecord.cpp
index cc76063..2b6e2f0 100644
--- a/llvm/lib/CGData/OutlinedHashTreeRecord.cpp
+++ b/llvm/lib/CGData/OutlinedHashTreeRecord.cpp
@@ -37,7 +37,7 @@ template <> struct MappingTraits<HashNodeStable> {
template <> struct CustomMappingTraits<IdHashNodeStableMapTy> {
static void inputOne(IO &io, StringRef Key, IdHashNodeStableMapTy &V) {
HashNodeStable NodeStable;
- io.mapRequired(Key.str().c_str(), NodeStable);
+ io.mapRequired(Key, NodeStable);
unsigned Id;
if (Key.getAsInteger(0, Id)) {
io.setError("Id not an integer");
@@ -48,7 +48,7 @@ template <> struct CustomMappingTraits<IdHashNodeStableMapTy> {
static void output(IO &io, IdHashNodeStableMapTy &V) {
for (auto Iter = V.begin(); Iter != V.end(); ++Iter)
- io.mapRequired(utostr(Iter->first).c_str(), Iter->second);
+ io.mapRequired(utostr(Iter->first), Iter->second);
}
};
diff --git a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp
index 171fb83..98cdada 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp
@@ -112,8 +112,7 @@ void DbgValueHistoryMap::Entry::endEntry(EntryIndex Index) {
/// to the first intersecting scope range if one exists.
static std::optional<ArrayRef<InsnRange>::iterator>
intersects(const MachineInstr *StartMI, const MachineInstr *EndMI,
- const ArrayRef<InsnRange> &Ranges,
- const InstructionOrdering &Ordering) {
+ ArrayRef<InsnRange> Ranges, const InstructionOrdering &Ordering) {
for (auto RangesI = Ranges.begin(), RangesE = Ranges.end();
RangesI != RangesE; ++RangesI) {
if (EndMI && Ordering.isBefore(EndMI, RangesI->first))
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 8ea1326..0309e22 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -368,7 +368,7 @@ class CodeGenPrepare {
std::unique_ptr<DominatorTree> DT;
public:
- CodeGenPrepare(){};
+ CodeGenPrepare() = default;
CodeGenPrepare(const TargetMachine *TM) : TM(TM){};
/// If encounter huge function, we need to limit the build time.
bool IsHugeFunc = false;
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 9ace7d6..ec4d13f 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -589,8 +589,8 @@ bool CombinerHelper::matchCombineShuffleVector(
return true;
}
-void CombinerHelper::applyCombineShuffleVector(
- MachineInstr &MI, const ArrayRef<Register> Ops) const {
+void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
+ ArrayRef<Register> Ops) const {
Register DstReg = MI.getOperand(0).getReg();
Builder.setInsertPt(*MI.getParent(), MI);
Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index bb9c76f..8c6d219 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -363,8 +363,9 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
case MachineOperand::MO_RegisterMask:
case MachineOperand::MO_RegisterLiveOut: {
// Shallow compare of the two RegMasks
- const uint32_t *RegMask = getRegMask();
- const uint32_t *OtherRegMask = Other.getRegMask();
+ const uint32_t *RegMask = isRegMask() ? getRegMask() : getRegLiveOut();
+ const uint32_t *OtherRegMask =
+ isRegMask() ? Other.getRegMask() : Other.getRegLiveOut();
if (RegMask == OtherRegMask)
return true;
@@ -434,7 +435,8 @@ hash_code llvm::hash_value(const MachineOperand &MO) {
if (const MachineFunction *MF = getMFIfAvailable(MO)) {
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
- const uint32_t *RegMask = MO.getRegMask();
+ const uint32_t *RegMask =
+ MO.isRegMask() ? MO.getRegMask() : MO.getRegLiveOut();
std::vector<stable_hash> RegMaskHashes(RegMask, RegMask + RegMaskSize);
return hash_combine(MO.getType(), MO.getTargetFlags(),
stable_hash_combine(RegMaskHashes));
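
The fix in this file (mirrored in MachineStableHash.cpp below): MO_RegisterLiveOut operands store their mask behind getRegLiveOut(), so calling getRegMask() on them was wrong even though both operand kinds share one code path. The guarded access now used in all three places, as a free-standing sketch:

    // Illustrative helper, not part of the patch: pick the accessor that
    // matches the operand kind before touching the mask.
    const uint32_t *getMaskForKind(const llvm::MachineOperand &MO) {
      return MO.isRegMask() ? MO.getRegMask() : MO.getRegLiveOut();
    }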
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 3ed1045..f18c051 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -334,7 +334,7 @@ public:
LiveIntervals &LIS;
};
- MachineSchedulerImpl() {}
+ MachineSchedulerImpl() = default;
// Migration only
void setLegacyPass(MachineFunctionPass *P) { this->P = P; }
void setMFAM(MachineFunctionAnalysisManager *MFAM) { this->MFAM = MFAM; }
@@ -358,7 +358,7 @@ public:
MachineLoopInfo &MLI;
AAResults &AA;
};
- PostMachineSchedulerImpl() {}
+ PostMachineSchedulerImpl() = default;
// Migration only
void setLegacyPass(MachineFunctionPass *P) { this->P = P; }
void setMFAM(MachineFunctionAnalysisManager *MFAM) { this->MFAM = MFAM; }
diff --git a/llvm/lib/CodeGen/MachineStableHash.cpp b/llvm/lib/CodeGen/MachineStableHash.cpp
index 9d56696..6da708d 100644
--- a/llvm/lib/CodeGen/MachineStableHash.cpp
+++ b/llvm/lib/CodeGen/MachineStableHash.cpp
@@ -136,7 +136,8 @@ stable_hash llvm::stableHashValue(const MachineOperand &MO) {
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
unsigned RegMaskSize =
MachineOperand::getRegMaskSize(TRI->getNumRegs());
- const uint32_t *RegMask = MO.getRegMask();
+ const uint32_t *RegMask =
+ MO.isRegMask() ? MO.getRegMask() : MO.getRegLiveOut();
std::vector<llvm::stable_hash> RegMaskHashes(RegMask,
RegMask + RegMaskSize);
return stable_hash_combine(MO.getType(), MO.getTargetFlags(),
diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp
index 697b779..ec6ffd4 100644
--- a/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -206,7 +206,7 @@ private:
bool Error = false; ///< Could not allocate.
explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
- explicit LiveReg() {}
+ explicit LiveReg() = default;
unsigned getSparseSetIndex() const { return VirtReg.virtRegIndex(); }
};
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index e17a214b..38f6deb 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -378,7 +378,7 @@ class RegisterCoalescer : private LiveRangeEdit::Delegate {
public:
// For legacy pass only.
- RegisterCoalescer() {}
+ RegisterCoalescer() = default;
RegisterCoalescer &operator=(RegisterCoalescer &&Other) = default;
RegisterCoalescer(LiveIntervals *LIS, SlotIndexes *SI,
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index bdd6bf0..46c4bb8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9374,7 +9374,7 @@ static unsigned bigEndianByteAt(unsigned BW, unsigned i) {
// Check if the bytes offsets we are looking at match with either big or
// little endian value loaded. Return true for big endian, false for little
// endian, and std::nullopt if match failed.
-static std::optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
+static std::optional<bool> isBigEndian(ArrayRef<int64_t> ByteOffsets,
int64_t FirstOffset) {
// The endian can be decided only when it is 2 bytes at least.
unsigned Width = ByteOffsets.size();
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index bf1abfe..58983cb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -1172,6 +1172,12 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::FAKE_USE:
Res = SoftenFloatOp_FAKE_USE(N);
break;
+ case ISD::STACKMAP:
+ Res = SoftenFloatOp_STACKMAP(N, OpNo);
+ break;
+ case ISD::PATCHPOINT:
+ Res = SoftenFloatOp_PATCHPOINT(N, OpNo);
+ break;
}
// If the result is null, the sub-method took care of registering results etc.
@@ -1512,6 +1518,20 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_FAKE_USE(SDNode *N) {
N->getOperand(0), Op1);
}
+SDValue DAGTypeLegalizer::SoftenFloatOp_STACKMAP(SDNode *N, unsigned OpNo) {
+ assert(OpNo > 1); // Because the first two arguments are guaranteed legal.
+ SmallVector<SDValue> NewOps(N->ops());
+ NewOps[OpNo] = GetSoftenedFloat(NewOps[OpNo]);
+ return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
+}
+
+SDValue DAGTypeLegalizer::SoftenFloatOp_PATCHPOINT(SDNode *N, unsigned OpNo) {
+ assert(OpNo >= 7);
+ SmallVector<SDValue> NewOps(N->ops());
+ NewOps[OpNo] = GetSoftenedFloat(NewOps[OpNo]);
+ return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
+}
+
//===----------------------------------------------------------------------===//
// Float Result Expansion
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index b1776ea..44e5a18 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2871,18 +2871,14 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SET_ROUNDING(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntOp_STACKMAP(SDNode *N, unsigned OpNo) {
assert(OpNo > 1); // Because the first two arguments are guaranteed legal.
SmallVector<SDValue> NewOps(N->ops());
- SDValue Operand = N->getOperand(OpNo);
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), Operand.getValueType());
- NewOps[OpNo] = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), NVT, Operand);
+ NewOps[OpNo] = GetPromotedInteger(NewOps[OpNo]);
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_PATCHPOINT(SDNode *N, unsigned OpNo) {
assert(OpNo >= 7);
SmallVector<SDValue> NewOps(N->ops());
- SDValue Operand = N->getOperand(OpNo);
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), Operand.getValueType());
- NewOps[OpNo] = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), NVT, Operand);
+ NewOps[OpNo] = GetPromotedInteger(NewOps[OpNo]);
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
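
Both promotions now reuse the operand value the legalizer already recorded instead of re-extending by hand; side by side (the "before" lines are the ones removed above):

    SmallVector<SDValue> NewOps(N->ops());
    // Before: build a fresh extension of the original operand.
    //   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(),
    //                                      Operand.getValueType());
    //   NewOps[OpNo] = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), NVT, Operand);
    // After: look up the already-promoted value, keeping every user of the
    // operand on the same promoted node.
    NewOps[OpNo] = GetPromotedInteger(NewOps[OpNo]);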
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 9656a30..ede522e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -658,6 +658,8 @@ private:
SDValue SoftenFloatOp_ATOMIC_STORE(SDNode *N, unsigned OpNo);
SDValue SoftenFloatOp_FCOPYSIGN(SDNode *N);
SDValue SoftenFloatOp_FAKE_USE(SDNode *N);
+ SDValue SoftenFloatOp_STACKMAP(SDNode *N, unsigned OpNo);
+ SDValue SoftenFloatOp_PATCHPOINT(SDNode *N, unsigned OpNo);
//===--------------------------------------------------------------------===//
// Float Expansion Support: LegalizeFloatTypes.cpp
diff --git a/llvm/lib/CodeGenTypes/LowLevelType.cpp b/llvm/lib/CodeGenTypes/LowLevelType.cpp
index 4785f26..92b7fad 100644
--- a/llvm/lib/CodeGenTypes/LowLevelType.cpp
+++ b/llvm/lib/CodeGenTypes/LowLevelType.cpp
@@ -54,9 +54,3 @@ LLVM_DUMP_METHOD void LLT::dump() const {
dbgs() << '\n';
}
#endif
-
-const constexpr LLT::BitFieldInfo LLT::ScalarSizeFieldInfo;
-const constexpr LLT::BitFieldInfo LLT::PointerSizeFieldInfo;
-const constexpr LLT::BitFieldInfo LLT::PointerAddressSpaceFieldInfo;
-const constexpr LLT::BitFieldInfo LLT::VectorElementsFieldInfo;
-const constexpr LLT::BitFieldInfo LLT::VectorScalableFieldInfo;
diff --git a/llvm/lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp b/llvm/lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp
index 6c23ba8..23ab534 100644
--- a/llvm/lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp
+++ b/llvm/lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp
@@ -102,7 +102,8 @@ std::optional<CVType> LazyRandomTypeCollection::tryGetType(TypeIndex Index) {
return std::nullopt;
}
- assert(contains(Index));
+ if (!contains(Index))
+ return std::nullopt;
return Records[Index.toArrayIndex()].Type;
}
diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/CMakeLists.txt b/llvm/lib/ExecutionEngine/Orc/TargetProcess/CMakeLists.txt
index 9275586..ca8192b 100644
--- a/llvm/lib/ExecutionEngine/Orc/TargetProcess/CMakeLists.txt
+++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/CMakeLists.txt
@@ -16,9 +16,11 @@ add_llvm_component_library(LLVMOrcTargetProcess
ExecutorSharedMemoryMapperService.cpp
DefaultHostBootstrapValues.cpp
ExecutorResolver.cpp
+ LibraryResolver.cpp
JITLoaderGDB.cpp
JITLoaderPerf.cpp
JITLoaderVTune.cpp
+ LibraryScanner.cpp
OrcRTBootstrap.cpp
RegisterEHFrames.cpp
SimpleExecutorDylibManager.cpp
@@ -36,6 +38,8 @@ add_llvm_component_library(LLVMOrcTargetProcess
LINK_COMPONENTS
${intel_jit_profiling}
+ BinaryFormat
+ Object
OrcShared
Support
TargetParser
diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/LibraryResolver.cpp b/llvm/lib/ExecutionEngine/Orc/TargetProcess/LibraryResolver.cpp
new file mode 100644
index 0000000..35da82a
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/LibraryResolver.cpp
@@ -0,0 +1,370 @@
+//===- LibraryResolver.cpp - Library Resolution of Unresolved Symbols ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Library resolution implementation for unresolved symbols.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/LibraryResolver.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/LibraryScanner.h"
+
+#include "llvm/ADT/StringSet.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+
+#include <mutex>
+#include <thread>
+
+#define DEBUG_TYPE "orc-resolver"
+
+namespace llvm::orc {
+
+LibraryResolver::LibraryResolver(const LibraryResolver::Setup &S)
+ : LibPathCache(S.Cache ? S.Cache : std::make_shared<LibraryPathCache>()),
+ LibPathResolver(S.PResolver
+ ? S.PResolver
+ : std::make_shared<PathResolver>(LibPathCache)),
+ ScanHelper(S.BasePaths, LibPathCache, LibPathResolver),
+ FB(S.FilterBuilder), LibMgr(),
+ ShouldScanCall(S.ShouldScanCall ? S.ShouldScanCall
+ : [](StringRef) -> bool { return true; }),
+ scanBatchSize(S.ScanBatchSize) {
+
+ if (ScanHelper.getAllUnits().empty()) {
+ LLVM_DEBUG(dbgs() << "Warning: No base paths provided for scanning.\n");
+ }
+}
+
+std::unique_ptr<LibraryResolutionDriver>
+LibraryResolutionDriver::create(const LibraryResolver::Setup &S) {
+ auto LR = std::make_unique<LibraryResolver>(S);
+ return std::unique_ptr<LibraryResolutionDriver>(
+ new LibraryResolutionDriver(std::move(LR)));
+}
+
+void LibraryResolutionDriver::addScanPath(const std::string &Path, PathType K) {
+ LR->ScanHelper.addBasePath(Path, K);
+}
+
+bool LibraryResolutionDriver::markLibraryLoaded(StringRef Path) {
+ auto Lib = LR->LibMgr.getLibrary(Path);
+ if (!Lib)
+ return false;
+
+ Lib->setState(LibraryManager::LibState::Loaded);
+
+ return true;
+}
+
+bool LibraryResolutionDriver::markLibraryUnLoaded(StringRef Path) {
+ auto Lib = LR->LibMgr.getLibrary(Path);
+ if (!Lib)
+ return false;
+
+ Lib->setState(LibraryManager::LibState::Unloaded);
+
+ return true;
+}
+
+void LibraryResolutionDriver::resolveSymbols(
+ std::vector<std::string> Syms,
+ LibraryResolver::OnSearchComplete OnCompletion,
+ const SearchConfig &Config) {
+ LR->searchSymbolsInLibraries(Syms, std::move(OnCompletion), Config);
+}
+
+static bool shouldIgnoreSymbol(const object::SymbolRef &Sym,
+ uint32_t IgnoreFlags) {
+ Expected<uint32_t> FlagsOrErr = Sym.getFlags();
+ if (!FlagsOrErr) {
+ consumeError(FlagsOrErr.takeError());
+ return true;
+ }
+
+ uint32_t Flags = *FlagsOrErr;
+
+ using Filter = SymbolEnumeratorOptions;
+ if ((IgnoreFlags & Filter::IgnoreUndefined) &&
+ (Flags & object::SymbolRef::SF_Undefined))
+ return true;
+ if ((IgnoreFlags & Filter::IgnoreIndirect) &&
+ (Flags & object::SymbolRef::SF_Indirect))
+ return true;
+ if ((IgnoreFlags & Filter::IgnoreWeak) &&
+ (Flags & object::SymbolRef::SF_Weak))
+ return true;
+
+ return false;
+}
+
+bool SymbolEnumerator::enumerateSymbols(StringRef Path, OnEachSymbolFn OnEach,
+ const SymbolEnumeratorOptions &Opts) {
+ if (Path.empty())
+ return false;
+
+ ObjectFileLoader ObjLoader(Path);
+
+ auto ObjOrErr = ObjLoader.getObjectFile();
+ if (!ObjOrErr) {
+ std::string ErrMsg;
+ handleAllErrors(ObjOrErr.takeError(),
+ [&](const ErrorInfoBase &EIB) { ErrMsg = EIB.message(); });
+ LLVM_DEBUG(dbgs() << "Failed loading object file: " << Path
+ << "\nError: " << ErrMsg << "\n");
+ return false;
+ }
+
+ object::ObjectFile *Obj = &ObjOrErr.get();
+
+ auto processSymbolRange =
+ [&](object::ObjectFile::symbol_iterator_range Range) -> EnumerateResult {
+ for (const auto &Sym : Range) {
+ if (shouldIgnoreSymbol(Sym, Opts.FilterFlags))
+ continue;
+
+ auto NameOrErr = Sym.getName();
+ if (!NameOrErr) {
+ consumeError(NameOrErr.takeError());
+ continue;
+ }
+
+ StringRef Name = *NameOrErr;
+ if (Name.empty())
+ continue;
+
+ EnumerateResult Res = OnEach(Name);
+ if (Res != EnumerateResult::Continue)
+ return Res;
+ }
+ return EnumerateResult::Continue;
+ };
+
+ EnumerateResult Res = processSymbolRange(Obj->symbols());
+ if (Res != EnumerateResult::Continue)
+ return Res == EnumerateResult::Stop;
+
+ if (Obj->isELF()) {
+ const auto *ElfObj = cast<object::ELFObjectFileBase>(Obj);
+ Res = processSymbolRange(ElfObj->getDynamicSymbolIterators());
+ if (Res != EnumerateResult::Continue)
+ return Res == EnumerateResult::Stop;
+ } else if (Obj->isCOFF()) {
+ const auto *CoffObj = cast<object::COFFObjectFile>(Obj);
+ for (auto I = CoffObj->export_directory_begin(),
+ E = CoffObj->export_directory_end();
+ I != E; ++I) {
+ StringRef Name;
+ if (I->getSymbolName(Name))
+ continue;
+ if (Name.empty())
+ continue;
+
+ EnumerateResult Res = OnEach(Name);
+ if (Res != EnumerateResult::Continue)
+ return Res == EnumerateResult::Stop;
+ }
+ } else if (Obj->isMachO()) {
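+ // Mach-O: nothing extra to enumerate here; the regular symbol table was
+ // already walked by processSymbolRange(Obj->symbols()) above.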
+ }
+
+ return true;
+}
+
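+// A minimal usage sketch for the enumerator above (the library path and
+// filter flags are illustrative, not part of this patch):
+//
+//   SymbolEnumeratorOptions Opts;
+//   Opts.FilterFlags = SymbolEnumeratorOptions::IgnoreUndefined |
+//                      SymbolEnumeratorOptions::IgnoreWeak;
+//   SymbolEnumerator::enumerateSymbols(
+//       "/usr/lib/libfoo.so",
+//       [](StringRef Sym) {
+//         dbgs() << Sym << "\n";
+//         return EnumerateResult::Continue;
+//       },
+//       Opts);
+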
+class SymbolSearchContext {
+public:
+ SymbolSearchContext(SymbolQuery &Q) : Q(Q) {}
+
+ bool hasSearched(LibraryInfo *Lib) const { return Searched.count(Lib); }
+
+ void markSearched(LibraryInfo *Lib) { Searched.insert(Lib); }
+
+ inline bool allResolved() const { return Q.allResolved(); }
+
+ SymbolQuery &query() { return Q; }
+
+private:
+ SymbolQuery &Q;
+ DenseSet<LibraryInfo *> Searched;
+};
+
+void LibraryResolver::resolveSymbolsInLibrary(
+ LibraryInfo &Lib, SymbolQuery &UnresolvedSymbols,
+ const SymbolEnumeratorOptions &Opts) {
+ LLVM_DEBUG(dbgs() << "Checking unresolved symbols "
+ << " in library : " << Lib.getFileName() << "\n";);
+ StringSet<> DiscoveredSymbols;
+
+ if (!UnresolvedSymbols.hasUnresolved()) {
+ LLVM_DEBUG(dbgs() << "Skipping library: " << Lib.getFullPath()
+ << " — unresolved symbols exist.\n";);
+ return;
+ }
+
+ bool HasEnumerated = false;
+ auto enumerateSymbolsIfNeeded = [&]() {
+ if (HasEnumerated)
+ return;
+
+ HasEnumerated = true;
+
+ LLVM_DEBUG(dbgs() << "Enumerating symbols in library: " << Lib.getFullPath()
+ << "\n";);
+ SymbolEnumerator::enumerateSymbols(
+ Lib.getFullPath(),
+ [&](StringRef sym) {
+ DiscoveredSymbols.insert(sym);
+ return EnumerateResult::Continue;
+ },
+ Opts);
+
+ if (DiscoveredSymbols.empty()) {
+ LLVM_DEBUG(dbgs() << " No symbols and remove library : "
+ << Lib.getFullPath() << "\n";);
+ LibMgr.removeLibrary(Lib.getFullPath());
+ return;
+ }
+ };
+
+ if (!Lib.hasFilter()) {
+ LLVM_DEBUG(dbgs() << "Building filter for library: " << Lib.getFullPath()
+ << "\n";);
+ enumerateSymbolsIfNeeded();
+ SmallVector<StringRef> SymbolVec;
+ SymbolVec.reserve(DiscoveredSymbols.size());
+ for (const auto &KV : DiscoveredSymbols)
+ SymbolVec.push_back(KV.first());
+
+ Lib.ensureFilterBuilt(FB, SymbolVec);
+ LLVM_DEBUG({
+ dbgs() << "DiscoveredSymbols : " << DiscoveredSymbols.size() << "\n";
+ for (const auto &KV : DiscoveredSymbols)
+ dbgs() << "DiscoveredSymbols : " << KV.first() << "\n";
+ });
+ }
+
+ const auto &Unresolved = UnresolvedSymbols.getUnresolvedSymbols();
+ bool HadAnySym = false;
+ LLVM_DEBUG(dbgs() << "Total unresolved symbols : " << Unresolved.size()
+ << "\n";);
+ for (const auto &Sym : Unresolved) {
+ if (Lib.mayContain(Sym)) {
+ LLVM_DEBUG(dbgs() << "Checking symbol '" << Sym
+ << "' in library: " << Lib.getFullPath() << "\n";);
+ enumerateSymbolsIfNeeded();
+ if (DiscoveredSymbols.count(Sym) > 0) {
+ LLVM_DEBUG(dbgs() << " Resolved symbol: " << Sym
+ << " in library: " << Lib.getFullPath() << "\n";);
+ UnresolvedSymbols.resolve(Sym, Lib.getFullPath());
+ HadAnySym = true;
+ }
+ }
+ }
+
+ using LibraryState = LibraryManager::LibState;
+ if (HadAnySym && Lib.getState() != LibraryState::Loaded)
+ Lib.setState(LibraryState::Queried);
+}
+
+void LibraryResolver::searchSymbolsInLibraries(
+ std::vector<std::string> &SymbolList, OnSearchComplete OnComplete,
+ const SearchConfig &Config) {
+ SymbolQuery Q(SymbolList);
+
+ using LibraryState = LibraryManager::LibState;
+ using LibraryType = PathType;
+ auto tryResolveFrom = [&](LibraryState S, LibraryType K) {
+ LLVM_DEBUG(dbgs() << "Trying resolve from state=" << static_cast<int>(S)
+ << " type=" << static_cast<int>(K) << "\n";);
+
+ SymbolSearchContext Ctx(Q);
+ while (!Ctx.allResolved()) {
+
+ for (auto &Lib : LibMgr.getView(S, K)) {
+ if (Ctx.hasSearched(Lib.get()))
+ continue;
+
+ // TODO: Consider searching libraries asynchronously here.
+ resolveSymbolsInLibrary(*Lib, Ctx.query(), Config.Options);
+ Ctx.markSearched(Lib.get());
+
+ if (Ctx.allResolved())
+ return;
+ }
+
+ if (Ctx.allResolved())
+ return;
+
+ if (!scanLibrariesIfNeeded(K, scanBatchSize))
+ break; // no more new libs to scan
+ }
+ };
+
+ for (const auto &[St, Ty] : Config.Policy.Plan) {
+ tryResolveFrom(St, Ty);
+ if (Q.allResolved())
+ break;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Search complete.\n";
+ for (const auto &r : Q.getAllResults())
+ dbgs() << "Resolved Symbol:" << r->Name << " -> " << r->ResolvedLibPath
+ << "\n";
+ });
+
+ OnComplete(Q);
+}
+
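+// End-to-end, the driver API wraps this search; a hypothetical use, assuming
+// default-constructed Setup and SearchConfig are sufficient:
+//
+//   LibraryResolver::Setup S;
+//   auto Driver = LibraryResolutionDriver::create(S);
+//   Driver->addScanPath("/usr/lib", PathType::System);
+//   SearchConfig Config;
+//   Driver->resolveSymbols({"my_symbol"}, [](SymbolQuery &Q) {
+//     for (const auto &R : Q.getAllResults())
+//       dbgs() << R->Name << " -> " << R->ResolvedLibPath << "\n";
+//   }, Config);
+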
+bool LibraryResolver::scanLibrariesIfNeeded(PathType PK, size_t BatchSize) {
+ LLVM_DEBUG(dbgs() << "LibraryResolver::scanLibrariesIfNeeded: Scanning for "
+ << (PK == PathType::User ? "User" : "System")
+ << " libraries\n";);
+ if (!ScanHelper.leftToScan(PK))
+ return false;
+
+ LibraryScanner Scanner(ScanHelper, LibMgr, ShouldScanCall);
+ Scanner.scanNext(PK, BatchSize);
+ return true;
+}
+
+bool LibraryResolver::symbolExistsInLibrary(const LibraryInfo &Lib,
+ StringRef SymName,
+ std::vector<std::string> *AllSyms) {
+ SymbolEnumeratorOptions Opts;
+ return symbolExistsInLibrary(Lib, SymName, AllSyms, Opts);
+}
+
+bool LibraryResolver::symbolExistsInLibrary(
+ const LibraryInfo &Lib, StringRef SymName,
+ std::vector<std::string> *AllSyms, const SymbolEnumeratorOptions &Opts) {
+ bool Found = false;
+
+ SymbolEnumerator::enumerateSymbols(
+ Lib.getFullPath(),
+ [&](StringRef Sym) {
+ if (AllSyms)
+ AllSyms->emplace_back(Sym.str());
+
+ if (Sym == SymName) {
+ Found = true;
+ }
+
+ return EnumerateResult::Continue;
+ },
+ Opts);
+
+ return Found;
+}
+
+} // end namespace llvm::orc
diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/LibraryScanner.cpp b/llvm/lib/ExecutionEngine/Orc/TargetProcess/LibraryScanner.cpp
new file mode 100644
index 0000000..d93f686
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/LibraryScanner.cpp
@@ -0,0 +1,1161 @@
+//===- LibraryScanner.cpp - Provide Library Scanning Implementation ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/LibraryScanner.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/LibraryResolver.h"
+
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ELFTypes.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Object/MachOUniversal.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
+
+#ifdef LLVM_ON_UNIX
+#include <sys/stat.h>
+#include <unistd.h>
+#endif // LLVM_ON_UNIX
+
+#ifdef __APPLE__
+#include <sys/stat.h>
+#undef LC_LOAD_DYLIB
+#undef LC_RPATH
+#endif // __APPLE__
+
+#define DEBUG_TYPE "orc-scanner"
+
+namespace llvm::orc {
+
+void handleError(Error Err, StringRef context = "") {
+ consumeError(handleErrors(std::move(Err), [&](const ErrorInfoBase &EIB) {
+ dbgs() << "LLVM Error";
+ if (!context.empty())
+ dbgs() << " [" << context << "]";
+ dbgs() << ": " << EIB.message() << "\n";
+ }));
+}
+
+bool ObjectFileLoader::isArchitectureCompatible(const object::ObjectFile &Obj) {
+ Triple HostTriple(sys::getDefaultTargetTriple());
+ Triple ObjTriple = Obj.makeTriple();
+
+ LLVM_DEBUG({
+ dbgs() << "Host triple: " << HostTriple.str()
+ << ", Object triple: " << ObjTriple.str() << "\n";
+ });
+
+ if (ObjTriple.getArch() != Triple::UnknownArch &&
+ HostTriple.getArch() != ObjTriple.getArch())
+ return false;
+
+ if (ObjTriple.getOS() != Triple::UnknownOS &&
+ HostTriple.getOS() != ObjTriple.getOS())
+ return false;
+
+ if (ObjTriple.getEnvironment() != Triple::UnknownEnvironment &&
+ HostTriple.getEnvironment() != Triple::UnknownEnvironment &&
+ HostTriple.getEnvironment() != ObjTriple.getEnvironment())
+ return false;
+
+ return true;
+}
+
+Expected<object::OwningBinary<object::ObjectFile>>
+ObjectFileLoader::loadObjectFileWithOwnership(StringRef FilePath) {
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: Attempting to open file " << FilePath
+ << "\n";);
+ auto BinOrErr = object::createBinary(FilePath);
+ if (!BinOrErr) {
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: Failed to open file " << FilePath
+ << "\n";);
+ return BinOrErr.takeError();
+ }
+
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: Successfully opened file " << FilePath
+ << "\n";);
+
+ auto OwningBin = BinOrErr->takeBinary();
+ object::Binary *Bin = OwningBin.first.get();
+
+ if (Bin->isArchive()) {
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: File is an archive, not supported: "
+ << FilePath << "\n";);
+ return createStringError(std::errc::invalid_argument,
+ "Archive files are not supported: %s",
+ FilePath.str().c_str());
+ }
+
+#if defined(__APPLE__)
+ if (auto *UB = dyn_cast<object::MachOUniversalBinary>(Bin)) {
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: Detected Mach-O universal binary: "
+ << FilePath << "\n";);
+ for (auto ObjForArch : UB->objects()) {
+ auto ObjOrErr = ObjForArch.getAsObjectFile();
+ if (!ObjOrErr) {
+ LLVM_DEBUG(
+ dbgs()
+ << "ObjectFileLoader: Skipping invalid architecture slice\n";);
+
+ consumeError(ObjOrErr.takeError());
+ continue;
+ }
+
+ std::unique_ptr<object::ObjectFile> Obj = std::move(ObjOrErr.get());
+ if (isArchitectureCompatible(*Obj)) {
+ LLVM_DEBUG(
+ dbgs() << "ObjectFileLoader: Found compatible object slice\n";);
+
+ return object::OwningBinary<object::ObjectFile>(
+ std::move(Obj), std::move(OwningBin.second));
+
+ } else {
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: Incompatible architecture "
+ "slice skipped\n";);
+ }
+ }
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: No compatible slices found in "
+ "universal binary\n";);
+ return createStringError(inconvertibleErrorCode(),
+ "No compatible object found in fat binary: %s",
+ FilePath.str().c_str());
+ }
+#endif
+
+ auto ObjOrErr =
+ object::ObjectFile::createObjectFile(Bin->getMemoryBufferRef());
+ if (!ObjOrErr) {
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: Failed to create object file\n";);
+ return ObjOrErr.takeError();
+ }
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: Detected object file\n";);
+
+ std::unique_ptr<object::ObjectFile> Obj = std::move(*ObjOrErr);
+ if (!isArchitectureCompatible(*Obj)) {
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: Incompatible architecture: "
+ << FilePath << "\n";);
+ return createStringError(inconvertibleErrorCode(),
+ "Incompatible object file: %s",
+ FilePath.str().c_str());
+ }
+
+ LLVM_DEBUG(dbgs() << "ObjectFileLoader: Object file is compatible\n";);
+
+ return object::OwningBinary<object::ObjectFile>(std::move(Obj),
+ std::move(OwningBin.second));
+}
+
+template <class ELFT>
+bool isELFSharedLibrary(const object::ELFFile<ELFT> &ELFObj) {
+ if (ELFObj.getHeader().e_type != ELF::ET_DYN)
+ return false;
+
+ auto PHOrErr = ELFObj.program_headers();
+ if (!PHOrErr) {
+ consumeError(PHOrErr.takeError());
+ return true;
+ }
+
+ for (auto Phdr : *PHOrErr) {
+ if (Phdr.p_type == ELF::PT_INTERP)
+ return false;
+ }
+
+ return true;
+}
+
+bool isSharedLibraryObject(object::ObjectFile &Obj) {
+ if (Obj.isELF()) {
+ if (auto *ELF32LE = dyn_cast<object::ELF32LEObjectFile>(&Obj))
+ return isELFSharedLibrary(ELF32LE->getELFFile());
+ if (auto *ELF64LE = dyn_cast<object::ELF64LEObjectFile>(&Obj))
+ return isELFSharedLibrary(ELF64LE->getELFFile());
+ if (auto *ELF32BE = dyn_cast<object::ELF32BEObjectFile>(&Obj))
+ return isELFSharedLibrary(ELF32BE->getELFFile());
+ if (auto *ELF64BE = dyn_cast<object::ELF64BEObjectFile>(&Obj))
+ return isELFSharedLibrary(ELF64BE->getELFFile());
+ } else if (Obj.isMachO()) {
+ const object::MachOObjectFile *MachO =
+ dyn_cast<object::MachOObjectFile>(&Obj);
+ if (!MachO) {
+ LLVM_DEBUG(dbgs() << "Failed to cast to MachOObjectFile.\n";);
+ return false;
+ }
+ LLVM_DEBUG({
+ bool Result =
+ MachO->getHeader().filetype == MachO::HeaderFileType::MH_DYLIB;
+ dbgs() << "Mach-O filetype: " << MachO->getHeader().filetype
+ << " (MH_DYLIB == " << MachO::HeaderFileType::MH_DYLIB
+ << "), shared: " << Result << "\n";
+ });
+
+ return MachO->getHeader().filetype == MachO::HeaderFileType::MH_DYLIB;
+ } else if (Obj.isCOFF()) {
+ const object::COFFObjectFile *coff = dyn_cast<object::COFFObjectFile>(&Obj);
+ if (!coff)
+ return false;
+ return coff->getCharacteristics() & COFF::IMAGE_FILE_DLL;
+ } else {
+ LLVM_DEBUG(dbgs() << "Binary is not an ObjectFile.\n";);
+ }
+
+ return false;
+}
+
+bool DylibPathValidator::isSharedLibrary(StringRef Path) {
+ LLVM_DEBUG(dbgs() << "Checking if path is a shared library: " << Path
+ << "\n";);
+
+ auto FileType = sys::fs::get_file_type(Path, /*Follow*/ true);
+ if (FileType != sys::fs::file_type::regular_file) {
+ LLVM_DEBUG(dbgs() << "File type is not a regular file for path: " << Path
+ << "\n";);
+ return false;
+ }
+
+ file_magic MagicCode;
+ identify_magic(Path, MagicCode);
+
+ // Skip archives.
+ if (MagicCode == file_magic::archive)
+ return false;
+
+ // Universal binary handling.
+#if defined(__APPLE__)
+ if (MagicCode == file_magic::macho_universal_binary) {
+ ObjectFileLoader ObjLoader(Path);
+ auto ObjOrErr = ObjLoader.getObjectFile();
+ if (!ObjOrErr) {
+ consumeError(ObjOrErr.takeError());
+ return false;
+ }
+ return isSharedLibraryObject(ObjOrErr.get());
+ }
+#endif
+
+ // Object file inspection for PE/COFF, ELF, and Mach-O
+ bool NeedsObjectInspection =
+#if defined(_WIN32)
+ (MagicCode == file_magic::pecoff_executable);
+#elif defined(__APPLE__)
+ (MagicCode == file_magic::macho_fixed_virtual_memory_shared_lib ||
+ MagicCode == file_magic::macho_dynamically_linked_shared_lib ||
+ MagicCode == file_magic::macho_dynamically_linked_shared_lib_stub);
+#elif defined(LLVM_ON_UNIX)
+#ifdef __CYGWIN__
+ (MagicCode == file_magic::pecoff_executable);
+#else
+ (MagicCode == file_magic::elf_shared_object);
+#endif
+#else
+#error "Unsupported platform."
+#endif
+
+ if (NeedsObjectInspection) {
+ ObjectFileLoader ObjLoader(Path);
+ auto ObjOrErr = ObjLoader.getObjectFile();
+ if (!ObjOrErr) {
+ consumeError(ObjOrErr.takeError());
+ return false;
+ }
+ return isSharedLibraryObject(ObjOrErr.get());
+ }
+
+ LLVM_DEBUG(dbgs() << "Path is not identified as a shared library: " << Path
+ << "\n";);
+ return false;
+}
+
+void DylibSubstitutor::configure(StringRef LoaderPath) {
+ SmallString<512> ExecPath(sys::fs::getMainExecutable(nullptr, nullptr));
+ sys::path::remove_filename(ExecPath);
+
+ SmallString<512> LoaderDir;
+ if (LoaderPath.empty()) {
+ LoaderDir = ExecPath;
+ } else {
+ LoaderDir = LoaderPath.str();
+ if (!sys::fs::is_directory(LoaderPath))
+ sys::path::remove_filename(LoaderDir);
+ }
+
+#ifdef __APPLE__
+ Placeholders["@loader_path"] = std::string(LoaderDir);
+ Placeholders["@executable_path"] = std::string(ExecPath);
+#else
+ Placeholders["$origin"] = std::string(LoaderDir);
+#endif
+}
+
+std::optional<std::string>
+SearchPathResolver::resolve(StringRef Stem, const DylibSubstitutor &Subst,
+ DylibPathValidator &Validator) const {
+ for (const auto &SP : Paths) {
+ std::string Base = Subst.substitute(SP);
+
+ SmallString<512> FullPath(Base);
+ if (!PlaceholderPrefix.empty() &&
+ Stem.starts_with_insensitive(PlaceholderPrefix))
+ FullPath.append(Stem.drop_front(PlaceholderPrefix.size()));
+ else
+ sys::path::append(FullPath, Stem);
+
+ LLVM_DEBUG(dbgs() << "SearchPathResolver::resolve FullPath = " << FullPath
+ << "\n";);
+
+ if (auto Valid = Validator.validate(FullPath.str()))
+ return Valid;
+ }
+
+ return std::nullopt;
+}
+
+std::optional<std::string>
+DylibResolverImpl::tryWithExtensions(StringRef LibStem) const {
+ LLVM_DEBUG(dbgs() << "tryWithExtensions: baseName = " << LibStem << "\n";);
+ SmallVector<SmallString<256>, 8> Candidates;
+
+ // Add extensions by platform
+#if defined(__APPLE__)
+ Candidates.emplace_back(LibStem);
+ Candidates.back() += ".dylib";
+#elif defined(_WIN32)
+ Candidates.emplace_back(LibStem);
+ Candidates.back() += ".dll";
+#else
+ Candidates.emplace_back(LibStem);
+ Candidates.back() += ".so";
+#endif
+
+ // Optionally try "lib" prefix if not already there
+ StringRef FileName = sys::path::filename(LibStem);
+ StringRef Base = sys::path::parent_path(LibStem);
+ if (!FileName.starts_with("lib")) {
+ SmallString<256> WithPrefix(Base);
+ if (!WithPrefix.empty())
+ sys::path::append(WithPrefix, ""); // ensure separator if needed
+ WithPrefix += "lib";
+ WithPrefix += FileName;
+
+#if defined(__APPLE__)
+ WithPrefix += ".dylib";
+#elif defined(_WIN32)
+ WithPrefix += ".dll";
+#else
+ WithPrefix += ".so";
+#endif
+
+ Candidates.push_back(std::move(WithPrefix));
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " Candidates to try:\n";
+ for (const auto &C : Candidates)
+ dbgs() << " " << C << "\n";
+ });
+
+ // Try all variants using tryAllPaths
+ for (const auto &Name : Candidates) {
+
+ LLVM_DEBUG(dbgs() << " Trying candidate: " << Name << "\n";);
+
+ for (const auto &R : Resolvers) {
+ if (auto Res = R.resolve(Name, Substitutor, Validator))
+ return Res;
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << " -> No candidate Resolved.\n";);
+
+ return std::nullopt;
+}
+
+std::optional<std::string>
+DylibResolverImpl::resolve(StringRef LibStem, bool VariateLibStem) const {
+ LLVM_DEBUG(dbgs() << "Resolving library stem: " << LibStem << "\n";);
+
+ // If it is an absolute path, don't iterate over the search paths.
+ if (sys::path::is_absolute(LibStem)) {
+ LLVM_DEBUG(dbgs() << " -> Absolute path detected.\n";);
+ return Validator.validate(LibStem);
+ }
+
+ if (!LibStem.starts_with_insensitive("@rpath")) {
+ if (auto norm = Validator.validate(Substitutor.substitute(LibStem))) {
+ LLVM_DEBUG(dbgs() << " -> Resolved after substitution: " << *norm
+ << "\n";);
+
+ return norm;
+ }
+ }
+
+ for (const auto &R : Resolvers) {
+ LLVM_DEBUG(dbgs() << " -> Resolving via search path ... \n";);
+ if (auto Result = R.resolve(LibStem, Substitutor, Validator)) {
+ LLVM_DEBUG(dbgs() << " -> Resolved via search path: " << *Result
+ << "\n";);
+
+ return Result;
+ }
+ }
+
+ // Expand libStem with paths, extensions, etc.
+ if (VariateLibStem) {
+ LLVM_DEBUG(dbgs() << " -> Trying with extensions...\n";);
+
+ if (auto Norm = tryWithExtensions(LibStem)) {
+ LLVM_DEBUG(dbgs() << " -> Resolved via tryWithExtensions: " << *Norm
+ << "\n";);
+
+ return Norm;
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << " -> Could not resolve: " << LibStem << "\n";);
+
+ return std::nullopt;
+}
+
+#ifndef _WIN32
+mode_t PathResolver::lstatCached(StringRef Path) {
+ // If already cached, return the cached result.
+ if (auto Cache = LibPathCache->read_lstat(Path))
+ return *Cache;
+
+ // Not cached: perform lstat and store
+ struct stat buf{};
+ mode_t st_mode = (lstat(Path.str().c_str(), &buf) == -1) ? 0 : buf.st_mode;
+
+ LibPathCache->insert_lstat(Path, st_mode);
+
+ return st_mode;
+}
+
+std::optional<std::string> PathResolver::readlinkCached(StringRef Path) {
+ // If already cached, return the cached result.
+ if (auto Cache = LibPathCache->read_link(Path))
+ return Cache;
+
+ // If result not in cache - call system function and cache result
+ char buf[PATH_MAX];
+ ssize_t len;
+ if ((len = readlink(Path.str().c_str(), buf, sizeof(buf))) != -1) {
+ buf[len] = '\0';
+ std::string s(buf);
+ LibPathCache->insert_link(Path, s);
+ return s;
+ }
+ return std::nullopt;
+}
+
+void createComponent(StringRef Path, StringRef BasePath, bool BaseIsResolved,
+ SmallVector<StringRef, 16> &Component) {
+ StringRef Separator = sys::path::get_separator();
+ if (!BaseIsResolved) {
+ if (Path[0] == '~' &&
+ (Path.size() == 1 || sys::path::is_separator(Path[1]))) {
+ static SmallString<128> HomeP;
+ if (HomeP.str().empty())
+ sys::path::home_directory(HomeP);
+ StringRef(HomeP).split(Component, Separator, /*MaxSplit*/ -1,
+ /*KeepEmpty*/ false);
+ } else if (BasePath.empty()) {
+ static SmallString<256> CurrentPath;
+ if (CurrentPath.str().empty())
+ sys::fs::current_path(CurrentPath);
+ StringRef(CurrentPath)
+ .split(Component, Separator, /*MaxSplit*/ -1, /*KeepEmpty*/ false);
+ } else {
+ BasePath.split(Component, Separator, /*MaxSplit*/ -1,
+ /*KeepEmpty*/ false);
+ }
+ }
+
+ Path.split(Component, Separator, /*MaxSplit*/ -1, /*KeepEmpty*/ false);
+}
+
+void normalizePathSegments(SmallVector<StringRef, 16> &PathParts) {
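+  // Collapse "." and "<dir>/.." in place, e.g. {"a", ".", "b", "..", "c"}
+  // becomes {"a", "c"}; leading ".." segments are preserved.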
+ SmallVector<StringRef, 16> NormalizedPath;
+ for (auto &Part : PathParts) {
+ if (Part == ".") {
+ continue;
+ } else if (Part == "..") {
+ if (!NormalizedPath.empty() && NormalizedPath.back() != "..") {
+ NormalizedPath.pop_back();
+ } else {
+ NormalizedPath.push_back("..");
+ }
+ } else {
+ NormalizedPath.push_back(Part);
+ }
+ }
+ PathParts.swap(NormalizedPath);
+}
+#endif
+
+std::optional<std::string> PathResolver::realpathCached(StringRef Path,
+ std::error_code &EC,
+ StringRef Base,
+ bool BaseIsResolved,
+ long SymLoopLevel) {
+ EC.clear();
+
+ if (Path.empty()) {
+ EC = std::make_error_code(std::errc::no_such_file_or_directory);
+ LLVM_DEBUG(dbgs() << "PathResolver::realpathCached: Empty path\n";);
+
+ return std::nullopt;
+ }
+
+ if (SymLoopLevel <= 0) {
+ EC = std::make_error_code(std::errc::too_many_symbolic_link_levels);
+ LLVM_DEBUG(
+ dbgs() << "PathResolver::realpathCached: Too many Symlink levels: "
+ << Path << "\n";);
+
+ return std::nullopt;
+ }
+
+  // If already cached - return cached result
+ bool isRelative = sys::path::is_relative(Path);
+ if (!isRelative) {
+ if (auto Cached = LibPathCache->read_realpath(Path)) {
+ EC = Cached->ErrnoCode;
+ if (EC) {
+ LLVM_DEBUG(dbgs() << "PathResolver::realpathCached: Cached (error) for "
+ << Path << "\n";);
+ } else {
+ LLVM_DEBUG(
+ dbgs() << "PathResolver::realpathCached: Cached (success) for "
+ << Path << " => " << Cached->canonicalPath << "\n";);
+ }
+ return Cached->canonicalPath.empty()
+ ? std::nullopt
+ : std::make_optional(Cached->canonicalPath);
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << "PathResolver::realpathCached: Resolving path: " << Path
+ << "\n";);
+
+ // If result not in cache - call system function and cache result
+
+ StringRef Separator(sys::path::get_separator());
+ SmallString<256> Resolved(Separator);
+#ifndef _WIN32
+ SmallVector<StringRef, 16> Components;
+
+ if (isRelative) {
+ if (BaseIsResolved) {
+ Resolved.assign(Base);
+ LLVM_DEBUG(dbgs() << " Using Resolved base: " << Base << "\n";);
+ }
+ createComponent(Path, Base, BaseIsResolved, Components);
+ } else {
+ Path.split(Components, Separator, /*MaxSplit*/ -1, /*KeepEmpty*/ false);
+ }
+
+ normalizePathSegments(Components);
+ LLVM_DEBUG({
+ for (auto &C : Components)
+ dbgs() << " " << C << " ";
+
+ dbgs() << "\n";
+ });
+
+  // Walk the components, resolving each symlink as it is encountered.
+ for (const auto &Component : Components) {
+ if (Component == ".")
+ continue;
+ if (Component == "..") {
+ // collapse "a/b/../c" to "a/c"
+ size_t S = Resolved.rfind(Separator);
+ if (S != llvm::StringRef::npos)
+ Resolved.resize(S);
+ if (Resolved.empty())
+ Resolved = Separator;
+ continue;
+ }
+
+ size_t oldSize = Resolved.size();
+ sys::path::append(Resolved, Component);
+ const char *ResolvedPath = Resolved.c_str();
+ LLVM_DEBUG(dbgs() << " Processing Component: " << Component << " => "
+ << ResolvedPath << "\n";);
+ mode_t st_mode = lstatCached(ResolvedPath);
+
+ if (S_ISLNK(st_mode)) {
+ LLVM_DEBUG(dbgs() << " Found symlink: " << ResolvedPath << "\n";);
+
+ auto SymlinkOpt = readlinkCached(ResolvedPath);
+ if (!SymlinkOpt) {
+ EC = std::make_error_code(std::errc::no_such_file_or_directory);
+ LibPathCache->insert_realpath(Path, LibraryPathCache::PathInfo{"", EC});
+ LLVM_DEBUG(dbgs() << " Failed to read symlink: " << ResolvedPath
+ << "\n";);
+
+ return std::nullopt;
+ }
+
+ StringRef Symlink = *SymlinkOpt;
+ LLVM_DEBUG(dbgs() << " Symlink points to: " << Symlink << "\n";);
+
+ std::string resolvedBase = "";
+ if (sys::path::is_relative(Symlink)) {
+ Resolved.resize(oldSize);
+ resolvedBase = Resolved.str().str();
+ }
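+      // Recursively resolve the symlink target, decrementing SymLoopLevel
+      // so that cyclic links eventually fail instead of looping forever.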
+
+ auto RealSymlink =
+ realpathCached(Symlink, EC, resolvedBase,
+ /*BaseIsResolved=*/true, SymLoopLevel - 1);
+ if (!RealSymlink) {
+ LibPathCache->insert_realpath(Path, LibraryPathCache::PathInfo{"", EC});
+ LLVM_DEBUG(dbgs() << " Failed to resolve symlink target: " << Symlink
+ << "\n";);
+
+ return std::nullopt;
+ }
+
+ Resolved.assign(*RealSymlink);
+ LLVM_DEBUG(dbgs() << " Symlink Resolved to: " << Resolved << "\n";);
+
+ } else if (st_mode == 0) {
+ EC = std::make_error_code(std::errc::no_such_file_or_directory);
+ LibPathCache->insert_realpath(Path, LibraryPathCache::PathInfo{"", EC});
+ LLVM_DEBUG(dbgs() << " Component does not exist: " << ResolvedPath
+ << "\n";);
+
+ return std::nullopt;
+ }
+ }
+#else
+  EC = sys::fs::real_path(Path, Resolved); // Windows fallback
+  if (EC)
+    return std::nullopt;
+#endif
+
+ std::string Canonical = Resolved.str().str();
+ {
+ LibPathCache->insert_realpath(Path, LibraryPathCache::PathInfo{
+ Canonical,
+ std::error_code() // success
+ });
+ }
+ LLVM_DEBUG(dbgs() << "PathResolver::realpathCached: Final Resolved: " << Path
+ << " => " << Canonical << "\n";);
+ return Canonical;
+}
+
+void LibraryScanHelper::addBasePath(const std::string &Path, PathType K) {
+ std::error_code EC;
+ std::string Canon = resolveCanonical(Path, EC);
+ if (EC) {
+ LLVM_DEBUG(
+ dbgs()
+ << "LibraryScanHelper::addBasePath: Failed to canonicalize path: "
+ << Path << "\n";);
+ return;
+ }
+ std::unique_lock<std::shared_mutex> Lock(Mtx);
+ if (LibSearchPaths.count(Canon)) {
+ LLVM_DEBUG(dbgs() << "LibraryScanHelper::addBasePath: Already added: "
+ << Canon << "\n";);
+ return;
+ }
+ K = K == PathType::Unknown ? classifyKind(Canon) : K;
+ auto SP = std::make_shared<LibrarySearchPath>(Canon, K);
+ LibSearchPaths[Canon] = SP;
+
+ if (K == PathType::User) {
+ LLVM_DEBUG(dbgs() << "LibraryScanHelper::addBasePath: Added User path: "
+ << Canon << "\n";);
+ UnscannedUsr.push_back(StringRef(SP->BasePath));
+ } else {
+ LLVM_DEBUG(dbgs() << "LibraryScanHelper::addBasePath: Added System path: "
+ << Canon << "\n";);
+ UnscannedSys.push_back(StringRef(SP->BasePath));
+ }
+}
+
+std::vector<std::shared_ptr<LibrarySearchPath>>
+LibraryScanHelper::getNextBatch(PathType K, size_t BatchSize) {
+ std::vector<std::shared_ptr<LibrarySearchPath>> Result;
+ auto &Queue = (K == PathType::User) ? UnscannedUsr : UnscannedSys;
+
+ std::unique_lock<std::shared_mutex> Lock(Mtx);
+
+ while (!Queue.empty() && (BatchSize == 0 || Result.size() < BatchSize)) {
+ StringRef Base = Queue.front();
+ auto It = LibSearchPaths.find(Base);
+ if (It != LibSearchPaths.end()) {
+ auto &SP = It->second;
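+      // Claim the path with a CAS so concurrent callers cannot hand the
+      // same search path to two scanners.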
+ ScanState Expected = ScanState::NotScanned;
+ if (SP->State.compare_exchange_strong(Expected, ScanState::Scanning)) {
+ Result.push_back(SP);
+ }
+ }
+ Queue.pop_front();
+ }
+
+ return Result;
+}
+
+bool LibraryScanHelper::isTrackedBasePath(StringRef Path) const {
+ std::error_code EC;
+ std::string Canon = resolveCanonical(Path, EC);
+ if (EC)
+ return false;
+
+ std::shared_lock<std::shared_mutex> Lock(Mtx);
+ return LibSearchPaths.count(Canon) > 0;
+}
+
+bool LibraryScanHelper::leftToScan(PathType K) const {
+ std::shared_lock<std::shared_mutex> Lock(Mtx);
+ for (const auto &KV : LibSearchPaths) {
+ const auto &SP = KV.second;
+ if (SP->Kind == K && SP->State == ScanState::NotScanned)
+ return true;
+ }
+ return false;
+}
+
+void LibraryScanHelper::resetToScan() {
+  std::unique_lock<std::shared_mutex> Lock(Mtx); // exclusive: mutates queues
+
+ for (auto &[_, SP] : LibSearchPaths) {
+ ScanState Expected = ScanState::Scanned;
+
+ if (!SP->State.compare_exchange_strong(Expected, ScanState::NotScanned))
+ continue;
+
+ auto &TargetList =
+ (SP->Kind == PathType::User) ? UnscannedUsr : UnscannedSys;
+ TargetList.emplace_back(SP->BasePath);
+ }
+}
+
+std::vector<std::shared_ptr<LibrarySearchPath>>
+LibraryScanHelper::getAllUnits() const {
+ std::shared_lock<std::shared_mutex> Lock(Mtx);
+ std::vector<std::shared_ptr<LibrarySearchPath>> Result;
+ Result.reserve(LibSearchPaths.size());
+ for (const auto &[_, SP] : LibSearchPaths) {
+ Result.push_back(SP);
+ }
+ return Result;
+}
+
+std::string LibraryScanHelper::resolveCanonical(StringRef Path,
+ std::error_code &EC) const {
+ auto Canon = LibPathResolver->resolve(Path, EC);
+ return EC ? Path.str() : *Canon;
+}
+
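+// Heuristic classification: paths under $HOME or common user-install
+// prefixes are treated as user paths, everything else as system paths.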
+PathType LibraryScanHelper::classifyKind(StringRef Path) const {
+ // Detect home directory
+ const char *Home = getenv("HOME");
+  if (Home && Path.starts_with(Home))
+ return PathType::User;
+
+ static const std::array<std::string, 5> UserPrefixes = {
+ "/usr/local", // often used by users for manual installs
+ "/opt/homebrew", // common on macOS
+ "/opt/local", // MacPorts
+ "/home", // Linux home dirs
+ "/Users", // macOS user dirs
+ };
+
+ for (const auto &Prefix : UserPrefixes) {
+    if (Path.starts_with(Prefix))
+ return PathType::User;
+ }
+
+ return PathType::System;
+}
+
+Expected<LibraryDepsInfo> parseMachODeps(const object::MachOObjectFile &Obj) {
+ LibraryDepsInfo Libdeps;
+ LLVM_DEBUG(dbgs() << "Parsing Mach-O dependencies...\n";);
+ for (const auto &Command : Obj.load_commands()) {
+ switch (Command.C.cmd) {
+ case MachO::LC_LOAD_DYLIB: {
+ MachO::dylib_command dylibCmd = Obj.getDylibIDLoadCommand(Command);
+ const char *name = Command.Ptr + dylibCmd.dylib.name;
+ Libdeps.addDep(name);
+ LLVM_DEBUG(dbgs() << " Found LC_LOAD_DYLIB: " << name << "\n";);
+ } break;
+ case MachO::LC_LOAD_WEAK_DYLIB:
+ case MachO::LC_REEXPORT_DYLIB:
+ case MachO::LC_LOAD_UPWARD_DYLIB:
+ case MachO::LC_LAZY_LOAD_DYLIB:
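+      // Weak, re-exported, upward and lazy dylib references are not
+      // followed for dependency scanning.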
+ break;
+ case MachO::LC_RPATH: {
+ // Extract RPATH
+ MachO::rpath_command rpathCmd = Obj.getRpathCommand(Command);
+ const char *rpath = Command.Ptr + rpathCmd.path;
+ LLVM_DEBUG(dbgs() << " Found LC_RPATH: " << rpath << "\n";);
+
+ SmallVector<StringRef, 4> RawPaths;
+ SplitString(StringRef(rpath), RawPaths,
+ sys::EnvPathSeparator == ':' ? ":" : ";");
+
+ for (const auto &raw : RawPaths) {
+ Libdeps.addRPath(raw.str()); // Convert to std::string
+ LLVM_DEBUG(dbgs() << " Parsed RPATH entry: " << raw << "\n";);
+ }
+ break;
+ }
+ }
+ }
+
+ return Expected<LibraryDepsInfo>(std::move(Libdeps));
+}
+
+template <class ELFT>
+static Expected<StringRef> getDynamicStrTab(const object::ELFFile<ELFT> &Elf) {
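+  // Prefer the DT_STRTAB entry from the dynamic segment; if no such entry
+  // exists, fall back to the section headers below.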
+ auto DynamicEntriesOrError = Elf.dynamicEntries();
+ if (!DynamicEntriesOrError)
+ return DynamicEntriesOrError.takeError();
+
+ for (const typename ELFT::Dyn &Dyn : *DynamicEntriesOrError) {
+ if (Dyn.d_tag == ELF::DT_STRTAB) {
+ auto MappedAddrOrError = Elf.toMappedAddr(Dyn.getPtr());
+ if (!MappedAddrOrError)
+ return MappedAddrOrError.takeError();
+ return StringRef(reinterpret_cast<const char *>(*MappedAddrOrError));
+ }
+ }
+
+ // If the dynamic segment is not present, we fall back on the sections.
+ auto SectionsOrError = Elf.sections();
+ if (!SectionsOrError)
+ return SectionsOrError.takeError();
+
+ for (const typename ELFT::Shdr &Sec : *SectionsOrError) {
+ if (Sec.sh_type == ELF::SHT_DYNSYM)
+ return Elf.getStringTableForSymtab(Sec);
+ }
+
+ return make_error<StringError>("dynamic string table not found",
+ inconvertibleErrorCode());
+}
+
+template <typename ELFT>
+Expected<LibraryDepsInfo> parseELF(const object::ELFFile<ELFT> &Elf) {
+ LibraryDepsInfo Deps;
+ Expected<StringRef> StrTabOrErr = getDynamicStrTab(Elf);
+ if (!StrTabOrErr)
+ return StrTabOrErr.takeError();
+
+ const char *Data = StrTabOrErr->data();
+
+ auto DynamicEntriesOrError = Elf.dynamicEntries();
+ if (!DynamicEntriesOrError) {
+ return DynamicEntriesOrError.takeError();
+ }
+
+ for (const typename ELFT::Dyn &Dyn : *DynamicEntriesOrError) {
+ switch (Dyn.d_tag) {
+ case ELF::DT_NEEDED:
+ Deps.addDep(Data + Dyn.d_un.d_val);
+ break;
+ case ELF::DT_RPATH: {
+ SmallVector<StringRef, 4> RawPaths;
+ SplitString(Data + Dyn.d_un.d_val, RawPaths,
+ sys::EnvPathSeparator == ':' ? ":" : ";");
+ for (const auto &raw : RawPaths)
+ Deps.addRPath(raw.str());
+ break;
+ }
+ case ELF::DT_RUNPATH: {
+ SmallVector<StringRef, 4> RawPaths;
+ SplitString(Data + Dyn.d_un.d_val, RawPaths,
+ sys::EnvPathSeparator == ':' ? ":" : ";");
+ for (const auto &raw : RawPaths)
+ Deps.addRunPath(raw.str());
+ break;
+ }
+ case ELF::DT_FLAGS_1:
+      // DF_1_PIE marks this as a position-independent executable.
+ if (Dyn.d_un.d_val & ELF::DF_1_PIE)
+ Deps.isPIE = true;
+ break;
+    // DT_NULL, DT_AUXILIARY and DT_FILTER entries are intentionally ignored.
+ default:
+ break;
+ }
+ }
+
+ return Expected<LibraryDepsInfo>(std::move(Deps));
+}
+
+Expected<LibraryDepsInfo> parseELFDeps(const object::ELFObjectFileBase &Obj) {
+ using namespace object;
+ LLVM_DEBUG(dbgs() << "parseELFDeps: Detected ELF object\n";);
+ if (const auto *ELF = dyn_cast<ELF32LEObjectFile>(&Obj))
+ return parseELF(ELF->getELFFile());
+ else if (const auto *ELF = dyn_cast<ELF32BEObjectFile>(&Obj))
+ return parseELF(ELF->getELFFile());
+ else if (const auto *ELF = dyn_cast<ELF64LEObjectFile>(&Obj))
+ return parseELF(ELF->getELFFile());
+ else if (const auto *ELF = dyn_cast<ELF64BEObjectFile>(&Obj))
+ return parseELF(ELF->getELFFile());
+
+ LLVM_DEBUG(dbgs() << "parseELFDeps: Unknown ELF format\n";);
+ return createStringError(std::errc::not_supported, "Unknown ELF format");
+}
+
+Expected<LibraryDepsInfo> LibraryScanner::extractDeps(StringRef FilePath) {
+ LLVM_DEBUG(dbgs() << "extractDeps: Attempting to open file " << FilePath
+ << "\n";);
+
+ ObjectFileLoader ObjLoader(FilePath);
+ auto ObjOrErr = ObjLoader.getObjectFile();
+ if (!ObjOrErr) {
+ LLVM_DEBUG(dbgs() << "extractDeps: Failed to open " << FilePath << "\n";);
+ return ObjOrErr.takeError();
+ }
+
+ object::ObjectFile *Obj = &ObjOrErr.get();
+
+ if (auto *elfObj = dyn_cast<object::ELFObjectFileBase>(Obj)) {
+ LLVM_DEBUG(dbgs() << "extractDeps: File " << FilePath
+ << " is an ELF object\n";);
+
+ return parseELFDeps(*elfObj);
+ }
+
+ if (auto *macho = dyn_cast<object::MachOObjectFile>(Obj)) {
+ LLVM_DEBUG(dbgs() << "extractDeps: File " << FilePath
+ << " is a Mach-O object\n";);
+ return parseMachODeps(*macho);
+ }
+
+ if (Obj->isCOFF()) {
+ // TODO: COFF support
+ return LibraryDepsInfo();
+ }
+
+ LLVM_DEBUG(dbgs() << "extractDeps: Unsupported binary format for file "
+ << FilePath << "\n";);
+ return createStringError(inconvertibleErrorCode(),
+ "Unsupported binary format: %s",
+ FilePath.str().c_str());
+}
+
+std::optional<std::string> LibraryScanner::shouldScan(StringRef FilePath) {
+ std::error_code EC;
+
+ LLVM_DEBUG(dbgs() << "[shouldScan] Checking: " << FilePath << "\n";);
+
+ // [1] Check file existence early
+ if (!sys::fs::exists(FilePath)) {
+ LLVM_DEBUG(dbgs() << " -> Skipped: file does not exist.\n";);
+
+ return std::nullopt;
+ }
+
+ // [2] Resolve to canonical path
+ auto CanonicalPathOpt = ScanHelper.resolve(FilePath, EC);
+ if (EC || !CanonicalPathOpt) {
+ LLVM_DEBUG(dbgs() << " -> Skipped: failed to resolve path (EC="
+ << EC.message() << ").\n";);
+
+ return std::nullopt;
+ }
+
+ const std::string &CanonicalPath = *CanonicalPathOpt;
+ LLVM_DEBUG(dbgs() << " -> Canonical path: " << CanonicalPath << "\n");
+
+  // [3] Skip directories.
+ if (sys::fs::is_directory(CanonicalPath)) {
+ LLVM_DEBUG(dbgs() << " -> Skipped: path is a directory.\n";);
+
+ return std::nullopt;
+ }
+
+ // [4] Skip if it's not a shared library.
+ if (!DylibPathValidator::isSharedLibrary(CanonicalPath)) {
+ LLVM_DEBUG(dbgs() << " -> Skipped: not a shared library.\n";);
+ return std::nullopt;
+ }
+
+ // [5] Skip if we've already seen this path (via cache)
+ if (ScanHelper.hasSeenOrMark(CanonicalPath)) {
+ LLVM_DEBUG(dbgs() << " -> Skipped: already seen.\n";);
+
+ return std::nullopt;
+ }
+
+ // [6] Already tracked in LibraryManager?
+ if (LibMgr.hasLibrary(CanonicalPath)) {
+ LLVM_DEBUG(dbgs() << " -> Skipped: already tracked by LibraryManager.\n";);
+
+ return std::nullopt;
+ }
+
+ // [7] Run user-defined hook (default: always true)
+ if (!ShouldScanCall(CanonicalPath)) {
+ LLVM_DEBUG(dbgs() << " -> Skipped: user-defined hook rejected.\n";);
+
+ return std::nullopt;
+ }
+
+ LLVM_DEBUG(dbgs() << " -> Accepted: ready to scan " << CanonicalPath
+ << "\n";);
+ return CanonicalPath;
+}
+
+void LibraryScanner::handleLibrary(StringRef FilePath, PathType K, int level) {
+ LLVM_DEBUG(dbgs() << "LibraryScanner::handleLibrary: Scanning: " << FilePath
+ << ", level=" << level << "\n";);
+ auto CanonPathOpt = shouldScan(FilePath);
+ if (!CanonPathOpt) {
+    LLVM_DEBUG(dbgs() << "  Skipped (shouldScan returned nullopt): " << FilePath
+ << "\n";);
+
+ return;
+ }
+ const std::string CanonicalPath = *CanonPathOpt;
+
+ auto DepsOrErr = extractDeps(CanonicalPath);
+ if (!DepsOrErr) {
+ LLVM_DEBUG(dbgs() << " Failed to extract deps for: " << CanonicalPath
+ << "\n";);
+ handleError(DepsOrErr.takeError());
+ return;
+ }
+
+ LibraryDepsInfo &Deps = *DepsOrErr;
+
+ LLVM_DEBUG({
+ dbgs() << " Found deps : \n";
+ for (const auto &dep : Deps.deps)
+ dbgs() << " : " << dep << "\n";
+ dbgs() << " Found @rpath : " << Deps.rpath.size() << "\n";
+ for (const auto &r : Deps.rpath)
+ dbgs() << " : " << r << "\n";
+ dbgs() << " Found @runpath : \n";
+ for (const auto &r : Deps.runPath)
+ dbgs() << " : " << r << "\n";
+ });
+
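+  // A PIE executable encountered at the top level of a directory scan is a
+  // program, not a shared library; do not register it.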
+ if (Deps.isPIE && level == 0) {
+ LLVM_DEBUG(dbgs() << " Skipped PIE executable at top level: "
+ << CanonicalPath << "\n";);
+
+ return;
+ }
+
+ bool Added = LibMgr.addLibrary(CanonicalPath, K);
+ if (!Added) {
+ LLVM_DEBUG(dbgs() << " Already added: " << CanonicalPath << "\n";);
+ return;
+ }
+
+ // Heuristic 1: No RPATH/RUNPATH, skip deps
+ if (Deps.rpath.empty() && Deps.runPath.empty()) {
+ LLVM_DEBUG(
+ dbgs() << "LibraryScanner::handleLibrary: Skipping deps (Heuristic1): "
+ << CanonicalPath << "\n";);
+ return;
+ }
+
+ // Heuristic 2: All RPATH and RUNPATH already tracked
+ auto allTracked = [&](const auto &Paths) {
+ LLVM_DEBUG(dbgs() << " Checking : " << Paths.size() << "\n";);
+ return std::all_of(Paths.begin(), Paths.end(), [&](StringRef P) {
+ LLVM_DEBUG(dbgs() << " Checking isTrackedBasePath : " << P << "\n";);
+ return ScanHelper.isTrackedBasePath(
+ DylibResolver::resolvelinkerFlag(P, CanonicalPath));
+ });
+ };
+
+ if (allTracked(Deps.rpath) && allTracked(Deps.runPath)) {
+ LLVM_DEBUG(
+ dbgs() << "LibraryScanner::handleLibrary: Skipping deps (Heuristic2): "
+ << CanonicalPath << "\n";);
+ return;
+ }
+
+ DylibPathValidator Validator(ScanHelper.getPathResolver());
+ DylibResolver Resolver(Validator);
+ Resolver.configure(CanonicalPath,
+ {{Deps.rpath, SearchPathType::RPath},
+ {ScanHelper.getSearchPaths(), SearchPathType::UsrOrSys},
+ {Deps.runPath, SearchPathType::RunPath}});
+ for (StringRef Dep : Deps.deps) {
+ LLVM_DEBUG(dbgs() << " Resolving dep: " << Dep << "\n";);
+ auto DepFullOpt = Resolver.resolve(Dep);
+ if (!DepFullOpt) {
+ LLVM_DEBUG(dbgs() << " Failed to resolve dep: " << Dep << "\n";);
+
+ continue;
+ }
+ LLVM_DEBUG(dbgs() << " Resolved dep to: " << *DepFullOpt << "\n";);
+
+ handleLibrary(*DepFullOpt, K, level + 1);
+ }
+}
+
+void LibraryScanner::scanBaseDir(std::shared_ptr<LibrarySearchPath> SP) {
+ if (!sys::fs::is_directory(SP->BasePath) || SP->BasePath.empty()) {
+ LLVM_DEBUG(
+ dbgs() << "LibraryScanner::scanBaseDir: Invalid or empty basePath: "
+ << SP->BasePath << "\n";);
+ return;
+ }
+
+ LLVM_DEBUG(dbgs() << "LibraryScanner::scanBaseDir: Scanning directory: "
+ << SP->BasePath << "\n";);
+ std::error_code EC;
+
+ SP->State.store(ScanState::Scanning);
+
+ for (sys::fs::directory_iterator It(SP->BasePath, EC), end; It != end && !EC;
+ It.increment(EC)) {
+ auto Entry = *It;
+ if (!Entry.status())
+ continue;
+
+ auto Status = *Entry.status();
+ if (sys::fs::is_regular_file(Status) || sys::fs::is_symlink_file(Status)) {
+ LLVM_DEBUG(dbgs() << " Found file: " << Entry.path() << "\n";);
+      // TODO: consider scanning entries asynchronously.
+ handleLibrary(Entry.path(), SP->Kind);
+ }
+ }
+
+ SP->State.store(ScanState::Scanned);
+}
+
+void LibraryScanner::scanNext(PathType K, size_t BatchSize) {
+ LLVM_DEBUG(dbgs() << "LibraryScanner::scanNext: Scanning next batch of size "
+ << BatchSize << " for kind "
+ << (K == PathType::User ? "User" : "System") << "\n";);
+
+ auto SearchPaths = ScanHelper.getNextBatch(K, BatchSize);
+ for (auto &SP : SearchPaths) {
+ LLVM_DEBUG(dbgs() << " Scanning unit with basePath: " << SP->BasePath
+ << "\n";);
+
+ scanBaseDir(SP);
+ }
+}
+
+} // end namespace llvm::orc
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index b838e36..58b7ddd 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -730,7 +730,7 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
// (arm|aarch64).neon.bfdot.*'.
Intrinsic::ID ID =
StringSwitch<Intrinsic::ID>(Name)
- .Cases("v2f32.v8i8", "v4f32.v16i8",
+ .Cases({"v2f32.v8i8", "v4f32.v16i8"},
IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfdot
: (Intrinsic::ID)Intrinsic::aarch64_neon_bfdot)
.Default(Intrinsic::not_intrinsic);
@@ -1456,7 +1456,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
if (F->arg_size() == 1) {
Intrinsic::ID IID =
StringSwitch<Intrinsic::ID>(Name)
- .Cases("brev32", "brev64", Intrinsic::bitreverse)
+ .Cases({"brev32", "brev64"}, Intrinsic::bitreverse)
.Case("clz.i", Intrinsic::ctlz)
.Case("popc.i", Intrinsic::ctpop)
.Default(Intrinsic::not_intrinsic);
@@ -1504,6 +1504,10 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
else if (Name.consume_front("fabs."))
// nvvm.fabs.{f,ftz.f,d}
Expand = Name == "f" || Name == "ftz.f" || Name == "d";
+ else if (Name.consume_front("ex2.approx."))
+ // nvvm.ex2.approx.{f,ftz.f,d,f16x2}
+ Expand =
+ Name == "f" || Name == "ftz.f" || Name == "d" || Name == "f16x2";
else if (Name.consume_front("max.") || Name.consume_front("min."))
// nvvm.{min,max}.{i,ii,ui,ull}
Expand = Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
@@ -2550,6 +2554,11 @@ static Value *upgradeNVVMIntrinsicCall(StringRef Name, CallBase *CI,
Intrinsic::ID IID = (Name == "fabs.ftz.f") ? Intrinsic::nvvm_fabs_ftz
: Intrinsic::nvvm_fabs;
Rep = Builder.CreateUnaryIntrinsic(IID, CI->getArgOperand(0));
+ } else if (Name.consume_front("ex2.approx.")) {
+ // nvvm.ex2.approx.{f,ftz.f,d,f16x2}
+ Intrinsic::ID IID = Name.starts_with("ftz") ? Intrinsic::nvvm_ex2_approx_ftz
+ : Intrinsic::nvvm_ex2_approx;
+ Rep = Builder.CreateUnaryIntrinsic(IID, CI->getArgOperand(0));
} else if (Name.starts_with("atomic.load.add.f32.p") ||
Name.starts_with("atomic.load.add.f64.p")) {
Value *Ptr = CI->getArgOperand(0);
diff --git a/llvm/lib/IR/ConstantsContext.h b/llvm/lib/IR/ConstantsContext.h
index 51fb40b..e3e8d89 100644
--- a/llvm/lib/IR/ConstantsContext.h
+++ b/llvm/lib/IR/ConstantsContext.h
@@ -535,7 +535,7 @@ struct ConstantPtrAuthKeyType {
unsigned getHash() const { return hash_combine_range(Operands); }
- using TypeClass = typename ConstantInfo<ConstantPtrAuth>::TypeClass;
+ using TypeClass = ConstantInfo<ConstantPtrAuth>::TypeClass;
ConstantPtrAuth *create(TypeClass *Ty) const {
return new ConstantPtrAuth(Operands[0], cast<ConstantInt>(Operands[1]),
diff --git a/llvm/lib/IR/ModuleSummaryIndex.cpp b/llvm/lib/IR/ModuleSummaryIndex.cpp
index 62fd62c..3394754 100644
--- a/llvm/lib/IR/ModuleSummaryIndex.cpp
+++ b/llvm/lib/IR/ModuleSummaryIndex.cpp
@@ -34,8 +34,6 @@ static cl::opt<bool> ImportConstantsWithRefs(
"import-constants-with-refs", cl::init(true), cl::Hidden,
cl::desc("Import constant global variables with references"));
-constexpr uint32_t FunctionSummary::ParamAccess::RangeWidth;
-
FunctionSummary FunctionSummary::ExternalNode =
FunctionSummary::makeDummyFunctionSummary(
SmallVector<FunctionSummary::EdgeTy, 0>());
@@ -88,8 +86,6 @@ std::pair<unsigned, unsigned> FunctionSummary::specialRefCounts() const {
return {RORefCnt, WORefCnt};
}
-constexpr uint64_t ModuleSummaryIndex::BitcodeSummaryVersion;
-
uint64_t ModuleSummaryIndex::getFlags() const {
uint64_t Flags = 0;
// Flags & 0x4 is reserved. DO NOT REUSE.
diff --git a/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/llvm/lib/MC/MCParser/ELFAsmParser.cpp
index 1a3752f..911d92c 100644
--- a/llvm/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -695,15 +695,15 @@ bool ELFAsmParser::parseDirectivePrevious(StringRef DirName, SMLoc) {
static MCSymbolAttr MCAttrForString(StringRef Type) {
return StringSwitch<MCSymbolAttr>(Type)
- .Cases("STT_FUNC", "function", MCSA_ELF_TypeFunction)
- .Cases("STT_OBJECT", "object", MCSA_ELF_TypeObject)
- .Cases("STT_TLS", "tls_object", MCSA_ELF_TypeTLS)
- .Cases("STT_COMMON", "common", MCSA_ELF_TypeCommon)
- .Cases("STT_NOTYPE", "notype", MCSA_ELF_TypeNoType)
- .Cases("STT_GNU_IFUNC", "gnu_indirect_function",
- MCSA_ELF_TypeIndFunction)
- .Case("gnu_unique_object", MCSA_ELF_TypeGnuUniqueObject)
- .Default(MCSA_Invalid);
+ .Cases({"STT_FUNC", "function"}, MCSA_ELF_TypeFunction)
+ .Cases({"STT_OBJECT", "object"}, MCSA_ELF_TypeObject)
+ .Cases({"STT_TLS", "tls_object"}, MCSA_ELF_TypeTLS)
+ .Cases({"STT_COMMON", "common"}, MCSA_ELF_TypeCommon)
+ .Cases({"STT_NOTYPE", "notype"}, MCSA_ELF_TypeNoType)
+ .Cases({"STT_GNU_IFUNC", "gnu_indirect_function"},
+ MCSA_ELF_TypeIndFunction)
+ .Case("gnu_unique_object", MCSA_ELF_TypeGnuUniqueObject)
+ .Default(MCSA_Invalid);
}
/// parseDirectiveELFType
diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp
index 3462954..3a85770 100644
--- a/llvm/lib/MC/MCParser/MasmParser.cpp
+++ b/llvm/lib/MC/MCParser/MasmParser.cpp
@@ -5323,10 +5323,10 @@ void MasmParser::initializeDirectiveKindMap() {
bool MasmParser::isMacroLikeDirective() {
if (getLexer().is(AsmToken::Identifier)) {
bool IsMacroLike = StringSwitch<bool>(getTok().getIdentifier())
- .CasesLower("repeat", "rept", true)
+ .CasesLower({"repeat", "rept"}, true)
.CaseLower("while", true)
- .CasesLower("for", "irp", true)
- .CasesLower("forc", "irpc", true)
+ .CasesLower({"for", "irp"}, true)
+ .CasesLower({"forc", "irpc"}, true)
.Default(false);
if (IsMacroLike)
return true;
diff --git a/llvm/lib/Object/WindowsMachineFlag.cpp b/llvm/lib/Object/WindowsMachineFlag.cpp
index caf357e8..14c14f6 100644
--- a/llvm/lib/Object/WindowsMachineFlag.cpp
+++ b/llvm/lib/Object/WindowsMachineFlag.cpp
@@ -23,8 +23,8 @@ using namespace llvm;
COFF::MachineTypes llvm::getMachineType(StringRef S) {
// Flags must be a superset of Microsoft lib.exe /machine flags.
return StringSwitch<COFF::MachineTypes>(S.lower())
- .Cases("x64", "amd64", COFF::IMAGE_FILE_MACHINE_AMD64)
- .Cases("x86", "i386", COFF::IMAGE_FILE_MACHINE_I386)
+ .Cases({"x64", "amd64"}, COFF::IMAGE_FILE_MACHINE_AMD64)
+ .Cases({"x86", "i386"}, COFF::IMAGE_FILE_MACHINE_I386)
.Case("arm", COFF::IMAGE_FILE_MACHINE_ARMNT)
.Case("arm64", COFF::IMAGE_FILE_MACHINE_ARM64)
.Case("arm64ec", COFF::IMAGE_FILE_MACHINE_ARM64EC)
diff --git a/llvm/lib/Remarks/RemarkFormat.cpp b/llvm/lib/Remarks/RemarkFormat.cpp
index 1c52e35..f9fd4af 100644
--- a/llvm/lib/Remarks/RemarkFormat.cpp
+++ b/llvm/lib/Remarks/RemarkFormat.cpp
@@ -19,7 +19,7 @@ using namespace llvm::remarks;
Expected<Format> llvm::remarks::parseFormat(StringRef FormatStr) {
auto Result = StringSwitch<Format>(FormatStr)
- .Cases("", "yaml", Format::YAML)
+ .Cases({"", "yaml"}, Format::YAML)
.Case("bitstream", Format::Bitstream)
.Default(Format::Unknown);
diff --git a/llvm/lib/Support/AArch64BuildAttributes.cpp b/llvm/lib/Support/AArch64BuildAttributes.cpp
index 4a6b2fd..be4d1f1 100644
--- a/llvm/lib/Support/AArch64BuildAttributes.cpp
+++ b/llvm/lib/Support/AArch64BuildAttributes.cpp
@@ -67,8 +67,8 @@ StringRef AArch64BuildAttributes::getTypeStr(unsigned Type) {
}
SubsectionType AArch64BuildAttributes::getTypeID(StringRef Type) {
return StringSwitch<SubsectionType>(Type)
- .Cases("uleb128", "ULEB128", ULEB128)
- .Cases("ntbs", "NTBS", NTBS)
+ .Cases({"uleb128", "ULEB128"}, ULEB128)
+ .Cases({"ntbs", "NTBS"}, NTBS)
.Default(TYPE_NOT_FOUND);
}
StringRef AArch64BuildAttributes::getSubsectionTypeUnknownError() {
diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp
index e21cf8e..e2645fa 100644
--- a/llvm/lib/Support/APFloat.cpp
+++ b/llvm/lib/Support/APFloat.cpp
@@ -269,12 +269,6 @@ bool APFloatBase::isRepresentableBy(const fltSemantics &A,
A.precision <= B.precision;
}
-constexpr RoundingMode APFloatBase::rmNearestTiesToEven;
-constexpr RoundingMode APFloatBase::rmTowardPositive;
-constexpr RoundingMode APFloatBase::rmTowardNegative;
-constexpr RoundingMode APFloatBase::rmTowardZero;
-constexpr RoundingMode APFloatBase::rmNearestTiesToAway;
-
/* A tight upper bound on number of parts required to hold the value
pow(5, power) is
diff --git a/llvm/lib/Support/CommandLine.cpp b/llvm/lib/Support/CommandLine.cpp
index 9491ec0..de5bd79 100644
--- a/llvm/lib/Support/CommandLine.cpp
+++ b/llvm/lib/Support/CommandLine.cpp
@@ -382,7 +382,7 @@ public:
RegisteredSubCommands.erase(sub);
}
- iterator_range<typename SmallPtrSet<SubCommand *, 4>::iterator>
+ iterator_range<SmallPtrSet<SubCommand *, 4>::iterator>
getRegisteredSubcommands() {
return make_range(RegisteredSubCommands.begin(),
RegisteredSubCommands.end());
@@ -2830,7 +2830,7 @@ StringMap<Option *> &cl::getRegisteredOptions(SubCommand &Sub) {
return Sub.OptionsMap;
}
-iterator_range<typename SmallPtrSet<SubCommand *, 4>::iterator>
+iterator_range<SmallPtrSet<SubCommand *, 4>::iterator>
cl::getRegisteredSubcommands() {
return GlobalParser->getRegisteredSubcommands();
}
diff --git a/llvm/lib/Support/StringRef.cpp b/llvm/lib/Support/StringRef.cpp
index b6a2f8a..2e8fba8 100644
--- a/llvm/lib/Support/StringRef.cpp
+++ b/llvm/lib/Support/StringRef.cpp
@@ -17,11 +17,6 @@
using namespace llvm;
-// MSVC emits references to this into the translation units which reference it.
-#ifndef _MSC_VER
-constexpr size_t StringRef::npos;
-#endif
-
// strncasecmp() is not available on non-POSIX systems, so define an
// alternative function here.
static int ascii_strncasecmp(StringRef LHS, StringRef RHS) {
diff --git a/llvm/lib/Support/Windows/Signals.inc b/llvm/lib/Support/Windows/Signals.inc
index 648d6a5..da68994 100644
--- a/llvm/lib/Support/Windows/Signals.inc
+++ b/llvm/lib/Support/Windows/Signals.inc
@@ -421,8 +421,13 @@ bool sys::RemoveFileOnSignal(StringRef Filename, std::string *ErrMsg) {
return true;
}
- if (FilesToRemove == NULL)
+ if (FilesToRemove == NULL) {
FilesToRemove = new std::vector<std::string>;
+ std::atexit([]() {
+ delete FilesToRemove;
+ FilesToRemove = NULL;
+ });
+ }
FilesToRemove->push_back(std::string(Filename));
diff --git a/llvm/lib/Support/raw_ostream.cpp b/llvm/lib/Support/raw_ostream.cpp
index 07b9989..d6f27fb 100644
--- a/llvm/lib/Support/raw_ostream.cpp
+++ b/llvm/lib/Support/raw_ostream.cpp
@@ -61,17 +61,6 @@
using namespace llvm;
-constexpr raw_ostream::Colors raw_ostream::BLACK;
-constexpr raw_ostream::Colors raw_ostream::RED;
-constexpr raw_ostream::Colors raw_ostream::GREEN;
-constexpr raw_ostream::Colors raw_ostream::YELLOW;
-constexpr raw_ostream::Colors raw_ostream::BLUE;
-constexpr raw_ostream::Colors raw_ostream::MAGENTA;
-constexpr raw_ostream::Colors raw_ostream::CYAN;
-constexpr raw_ostream::Colors raw_ostream::WHITE;
-constexpr raw_ostream::Colors raw_ostream::SAVEDCOLOR;
-constexpr raw_ostream::Colors raw_ostream::RESET;
-
raw_ostream::~raw_ostream() {
// raw_ostream's subclasses should take care to flush the buffer
// in their destructors.
diff --git a/llvm/lib/Support/raw_socket_stream.cpp b/llvm/lib/Support/raw_socket_stream.cpp
index 3b510d3..f716317 100644
--- a/llvm/lib/Support/raw_socket_stream.cpp
+++ b/llvm/lib/Support/raw_socket_stream.cpp
@@ -332,7 +332,7 @@ ListeningSocket::~ListeningSocket() {
raw_socket_stream::raw_socket_stream(int SocketFD)
: raw_fd_stream(SocketFD, true) {}
-raw_socket_stream::~raw_socket_stream() {}
+raw_socket_stream::~raw_socket_stream() = default;
Expected<std::unique_ptr<raw_socket_stream>>
raw_socket_stream::createConnectedUnix(StringRef SocketPath) {
diff --git a/llvm/lib/TableGen/TGLexer.cpp b/llvm/lib/TableGen/TGLexer.cpp
index 30eae6e..e8e6469 100644
--- a/llvm/lib/TableGen/TGLexer.cpp
+++ b/llvm/lib/TableGen/TGLexer.cpp
@@ -682,8 +682,10 @@ tgtok::TokKind TGLexer::LexExclaim() {
.Case("instances", tgtok::XInstances)
.Case("substr", tgtok::XSubstr)
.Case("find", tgtok::XFind)
- .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
- .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
+ .Cases({"setdagop", "setop"},
+ tgtok::XSetDagOp) // !setop is deprecated.
+ .Cases({"getdagop", "getop"},
+ tgtok::XGetDagOp) // !getop is deprecated.
.Case("setdagopname", tgtok::XSetDagOpName)
.Case("getdagopname", tgtok::XGetDagOpName)
.Case("getdagarg", tgtok::XGetDagArg)
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 47c1ac4..e8352be 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -308,9 +308,9 @@ bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
return (EffectiveCallerBits & EffectiveCalleeBits) == EffectiveCalleeBits;
}
-bool AArch64TTIImpl::areTypesABICompatible(
- const Function *Caller, const Function *Callee,
- const ArrayRef<Type *> &Types) const {
+bool AArch64TTIImpl::areTypesABICompatible(const Function *Caller,
+ const Function *Callee,
+ ArrayRef<Type *> Types) const {
if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
return false;
@@ -2227,7 +2227,7 @@ static std::optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
return std::nullopt;
}
-template <Intrinsic::ID MulOpc, typename Intrinsic::ID FuseOpc>
+template <Intrinsic::ID MulOpc, Intrinsic::ID FuseOpc>
static std::optional<Instruction *>
instCombineSVEVectorFuseMulAddSub(InstCombiner &IC, IntrinsicInst &II,
bool MergeIntoAddendOp) {
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index fe2e849..b39546a 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -84,7 +84,7 @@ public:
const Function *Callee) const override;
bool areTypesABICompatible(const Function *Caller, const Function *Callee,
- const ArrayRef<Type *> &Types) const override;
+ ArrayRef<Type *> Types) const override;
unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
unsigned DefaultCallPenalty) const override;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index b28c50e..b87b54f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -816,7 +816,7 @@ parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
Params.consume_front("strategy=");
auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
.Case("dpp", ScanOptions::DPP)
- .Cases("iterative", "", ScanOptions::Iterative)
+ .Cases({"iterative", ""}, ScanOptions::Iterative)
.Case("none", ScanOptions::None)
.Default(std::nullopt);
if (Result)
diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
index d950131..65dce74 100644
--- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -2116,8 +2116,10 @@ class VIMAGE_TENSOR_Real <bits<8> op, VIMAGE_TENSOR_Pseudo ps, string opName = p
let vaddr2 = !if(ps.UpTo2D, !cast<int>(SGPR_NULL_gfx11plus.HWEncoding), ?);
let vaddr3 = !if(ps.UpTo2D, !cast<int>(SGPR_NULL_gfx11plus.HWEncoding), ?);
+ // Set VADDR4 to NULL
+ let vaddr4 = !cast<int>(SGPR_NULL_gfx11plus.HWEncoding);
+
// set to 0 based on SPG.
- let vaddr4 = 0;
let rsrc = 0;
let vdata = 0;
let d16 = 0;
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 6dcbced..b7fa899 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1288,18 +1288,38 @@ void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
}
void WaitcntBrackets::applyXcnt(const AMDGPU::Waitcnt &Wait) {
+  // On entry to a block with multiple predecessors, there may
+ // be pending SMEM and VMEM events active at the same time.
+ // In such cases, only clear one active event at a time.
+ auto applyPendingXcntGroup = [this](unsigned E) {
+ unsigned LowerBound = getScoreLB(X_CNT);
+ applyWaitcnt(X_CNT, 0);
+ PendingEvents |= (1 << E);
+ setScoreLB(X_CNT, LowerBound);
+ };
+
// Wait on XCNT is redundant if we are already waiting for a load to complete.
// SMEM can return out of order, so only omit XCNT wait if we are waiting till
// zero.
- if (Wait.KmCnt == 0 && hasPendingEvent(SMEM_GROUP))
- return applyWaitcnt(X_CNT, 0);
+ if (Wait.KmCnt == 0 && hasPendingEvent(SMEM_GROUP)) {
+ if (hasPendingEvent(VMEM_GROUP))
+ applyPendingXcntGroup(VMEM_GROUP);
+ else
+ applyWaitcnt(X_CNT, 0);
+ return;
+ }
// If we have pending store we cannot optimize XCnt because we do not wait for
 // stores. VMEM loads return in order, so if we only have loads XCnt is
// decremented to the same number as LOADCnt.
if (Wait.LoadCnt != ~0u && hasPendingEvent(VMEM_GROUP) &&
- !hasPendingEvent(STORE_CNT))
- return applyWaitcnt(X_CNT, std::min(Wait.XCnt, Wait.LoadCnt));
+ !hasPendingEvent(STORE_CNT)) {
+ if (hasPendingEvent(SMEM_GROUP))
+ applyPendingXcntGroup(SMEM_GROUP);
+ else
+ applyWaitcnt(X_CNT, std::min(Wait.XCnt, Wait.LoadCnt));
+ return;
+ }
applyWaitcnt(X_CNT, Wait.XCnt);
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d930a21..d9f76c9 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -10618,6 +10618,42 @@ bool SIInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
return false;
}
+// SCC is already valid after SCCValid.
+// SCCRedefine will redefine SCC to the same value already available after
+// SCCValid. If there are no intervening SCC conflicts delete SCCRedefine and
+// update kill/dead flags if necessary.
+static bool optimizeSCC(MachineInstr *SCCValid, MachineInstr *SCCRedefine,
+ const SIRegisterInfo &RI) {
+ MachineInstr *KillsSCC = nullptr;
+ for (MachineInstr &MI : make_range(std::next(SCCValid->getIterator()),
+ SCCRedefine->getIterator())) {
+ if (MI.modifiesRegister(AMDGPU::SCC, &RI))
+ return false;
+ if (MI.killsRegister(AMDGPU::SCC, &RI))
+ KillsSCC = &MI;
+ }
+ if (MachineOperand *SccDef =
+ SCCValid->findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr))
+ SccDef->setIsDead(false);
+ if (KillsSCC)
+ KillsSCC->clearRegisterKills(AMDGPU::SCC, /*TRI=*/nullptr);
+ SCCRedefine->eraseFromParent();
+ return true;
+}
+
+static bool foldableSelect(const MachineInstr &Def) {
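+  // Match S_CSELECT* (non-zero imm), 0: the result is non-zero exactly when
+  // SCC was set, so a following s_cmp_lg_* against 0 is redundant.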
+ if (Def.getOpcode() != AMDGPU::S_CSELECT_B32 &&
+ Def.getOpcode() != AMDGPU::S_CSELECT_B64)
+ return false;
+ bool Op1IsNonZeroImm =
+ Def.getOperand(1).isImm() && Def.getOperand(1).getImm() != 0;
+ bool Op2IsZeroImm =
+ Def.getOperand(2).isImm() && Def.getOperand(2).getImm() == 0;
+ if (!Op1IsNonZeroImm || !Op2IsZeroImm)
+ return false;
+ return true;
+}
+
bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
Register SrcReg2, int64_t CmpMask,
int64_t CmpValue,
@@ -10637,19 +10673,6 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
if (!Def || Def->getParent() != CmpInstr.getParent())
return false;
- const auto foldableSelect = [](MachineInstr *Def) -> bool {
- if (Def->getOpcode() == AMDGPU::S_CSELECT_B32 ||
- Def->getOpcode() == AMDGPU::S_CSELECT_B64) {
- bool Op1IsNonZeroImm =
- Def->getOperand(1).isImm() && Def->getOperand(1).getImm() != 0;
- bool Op2IsZeroImm =
- Def->getOperand(2).isImm() && Def->getOperand(2).getImm() == 0;
- if (Op1IsNonZeroImm && Op2IsZeroImm)
- return true;
- }
- return false;
- };
-
// For S_OP that set SCC = DST!=0, do the transformation
//
// s_cmp_lg_* (S_OP ...), 0 => (S_OP ...)
@@ -10660,24 +10683,12 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
//
// s_cmp_lg_* (S_CSELECT* (non-zero imm), 0), 0 => (S_CSELECT* (non-zero
// imm), 0)
- if (!setsSCCifResultIsNonZero(*Def) && !foldableSelect(Def))
+ if (!setsSCCifResultIsNonZero(*Def) && !foldableSelect(*Def))
return false;
- MachineInstr *KillsSCC = nullptr;
- for (MachineInstr &MI :
- make_range(std::next(Def->getIterator()), CmpInstr.getIterator())) {
- if (MI.modifiesRegister(AMDGPU::SCC, &RI))
- return false;
- if (MI.killsRegister(AMDGPU::SCC, &RI))
- KillsSCC = &MI;
- }
+ if (!optimizeSCC(Def, &CmpInstr, RI))
+ return false;
- if (MachineOperand *SccDef =
- Def->findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr))
- SccDef->setIsDead(false);
- if (KillsSCC)
- KillsSCC->clearRegisterKills(AMDGPU::SCC, /*TRI=*/nullptr);
- CmpInstr.eraseFromParent();
return true;
};
@@ -10755,21 +10766,8 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
if (IsReversedCC && !MRI->hasOneNonDBGUse(DefReg))
return false;
- MachineInstr *KillsSCC = nullptr;
- for (MachineInstr &MI :
- make_range(std::next(Def->getIterator()), CmpInstr.getIterator())) {
- if (MI.modifiesRegister(AMDGPU::SCC, &RI))
- return false;
- if (MI.killsRegister(AMDGPU::SCC, &RI))
- KillsSCC = &MI;
- }
-
- MachineOperand *SccDef =
- Def->findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr);
- SccDef->setIsDead(false);
- if (KillsSCC)
- KillsSCC->clearRegisterKills(AMDGPU::SCC, /*TRI=*/nullptr);
- CmpInstr.eraseFromParent();
+ if (!optimizeSCC(Def, &CmpInstr, RI))
+ return false;
if (!MRI->use_nodbg_empty(DefReg)) {
assert(!IsReversedCC);
diff --git a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
index e5b4f6e..08f196b 100644
--- a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
+++ b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
@@ -884,13 +884,13 @@ CSKYTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
.Case("{t4}", CSKY::R20)
.Case("{t5}", CSKY::R21)
.Case("{t6}", CSKY::R22)
- .Cases("{t7}", "{fp}", CSKY::R23)
- .Cases("{t8}", "{top}", CSKY::R24)
- .Cases("{t9}", "{bsp}", CSKY::R25)
+ .Cases({"{t7}", "{fp}"}, CSKY::R23)
+ .Cases({"{t8}", "{top}"}, CSKY::R24)
+ .Cases({"{t9}", "{bsp}"}, CSKY::R25)
.Case("{r26}", CSKY::R26)
.Case("{r27}", CSKY::R27)
- .Cases("{gb}", "{rgb}", "{rdb}", CSKY::R28)
- .Cases("{tb}", "{rtb}", CSKY::R29)
+ .Cases({"{gb}", "{rgb}", "{rdb}"}, CSKY::R28)
+ .Cases({"{tb}", "{rtb}"}, CSKY::R29)
.Case("{svbr}", CSKY::R30)
.Case("{tls}", CSKY::R31)
.Default(CSKY::NoRegister);
@@ -907,38 +907,38 @@ CSKYTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
// use the ABI names in register constraint lists.
if (Subtarget.useHardFloat()) {
unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
- .Cases("{fr0}", "{vr0}", CSKY::F0_32)
- .Cases("{fr1}", "{vr1}", CSKY::F1_32)
- .Cases("{fr2}", "{vr2}", CSKY::F2_32)
- .Cases("{fr3}", "{vr3}", CSKY::F3_32)
- .Cases("{fr4}", "{vr4}", CSKY::F4_32)
- .Cases("{fr5}", "{vr5}", CSKY::F5_32)
- .Cases("{fr6}", "{vr6}", CSKY::F6_32)
- .Cases("{fr7}", "{vr7}", CSKY::F7_32)
- .Cases("{fr8}", "{vr8}", CSKY::F8_32)
- .Cases("{fr9}", "{vr9}", CSKY::F9_32)
- .Cases("{fr10}", "{vr10}", CSKY::F10_32)
- .Cases("{fr11}", "{vr11}", CSKY::F11_32)
- .Cases("{fr12}", "{vr12}", CSKY::F12_32)
- .Cases("{fr13}", "{vr13}", CSKY::F13_32)
- .Cases("{fr14}", "{vr14}", CSKY::F14_32)
- .Cases("{fr15}", "{vr15}", CSKY::F15_32)
- .Cases("{fr16}", "{vr16}", CSKY::F16_32)
- .Cases("{fr17}", "{vr17}", CSKY::F17_32)
- .Cases("{fr18}", "{vr18}", CSKY::F18_32)
- .Cases("{fr19}", "{vr19}", CSKY::F19_32)
- .Cases("{fr20}", "{vr20}", CSKY::F20_32)
- .Cases("{fr21}", "{vr21}", CSKY::F21_32)
- .Cases("{fr22}", "{vr22}", CSKY::F22_32)
- .Cases("{fr23}", "{vr23}", CSKY::F23_32)
- .Cases("{fr24}", "{vr24}", CSKY::F24_32)
- .Cases("{fr25}", "{vr25}", CSKY::F25_32)
- .Cases("{fr26}", "{vr26}", CSKY::F26_32)
- .Cases("{fr27}", "{vr27}", CSKY::F27_32)
- .Cases("{fr28}", "{vr28}", CSKY::F28_32)
- .Cases("{fr29}", "{vr29}", CSKY::F29_32)
- .Cases("{fr30}", "{vr30}", CSKY::F30_32)
- .Cases("{fr31}", "{vr31}", CSKY::F31_32)
+ .Cases({"{fr0}", "{vr0}"}, CSKY::F0_32)
+ .Cases({"{fr1}", "{vr1}"}, CSKY::F1_32)
+ .Cases({"{fr2}", "{vr2}"}, CSKY::F2_32)
+ .Cases({"{fr3}", "{vr3}"}, CSKY::F3_32)
+ .Cases({"{fr4}", "{vr4}"}, CSKY::F4_32)
+ .Cases({"{fr5}", "{vr5}"}, CSKY::F5_32)
+ .Cases({"{fr6}", "{vr6}"}, CSKY::F6_32)
+ .Cases({"{fr7}", "{vr7}"}, CSKY::F7_32)
+ .Cases({"{fr8}", "{vr8}"}, CSKY::F8_32)
+ .Cases({"{fr9}", "{vr9}"}, CSKY::F9_32)
+ .Cases({"{fr10}", "{vr10}"}, CSKY::F10_32)
+ .Cases({"{fr11}", "{vr11}"}, CSKY::F11_32)
+ .Cases({"{fr12}", "{vr12}"}, CSKY::F12_32)
+ .Cases({"{fr13}", "{vr13}"}, CSKY::F13_32)
+ .Cases({"{fr14}", "{vr14}"}, CSKY::F14_32)
+ .Cases({"{fr15}", "{vr15}"}, CSKY::F15_32)
+ .Cases({"{fr16}", "{vr16}"}, CSKY::F16_32)
+ .Cases({"{fr17}", "{vr17}"}, CSKY::F17_32)
+ .Cases({"{fr18}", "{vr18}"}, CSKY::F18_32)
+ .Cases({"{fr19}", "{vr19}"}, CSKY::F19_32)
+ .Cases({"{fr20}", "{vr20}"}, CSKY::F20_32)
+ .Cases({"{fr21}", "{vr21}"}, CSKY::F21_32)
+ .Cases({"{fr22}", "{vr22}"}, CSKY::F22_32)
+ .Cases({"{fr23}", "{vr23}"}, CSKY::F23_32)
+ .Cases({"{fr24}", "{vr24}"}, CSKY::F24_32)
+ .Cases({"{fr25}", "{vr25}"}, CSKY::F25_32)
+ .Cases({"{fr26}", "{vr26}"}, CSKY::F26_32)
+ .Cases({"{fr27}", "{vr27}"}, CSKY::F27_32)
+ .Cases({"{fr28}", "{vr28}"}, CSKY::F28_32)
+ .Cases({"{fr29}", "{vr29}"}, CSKY::F29_32)
+ .Cases({"{fr30}", "{vr30}"}, CSKY::F30_32)
+ .Cases({"{fr31}", "{vr31}"}, CSKY::F31_32)
.Default(CSKY::NoRegister);
if (FReg != CSKY::NoRegister) {
assert(CSKY::F0_32 <= FReg && FReg <= CSKY::F31_32 && "Unknown fp-reg");
diff --git a/llvm/lib/Target/Hexagon/HexagonDepIICHVX.td b/llvm/lib/Target/Hexagon/HexagonDepIICHVX.td
index f4e36fa7..e661c94 100644
--- a/llvm/lib/Target/Hexagon/HexagonDepIICHVX.td
+++ b/llvm/lib/Target/Hexagon/HexagonDepIICHVX.td
@@ -26,6 +26,7 @@ def tc_20a4bbec : InstrItinClass;
def tc_227864f7 : InstrItinClass;
def tc_257f6f7c : InstrItinClass;
def tc_26a377fe : InstrItinClass;
+def tc_2a698a03 : InstrItinClass;
def tc_2b4c548e : InstrItinClass;
def tc_2c745bb8 : InstrItinClass;
def tc_2d4051cd : InstrItinClass;
@@ -52,6 +53,7 @@ def tc_561aaa58 : InstrItinClass;
def tc_56c4f9fe : InstrItinClass;
def tc_56e64202 : InstrItinClass;
def tc_58d21193 : InstrItinClass;
+def tc_57a4709c : InstrItinClass;
def tc_5bf8afbb : InstrItinClass;
def tc_5cdf8c84 : InstrItinClass;
def tc_61bf7c03 : InstrItinClass;
@@ -220,6 +222,11 @@ class DepHVXItinV55 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -356,6 +363,11 @@ class DepHVXItinV55 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -812,6 +824,11 @@ class DepHVXItinV60 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -948,6 +965,11 @@ class DepHVXItinV60 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -1404,6 +1426,11 @@ class DepHVXItinV62 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -1540,6 +1567,11 @@ class DepHVXItinV62 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -1996,6 +2028,11 @@ class DepHVXItinV65 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -2132,6 +2169,11 @@ class DepHVXItinV65 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -2588,6 +2630,11 @@ class DepHVXItinV66 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -2724,6 +2771,11 @@ class DepHVXItinV66 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -3180,6 +3232,11 @@ class DepHVXItinV67 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -3316,6 +3373,11 @@ class DepHVXItinV67 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -3772,6 +3834,11 @@ class DepHVXItinV68 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -3908,6 +3975,11 @@ class DepHVXItinV68 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -4364,6 +4436,11 @@ class DepHVXItinV69 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -4500,6 +4577,11 @@ class DepHVXItinV69 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -4956,6 +5038,11 @@ class DepHVXItinV71 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -5092,6 +5179,11 @@ class DepHVXItinV71 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -5548,6 +5640,11 @@ class DepHVXItinV73 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -5684,6 +5781,11 @@ class DepHVXItinV73 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -6140,6 +6242,11 @@ class DepHVXItinV75 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -6276,6 +6383,11 @@ class DepHVXItinV75 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -6732,6 +6844,11 @@ class DepHVXItinV79 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -6868,6 +6985,11 @@ class DepHVXItinV79 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
@@ -7324,6 +7446,11 @@ class DepHVXItinV81 {
InstrStage<1, [CVI_ALL_NOMEM]>], [9, 3, 5, 2],
[HVX_FWD, Hex_FWD, HVX_FWD, Hex_FWD]>,
+ InstrItinData <tc_2a698a03, /*SLOT0123,VSorVP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT, CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
InstrItinData <tc_2b4c548e, /*SLOT23,VX_DV*/
[InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_MPY01]>], [9, 5, 5, 2],
@@ -7460,6 +7587,11 @@ class DepHVXItinV81 {
InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [7, 1, 2, 7, 7],
[HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+ InstrItinData <tc_57a4709c, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
InstrItinData <tc_5bf8afbb, /*SLOT0123,VP*/
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
InstrStage<1, [CVI_XLANE]>], [9, 2],
diff --git a/llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td b/llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td
index f8f1c2a..b188134 100644
--- a/llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td
+++ b/llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td
@@ -29939,6 +29939,58 @@ let opNewValue = 0;
let isCVI = 1;
let DecoderNamespace = "EXT_mmvec";
}
+def V6_vabs_qf16_hf : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf16 = vabs($Vu32.hf)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b110;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vabs_qf16_qf16 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf16 = vabs($Vu32.qf16)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b111;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vabs_qf32_qf32 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf32 = vabs($Vu32.qf32)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b101;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vabs_qf32_sf : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf32 = vabs($Vu32.sf)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b100;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
def V6_vabs_sf : HInst<
(outs HvxVR:$Vd32),
(ins HvxVR:$Vu32),
@@ -31302,6 +31354,21 @@ let isPseudo = 1;
let isCodeGenOnly = 1;
let DecoderNamespace = "EXT_mmvec";
}
+def V6_valign4 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32, HvxVR:$Vv32, IntRegsLow8:$Rt8),
+"$Vd32 = valign4($Vu32,$Vv32,$Rt8)",
+tc_57a4709c, TypeCVI_VA>, Enc_a30110, Requires<[UseHVXV81]> {
+let Inst{7-5} = 0b101;
+let Inst{13-13} = 0b0;
+let Inst{31-24} = 0b00011000;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let isHVXALU = 1;
+let isHVXALU2SRC = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
def V6_valignb : HInst<
(outs HvxVR:$Vd32),
(ins HvxVR:$Vu32, HvxVR:$Vv32, IntRegsLow8:$Rt8),
@@ -32583,6 +32650,32 @@ let isCVI = 1;
let hasHvxTmp = 1;
let DecoderNamespace = "EXT_mmvec";
}
+def V6_vconv_bf_qf32 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxWR:$Vuu32),
+"$Vd32.bf = $Vuu32.qf32",
+tc_2a698a03, TypeCVI_VS>, Enc_a33d04, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b111;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000000110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vconv_f8_qf16 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.f8 = $Vu32.qf16",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b111;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
def V6_vconv_h_hf : HInst<
(outs HvxVR:$Vd32),
(ins HvxVR:$Vu32),
@@ -32596,6 +32689,19 @@ let opNewValue = 0;
let isCVI = 1;
let DecoderNamespace = "EXT_mmvec";
}
+def V6_vconv_h_hf_rnd : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.h = $Vu32.hf:rnd",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81]> {
+let Inst{7-5} = 0b110;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000000110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
def V6_vconv_hf_h : HInst<
(outs HvxVR:$Vd32),
(ins HvxVR:$Vu32),
@@ -32635,6 +32741,71 @@ let opNewValue = 0;
let isCVI = 1;
let DecoderNamespace = "EXT_mmvec";
}
+def V6_vconv_qf16_f8 : HInst<
+(outs HvxWR:$Vdd32),
+(ins HvxVR:$Vu32),
+"$Vdd32.qf16 = $Vu32.f8",
+tc_04da405a, TypeCVI_VP_VS>, Enc_dd766a, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b101;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vconv_qf16_hf : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf16 = $Vu32.hf",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b100;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vconv_qf16_qf16 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf16 = $Vu32.qf16",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b110;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vconv_qf32_qf32 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf32 = $Vu32.qf32",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b111;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001101;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vconv_qf32_sf : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf32 = $Vu32.sf",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b110;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001101;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
def V6_vconv_sf_qf32 : HInst<
(outs HvxVR:$Vd32),
(ins HvxVR:$Vu32),
@@ -33720,6 +33891,122 @@ let isHVXALU2SRC = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Qx4 = $Qx4in";
}
+def V6_veqhf : HInst<
+(outs HvxQR:$Qd4),
+(ins HvxVR:$Vu32, HvxVR:$Vv32),
+"$Qd4 = vcmp.eq($Vu32.hf,$Vv32.hf)",
+tc_56c4f9fe, TypeCVI_VA>, Enc_95441f, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-2} = 0b000111;
+let Inst{13-13} = 0b0;
+let Inst{31-21} = 0b00011111100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let isHVXALU = 1;
+let isHVXALU2SRC = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_veqhf_and : HInst<
+(outs HvxQR:$Qx4),
+(ins HvxQR:$Qx4in, HvxVR:$Vu32, HvxVR:$Vv32),
+"$Qx4 &= vcmp.eq($Vu32.hf,$Vv32.hf)",
+tc_257f6f7c, TypeCVI_VA>, Enc_eaa9f8, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-2} = 0b000111;
+let Inst{13-13} = 0b1;
+let Inst{31-21} = 0b00011100100;
+let isCVI = 1;
+let isHVXALU = 1;
+let isHVXALU2SRC = 1;
+let DecoderNamespace = "EXT_mmvec";
+let Constraints = "$Qx4 = $Qx4in";
+}
+def V6_veqhf_or : HInst<
+(outs HvxQR:$Qx4),
+(ins HvxQR:$Qx4in, HvxVR:$Vu32, HvxVR:$Vv32),
+"$Qx4 |= vcmp.eq($Vu32.hf,$Vv32.hf)",
+tc_257f6f7c, TypeCVI_VA>, Enc_eaa9f8, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-2} = 0b010111;
+let Inst{13-13} = 0b1;
+let Inst{31-21} = 0b00011100100;
+let isAccumulator = 1;
+let isCVI = 1;
+let isHVXALU = 1;
+let isHVXALU2SRC = 1;
+let DecoderNamespace = "EXT_mmvec";
+let Constraints = "$Qx4 = $Qx4in";
+}
+def V6_veqhf_xor : HInst<
+(outs HvxQR:$Qx4),
+(ins HvxQR:$Qx4in, HvxVR:$Vu32, HvxVR:$Vv32),
+"$Qx4 ^= vcmp.eq($Vu32.hf,$Vv32.hf)",
+tc_257f6f7c, TypeCVI_VA>, Enc_eaa9f8, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-2} = 0b100111;
+let Inst{13-13} = 0b1;
+let Inst{31-21} = 0b00011100100;
+let isCVI = 1;
+let isHVXALU = 1;
+let isHVXALU2SRC = 1;
+let DecoderNamespace = "EXT_mmvec";
+let Constraints = "$Qx4 = $Qx4in";
+}
+def V6_veqsf : HInst<
+(outs HvxQR:$Qd4),
+(ins HvxVR:$Vu32, HvxVR:$Vv32),
+"$Qd4 = vcmp.eq($Vu32.sf,$Vv32.sf)",
+tc_56c4f9fe, TypeCVI_VA>, Enc_95441f, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-2} = 0b000011;
+let Inst{13-13} = 0b0;
+let Inst{31-21} = 0b00011111100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let isHVXALU = 1;
+let isHVXALU2SRC = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_veqsf_and : HInst<
+(outs HvxQR:$Qx4),
+(ins HvxQR:$Qx4in, HvxVR:$Vu32, HvxVR:$Vv32),
+"$Qx4 &= vcmp.eq($Vu32.sf,$Vv32.sf)",
+tc_257f6f7c, TypeCVI_VA>, Enc_eaa9f8, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-2} = 0b000011;
+let Inst{13-13} = 0b1;
+let Inst{31-21} = 0b00011100100;
+let isCVI = 1;
+let isHVXALU = 1;
+let isHVXALU2SRC = 1;
+let DecoderNamespace = "EXT_mmvec";
+let Constraints = "$Qx4 = $Qx4in";
+}
+def V6_veqsf_or : HInst<
+(outs HvxQR:$Qx4),
+(ins HvxQR:$Qx4in, HvxVR:$Vu32, HvxVR:$Vv32),
+"$Qx4 |= vcmp.eq($Vu32.sf,$Vv32.sf)",
+tc_257f6f7c, TypeCVI_VA>, Enc_eaa9f8, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-2} = 0b010011;
+let Inst{13-13} = 0b1;
+let Inst{31-21} = 0b00011100100;
+let isAccumulator = 1;
+let isCVI = 1;
+let isHVXALU = 1;
+let isHVXALU2SRC = 1;
+let DecoderNamespace = "EXT_mmvec";
+let Constraints = "$Qx4 = $Qx4in";
+}
+def V6_veqsf_xor : HInst<
+(outs HvxQR:$Qx4),
+(ins HvxQR:$Qx4in, HvxVR:$Vu32, HvxVR:$Vv32),
+"$Qx4 ^= vcmp.eq($Vu32.sf,$Vv32.sf)",
+tc_257f6f7c, TypeCVI_VA>, Enc_eaa9f8, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-2} = 0b100011;
+let Inst{13-13} = 0b1;
+let Inst{31-21} = 0b00011100100;
+let isCVI = 1;
+let isHVXALU = 1;
+let isHVXALU2SRC = 1;
+let DecoderNamespace = "EXT_mmvec";
+let Constraints = "$Qx4 = $Qx4in";
+}
def V6_veqw : HInst<
(outs HvxQR:$Qd4),
(ins HvxVR:$Vu32, HvxVR:$Vv32),
@@ -34538,6 +34825,58 @@ let Inst{31-24} = 0b00011110;
let isCVI = 1;
let DecoderNamespace = "EXT_mmvec";
}
+def V6_vilog2_hf : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.w = vilog2($Vu32.hf)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b011;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vilog2_qf16 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.w = vilog2($Vu32.qf16)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b001;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vilog2_qf32 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.w = vilog2($Vu32.qf32)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b000;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vilog2_sf : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.w = vilog2($Vu32.sf)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b010;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001100;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
def V6_vinsertwr : HInst<
(outs HvxVR:$Vx32),
(ins HvxVR:$Vx32in, IntRegs:$Rt32),
@@ -37170,6 +37509,58 @@ let isCVI = 1;
let isHVXALU = 1;
let DecoderNamespace = "EXT_mmvec";
}
+def V6_vneg_qf16_hf : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf16 = vneg($Vu32.hf)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b010;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vneg_qf16_qf16 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf16 = vneg($Vu32.qf16)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b011;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vneg_qf32_qf32 : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf32 = vneg($Vu32.qf32)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b001;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_vneg_qf32_sf : HInst<
+(outs HvxVR:$Vd32),
+(ins HvxVR:$Vu32),
+"$Vd32.qf32 = vneg($Vu32.sf)",
+tc_2a698a03, TypeCVI_VS>, Enc_e7581c, Requires<[UseHVXV81,UseHVXQFloat]> {
+let Inst{7-5} = 0b000;
+let Inst{13-13} = 0b1;
+let Inst{31-16} = 0b0001111000001110;
+let hasNewValue = 1;
+let opNewValue = 0;
+let isCVI = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
def V6_vnormamth : HInst<
(outs HvxVR:$Vd32),
(ins HvxVR:$Vu32),
diff --git a/llvm/lib/Target/Hexagon/HexagonDepMapAsm2Intrin.td b/llvm/lib/Target/Hexagon/HexagonDepMapAsm2Intrin.td
index 23f4b3a..c11483b 100644
--- a/llvm/lib/Target/Hexagon/HexagonDepMapAsm2Intrin.td
+++ b/llvm/lib/Target/Hexagon/HexagonDepMapAsm2Intrin.td
@@ -3830,6 +3830,122 @@ def: Pat<(int_hexagon_V6_vsub_hf_f8_128B HvxVR:$src1, HvxVR:$src2),
// V81 HVX Instructions.
+def: Pat<(int_hexagon_V6_vabs_qf16_hf HvxVR:$src1),
+ (V6_vabs_qf16_hf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vabs_qf16_hf_128B HvxVR:$src1),
+ (V6_vabs_qf16_hf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vabs_qf16_qf16 HvxVR:$src1),
+ (V6_vabs_qf16_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vabs_qf16_qf16_128B HvxVR:$src1),
+ (V6_vabs_qf16_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vabs_qf32_qf32 HvxVR:$src1),
+ (V6_vabs_qf32_qf32 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vabs_qf32_qf32_128B HvxVR:$src1),
+ (V6_vabs_qf32_qf32 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vabs_qf32_sf HvxVR:$src1),
+ (V6_vabs_qf32_sf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vabs_qf32_sf_128B HvxVR:$src1),
+ (V6_vabs_qf32_sf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_valign4 HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
+ (V6_valign4 HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>, Requires<[UseHVXV81, UseHVX64B]>;
+def: Pat<(int_hexagon_V6_valign4_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
+ (V6_valign4 HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>, Requires<[UseHVXV81, UseHVX128B]>;
+def: Pat<(int_hexagon_V6_vconv_bf_qf32 HvxWR:$src1),
+ (V6_vconv_bf_qf32 HvxWR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_bf_qf32_128B HvxWR:$src1),
+ (V6_vconv_bf_qf32 HvxWR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_f8_qf16 HvxVR:$src1),
+ (V6_vconv_f8_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_f8_qf16_128B HvxVR:$src1),
+ (V6_vconv_f8_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_h_hf_rnd HvxVR:$src1),
+ (V6_vconv_h_hf_rnd HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B]>;
+def: Pat<(int_hexagon_V6_vconv_h_hf_rnd_128B HvxVR:$src1),
+ (V6_vconv_h_hf_rnd HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B]>;
+def: Pat<(int_hexagon_V6_vconv_qf16_f8 HvxVR:$src1),
+ (V6_vconv_qf16_f8 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_qf16_f8_128B HvxVR:$src1),
+ (V6_vconv_qf16_f8 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_qf16_hf HvxVR:$src1),
+ (V6_vconv_qf16_hf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_qf16_hf_128B HvxVR:$src1),
+ (V6_vconv_qf16_hf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_qf16_qf16 HvxVR:$src1),
+ (V6_vconv_qf16_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_qf16_qf16_128B HvxVR:$src1),
+ (V6_vconv_qf16_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_qf32_qf32 HvxVR:$src1),
+ (V6_vconv_qf32_qf32 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_qf32_qf32_128B HvxVR:$src1),
+ (V6_vconv_qf32_qf32 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_qf32_sf HvxVR:$src1),
+ (V6_vconv_qf32_sf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vconv_qf32_sf_128B HvxVR:$src1),
+ (V6_vconv_qf32_sf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqhf HvxVR:$src1, HvxVR:$src2),
+ (V6_veqhf HvxVR:$src1, HvxVR:$src2)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqhf_128B HvxVR:$src1, HvxVR:$src2),
+ (V6_veqhf HvxVR:$src1, HvxVR:$src2)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqhf_and HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqhf_and HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqhf_and_128B HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqhf_and HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqhf_or HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqhf_or HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqhf_or_128B HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqhf_or HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqhf_xor HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqhf_xor HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqhf_xor_128B HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqhf_xor HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqsf HvxVR:$src1, HvxVR:$src2),
+ (V6_veqsf HvxVR:$src1, HvxVR:$src2)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqsf_128B HvxVR:$src1, HvxVR:$src2),
+ (V6_veqsf HvxVR:$src1, HvxVR:$src2)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqsf_and HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqsf_and HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqsf_and_128B HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqsf_and HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqsf_or HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqsf_or HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqsf_or_128B HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqsf_or HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqsf_xor HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqsf_xor HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_veqsf_xor_128B HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
+ (V6_veqsf_xor HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vilog2_hf HvxVR:$src1),
+ (V6_vilog2_hf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vilog2_hf_128B HvxVR:$src1),
+ (V6_vilog2_hf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vilog2_qf16 HvxVR:$src1),
+ (V6_vilog2_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vilog2_qf16_128B HvxVR:$src1),
+ (V6_vilog2_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vilog2_qf32 HvxVR:$src1),
+ (V6_vilog2_qf32 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vilog2_qf32_128B HvxVR:$src1),
+ (V6_vilog2_qf32 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vilog2_sf HvxVR:$src1),
+ (V6_vilog2_sf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vilog2_sf_128B HvxVR:$src1),
+ (V6_vilog2_sf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vneg_qf16_hf HvxVR:$src1),
+ (V6_vneg_qf16_hf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vneg_qf16_hf_128B HvxVR:$src1),
+ (V6_vneg_qf16_hf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vneg_qf16_qf16 HvxVR:$src1),
+ (V6_vneg_qf16_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vneg_qf16_qf16_128B HvxVR:$src1),
+ (V6_vneg_qf16_qf16 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vneg_qf32_qf32 HvxVR:$src1),
+ (V6_vneg_qf32_qf32 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vneg_qf32_qf32_128B HvxVR:$src1),
+ (V6_vneg_qf32_qf32 HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vneg_qf32_sf HvxVR:$src1),
+ (V6_vneg_qf32_sf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
+def: Pat<(int_hexagon_V6_vneg_qf32_sf_128B HvxVR:$src1),
+ (V6_vneg_qf32_sf HvxVR:$src1)>, Requires<[UseHVXV81, UseHVX128B, UseHVXQFloat]>;
def: Pat<(int_hexagon_V6_vsub_hf_mix HvxVR:$src1, HvxVR:$src2),
(V6_vsub_hf_mix HvxVR:$src1, HvxVR:$src2)>, Requires<[UseHVXV81, UseHVX64B, UseHVXQFloat]>;
def: Pat<(int_hexagon_V6_vsub_hf_mix_128B HvxVR:$src1, HvxVR:$src2),
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index a6de839..904aabed 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -371,6 +371,10 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
ISD::SETUGE, ISD::SETUGT},
VT, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
+ setOperationAction(ISD::FCEIL, VT, Legal);
+ setOperationAction(ISD::FFLOOR, VT, Legal);
+ setOperationAction(ISD::FTRUNC, VT, Legal);
+ setOperationAction(ISD::FROUNDEVEN, VT, Legal);
}
setOperationAction(ISD::CTPOP, GRLenVT, Legal);
setOperationAction(ISD::FCEIL, {MVT::f32, MVT::f64}, Legal);
@@ -453,6 +457,10 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
ISD::SETUGE, ISD::SETUGT},
VT, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
+ setOperationAction(ISD::FCEIL, VT, Legal);
+ setOperationAction(ISD::FFLOOR, VT, Legal);
+ setOperationAction(ISD::FTRUNC, VT, Legal);
+ setOperationAction(ISD::FROUNDEVEN, VT, Legal);
}
}
diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
index ca4ee5f..610ba05 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
@@ -2424,6 +2424,12 @@ def : Pat<(int_loongarch_lasx_xvpickve_w_f v8f32:$xj, timm:$imm),
def : Pat<(int_loongarch_lasx_xvpickve_d_f v4f64:$xj, timm:$imm),
(XVPICKVE_D v4f64:$xj, (to_valid_timm timm:$imm))>;
+// Vector floating-point conversion
+defm : PatXrF<fceil, "XVFRINTRP">;
+defm : PatXrF<ffloor, "XVFRINTRM">;
+defm : PatXrF<ftrunc, "XVFRINTRZ">;
+defm : PatXrF<froundeven, "XVFRINTRNE">;
+
// load
def : Pat<(int_loongarch_lasx_xvld GPR:$rj, timm:$imm),
(XVLD GPR:$rj, (to_valid_timm timm:$imm))>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index 92402ba..6470842 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -2552,6 +2552,11 @@ def : Pat<(f64 (froundeven FPR64:$fj)),
(f64 (EXTRACT_SUBREG (VFRINTRNE_D (VREPLVEI_D
(SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64), 0)), sub_64))>;
+defm : PatVrF<fceil, "VFRINTRP">;
+defm : PatVrF<ffloor, "VFRINTRM">;
+defm : PatVrF<ftrunc, "VFRINTRZ">;
+defm : PatVrF<froundeven, "VFRINTRNE">;
+
// load
def : Pat<(int_loongarch_lsx_vld GPR:$rj, timm:$imm),
(VLD GPR:$rj, (to_valid_timm timm:$imm))>;
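
The four ISD nodes made legal above each map onto one LoongArch vector rounding instruction: FCEIL to [X]VFRINTRP (round toward +inf), FFLOOR to [X]VFRINTRM (toward -inf), FTRUNC to [X]VFRINTRZ (toward zero), and FROUNDEVEN to [X]VFRINTRNE (to nearest, ties to even). A minimal C++ sketch of the corresponding scalar semantics, assuming the default FE_TONEAREST rounding mode so that std::rint behaves like roundeven:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  double X = 2.5;
  std::printf("ceil(%.1f)  = %.1f\n", X, std::ceil(X));  // VFRINTRP: 3.0
  std::printf("floor(%.1f) = %.1f\n", X, std::floor(X)); // VFRINTRM: 2.0
  std::printf("trunc(%.1f) = %.1f\n", X, std::trunc(X)); // VFRINTRZ: 2.0
  // Under the default FE_TONEAREST mode, rint rounds ties to even,
  // matching FROUNDEVEN/VFRINTRNE: 2.5 -> 2.0.
  std::printf("rint(%.1f)  = %.1f\n", X, std::rint(X));
  return 0;
}
```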
diff --git a/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp b/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp
index e37f3a66..fb5cd5c2 100644
--- a/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp
+++ b/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp
@@ -690,9 +690,9 @@ bool M68kAsmParser::parseRegisterName(MCRegister &RegNo, SMLoc Loc,
} else {
// Floating point control register.
RegNo = StringSwitch<unsigned>(RegisterNameLower)
- .Cases("fpc", "fpcr", M68k::FPC)
- .Cases("fps", "fpsr", M68k::FPS)
- .Cases("fpi", "fpiar", M68k::FPIAR)
+ .Cases({"fpc", "fpcr"}, M68k::FPC)
+ .Cases({"fps", "fpsr"}, M68k::FPS)
+ .Cases({"fpi", "fpiar"}, M68k::FPIAR)
.Default(M68k::NoRegister);
assert(RegNo != M68k::NoRegister &&
"Unrecognized FP control register name");
diff --git a/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp b/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp
index fe83dc6..51bafe4 100644
--- a/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp
+++ b/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp
@@ -49,7 +49,7 @@ public:
M68kAsmBackend(const Target &T, const MCSubtargetInfo &STI)
: MCAsmBackend(llvm::endianness::big),
Allows32BitBranch(llvm::StringSwitch<bool>(STI.getCPU())
- .CasesLower("m68020", "m68030", "m68040", true)
+ .CasesLower({"m68020", "m68030", "m68040"}, true)
.Default(false)) {}
void applyFixup(const MCFragment &, const MCFixup &, const MCValue &,
diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 97379d7..f588e56 100644
--- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -6176,7 +6176,7 @@ int MipsAsmParser::matchCPURegisterName(StringRef Name) {
CC = StringSwitch<unsigned>(Name)
.Case("zero", 0)
- .Cases("at", "AT", 1)
+ .Cases({"at", "AT"}, 1)
.Case("a0", 4)
.Case("a1", 5)
.Case("a2", 6)
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index e8758aa..50827bd 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -1562,12 +1562,17 @@ def : Pat<(int_nvvm_saturate_d f64:$a), (CVT_f64_f64 $a, CvtSAT)>;
// Exp2 Log2
//
-def : Pat<(int_nvvm_ex2_approx_ftz_f f32:$a), (EX2_APPROX_f32 $a, FTZ)>;
-def : Pat<(int_nvvm_ex2_approx_f f32:$a), (EX2_APPROX_f32 $a, NoFTZ)>;
+def : Pat<(f32 (int_nvvm_ex2_approx_ftz f32:$a)), (EX2_APPROX_f32 $a, FTZ)>;
+def : Pat<(f32 (int_nvvm_ex2_approx f32:$a)), (EX2_APPROX_f32 $a, NoFTZ)>;
let Predicates = [hasPTX<70>, hasSM<75>] in {
- def : Pat<(int_nvvm_ex2_approx_f16 f16:$a), (EX2_APPROX_f16 $a)>;
- def : Pat<(int_nvvm_ex2_approx_f16x2 v2f16:$a), (EX2_APPROX_f16x2 $a)>;
+ def : Pat<(f16 (int_nvvm_ex2_approx f16:$a)), (EX2_APPROX_f16 $a)>;
+ def : Pat<(v2f16 (int_nvvm_ex2_approx v2f16:$a)), (EX2_APPROX_f16x2 $a)>;
+}
+
+let Predicates = [hasPTX<78>, hasSM<90>] in {
+ def : Pat<(bf16 (int_nvvm_ex2_approx_ftz bf16:$a)), (EX2_APPROX_bf16 $a)>;
+ def : Pat<(v2bf16 (int_nvvm_ex2_approx_ftz v2bf16:$a)), (EX2_APPROX_bf16x2 $a)>;
}
def LG2_APPROX_f32 :
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index 729c077..64593e6 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -318,7 +318,7 @@ static Instruction *convertNvvmIntrinsicToLlvm(InstCombiner &IC,
// answer. These include:
//
// - nvvm_cos_approx_{f,ftz_f}
- // - nvvm_ex2_approx_{d,f,ftz_f}
+ // - nvvm_ex2_approx(_ftz)
// - nvvm_lg2_approx_{d,f,ftz_f}
// - nvvm_sin_approx_{f,ftz_f}
// - nvvm_sqrt_approx_{f,ftz_f}
diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index bcb3f50..780e124 100644
--- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -2702,7 +2702,7 @@ static bool isSpecialLLVMGlobalArrayToSkip(const GlobalVariable *GV) {
static bool isSpecialLLVMGlobalArrayForStaticInit(const GlobalVariable *GV) {
return StringSwitch<bool>(GV->getName())
- .Cases("llvm.global_ctors", "llvm.global_dtors", true)
+ .Cases({"llvm.global_ctors", "llvm.global_dtors"}, true)
.Default(false);
}
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 2fba090..b04e887 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -912,7 +912,7 @@ bool PPCTTIImpl::areInlineCompatible(const Function *Caller,
bool PPCTTIImpl::areTypesABICompatible(const Function *Caller,
const Function *Callee,
- const ArrayRef<Type *> &Types) const {
+ ArrayRef<Type *> Types) const {
// We need to ensure that argument promotion does not
// attempt to promote pointers to MMA types (__vector_pair
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
index 475472a..8d7f255 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
@@ -147,7 +147,7 @@ public:
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const override;
bool areTypesABICompatible(const Function *Caller, const Function *Callee,
- const ArrayRef<Type *> &Types) const override;
+ ArrayRef<Type *> Types) const override;
bool supportsTailCallFor(const CallBase *CB) const override;
private:
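
`ArrayRef` is a non-owning pointer-plus-length view, so the `const ArrayRef<Type *> &` parameters above gained nothing from the extra reference; the change passes the view by value, the idiomatic calling convention for cheap view types in LLVM. A small illustrative contrast (the function names are hypothetical):

```cpp
#include "llvm/ADT/ArrayRef.h"

// By const reference: the caller must materialize an ArrayRef object in
// memory and the callee reads it through an extra indirection.
static int sumRef(const llvm::ArrayRef<int> &Vals) {
  int S = 0;
  for (int V : Vals)
    S += V;
  return S;
}

// By value: the two-word view is typically passed in registers, which is
// at least as cheap and matches how ArrayRef/StringRef are meant to be used.
static int sumVal(llvm::ArrayRef<int> Vals) {
  int S = 0;
  for (int V : Vals)
    S += V;
  return S;
}
```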
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c6a8b84..e0cf739 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -23946,7 +23946,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
.Case("{t0}", RISCV::X5)
.Case("{t1}", RISCV::X6)
.Case("{t2}", RISCV::X7)
- .Cases("{s0}", "{fp}", RISCV::X8)
+ .Cases({"{s0}", "{fp}"}, RISCV::X8)
.Case("{s1}", RISCV::X9)
.Case("{a0}", RISCV::X10)
.Case("{a1}", RISCV::X11)
@@ -23983,38 +23983,38 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
// use the ABI names in register constraint lists.
if (Subtarget.hasStdExtF()) {
unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
- .Cases("{f0}", "{ft0}", RISCV::F0_F)
- .Cases("{f1}", "{ft1}", RISCV::F1_F)
- .Cases("{f2}", "{ft2}", RISCV::F2_F)
- .Cases("{f3}", "{ft3}", RISCV::F3_F)
- .Cases("{f4}", "{ft4}", RISCV::F4_F)
- .Cases("{f5}", "{ft5}", RISCV::F5_F)
- .Cases("{f6}", "{ft6}", RISCV::F6_F)
- .Cases("{f7}", "{ft7}", RISCV::F7_F)
- .Cases("{f8}", "{fs0}", RISCV::F8_F)
- .Cases("{f9}", "{fs1}", RISCV::F9_F)
- .Cases("{f10}", "{fa0}", RISCV::F10_F)
- .Cases("{f11}", "{fa1}", RISCV::F11_F)
- .Cases("{f12}", "{fa2}", RISCV::F12_F)
- .Cases("{f13}", "{fa3}", RISCV::F13_F)
- .Cases("{f14}", "{fa4}", RISCV::F14_F)
- .Cases("{f15}", "{fa5}", RISCV::F15_F)
- .Cases("{f16}", "{fa6}", RISCV::F16_F)
- .Cases("{f17}", "{fa7}", RISCV::F17_F)
- .Cases("{f18}", "{fs2}", RISCV::F18_F)
- .Cases("{f19}", "{fs3}", RISCV::F19_F)
- .Cases("{f20}", "{fs4}", RISCV::F20_F)
- .Cases("{f21}", "{fs5}", RISCV::F21_F)
- .Cases("{f22}", "{fs6}", RISCV::F22_F)
- .Cases("{f23}", "{fs7}", RISCV::F23_F)
- .Cases("{f24}", "{fs8}", RISCV::F24_F)
- .Cases("{f25}", "{fs9}", RISCV::F25_F)
- .Cases("{f26}", "{fs10}", RISCV::F26_F)
- .Cases("{f27}", "{fs11}", RISCV::F27_F)
- .Cases("{f28}", "{ft8}", RISCV::F28_F)
- .Cases("{f29}", "{ft9}", RISCV::F29_F)
- .Cases("{f30}", "{ft10}", RISCV::F30_F)
- .Cases("{f31}", "{ft11}", RISCV::F31_F)
+ .Cases({"{f0}", "{ft0}"}, RISCV::F0_F)
+ .Cases({"{f1}", "{ft1}"}, RISCV::F1_F)
+ .Cases({"{f2}", "{ft2}"}, RISCV::F2_F)
+ .Cases({"{f3}", "{ft3}"}, RISCV::F3_F)
+ .Cases({"{f4}", "{ft4}"}, RISCV::F4_F)
+ .Cases({"{f5}", "{ft5}"}, RISCV::F5_F)
+ .Cases({"{f6}", "{ft6}"}, RISCV::F6_F)
+ .Cases({"{f7}", "{ft7}"}, RISCV::F7_F)
+ .Cases({"{f8}", "{fs0}"}, RISCV::F8_F)
+ .Cases({"{f9}", "{fs1}"}, RISCV::F9_F)
+ .Cases({"{f10}", "{fa0}"}, RISCV::F10_F)
+ .Cases({"{f11}", "{fa1}"}, RISCV::F11_F)
+ .Cases({"{f12}", "{fa2}"}, RISCV::F12_F)
+ .Cases({"{f13}", "{fa3}"}, RISCV::F13_F)
+ .Cases({"{f14}", "{fa4}"}, RISCV::F14_F)
+ .Cases({"{f15}", "{fa5}"}, RISCV::F15_F)
+ .Cases({"{f16}", "{fa6}"}, RISCV::F16_F)
+ .Cases({"{f17}", "{fa7}"}, RISCV::F17_F)
+ .Cases({"{f18}", "{fs2}"}, RISCV::F18_F)
+ .Cases({"{f19}", "{fs3}"}, RISCV::F19_F)
+ .Cases({"{f20}", "{fs4}"}, RISCV::F20_F)
+ .Cases({"{f21}", "{fs5}"}, RISCV::F21_F)
+ .Cases({"{f22}", "{fs6}"}, RISCV::F22_F)
+ .Cases({"{f23}", "{fs7}"}, RISCV::F23_F)
+ .Cases({"{f24}", "{fs8}"}, RISCV::F24_F)
+ .Cases({"{f25}", "{fs9}"}, RISCV::F25_F)
+ .Cases({"{f26}", "{fs10}"}, RISCV::F26_F)
+ .Cases({"{f27}", "{fs11}"}, RISCV::F27_F)
+ .Cases({"{f28}", "{ft8}"}, RISCV::F28_F)
+ .Cases({"{f29}", "{ft9}"}, RISCV::F29_F)
+ .Cases({"{f30}", "{ft10}"}, RISCV::F30_F)
+ .Cases({"{f31}", "{ft11}"}, RISCV::F31_F)
.Default(RISCV::NoRegister);
if (FReg != RISCV::NoRegister) {
assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h
index 7845cdf..1bfc61f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h
@@ -76,7 +76,7 @@ public:
BlockSet.insert(MBB);
}
ArrayRef<MachineBasicBlock *> getBlocks() const { return Blocks; }
- using block_iterator = typename ArrayRef<MachineBasicBlock *>::const_iterator;
+ using block_iterator = ArrayRef<MachineBasicBlock *>::const_iterator;
block_iterator block_begin() const { return getBlocks().begin(); }
block_iterator block_end() const { return getBlocks().end(); }
inline iterator_range<block_iterator> blocks() const {
@@ -96,7 +96,7 @@ public:
void addSubException(std::unique_ptr<WebAssemblyException> E) {
SubExceptions.push_back(std::move(E));
}
- using iterator = typename decltype(SubExceptions)::const_iterator;
+ using iterator = decltype(SubExceptions)::const_iterator;
iterator begin() const { return SubExceptions.begin(); }
iterator end() const { return SubExceptions.end(); }
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
index ff4d6469..ee575e3 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
@@ -207,8 +207,7 @@ template <> struct MappingTraits<WebAssemblyFunctionInfo> {
template <> struct CustomMappingTraits<BBNumberMap> {
static void inputOne(IO &YamlIO, StringRef Key,
BBNumberMap &SrcToUnwindDest) {
- YamlIO.mapRequired(Key.str().c_str(),
- SrcToUnwindDest[std::atoi(Key.str().c_str())]);
+ YamlIO.mapRequired(Key, SrcToUnwindDest[std::atoi(Key.str().c_str())]);
}
static void output(IO &YamlIO, BBNumberMap &SrcToUnwindDest) {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.h b/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.h
index e92bf17..96b8a4e 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.h
@@ -35,7 +35,7 @@ public:
virtual MachineBasicBlock *getHeader() const = 0;
virtual bool contains(const MachineBasicBlock *MBB) const = 0;
virtual unsigned getNumBlocks() const = 0;
- using block_iterator = typename ArrayRef<MachineBasicBlock *>::const_iterator;
+ using block_iterator = ArrayRef<MachineBasicBlock *>::const_iterator;
virtual iterator_range<block_iterator> blocks() const = 0;
virtual bool isLoop() const = 0;
};
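
The `typename` keyword is only needed before a qualified name that depends on a template parameter; aliases like `ArrayRef<MachineBasicBlock *>::const_iterator` are fully concrete, so the removals above are pure noise reduction. A minimal sketch of when the keyword is and is not required:

```cpp
#include <vector>

template <typename T> struct Wrapper {
  // Dependent on T: 'typename' is required here (pre-C++20 rules).
  using iterator = typename std::vector<T>::const_iterator;
};

// Not dependent: the nested type is known immediately, so no 'typename',
// matching the cleanups in the hunks above.
using IntIterator = std::vector<int>::const_iterator;
```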
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index b7ea672..bac3692 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -2470,10 +2470,10 @@ bool X86AsmParser::ParseIntelOffsetOperator(const MCExpr *&Val, StringRef &ID,
// Report back its kind, or IOK_INVALID if it does not evaluate as a known one
unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(StringRef Name) {
return StringSwitch<unsigned>(Name)
- .Cases("TYPE","type",IOK_TYPE)
- .Cases("SIZE","size",IOK_SIZE)
- .Cases("LENGTH","length",IOK_LENGTH)
- .Default(IOK_INVALID);
+ .Cases({"TYPE", "type"}, IOK_TYPE)
+ .Cases({"SIZE", "size"}, IOK_SIZE)
+ .Cases({"LENGTH", "length"}, IOK_LENGTH)
+ .Default(IOK_INVALID);
}
/// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators. The LENGTH operator
@@ -2516,8 +2516,8 @@ unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
unsigned X86AsmParser::IdentifyMasmOperator(StringRef Name) {
return StringSwitch<unsigned>(Name.lower())
.Case("type", MOK_TYPE)
- .Cases("size", "sizeof", MOK_SIZEOF)
- .Cases("length", "lengthof", MOK_LENGTHOF)
+ .Cases({"size", "sizeof"}, MOK_SIZEOF)
+ .Cases({"length", "lengthof"}, MOK_LENGTHOF)
.Default(MOK_INVALID);
}
@@ -2581,21 +2581,21 @@ bool X86AsmParser::ParseMasmOperator(unsigned OpKind, int64_t &Val) {
bool X86AsmParser::ParseIntelMemoryOperandSize(unsigned &Size,
StringRef *SizeStr) {
Size = StringSwitch<unsigned>(getTok().getString())
- .Cases("BYTE", "byte", 8)
- .Cases("WORD", "word", 16)
- .Cases("DWORD", "dword", 32)
- .Cases("FLOAT", "float", 32)
- .Cases("LONG", "long", 32)
- .Cases("FWORD", "fword", 48)
- .Cases("DOUBLE", "double", 64)
- .Cases("QWORD", "qword", 64)
- .Cases("MMWORD","mmword", 64)
- .Cases("XWORD", "xword", 80)
- .Cases("TBYTE", "tbyte", 80)
- .Cases("XMMWORD", "xmmword", 128)
- .Cases("YMMWORD", "ymmword", 256)
- .Cases("ZMMWORD", "zmmword", 512)
- .Default(0);
+ .Cases({"BYTE", "byte"}, 8)
+ .Cases({"WORD", "word"}, 16)
+ .Cases({"DWORD", "dword"}, 32)
+ .Cases({"FLOAT", "float"}, 32)
+ .Cases({"LONG", "long"}, 32)
+ .Cases({"FWORD", "fword"}, 48)
+ .Cases({"DOUBLE", "double"}, 64)
+ .Cases({"QWORD", "qword"}, 64)
+ .Cases({"MMWORD", "mmword"}, 64)
+ .Cases({"XWORD", "xword"}, 80)
+ .Cases({"TBYTE", "tbyte"}, 80)
+ .Cases({"XMMWORD", "xmmword"}, 128)
+ .Cases({"YMMWORD", "ymmword"}, 256)
+ .Cases({"ZMMWORD", "zmmword"}, 512)
+ .Default(0);
if (Size) {
if (SizeStr)
*SizeStr = getTok().getString();
@@ -2886,22 +2886,22 @@ bool X86AsmParser::parseATTOperand(OperandVector &Operands) {
// otherwise the EFLAGS Condition Code enumerator.
X86::CondCode X86AsmParser::ParseConditionCode(StringRef CC) {
return StringSwitch<X86::CondCode>(CC)
- .Case("o", X86::COND_O) // Overflow
- .Case("no", X86::COND_NO) // No Overflow
- .Cases("b", "nae", X86::COND_B) // Below/Neither Above nor Equal
- .Cases("ae", "nb", X86::COND_AE) // Above or Equal/Not Below
- .Cases("e", "z", X86::COND_E) // Equal/Zero
- .Cases("ne", "nz", X86::COND_NE) // Not Equal/Not Zero
- .Cases("be", "na", X86::COND_BE) // Below or Equal/Not Above
- .Cases("a", "nbe", X86::COND_A) // Above/Neither Below nor Equal
- .Case("s", X86::COND_S) // Sign
- .Case("ns", X86::COND_NS) // No Sign
- .Cases("p", "pe", X86::COND_P) // Parity/Parity Even
- .Cases("np", "po", X86::COND_NP) // No Parity/Parity Odd
- .Cases("l", "nge", X86::COND_L) // Less/Neither Greater nor Equal
- .Cases("ge", "nl", X86::COND_GE) // Greater or Equal/Not Less
- .Cases("le", "ng", X86::COND_LE) // Less or Equal/Not Greater
- .Cases("g", "nle", X86::COND_G) // Greater/Neither Less nor Equal
+ .Case("o", X86::COND_O) // Overflow
+ .Case("no", X86::COND_NO) // No Overflow
+ .Cases({"b", "nae"}, X86::COND_B) // Below/Neither Above nor Equal
+ .Cases({"ae", "nb"}, X86::COND_AE) // Above or Equal/Not Below
+ .Cases({"e", "z"}, X86::COND_E) // Equal/Zero
+ .Cases({"ne", "nz"}, X86::COND_NE) // Not Equal/Not Zero
+ .Cases({"be", "na"}, X86::COND_BE) // Below or Equal/Not Above
+ .Cases({"a", "nbe"}, X86::COND_A) // Above/Neither Below nor Equal
+ .Case("s", X86::COND_S) // Sign
+ .Case("ns", X86::COND_NS) // No Sign
+ .Cases({"p", "pe"}, X86::COND_P) // Parity/Parity Even
+ .Cases({"np", "po"}, X86::COND_NP) // No Parity/Parity Odd
+ .Cases({"l", "nge"}, X86::COND_L) // Less/Neither Greater nor Equal
+ .Cases({"ge", "nl"}, X86::COND_GE) // Greater or Equal/Not Less
+ .Cases({"le", "ng"}, X86::COND_LE) // Less or Equal/Not Greater
+ .Cases({"g", "nle"}, X86::COND_G) // Greater/Neither Less nor Equal
.Default(X86::COND_INVALID);
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 007074c..133406b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -22861,6 +22861,13 @@ static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y,
if (!OpVT.isScalarInteger() || OpSize < 128)
return SDValue();
+ // Don't do this if we're not supposed to use the FPU.
+ bool NoImplicitFloatOps =
+ DAG.getMachineFunction().getFunction().hasFnAttribute(
+ Attribute::NoImplicitFloat);
+ if (Subtarget.useSoftFloat() || NoImplicitFloatOps)
+ return SDValue();
+
// Ignore a comparison with zero because that gets special treatment in
// EmitTest(). But make an exception for the special case of a pair of
// logically-combined vector-sized operands compared to zero. This pattern may
@@ -22883,13 +22890,9 @@ static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y,
// Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
// Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
// Otherwise use PCMPEQ (plus AND) and mask testing.
- bool NoImplicitFloatOps =
- DAG.getMachineFunction().getFunction().hasFnAttribute(
- Attribute::NoImplicitFloat);
- if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
- ((OpSize == 128 && Subtarget.hasSSE2()) ||
- (OpSize == 256 && Subtarget.hasAVX()) ||
- (OpSize == 512 && Subtarget.useAVX512Regs()))) {
+ if ((OpSize == 128 && Subtarget.hasSSE2()) ||
+ (OpSize == 256 && Subtarget.hasAVX()) ||
+ (OpSize == 512 && Subtarget.useAVX512Regs())) {
bool HasPT = Subtarget.hasSSE41();
// PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
@@ -53344,105 +53347,6 @@ static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-// Look for a RMW operation that only touches one bit of a larger than legal
-// type and fold it to a BTC/BTR/BTS or bit insertion pattern acting on a single
-// i32 sub value.
-static SDValue narrowBitOpRMW(StoreSDNode *St, const SDLoc &DL,
- SelectionDAG &DAG,
- const X86Subtarget &Subtarget) {
- using namespace SDPatternMatch;
-
- // Only handle normal stores and its chain was a matching normal load.
- auto *Ld = dyn_cast<LoadSDNode>(St->getChain());
- if (!ISD::isNormalStore(St) || !St->isSimple() || !Ld ||
- !ISD::isNormalLoad(Ld) || !Ld->isSimple() ||
- Ld->getBasePtr() != St->getBasePtr() ||
- Ld->getOffset() != St->getOffset())
- return SDValue();
-
- SDValue LoadVal(Ld, 0);
- SDValue StoredVal = St->getValue();
- EVT VT = StoredVal.getValueType();
-
- // Only narrow larger than legal scalar integers.
- if (!VT.isScalarInteger() ||
- VT.getSizeInBits() <= (Subtarget.is64Bit() ? 64 : 32))
- return SDValue();
-
- // BTR: X & ~(1 << ShAmt)
- // BTS: X | (1 << ShAmt)
- // BTC: X ^ (1 << ShAmt)
- //
- // BitInsert: (X & ~(1 << ShAmt)) | (InsertBit << ShAmt)
- SDValue InsertBit, ShAmt;
- if (!StoredVal.hasOneUse() ||
- !(sd_match(StoredVal, m_And(m_Specific(LoadVal),
- m_Not(m_Shl(m_One(), m_Value(ShAmt))))) ||
- sd_match(StoredVal,
- m_Or(m_Specific(LoadVal), m_Shl(m_One(), m_Value(ShAmt)))) ||
- sd_match(StoredVal,
- m_Xor(m_Specific(LoadVal), m_Shl(m_One(), m_Value(ShAmt)))) ||
- sd_match(StoredVal,
- m_Or(m_And(m_Specific(LoadVal),
- m_Not(m_Shl(m_One(), m_Value(ShAmt)))),
- m_Shl(m_Value(InsertBit), m_Deferred(ShAmt))))))
- return SDValue();
-
- // Ensure the shift amount is in bounds.
- KnownBits KnownAmt = DAG.computeKnownBits(ShAmt);
- if (KnownAmt.getMaxValue().uge(VT.getSizeInBits()))
- return SDValue();
-
- // If we're inserting a bit then it must be the LSB.
- if (InsertBit) {
- KnownBits KnownInsert = DAG.computeKnownBits(InsertBit);
- if (KnownInsert.countMinLeadingZeros() < (VT.getSizeInBits() - 1))
- return SDValue();
- }
-
- // Split the shift into an alignment shift that moves the active i32 block to
- // the bottom bits for truncation and a modulo shift that can act on the i32.
- EVT AmtVT = ShAmt.getValueType();
- SDValue AlignAmt = DAG.getNode(ISD::AND, DL, AmtVT, ShAmt,
- DAG.getSignedConstant(-32LL, DL, AmtVT));
- SDValue ModuloAmt =
- DAG.getNode(ISD::AND, DL, AmtVT, ShAmt, DAG.getConstant(31, DL, AmtVT));
- ModuloAmt = DAG.getZExtOrTrunc(ModuloAmt, DL, MVT::i8);
-
- // Compute the byte offset for the i32 block that is changed by the RMW.
- // combineTruncate will adjust the load for us in a similar way.
- EVT PtrVT = St->getBasePtr().getValueType();
- SDValue PtrBitOfs = DAG.getZExtOrTrunc(AlignAmt, DL, PtrVT);
- SDValue PtrByteOfs = DAG.getNode(ISD::SRL, DL, PtrVT, PtrBitOfs,
- DAG.getShiftAmountConstant(3, PtrVT, DL));
- SDValue NewPtr = DAG.getMemBasePlusOffset(St->getBasePtr(), PtrByteOfs, DL,
- SDNodeFlags::NoUnsignedWrap);
-
- // Reconstruct the BTC/BTR/BTS pattern for the i32 block and store.
- SDValue X = DAG.getNode(ISD::SRL, DL, VT, LoadVal, AlignAmt);
- X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
-
- SDValue Mask = DAG.getNode(ISD::SHL, DL, MVT::i32,
- DAG.getConstant(1, DL, MVT::i32), ModuloAmt);
-
- SDValue Res;
- if (InsertBit) {
- SDValue BitMask =
- DAG.getNode(ISD::SHL, DL, MVT::i32,
- DAG.getZExtOrTrunc(InsertBit, DL, MVT::i32), ModuloAmt);
- Res =
- DAG.getNode(ISD::AND, DL, MVT::i32, X, DAG.getNOT(DL, Mask, MVT::i32));
- Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, BitMask);
- } else {
- if (StoredVal.getOpcode() == ISD::AND)
- Mask = DAG.getNOT(DL, Mask, MVT::i32);
- Res = DAG.getNode(StoredVal.getOpcode(), DL, MVT::i32, X, Mask);
- }
-
- return DAG.getStore(St->getChain(), DL, Res, NewPtr, St->getPointerInfo(),
- Align(), St->getMemOperand()->getFlags());
-}
-
static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
@@ -53669,9 +53573,6 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
}
}
- if (SDValue R = narrowBitOpRMW(St, dl, DAG, Subtarget))
- return R;
-
// Convert store(cmov(load(p), x, CC), p) to cstore(x, p, CC)
// store(cmov(x, load(p), CC), p) to cstore(x, p, InvertCC)
if ((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
@@ -54604,9 +54505,8 @@ static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
// truncation, see if we can convert the shift into a pointer offset instead.
// Limit this to normal (non-ext) scalar integer loads.
if (SrcVT.isScalarInteger() && Src.getOpcode() == ISD::SRL &&
- Src.hasOneUse() && ISD::isNormalLoad(Src.getOperand(0).getNode()) &&
- (Src.getOperand(0).hasOneUse() ||
- !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, SrcVT))) {
+ Src.hasOneUse() && Src.getOperand(0).hasOneUse() &&
+ ISD::isNormalLoad(Src.getOperand(0).getNode())) {
auto *Ld = cast<LoadSDNode>(Src.getOperand(0));
if (Ld->isSimple() && VT.isByteSized() &&
isPowerOf2_64(VT.getSizeInBits())) {
@@ -56406,7 +56306,6 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
- using namespace SDPatternMatch;
const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
const SDValue LHS = N->getOperand(0);
const SDValue RHS = N->getOperand(1);
@@ -56465,37 +56364,6 @@ static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
- // If we're performing a bit test on a larger than legal type, attempt
- // to (aligned) shift down the value to the bottom 32-bits and then
- // perform the bittest on the i32 value.
- // ICMP_ZERO(AND(X,SHL(1,IDX)))
- // --> ICMP_ZERO(AND(TRUNC(SRL(X,AND(IDX,-32))),SHL(1,AND(IDX,31))))
- if (isNullConstant(RHS) &&
- OpVT.getScalarSizeInBits() > (Subtarget.is64Bit() ? 64 : 32)) {
- SDValue X, ShAmt;
- if (sd_match(LHS, m_OneUse(m_And(m_Value(X),
- m_Shl(m_One(), m_Value(ShAmt)))))) {
- // Only attempt this if the shift amount is known to be in bounds.
- KnownBits KnownAmt = DAG.computeKnownBits(ShAmt);
- if (KnownAmt.getMaxValue().ult(OpVT.getScalarSizeInBits())) {
- EVT AmtVT = ShAmt.getValueType();
- SDValue AlignAmt =
- DAG.getNode(ISD::AND, DL, AmtVT, ShAmt,
- DAG.getSignedConstant(-32LL, DL, AmtVT));
- SDValue ModuloAmt = DAG.getNode(ISD::AND, DL, AmtVT, ShAmt,
- DAG.getConstant(31, DL, AmtVT));
- SDValue Mask = DAG.getNode(
- ISD::SHL, DL, MVT::i32, DAG.getConstant(1, DL, MVT::i32),
- DAG.getZExtOrTrunc(ModuloAmt, DL, MVT::i8));
- X = DAG.getNode(ISD::SRL, DL, OpVT, X, AlignAmt);
- X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
- X = DAG.getNode(ISD::AND, DL, MVT::i32, X, Mask);
- return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, MVT::i32),
- CC);
- }
- }
- }
-
// cmpeq(trunc(x),C) --> cmpeq(x,C)
// cmpne(trunc(x),C) --> cmpne(x,C)
// iff x upper bits are zero.
diff --git a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
index 090060e..b655183 100644
--- a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
@@ -115,9 +115,9 @@ struct MachineGadgetGraph : ImmutableGraph<MachineInstr *, int> {
static constexpr MachineInstr *const ArgNodeSentinel = nullptr;
using GraphT = ImmutableGraph<MachineInstr *, int>;
- using Node = typename GraphT::Node;
- using Edge = typename GraphT::Edge;
- using size_type = typename GraphT::size_type;
+ using Node = GraphT::Node;
+ using Edge = GraphT::Edge;
+ using size_type = GraphT::size_type;
MachineGadgetGraph(std::unique_ptr<Node[]> Nodes,
std::unique_ptr<Edge[]> Edges, size_type NodesSize,
size_type EdgesSize, int NumFences = 0, int NumGadgets = 0)
@@ -191,10 +191,10 @@ template <>
struct DOTGraphTraits<MachineGadgetGraph *> : DefaultDOTGraphTraits {
using GraphType = MachineGadgetGraph;
using Traits = llvm::GraphTraits<GraphType *>;
- using NodeRef = typename Traits::NodeRef;
- using EdgeRef = typename Traits::EdgeRef;
- using ChildIteratorType = typename Traits::ChildIteratorType;
- using ChildEdgeIteratorType = typename Traits::ChildEdgeIteratorType;
+ using NodeRef = Traits::NodeRef;
+ using EdgeRef = Traits::EdgeRef;
+ using ChildIteratorType = Traits::ChildIteratorType;
+ using ChildEdgeIteratorType = Traits::ChildEdgeIteratorType;
DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
@@ -335,7 +335,7 @@ X86LoadValueInjectionLoadHardeningPass::getGadgetGraph(
L.computePhiInfo();
GraphBuilder Builder;
- using GraphIter = typename GraphBuilder::BuilderNodeRef;
+ using GraphIter = GraphBuilder::BuilderNodeRef;
DenseMap<MachineInstr *, GraphIter> NodeMap;
int FenceCount = 0, GadgetCount = 0;
auto MaybeAddNode = [&NodeMap, &Builder](MachineInstr *MI) {
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 3d8d0a23..0b1430e 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -6562,7 +6562,7 @@ bool X86TTIImpl::areInlineCompatible(const Function *Caller,
bool X86TTIImpl::areTypesABICompatible(const Function *Caller,
const Function *Callee,
- const ArrayRef<Type *> &Types) const {
+ ArrayRef<Type *> Types) const {
if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
return false;
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
index 133b366..de5e1c2 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -296,7 +296,7 @@ public:
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const override;
bool areTypesABICompatible(const Function *Caller, const Function *Callee,
- const ArrayRef<Type *> &Type) const override;
+ ArrayRef<Type *> Type) const override;
uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override {
return ST->getMaxInlineSizeThreshold();
diff --git a/llvm/lib/TargetParser/PPCTargetParser.cpp b/llvm/lib/TargetParser/PPCTargetParser.cpp
index d510445..f74d670 100644
--- a/llvm/lib/TargetParser/PPCTargetParser.cpp
+++ b/llvm/lib/TargetParser/PPCTargetParser.cpp
@@ -48,9 +48,9 @@ StringRef normalizeCPUName(StringRef CPUName) {
// accepting it. Clang has always ignored it and passed the
// generic CPU ID to the back end.
return StringSwitch<StringRef>(CPUName)
- .Cases("common", "405", "generic")
- .Cases("ppc440", "440fp", "440")
- .Cases("630", "power3", "pwr3")
+ .Cases({"common", "405"}, "generic")
+ .Cases({"ppc440", "440fp"}, "440")
+ .Cases({"630", "power3"}, "pwr3")
.Case("G3", "g3")
.Case("G4", "g4")
.Case("G4+", "g4+")
@@ -69,7 +69,7 @@ StringRef normalizeCPUName(StringRef CPUName) {
.Case("power9", "pwr9")
.Case("power10", "pwr10")
.Case("power11", "pwr11")
- .Cases("powerpc", "powerpc32", "ppc")
+ .Cases({"powerpc", "powerpc32"}, "ppc")
.Case("powerpc64", "ppc64")
.Case("powerpc64le", "ppc64le")
.Default(CPUName);
diff --git a/llvm/lib/TextAPI/BinaryReader/DylibReader.cpp b/llvm/lib/TextAPI/BinaryReader/DylibReader.cpp
index cda07e8..f55bc9c 100644
--- a/llvm/lib/TextAPI/BinaryReader/DylibReader.cpp
+++ b/llvm/lib/TextAPI/BinaryReader/DylibReader.cpp
@@ -32,7 +32,7 @@ using namespace llvm::MachO;
using namespace llvm::MachO::DylibReader;
using TripleVec = std::vector<Triple>;
-static typename TripleVec::iterator emplace(TripleVec &Container, Triple &&T) {
+static TripleVec::iterator emplace(TripleVec &Container, Triple &&T) {
auto I = partition_point(Container, [=](const Triple &CT) {
return std::forward_as_tuple(CT.getArch(), CT.getOS(),
CT.getEnvironment()) <
diff --git a/llvm/lib/Transforms/Coroutines/CoroCloner.h b/llvm/lib/Transforms/Coroutines/CoroCloner.h
index e05fe28..1e549f1 100644
--- a/llvm/lib/Transforms/Coroutines/CoroCloner.h
+++ b/llvm/lib/Transforms/Coroutines/CoroCloner.h
@@ -77,7 +77,7 @@ public:
: OrigF(OrigF), Suffix(Suffix), Shape(Shape), FKind(FKind),
Builder(OrigF.getContext()), TTI(TTI) {}
- virtual ~BaseCloner() {}
+ virtual ~BaseCloner() = default;
/// Create a clone for a continuation lowering.
static Function *createClone(Function &OrigF, const Twine &Suffix,
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 5048561..5ed47ae 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -3619,7 +3619,7 @@ struct AAIntraFnReachabilityFunction final
return true;
RQITy StackRQI(A, From, To, ExclusionSet, false);
- typename RQITy::Reachable Result;
+ RQITy::Reachable Result;
if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
return NonConstThis->isReachableImpl(A, StackRQI,
/*IsTemporaryRQI=*/true);
@@ -10701,7 +10701,7 @@ struct AAInterFnReachabilityFunction
auto *NonConstThis = const_cast<AAInterFnReachabilityFunction *>(this);
RQITy StackRQI(A, From, To, ExclusionSet, false);
- typename RQITy::Reachable Result;
+ RQITy::Reachable Result;
if (!NonConstThis->checkQueryCache(A, StackRQI, Result))
return NonConstThis->isReachableImpl(A, StackRQI,
/*IsTemporaryRQI=*/true);
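The `typename` removals here and in the next few files are pure redundancy fixes: `typename` is only required for dependent names inside templates, and these uses name concrete types. Two-line illustration:

  using ConcreteIt = std::vector<int>::iterator;                // not dependent: no 'typename'
  template <class T> using DependentIt = typename T::iterator;  // dependent: required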
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index 894d83f..d35ae47 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -1034,11 +1034,11 @@ private:
} // namespace
template <>
-struct llvm::DenseMapInfo<typename CallsiteContextGraph<
+struct llvm::DenseMapInfo<CallsiteContextGraph<
ModuleCallsiteContextGraph, Function, Instruction *>::CallInfo>
: public DenseMapInfo<std::pair<Instruction *, unsigned>> {};
template <>
-struct llvm::DenseMapInfo<typename CallsiteContextGraph<
+struct llvm::DenseMapInfo<CallsiteContextGraph<
IndexCallsiteContextGraph, FunctionSummary, IndexCall>::CallInfo>
: public DenseMapInfo<std::pair<IndexCall, unsigned>> {};
template <>
diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp
index a06f832..d564e32 100644
--- a/llvm/lib/Transforms/Scalar/GVNSink.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp
@@ -514,7 +514,7 @@ public:
class GVNSink {
public:
- GVNSink() {}
+ GVNSink() = default;
bool run(Function &F) {
LLVM_DEBUG(dbgs() << "GVNSink: running on function @" << F.getName()
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index 3487e81..7e70ba2 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -245,11 +245,14 @@ raw_ostream &operator<<(raw_ostream &OS, ShapeInfo SI) {
} // namespace
-static bool isUniformShape(Value *V) {
+static bool isShapePreserving(Value *V) {
Instruction *I = dyn_cast<Instruction>(V);
if (!I)
return true;
+ if (isa<SelectInst>(I))
+ return true;
+
if (I->isBinaryOp())
return true;
@@ -300,6 +303,16 @@ static bool isUniformShape(Value *V) {
}
}
+/// Return an iterator over the operands of \p I that should share shape
+/// information with \p I.
+static iterator_range<Use *> getShapedOperandsForInst(Instruction *I) {
+ assert(isShapePreserving(I) &&
+ "Can't retrieve shaped operands for an instruction that does not "
+ "preserve shape information");
+ auto Ops = I->operands();
+ return isa<SelectInst>(I) ? drop_begin(Ops) : Ops;
+}
+
/// Return the ShapeInfo for the result of \p I, if it can be determined.
static std::optional<ShapeInfo>
computeShapeInfoForInst(Instruction *I,
@@ -329,9 +342,8 @@ computeShapeInfoForInst(Instruction *I,
return OpShape->second;
}
- if (isUniformShape(I) || isa<SelectInst>(I)) {
- auto Ops = I->operands();
- auto ShapedOps = isa<SelectInst>(I) ? drop_begin(Ops) : Ops;
+ if (isShapePreserving(I)) {
+ auto ShapedOps = getShapedOperandsForInst(I);
// Find the first operand that has a known shape and use that.
for (auto &Op : ShapedOps) {
auto OpShape = ShapeMap.find(Op.get());
@@ -710,10 +722,9 @@ public:
case Intrinsic::matrix_column_major_store:
return true;
default:
- return isUniformShape(II);
+ break;
}
- return isUniformShape(V) || isa<StoreInst>(V) || isa<LoadInst>(V) ||
- isa<SelectInst>(V);
+ return isShapePreserving(V) || isa<StoreInst>(V) || isa<LoadInst>(V);
}
/// Propagate the shape information of instructions to their users.
@@ -800,9 +811,8 @@ public:
} else if (isa<StoreInst>(V)) {
// Nothing to do. We forward-propagated to this so we would just
// backward propagate to an instruction with an already known shape.
- } else if (isUniformShape(V) || isa<SelectInst>(V)) {
- auto Ops = cast<Instruction>(V)->operands();
- auto ShapedOps = isa<SelectInst>(V) ? drop_begin(Ops) : Ops;
+ } else if (isShapePreserving(V)) {
+ auto ShapedOps = getShapedOperandsForInst(cast<Instruction>(V));
// Propagate to all operands.
ShapeInfo Shape = ShapeMap[V];
for (Use &U : ShapedOps) {
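A sketch of how the new helpers compose (names from the patch; the ShapeMap plumbing is assumed): for a select, only the two value operands share the result's shape, and the i1 condition is skipped via drop_begin.

  // Backward-propagate a known result shape to the shaped operands.
  static void propagateShape(Instruction *I,
                             DenseMap<Value *, ShapeInfo> &ShapeMap) {
    if (!isShapePreserving(I))
      return;
    ShapeInfo Shape = ShapeMap[I];
    for (Use &U : getShapedOperandsForInst(I)) // select: operands 1 and 2 only
      ShapeMap.insert({U.get(), Shape});
  }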
diff --git a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
index 0f3978f..5f6f66a 100644
--- a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -143,8 +143,8 @@ struct SubGraphTraits {
class WrappedSuccIterator
: public iterator_adaptor_base<
WrappedSuccIterator, BaseSuccIterator,
- typename std::iterator_traits<BaseSuccIterator>::iterator_category,
- NodeRef, std::ptrdiff_t, NodeRef *, NodeRef> {
+ std::iterator_traits<BaseSuccIterator>::iterator_category, NodeRef,
+ std::ptrdiff_t, NodeRef *, NodeRef> {
SmallDenseSet<RegionNode *> *Nodes;
public:
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 6addcfa..cbc604e 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -5956,7 +5956,7 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI,
}
// Update weight for the newly-created conditional branch.
- if (hasBranchWeightMD(*SI)) {
+ if (hasBranchWeightMD(*SI) && NewBI->isConditional()) {
SmallVector<uint64_t, 8> Weights;
getBranchWeights(SI, Weights);
if (Weights.size() == 1 + SI->getNumCases()) {
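The added NewBI->isConditional() check covers the case where the range rewrite collapses the switch into an unconditional branch, on which !prof branch weights are meaningless. A hedged sketch of the intent (NewTerm and the weight values are hypothetical):

  // Only attach branch weights when there are actually two successors.
  if (auto *BI = dyn_cast<BranchInst>(NewTerm); BI && BI->isConditional())
    setBranchWeights(*BI, {TrueWeight, FalseWeight}, /*IsExpected=*/false);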
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index 3fed003..04b0562 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -167,7 +167,7 @@ public:
DebugLoc DL = DebugLoc::getUnknown(),
const Twine &Name = "") {
return tryInsertInstruction(
- new VPInstruction(Opcode, Operands, Flags, DL, Name));
+ new VPInstruction(Opcode, Operands, Flags, {}, DL, Name));
}
VPInstruction *createNaryOp(unsigned Opcode, ArrayRef<VPValue *> Operands,
@@ -184,7 +184,7 @@ public:
DebugLoc DL = DebugLoc::getUnknown(),
const Twine &Name = "") {
return tryInsertInstruction(
- new VPInstruction(Opcode, Operands, WrapFlags, DL, Name));
+ new VPInstruction(Opcode, Operands, WrapFlags, {}, DL, Name));
}
VPInstruction *createNot(VPValue *Operand,
@@ -205,7 +205,7 @@ public:
return tryInsertInstruction(new VPInstruction(
Instruction::BinaryOps::Or, {LHS, RHS},
- VPRecipeWithIRFlags::DisjointFlagsTy(false), DL, Name));
+ VPRecipeWithIRFlags::DisjointFlagsTy(false), {}, DL, Name));
}
VPInstruction *createLogicalAnd(VPValue *LHS, VPValue *RHS,
@@ -221,7 +221,7 @@ public:
std::optional<FastMathFlags> FMFs = std::nullopt) {
auto *Select =
FMFs ? new VPInstruction(Instruction::Select, {Cond, TrueVal, FalseVal},
- *FMFs, DL, Name)
+ *FMFs, {}, DL, Name)
: new VPInstruction(Instruction::Select, {Cond, TrueVal, FalseVal},
DL, Name);
return tryInsertInstruction(Select);
@@ -235,7 +235,7 @@ public:
assert(Pred >= CmpInst::FIRST_ICMP_PREDICATE &&
Pred <= CmpInst::LAST_ICMP_PREDICATE && "invalid predicate");
return tryInsertInstruction(
- new VPInstruction(Instruction::ICmp, {A, B}, Pred, DL, Name));
+ new VPInstruction(Instruction::ICmp, {A, B}, Pred, {}, DL, Name));
}
/// Create a new FCmp VPInstruction with predicate \p Pred and operands \p A
@@ -246,7 +246,7 @@ public:
assert(Pred >= CmpInst::FIRST_FCMP_PREDICATE &&
Pred <= CmpInst::LAST_FCMP_PREDICATE && "invalid predicate");
return tryInsertInstruction(
- new VPInstruction(Instruction::FCmp, {A, B}, Pred, DL, Name));
+ new VPInstruction(Instruction::FCmp, {A, B}, Pred, {}, DL, Name));
}
VPInstruction *createPtrAdd(VPValue *Ptr, VPValue *Offset,
@@ -254,7 +254,7 @@ public:
const Twine &Name = "") {
return tryInsertInstruction(
new VPInstruction(VPInstruction::PtrAdd, {Ptr, Offset},
- GEPNoWrapFlags::none(), DL, Name));
+ GEPNoWrapFlags::none(), {}, DL, Name));
}
VPInstruction *createNoWrapPtrAdd(VPValue *Ptr, VPValue *Offset,
@@ -262,7 +262,7 @@ public:
DebugLoc DL = DebugLoc::getUnknown(),
const Twine &Name = "") {
return tryInsertInstruction(new VPInstruction(
- VPInstruction::PtrAdd, {Ptr, Offset}, GEPFlags, DL, Name));
+ VPInstruction::PtrAdd, {Ptr, Offset}, GEPFlags, {}, DL, Name));
}
VPInstruction *createWidePtrAdd(VPValue *Ptr, VPValue *Offset,
@@ -270,7 +270,7 @@ public:
const Twine &Name = "") {
return tryInsertInstruction(
new VPInstruction(VPInstruction::WidePtrAdd, {Ptr, Offset},
- GEPNoWrapFlags::none(), DL, Name));
+ GEPNoWrapFlags::none(), {}, DL, Name));
}
VPPhi *createScalarPhi(ArrayRef<VPValue *> IncomingValues, DebugLoc DL,
@@ -280,8 +280,7 @@ public:
VPValue *createElementCount(Type *Ty, ElementCount EC) {
VPlan &Plan = *getInsertBlock()->getPlan();
- VPValue *RuntimeEC =
- Plan.getOrAddLiveIn(ConstantInt::get(Ty, EC.getKnownMinValue()));
+ VPValue *RuntimeEC = Plan.getConstantInt(Ty, EC.getKnownMinValue());
if (EC.isScalable()) {
VPValue *VScale = createNaryOp(VPInstruction::VScale, {}, Ty);
RuntimeEC = EC.getKnownMinValue() == 1
@@ -304,9 +303,11 @@ public:
}
VPInstruction *createScalarCast(Instruction::CastOps Opcode, VPValue *Op,
- Type *ResultTy, DebugLoc DL) {
+ Type *ResultTy, DebugLoc DL,
+ const VPIRFlags &Flags = {},
+ const VPIRMetadata &Metadata = {}) {
return tryInsertInstruction(
- new VPInstructionWithType(Opcode, Op, ResultTy, {}, DL));
+ new VPInstructionWithType(Opcode, Op, ResultTy, DL, Flags, Metadata));
}
VPValue *createScalarZExtOrTrunc(VPValue *Op, Type *ResultTy, Type *SrcTy,
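Assumed usage of the widened createScalarCast hook above, forwarding flags and metadata where the old signature hard-coded {} (the MD value and builder context are hypothetical):

  VPInstruction *Trunc = Builder.createScalarCast(
      Instruction::Trunc, Op, Int32Ty, DL, /*Flags=*/{}, /*Metadata=*/MD);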
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 25bf49d..e5c3f17 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7752,8 +7752,7 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
if (CM.isPredicatedInst(I)) {
SmallVector<VPValue *> Ops(Operands);
VPValue *Mask = getBlockInMask(Builder.getInsertBlock());
- VPValue *One =
- Plan.getOrAddLiveIn(ConstantInt::get(I->getType(), 1u, false));
+ VPValue *One = Plan.getConstantInt(I->getType(), 1u);
auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
Ops[1] = SafeRHS;
return new VPWidenRecipe(*I, Ops);
@@ -7806,11 +7805,10 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
}
case Instruction::ExtractValue: {
SmallVector<VPValue *> NewOps(Operands);
- Type *I32Ty = IntegerType::getInt32Ty(I->getContext());
auto *EVI = cast<ExtractValueInst>(I);
assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
unsigned Idx = EVI->getIndices()[0];
- NewOps.push_back(Plan.getOrAddLiveIn(ConstantInt::get(I32Ty, Idx, false)));
+ NewOps.push_back(Plan.getConstantInt(32, Idx));
return new VPWidenRecipe(*I, NewOps);
}
};
@@ -8179,8 +8177,7 @@ VPRecipeBuilder::tryToCreatePartialReduction(Instruction *Reduction,
"Expected an ADD or SUB operation for predicated partial "
"reductions (because the neutral element in the mask is zero)!");
Cond = getBlockInMask(Builder.getInsertBlock());
- VPValue *Zero =
- Plan.getOrAddLiveIn(ConstantInt::get(Reduction->getType(), 0));
+ VPValue *Zero = Plan.getConstantInt(Reduction->getType(), 0);
BinOp = Builder.createSelect(Cond, BinOp, Zero, Reduction->getDebugLoc());
}
return new VPPartialReductionRecipe(ReductionOpcode, Accumulator, BinOp, Cond,
@@ -8643,7 +8640,7 @@ void LoopVectorizationPlanner::adjustRecipesForReductions(
} else if (PhiR->isInLoop() && Kind == RecurKind::AddChainWithSubs &&
CurrentLinkI->getOpcode() == Instruction::Sub) {
Type *PhiTy = PhiR->getUnderlyingValue()->getType();
- auto *Zero = Plan->getOrAddLiveIn(ConstantInt::get(PhiTy, 0));
+ auto *Zero = Plan->getConstantInt(PhiTy, 0);
VPWidenRecipe *Sub = new VPWidenRecipe(
Instruction::Sub, {Zero, CurrentLink->getOperand(1)}, {},
VPIRMetadata(), CurrentLinkI->getDebugLoc());
@@ -8857,8 +8854,7 @@ void LoopVectorizationPlanner::adjustRecipesForReductions(
ToDelete.push_back(Select);
// Convert the reduction phi to operate on bools.
- PhiR->setOperand(0, Plan->getOrAddLiveIn(ConstantInt::getFalse(
- OrigLoop->getHeader()->getContext())));
+ PhiR->setOperand(0, Plan->getFalse());
continue;
}
@@ -8880,9 +8876,7 @@ void LoopVectorizationPlanner::adjustRecipesForReductions(
unsigned ScaleFactor =
RecipeBuilder.getScalingForReduction(RdxDesc.getLoopExitInstr())
.value_or(1);
- Type *I32Ty = IntegerType::getInt32Ty(PhiTy->getContext());
- auto *ScaleFactorVPV =
- Plan->getOrAddLiveIn(ConstantInt::get(I32Ty, ScaleFactor));
+ auto *ScaleFactorVPV = Plan->getConstantInt(32, ScaleFactor);
VPValue *StartV = PHBuilder.createNaryOp(
VPInstruction::ReductionStartVector,
{PhiR->getStartValue(), Iden, ScaleFactorVPV},
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 1b55a3b..34b405c 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -22134,6 +22134,27 @@ bool BoUpSLP::collectValuesToDemote(
{VectorizableTree[E.CombinedEntriesWithIndices.front().first].get(),
VectorizableTree[E.CombinedEntriesWithIndices.back().first].get()});
+ if (E.isAltShuffle()) {
+ // Combining these opcodes may lead to incorrect analysis; skip for now.
+ auto IsDangerousOpcode = [](unsigned Opcode) {
+ switch (Opcode) {
+ case Instruction::Shl:
+ case Instruction::AShr:
+ case Instruction::LShr:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ return true;
+ default:
+ break;
+ }
+ return false;
+ };
+ if (IsDangerousOpcode(E.getAltOpcode()))
+ return FinalAnalysis();
+ }
+
switch (E.getOpcode()) {
// We can always demote truncations and extensions. Since truncations can
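Why these opcodes are risky to demote (an illustration, not taken from the patch): the same shift computed at a narrower bit width can produce a different value, so the alternate opcode of an alt-shuffle needs the same vetting as the main one.

  #include <cstdint>
  // 64-bit shift first, truncate after: (0x100000002 >> 1) -> 0x80000001.
  static_assert(uint32_t(UINT64_C(0x100000002) >> 1) == 0x80000001u);
  // Truncate first, then shift: (0x2 >> 1) -> 1. The results differ.
  static_assert((uint32_t(UINT64_C(0x100000002)) >> 1) == 1u);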
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 1f10058..aba6d35 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -939,7 +939,7 @@ class VPIRMetadata {
SmallVector<std::pair<unsigned, MDNode *>> Metadata;
public:
- VPIRMetadata() {}
+ VPIRMetadata() = default;
/// Adds metadata that can be preserved from the original instruction
/// \p I.
@@ -950,12 +950,9 @@ public:
VPIRMetadata(Instruction &I, LoopVersioning *LVer);
/// Copy constructor for cloning.
- VPIRMetadata(const VPIRMetadata &Other) : Metadata(Other.Metadata) {}
+ VPIRMetadata(const VPIRMetadata &Other) = default;
- VPIRMetadata &operator=(const VPIRMetadata &Other) {
- Metadata = Other.Metadata;
- return *this;
- }
+ VPIRMetadata &operator=(const VPIRMetadata &Other) = default;
/// Add all metadata to \p I.
void applyMetadata(Instruction &I) const;
@@ -1107,14 +1104,15 @@ public:
VPIRMetadata(), Opcode(Opcode), Name(Name.str()) {}
VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands,
- const VPIRFlags &Flags, DebugLoc DL = DebugLoc::getUnknown(),
- const Twine &Name = "");
+ const VPIRFlags &Flags, const VPIRMetadata &MD = {},
+ DebugLoc DL = DebugLoc::getUnknown(), const Twine &Name = "");
VP_CLASSOF_IMPL(VPDef::VPInstructionSC)
VPInstruction *clone() override {
SmallVector<VPValue *, 2> Operands(operands());
- auto *New = new VPInstruction(Opcode, Operands, *this, getDebugLoc(), Name);
+ auto *New =
+ new VPInstruction(Opcode, Operands, *this, *this, getDebugLoc(), Name);
if (getUnderlyingValue())
New->setUnderlyingValue(getUnderlyingInstr());
return New;
@@ -1196,7 +1194,14 @@ public:
VPInstructionWithType(unsigned Opcode, ArrayRef<VPValue *> Operands,
Type *ResultTy, const VPIRFlags &Flags, DebugLoc DL,
const Twine &Name = "")
- : VPInstruction(Opcode, Operands, Flags, DL, Name), ResultTy(ResultTy) {}
+ : VPInstruction(Opcode, Operands, Flags, {}, DL, Name),
+ ResultTy(ResultTy) {}
+
+ VPInstructionWithType(unsigned Opcode, ArrayRef<VPValue *> Operands,
+ Type *ResultTy, DebugLoc DL, const VPIRFlags &Flags,
+ const VPIRMetadata &Metadata, const Twine &Name = "")
+ : VPInstruction(Opcode, Operands, Flags, Metadata, DL, Name),
+ ResultTy(ResultTy) {}
static inline bool classof(const VPRecipeBase *R) {
// VPInstructionWithType are VPInstructions with specific opcodes requiring
@@ -3977,7 +3982,7 @@ class VPIRBasicBlock : public VPBasicBlock {
IRBB(IRBB) {}
public:
- ~VPIRBasicBlock() override {}
+ ~VPIRBasicBlock() override = default;
static inline bool classof(const VPBlockBase *V) {
return V->getVPBlockID() == VPBlockBase::VPIRBasicBlockSC;
@@ -4029,7 +4034,7 @@ class LLVM_ABI_FOR_TEST VPRegionBlock : public VPBlockBase {
IsReplicator(IsReplicator) {}
public:
- ~VPRegionBlock() override {}
+ ~VPRegionBlock() override = default;
/// Method to support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const VPBlockBase *V) {
@@ -4109,6 +4114,12 @@ public:
const VPCanonicalIVPHIRecipe *getCanonicalIV() const {
return const_cast<VPRegionBlock *>(this)->getCanonicalIV();
}
+
+ /// Return the type of the canonical IV for loop regions.
+ Type *getCanonicalIVType() { return getCanonicalIV()->getScalarType(); }
+ const Type *getCanonicalIVType() const {
+ return getCanonicalIV()->getScalarType();
+ }
};
inline VPRegionBlock *VPRecipeBase::getRegion() {
@@ -4387,15 +4398,25 @@ public:
}
/// Return a VPValue wrapping i1 true.
- VPValue *getTrue() {
- LLVMContext &Ctx = getContext();
- return getOrAddLiveIn(ConstantInt::getTrue(Ctx));
- }
+ VPValue *getTrue() { return getConstantInt(1, 1); }
/// Return a VPValue wrapping i1 false.
- VPValue *getFalse() {
- LLVMContext &Ctx = getContext();
- return getOrAddLiveIn(ConstantInt::getFalse(Ctx));
+ VPValue *getFalse() { return getConstantInt(1, 0); }
+
+ /// Return a VPValue wrapping a ConstantInt with the given type and value.
+ VPValue *getConstantInt(Type *Ty, uint64_t Val, bool IsSigned = false) {
+ return getOrAddLiveIn(ConstantInt::get(Ty, Val, IsSigned));
+ }
+
+ /// Return a VPValue wrapping a ConstantInt with the given bitwidth and value.
+ VPValue *getConstantInt(unsigned BitWidth, uint64_t Val,
+ bool IsSigned = false) {
+ return getConstantInt(APInt(BitWidth, Val, IsSigned));
+ }
+
+ /// Return a VPValue wrapping a ConstantInt with the given APInt value.
+ VPValue *getConstantInt(const APInt &Val) {
+ return getOrAddLiveIn(ConstantInt::get(getContext(), Val));
}
/// Return the live-in VPValue for \p V, if there is one or nullptr otherwise.
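Usage sketch for the three new overloads (the surrounding Plan, Ty, Idx, and UF values are assumed):

  VPValue *ByType  = Plan.getConstantInt(Ty, 1);         // Type* + value
  VPValue *ByWidth = Plan.getConstantInt(32, Idx);       // bitwidth + value
  VPValue *ByAPInt = Plan.getConstantInt(APInt(64, UF)); // explicit APInt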
diff --git a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
index 65688a3..1a66d20 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
@@ -612,8 +612,7 @@ void VPlanTransforms::addMiddleCheck(VPlan &Plan,
if (!RequiresScalarEpilogueCheck)
Cmp = Plan.getFalse();
else if (TailFolded)
- Cmp = Plan.getOrAddLiveIn(
- ConstantInt::getTrue(IntegerType::getInt1Ty(Plan.getContext())));
+ Cmp = Plan.getTrue();
else
Cmp = Builder.createICmp(CmpInst::ICMP_EQ, Plan.getTripCount(),
&Plan.getVectorTripCount(), LatchDL, "cmp.n");
@@ -712,8 +711,8 @@ void VPlanTransforms::addMinimumIterationCheck(
// additional overflow check is required before entering the vector loop.
// Get the maximum unsigned value for the type.
- VPValue *MaxUIntTripCount = Plan.getOrAddLiveIn(ConstantInt::get(
- TripCountTy, cast<IntegerType>(TripCountTy)->getMask()));
+ VPValue *MaxUIntTripCount =
+ Plan.getConstantInt(cast<IntegerType>(TripCountTy)->getMask());
VPValue *DistanceToMax = Builder.createNaryOp(
Instruction::Sub, {MaxUIntTripCount, TripCountVPV},
DebugLoc::getUnknown());
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index bde62dd..1ee405a 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -162,8 +162,12 @@ bool VPRecipeBase::mayHaveSideEffects() const {
case VPPredInstPHISC:
case VPVectorEndPointerSC:
return false;
- case VPInstructionSC:
- return mayWriteToMemory();
+ case VPInstructionSC: {
+ auto *VPI = cast<VPInstruction>(this);
+ return mayWriteToMemory() ||
+ VPI->getOpcode() == VPInstruction::BranchOnCount ||
+ VPI->getOpcode() == VPInstruction::BranchOnCond;
+ }
case VPWidenCallSC: {
Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
@@ -490,10 +494,10 @@ template class VPUnrollPartAccessor<3>;
}
VPInstruction::VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands,
- const VPIRFlags &Flags, DebugLoc DL,
- const Twine &Name)
+ const VPIRFlags &Flags, const VPIRMetadata &MD,
+ DebugLoc DL, const Twine &Name)
: VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, Flags, DL),
- VPIRMetadata(), Opcode(Opcode), Name(Name.str()) {
+ VPIRMetadata(MD), Opcode(Opcode), Name(Name.str()) {
assert(flagsValidForOpcode(getOpcode()) &&
"Set flags not supported for the provided opcode");
assert((getNumOperandsForOpcode(Opcode) == -1u ||
@@ -1241,6 +1245,8 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const {
case Instruction::Select:
case Instruction::PHI:
case VPInstruction::AnyOf:
+ case VPInstruction::BranchOnCond:
+ case VPInstruction::BranchOnCount:
case VPInstruction::Broadcast:
case VPInstruction::BuildStructVector:
case VPInstruction::BuildVector:
@@ -2372,9 +2378,8 @@ bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
return false;
auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
- auto *CanIV = getRegion()->getCanonicalIV();
return StartC && StartC->isZero() && StepC && StepC->isOne() &&
- getScalarType() == CanIV->getScalarType();
+ getScalarType() == getRegion()->getCanonicalIVType();
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
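Reading the two VPlanRecipes.cpp hunks together: the branch opcodes still read and write no memory, but they control the loop exit, so modeling them as side-effecting keeps dead-recipe elimination from dropping a latch branch. A hypothetical helper capturing the check:

  bool isLatchBranch(const VPInstruction *VPI) {
    unsigned Op = VPI->getOpcode();
    return Op == VPInstruction::BranchOnCond ||
           Op == VPInstruction::BranchOnCount;
  }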
diff --git a/llvm/lib/Transforms/Vectorize/VPlanSLP.h b/llvm/lib/Transforms/Vectorize/VPlanSLP.h
index 77ff36c..44972c68 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanSLP.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanSLP.h
@@ -89,8 +89,7 @@ class VPlanSlp {
/// Width of the widest combined bundle in bits.
unsigned WidestBundleBits = 0;
- using MultiNodeOpTy =
- typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;
+ using MultiNodeOpTy = std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;
// Input operand bundles for the current multi node. Each multi node operand
// bundle contains values not matching the multi node's opcode. They will
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 986c801..3757a59 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -699,8 +699,7 @@ static void legalizeAndOptimizeInductions(VPlan &Plan) {
continue;
const InductionDescriptor &ID = PtrIV->getInductionDescriptor();
- VPValue *StartV =
- Plan.getOrAddLiveIn(ConstantInt::get(ID.getStep()->getType(), 0));
+ VPValue *StartV = Plan.getConstantInt(ID.getStep()->getType(), 0);
VPValue *StepV = PtrIV->getOperand(1);
VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr,
@@ -820,7 +819,7 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan,
// Calculate the final index.
VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
auto *CanonicalIV = LoopRegion->getCanonicalIV();
- Type *CanonicalIVType = CanonicalIV->getScalarType();
+ Type *CanonicalIVType = LoopRegion->getCanonicalIVType();
VPBuilder B(cast<VPBasicBlock>(PredVPBB));
DebugLoc DL = cast<VPInstruction>(Op)->getDebugLoc();
@@ -836,7 +835,7 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan,
// changed it means the exit is using the incremented value, so we need to
// add the step.
if (Incoming != WideIV) {
- VPValue *One = Plan.getOrAddLiveIn(ConstantInt::get(CanonicalIVType, 1));
+ VPValue *One = Plan.getConstantInt(CanonicalIVType, 1);
EndValue = B.createNaryOp(Instruction::Add, {EndValue, One}, DL);
}
@@ -882,7 +881,7 @@ static VPValue *optimizeLatchExitInductionUser(
return B.createNaryOp(Instruction::Sub, {EndValue, Step}, {}, "ind.escape");
if (ScalarTy->isPointerTy()) {
Type *StepTy = TypeInfo.inferScalarType(Step);
- auto *Zero = Plan.getOrAddLiveIn(ConstantInt::get(StepTy, 0));
+ auto *Zero = Plan.getConstantInt(StepTy, 0);
return B.createPtrAdd(EndValue,
B.createNaryOp(Instruction::Sub, {Zero, Step}),
DebugLoc::getUnknown(), "ind.escape");
@@ -1282,6 +1281,12 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
return;
}
+ if (match(Def, m_BuildVector()) && all_equal(R.operands())) {
+ Def->replaceAllUsesWith(
+ Builder.createNaryOp(VPInstruction::Broadcast, Def->getOperand(0)));
+ return;
+ }
+
if (auto *Phi = dyn_cast<VPPhi>(Def)) {
if (Phi->getNumOperands() == 1)
Phi->replaceAllUsesWith(Phi->getOperand(0));
@@ -1574,9 +1579,9 @@ static bool optimizeVectorInductionWidthForTCAndVFUF(VPlan &Plan,
continue;
// Update IV operands and comparison bound to use new narrower type.
- auto *NewStart = Plan.getOrAddLiveIn(ConstantInt::get(NewIVTy, 0));
+ auto *NewStart = Plan.getConstantInt(NewIVTy, 0);
WideIV->setStartValue(NewStart);
- auto *NewStep = Plan.getOrAddLiveIn(ConstantInt::get(NewIVTy, 1));
+ auto *NewStep = Plan.getConstantInt(NewIVTy, 1);
WideIV->setStepValue(NewStep);
auto *NewBTC = new VPWidenCastRecipe(
@@ -1695,8 +1700,7 @@ static bool tryToReplaceALMWithWideALM(VPlan &Plan, ElementCount VF,
// When using wide lane masks, the return type of the get.active.lane.mask
// intrinsic is VF x UF (last operand).
- VPValue *ALMMultiplier =
- Plan.getOrAddLiveIn(ConstantInt::get(IntegerType::getInt64Ty(Ctx), UF));
+ VPValue *ALMMultiplier = Plan.getConstantInt(64, UF);
EntryALM->setOperand(2, ALMMultiplier);
LoopALM->setOperand(2, ALMMultiplier);
@@ -2402,8 +2406,8 @@ static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
"index.part.next");
// Create the active lane mask instruction in the VPlan preheader.
- VPValue *ALMMultiplier = Plan.getOrAddLiveIn(
- ConstantInt::get(TopRegion->getCanonicalIV()->getScalarType(), 1));
+ VPValue *ALMMultiplier =
+ Plan.getConstantInt(TopRegion->getCanonicalIVType(), 1);
auto *EntryALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
{EntryIncrement, TC, ALMMultiplier}, DL,
"active.lane.mask.entry");
@@ -2503,7 +2507,7 @@ void VPlanTransforms::addActiveLaneMask(
} else {
VPBuilder B = VPBuilder::getToInsertAfter(WideCanonicalIV);
VPValue *ALMMultiplier = Plan.getOrAddLiveIn(
- ConstantInt::get(LoopRegion->getCanonicalIV()->getScalarType(), 1));
+ ConstantInt::get(LoopRegion->getCanonicalIVType(), 1));
LaneMask =
B.createNaryOp(VPInstruction::ActiveLaneMask,
{WideCanonicalIV, Plan.getTripCount(), ALMMultiplier},
@@ -2775,7 +2779,7 @@ void VPlanTransforms::addExplicitVectorLength(
VPBasicBlock *Header = LoopRegion->getEntryBasicBlock();
auto *CanonicalIVPHI = LoopRegion->getCanonicalIV();
- auto *CanIVTy = CanonicalIVPHI->getScalarType();
+ auto *CanIVTy = LoopRegion->getCanonicalIVType();
VPValue *StartV = CanonicalIVPHI->getStartValue();
// Create the ExplicitVectorLengthPhi recipe in the main loop.
@@ -2790,8 +2794,7 @@ void VPlanTransforms::addExplicitVectorLength(
if (MaxSafeElements) {
// Support for MaxSafeDist for correct loop emission.
- VPValue *AVLSafe =
- Plan.getOrAddLiveIn(ConstantInt::get(CanIVTy, *MaxSafeElements));
+ VPValue *AVLSafe = Plan.getConstantInt(CanIVTy, *MaxSafeElements);
VPValue *Cmp = Builder.createICmp(ICmpInst::ICMP_ULT, AVL, AVLSafe);
AVL = Builder.createSelect(Cmp, AVL, AVLSafe, DebugLoc::getUnknown(),
"safe_avl");
@@ -2904,9 +2907,8 @@ void VPlanTransforms::canonicalizeEVLLoops(VPlan &Plan) {
Type *AVLTy = VPTypeAnalysis(Plan).inferScalarType(AVLNext);
VPBuilder Builder(LatchExitingBr);
- VPValue *Cmp =
- Builder.createICmp(CmpInst::ICMP_EQ, AVLNext,
- Plan.getOrAddLiveIn(ConstantInt::getNullValue(AVLTy)));
+ VPValue *Cmp = Builder.createICmp(CmpInst::ICMP_EQ, AVLNext,
+ Plan.getConstantInt(AVLTy, 0));
Builder.createNaryOp(VPInstruction::BranchOnCond, Cmp);
LatchExitingBr->eraseFromParent();
}
@@ -2930,8 +2932,7 @@ void VPlanTransforms::replaceSymbolicStrides(
// Only handle constant strides for now.
continue;
- auto *CI =
- Plan.getOrAddLiveIn(ConstantInt::get(Stride->getType(), *StrideConst));
+ auto *CI = Plan.getConstantInt(*StrideConst);
if (VPValue *StrideVPV = Plan.getLiveIn(StrideV))
StrideVPV->replaceUsesWithIf(CI, CanUseVersionedStride);
@@ -2946,7 +2947,7 @@ void VPlanTransforms::replaceSymbolicStrides(
unsigned BW = U->getType()->getScalarSizeInBits();
APInt C =
isa<SExtInst>(U) ? StrideConst->sext(BW) : StrideConst->zext(BW);
- VPValue *CI = Plan.getOrAddLiveIn(ConstantInt::get(U->getType(), C));
+ VPValue *CI = Plan.getConstantInt(C);
StrideVPV->replaceUsesWithIf(CI, CanUseVersionedStride);
}
RewriteMap[StrideV] = PSE.getSCEV(StrideV);
@@ -3125,8 +3126,7 @@ void VPlanTransforms::createInterleaveGroups(
DL.getTypeAllocSize(getLoadStoreType(IRInsertPos)) *
IG->getIndex(IRInsertPos),
/*IsSigned=*/true);
- VPValue *OffsetVPV =
- Plan.getOrAddLiveIn(ConstantInt::get(Plan.getContext(), -Offset));
+ VPValue *OffsetVPV = Plan.getConstantInt(-Offset);
VPBuilder B(InsertPos);
Addr = B.createNoWrapPtrAdd(InsertPos->getAddr(), OffsetVPV, NW);
}
@@ -3867,8 +3867,7 @@ void VPlanTransforms::materializeBackedgeTakenCount(VPlan &Plan,
VPBuilder Builder(VectorPH, VectorPH->begin());
auto *TCTy = VPTypeAnalysis(Plan).inferScalarType(Plan.getTripCount());
auto *TCMO = Builder.createNaryOp(
- Instruction::Sub,
- {Plan.getTripCount(), Plan.getOrAddLiveIn(ConstantInt::get(TCTy, 1))},
+ Instruction::Sub, {Plan.getTripCount(), Plan.getConstantInt(TCTy, 1)},
DebugLoc::getCompilerGenerated(), "trip.count.minus.1");
BTC->replaceAllUsesWith(TCMO);
}
@@ -3993,9 +3992,8 @@ void VPlanTransforms::materializeVectorTripCount(VPlan &Plan,
if (TailByMasking) {
TC = Builder.createNaryOp(
Instruction::Add,
- {TC, Builder.createNaryOp(
- Instruction::Sub,
- {Step, Plan.getOrAddLiveIn(ConstantInt::get(TCTy, 1))})},
+ {TC, Builder.createNaryOp(Instruction::Sub,
+ {Step, Plan.getConstantInt(TCTy, 1)})},
DebugLoc::getCompilerGenerated(), "n.rnd.up");
}
@@ -4017,8 +4015,8 @@ void VPlanTransforms::materializeVectorTripCount(VPlan &Plan,
if (RequiresScalarEpilogue) {
assert(!TailByMasking &&
"requiring scalar epilogue is not supported with fail folding");
- VPValue *IsZero = Builder.createICmp(
- CmpInst::ICMP_EQ, R, Plan.getOrAddLiveIn(ConstantInt::get(TCTy, 0)));
+ VPValue *IsZero =
+ Builder.createICmp(CmpInst::ICMP_EQ, R, Plan.getConstantInt(TCTy, 0));
R = Builder.createSelect(IsZero, Step, R);
}
@@ -4056,7 +4054,7 @@ void VPlanTransforms::materializeVFAndVFxUF(VPlan &Plan, VPBasicBlock *VectorPH,
}
VF.replaceAllUsesWith(RuntimeVF);
- VPValue *UF = Plan.getOrAddLiveIn(ConstantInt::get(TCTy, Plan.getUF()));
+ VPValue *UF = Plan.getConstantInt(TCTy, Plan.getUF());
VPValue *MulByUF = Builder.createNaryOp(Instruction::Mul, {RuntimeVF, UF});
VFxUF.replaceAllUsesWith(MulByUF);
}
@@ -4176,7 +4174,7 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
unsigned VFMinVal = VF.getKnownMinValue();
SmallVector<VPInterleaveRecipe *> StoreGroups;
for (auto &R : *VectorLoop->getEntryBasicBlock()) {
- if (isa<VPCanonicalIVPHIRecipe>(&R) || match(&R, m_BranchOnCount()))
+ if (isa<VPCanonicalIVPHIRecipe>(&R))
continue;
if (isa<VPDerivedIVRecipe, VPScalarIVStepsRecipe>(&R) &&
@@ -4336,17 +4334,17 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
VPBuilder PHBuilder(Plan.getVectorPreheader());
VPValue *UF = Plan.getOrAddLiveIn(
- ConstantInt::get(CanIV->getScalarType(), 1 * Plan.getUF()));
+ ConstantInt::get(VectorLoop->getCanonicalIVType(), 1 * Plan.getUF()));
if (VF.isScalable()) {
VPValue *VScale = PHBuilder.createElementCount(
- CanIV->getScalarType(), ElementCount::getScalable(1));
+ VectorLoop->getCanonicalIVType(), ElementCount::getScalable(1));
VPValue *VScaleUF = PHBuilder.createNaryOp(Instruction::Mul, {VScale, UF});
Inc->setOperand(1, VScaleUF);
Plan.getVF().replaceAllUsesWith(VScale);
} else {
Inc->setOperand(1, UF);
Plan.getVF().replaceAllUsesWith(
- Plan.getOrAddLiveIn(ConstantInt::get(CanIV->getScalarType(), 1)));
+ Plan.getConstantInt(CanIV->getScalarType(), 1));
}
removeDeadRecipes(Plan);
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
index cfd1a74..d6a0028 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
@@ -68,10 +68,9 @@ class UnrollState {
void unrollWidenInductionByUF(VPWidenInductionRecipe *IV,
VPBasicBlock::iterator InsertPtForPhi);
- VPValue *getConstantVPV(unsigned Part) {
- Type *CanIVIntTy =
- Plan.getVectorLoopRegion()->getCanonicalIV()->getScalarType();
- return Plan.getOrAddLiveIn(ConstantInt::get(CanIVIntTy, Part));
+ VPValue *getConstantInt(unsigned Part) {
+ Type *CanIVIntTy = Plan.getVectorLoopRegion()->getCanonicalIVType();
+ return Plan.getConstantInt(CanIVIntTy, Part);
}
public:
@@ -138,7 +137,7 @@ void UnrollState::unrollReplicateRegionByUF(VPRegionBlock *VPR) {
for (const auto &[PartIR, Part0R] : zip(*PartIVPBB, *Part0VPBB)) {
remapOperands(&PartIR, Part);
if (auto *ScalarIVSteps = dyn_cast<VPScalarIVStepsRecipe>(&PartIR)) {
- ScalarIVSteps->addOperand(getConstantVPV(Part));
+ ScalarIVSteps->addOperand(getConstantInt(Part));
}
addRecipeForPart(&Part0R, &PartIR, Part);
@@ -250,7 +249,7 @@ void UnrollState::unrollHeaderPHIByUF(VPHeaderPHIRecipe *R,
for (unsigned Part = 1; Part != UF; ++Part)
VPV2Parts[VPI][Part - 1] = StartV;
}
- Copy->addOperand(getConstantVPV(Part));
+ Copy->addOperand(getConstantInt(Part));
} else {
assert(isa<VPActiveLaneMaskPHIRecipe>(R) &&
"unexpected header phi recipe not needing unrolled part");
@@ -319,7 +318,7 @@ void UnrollState::unrollRecipeByUF(VPRecipeBase &R) {
VPVectorPointerRecipe, VPVectorEndPointerRecipe>(Copy) ||
match(Copy,
m_VPInstruction<VPInstruction::CanonicalIVIncrementForPart>()))
- Copy->addOperand(getConstantVPV(Part));
+ Copy->addOperand(getConstantInt(Part));
if (isa<VPVectorPointerRecipe, VPVectorEndPointerRecipe>(R))
Copy->setOperand(0, R.getOperand(0));
@@ -475,8 +474,7 @@ cloneForLane(VPlan &Plan, VPBuilder &Builder, Type *IdxTy,
if (LaneDefs != Def2LaneDefs.end())
return LaneDefs->second[Lane.getKnownLane()];
- VPValue *Idx =
- Plan.getOrAddLiveIn(ConstantInt::get(IdxTy, Lane.getKnownLane()));
+ VPValue *Idx = Plan.getConstantInt(IdxTy, Lane.getKnownLane());
return Builder.createNaryOp(Instruction::ExtractElement, {Op, Idx});
}
@@ -510,8 +508,7 @@ cloneForLane(VPlan &Plan, VPBuilder &Builder, Type *IdxTy,
cast<VPInstruction>(Op)->getOperand(Lane.getKnownLane()));
continue;
}
- VPValue *Idx =
- Plan.getOrAddLiveIn(ConstantInt::get(IdxTy, Lane.getKnownLane()));
+ VPValue *Idx = Plan.getConstantInt(IdxTy, Lane.getKnownLane());
VPValue *Ext = Builder.createNaryOp(Instruction::ExtractElement, {Op, Idx});
NewOps.push_back(Ext);
}