Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--   llvm/lib/Analysis/CMakeLists.txt                       1
-rw-r--r--   llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp   106
-rw-r--r--   llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp    281
-rw-r--r--   llvm/lib/Analysis/TargetLibraryInfo.cpp               42
4 files changed, 21 insertions, 409 deletions
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index 88ebd65..bff9b62 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -89,7 +89,6 @@ add_llvm_component_library(LLVMAnalysis
InlineCost.cpp
InlineAdvisor.cpp
InlineOrder.cpp
- InlineSizeEstimatorAnalysis.cpp
InstCount.cpp
InstructionPrecedenceTracking.cpp
InstructionSimplify.cpp
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 67e38ab..d2be805 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -16,7 +16,6 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/Analysis/CallGraph.h"
-#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
#include "llvm/Analysis/MLInlineAdvisor.h"
#include "llvm/Analysis/ModelUnderTrainingRunner.h"
#include "llvm/Analysis/NoInferenceModelRunner.h"
@@ -89,9 +88,6 @@ struct InlineEvent {
/// error, even if AdvisedDecision were true, otherwise it agrees with
/// AdvisedDecision.
bool Effect = false;
-
- /// What the change in size was: size_after - size_before
- int64_t Reward = 0;
};
/// Collect data we may use for training a model.
@@ -150,31 +146,15 @@ public:
GetModelRunner,
std::function<bool(CallBase &)> GetDefaultAdvice);
- size_t getTotalSizeEstimate();
-
- void updateNativeSizeEstimate(int64_t Change) {
- *CurrentNativeSize += Change;
- }
- void resetNativeSize(Function *F) {
- PreservedAnalyses PA = PreservedAnalyses::all();
- PA.abandon<InlineSizeEstimatorAnalysis>();
- FAM.invalidate(*F, PA);
- }
-
std::unique_ptr<MLInlineAdvice>
getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
- std::optional<size_t> getNativeSizeEstimate(const Function &F) const;
-
private:
bool isLogging() const { return !!Logger; }
std::unique_ptr<MLInlineAdvice> getMandatoryAdviceImpl(CallBase &CB) override;
const bool IsDoingInference;
std::unique_ptr<TrainingLogger> Logger;
-
- const std::optional<int32_t> InitialNativeSize;
- std::optional<int32_t> CurrentNativeSize;
};
/// A variant of MLInlineAdvice that tracks all non-trivial inlining
@@ -183,13 +163,9 @@ class LoggingMLInlineAdvice : public MLInlineAdvice {
public:
LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB,
OptimizationRemarkEmitter &ORE, bool Recommendation,
- TrainingLogger &Logger,
- std::optional<size_t> CallerSizeEstimateBefore,
- std::optional<size_t> CalleeSizeEstimateBefore,
- bool DefaultDecision, bool Mandatory = false)
+ TrainingLogger &Logger, bool DefaultDecision,
+ bool Mandatory = false)
: MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger),
- CallerSizeEstimateBefore(CallerSizeEstimateBefore),
- CalleeSizeEstimateBefore(CalleeSizeEstimateBefore),
DefaultDecision(DefaultDecision), Mandatory(Mandatory) {}
virtual ~LoggingMLInlineAdvice() = default;
@@ -200,59 +176,35 @@ private:
}
void recordInliningImpl() override {
MLInlineAdvice::recordInliningImpl();
- getAdvisor()->resetNativeSize(Caller);
- int Reward = std::numeric_limits<int>::max();
- if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
- !getAdvisor()->isForcedToStop()) {
- int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller) +
- *CalleeSizeEstimateBefore;
- Reward = NativeSizeAfter -
- (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
- getAdvisor()->updateNativeSizeEstimate(Reward);
- }
- log(Reward, /*Success=*/true);
+ log(/*Success=*/true);
}
void recordInliningWithCalleeDeletedImpl() override {
MLInlineAdvice::recordInliningWithCalleeDeletedImpl();
- getAdvisor()->resetNativeSize(Caller);
- if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
- !getAdvisor()->isForcedToStop()) {
- int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller);
- int Reward = NativeSizeAfter -
- (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
- getAdvisor()->updateNativeSizeEstimate(Reward);
- log(Reward, /*Success=*/true);
- } else {
- log(NoReward, /*Success=*/true);
- }
+ log(/*Success=*/true);
}
void recordUnsuccessfulInliningImpl(const InlineResult &Result) override {
MLInlineAdvice::recordUnsuccessfulInliningImpl(Result);
- log(NoReward, /*Success=*/false);
+ log(/*Success=*/false);
}
void recordUnattemptedInliningImpl() override {
MLInlineAdvice::recordUnattemptedInliningImpl();
- log(NoReward, /*Success=*/false);
+ log(/*Success=*/false);
}
- void log(int64_t Reward, bool Success) {
+ void log(bool Success) {
if (Mandatory)
return;
InlineEvent Event;
Event.AdvisedDecision = isInliningRecommended();
Event.DefaultDecision = DefaultDecision;
Event.Effect = Success;
- Event.Reward = Reward;
Logger.logInlineEvent(Event, getAdvisor()->getModelRunner());
}
- static const int64_t NoReward = 0;
TrainingLogger &Logger;
- const std::optional<size_t> CallerSizeEstimateBefore;
- const std::optional<size_t> CalleeSizeEstimateBefore;
const int64_t DefaultDecision;
const int64_t Mandatory;
};
@@ -296,9 +248,9 @@ TrainingLogger::TrainingLogger(StringRef LogFileName,
if (EC)
dbgs() << (EC.message() + ":" + TrainingLog);
- L = std::make_unique<Logger>(
- std::move(OS), FT, TensorSpec::createSpec<int64_t>(RewardName, {1}),
- InlineSizeEstimatorAnalysis::isEvaluatorRequested());
+ L = std::make_unique<Logger>(std::move(OS), FT,
+ TensorSpec::createSpec<int64_t>(RewardName, {1}),
+ false);
L->switchContext("");
}
@@ -326,8 +278,6 @@ void TrainingLogger::logInlineEvent(const InlineEvent &Event,
L->logTensorValue(DecisionPos,
reinterpret_cast<const char *>(&Event.AdvisedDecision));
L->endObservation();
- if (InlineSizeEstimatorAnalysis::isEvaluatorRequested())
- L->logReward(Event.Reward);
// For debugging / later use
Effects.push_back(Event.Effect);
@@ -340,9 +290,7 @@ DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
GetModelRunner,
std::function<bool(CallBase &)> GetDefaultAdvice)
: MLInlineAdvisor(M, MAM, GetModelRunner, GetDefaultAdvice),
- IsDoingInference(isa<ModelUnderTrainingRunner>(getModelRunner())),
- InitialNativeSize(isLogging() ? getTotalSizeEstimate() : 0),
- CurrentNativeSize(InitialNativeSize) {
+ IsDoingInference(isa<ModelUnderTrainingRunner>(getModelRunner())) {
// We cannot have the case of neither inference nor logging.
if (!TrainingLog.empty())
Logger = std::make_unique<TrainingLogger>(
@@ -351,29 +299,12 @@ DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
assert(IsDoingInference || isLogging());
}
-std::optional<size_t>
-DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const {
- if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
- return std::nullopt;
- auto &R =
- FAM.getResult<InlineSizeEstimatorAnalysis>(const_cast<Function &>(F));
- if (!R) {
- F.getParent()->getContext().emitError(
- "Native size estimator is not present.");
- return 0;
- }
- return *R;
-}
-
std::unique_ptr<MLInlineAdvice>
DevelopmentModeMLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) {
return std::make_unique<LoggingMLInlineAdvice>(
/*Advisor=*/this,
/*CB=*/CB, /*ORE=*/getCallerORE(CB), /*Recommendation=*/true,
/*Logger=*/*Logger,
- /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
- /*CalleeSizeEstimateBefore=*/
- getNativeSizeEstimate(*CB.getCalledFunction()),
/*DefaultDecision=*/true, /*Mandatory*/ true);
}
@@ -391,24 +322,9 @@ DevelopmentModeMLInlineAdvisor::getAdviceFromModel(
/*Advisor=*/this,
/*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/Recommendation,
/*Logger=*/*Logger,
- /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
- /*CalleeSizeEstimateBefore=*/
- getNativeSizeEstimate(*CB.getCalledFunction()),
/*DefaultDecision=*/DefaultAdvice);
}
-size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
- if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
- return 0;
- size_t Ret = 0;
- for (auto &F : M) {
- if (F.isDeclaration())
- continue;
- Ret += *getNativeSizeEstimate(F);
- }
- return Ret;
-}
-
std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
Module &M, ModuleAnalysisManager &MAM,
std::function<bool(CallBase &)> GetDefaultAdvice) {
diff --git a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
deleted file mode 100644
index fc635726a..0000000
--- a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
+++ /dev/null
@@ -1,281 +0,0 @@
-//===- InlineSizeEstimatorAnalysis.cpp - IR to native size from ML model --===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This implements feature and label extraction for offline supervised learning
-// of an IR to native size model.
-//
-//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
-
-#ifdef LLVM_HAVE_TFLITE
-#include "llvm/Analysis/Utils/TFUtils.h"
-#endif
-#include "llvm/IR/Function.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-AnalysisKey InlineSizeEstimatorAnalysis::Key;
-
-#ifdef LLVM_HAVE_TFLITE
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/CommandLine.h"
-#include <algorithm>
-#include <deque>
-#include <optional>
-
-static cl::opt<std::string> TFIR2NativeModelPath(
- "ml-inliner-ir2native-model", cl::Hidden,
- cl::desc("Path to saved model evaluating native size from IR."));
-
-#define DEBUG_TYPE "inline-size-estimator"
-namespace {
-unsigned getMaxInstructionID() {
-#define LAST_OTHER_INST(NR) return NR;
-#include "llvm/IR/Instruction.def"
-}
-
-class IRToNativeSizeLearning {
-public:
- enum class NamedFeatureIndex : size_t {
- InitialSize,
- Blocks,
- Calls,
- IsLocal,
- IsLinkOnceODR,
- IsLinkOnce,
- Loops,
- MaxLoopDepth,
- MaxDomTreeLevel,
-
- NumNamedFeatures
- };
- static const size_t NumNamedFeatures =
- static_cast<size_t>(NamedFeatureIndex::NumNamedFeatures);
- struct FunctionFeatures {
- static const size_t FeatureCount;
-
- std::array<int32_t, NumNamedFeatures> NamedFeatures = {0};
- std::vector<int32_t> InstructionHistogram;
- std::vector<int32_t> InstructionPairHistogram;
-
- void fillTensor(int32_t *Ptr) const;
- int32_t &operator[](NamedFeatureIndex Pos) {
- return NamedFeatures[static_cast<size_t>(Pos)];
- }
- };
- IRToNativeSizeLearning() = default;
-
- static FunctionFeatures getFunctionFeatures(Function &F,
- FunctionAnalysisManager &FAM);
-};
-
-// This is a point in time - we determined that including these pairs of
-// consecutive instructions (in the IR layout available at inline time) as
-// features improves the model performance. We want to move away from manual
-// feature selection.
-// The array is given in opcode pairs rather than labels because 1) labels
-// weren't readily available, and 2) the successions were hand-extracted.
-//
-// This array must be sorted.
-static const std::array<std::pair<size_t, size_t>, 137>
- ImportantInstructionSuccessions{
- {{1, 1}, {1, 4}, {1, 5}, {1, 7}, {1, 8}, {1, 9}, {1, 11},
- {1, 12}, {1, 13}, {1, 14}, {1, 18}, {1, 20}, {1, 22}, {1, 24},
- {1, 25}, {1, 26}, {1, 27}, {1, 28}, {1, 29}, {1, 30}, {1, 31},
- {1, 32}, {1, 33}, {1, 34}, {1, 39}, {1, 40}, {1, 42}, {1, 45},
- {2, 1}, {2, 2}, {2, 13}, {2, 28}, {2, 29}, {2, 32}, {2, 33},
- {2, 34}, {2, 38}, {2, 48}, {2, 49}, {2, 53}, {2, 55}, {2, 56},
- {13, 2}, {13, 13}, {13, 26}, {13, 33}, {13, 34}, {13, 56}, {15, 27},
- {28, 2}, {28, 48}, {28, 53}, {29, 2}, {29, 33}, {29, 56}, {31, 31},
- {31, 33}, {31, 34}, {31, 49}, {32, 1}, {32, 2}, {32, 13}, {32, 15},
- {32, 28}, {32, 29}, {32, 32}, {32, 33}, {32, 34}, {32, 39}, {32, 40},
- {32, 48}, {32, 49}, {32, 53}, {32, 56}, {33, 1}, {33, 2}, {33, 32},
- {33, 33}, {33, 34}, {33, 49}, {33, 53}, {33, 56}, {34, 1}, {34, 2},
- {34, 32}, {34, 33}, {34, 34}, {34, 49}, {34, 53}, {34, 56}, {38, 34},
- {39, 57}, {40, 34}, {47, 15}, {47, 49}, {48, 2}, {48, 34}, {48, 56},
- {49, 1}, {49, 2}, {49, 28}, {49, 32}, {49, 33}, {49, 34}, {49, 39},
- {49, 49}, {49, 56}, {53, 1}, {53, 2}, {53, 28}, {53, 34}, {53, 53},
- {53, 57}, {55, 1}, {55, 28}, {55, 34}, {55, 53}, {55, 55}, {55, 56},
- {56, 1}, {56, 2}, {56, 7}, {56, 13}, {56, 32}, {56, 33}, {56, 34},
- {56, 49}, {56, 53}, {56, 56}, {56, 64}, {57, 34}, {57, 56}, {57, 57},
- {64, 1}, {64, 64}, {65, 1}, {65, 65}}};
-
-// We have: 9 calculated features (the features here); 1 feature for each
-// instruction opcode; and 1 feature for each manually-identified sequence.
-// For the latter 2, we build a histogram: we count the number of
-// occurrences of each instruction opcode or succession of instructions,
-// respectively.
-// Note that instruction opcodes start from 1. For convenience, we also have an
-// always 0 feature for the '0' opcode, hence the extra 1.
-const size_t IRToNativeSizeLearning::FunctionFeatures::FeatureCount =
- ImportantInstructionSuccessions.size() + getMaxInstructionID() + 1 +
- IRToNativeSizeLearning::NumNamedFeatures;
-
-size_t getSize(Function &F, TargetTransformInfo &TTI) {
- size_t Ret = 0;
- for (const auto &BB : F)
- for (const auto &I : BB)
- Ret += TTI.getInstructionCost(
- &I, TargetTransformInfo::TargetCostKind::TCK_CodeSize)
- .getValue();
- return Ret;
-}
-
-size_t getSize(Function &F, FunctionAnalysisManager &FAM) {
- auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
- return getSize(F, TTI);
-}
-
-unsigned getMaxDominatorTreeDepth(const Function &F,
- const DominatorTree &Tree) {
- unsigned Ret = 0;
- for (const auto &BB : F)
- if (const auto *TN = Tree.getNode(&BB))
- Ret = std::max(Ret, TN->getLevel());
- return Ret;
-}
-} // namespace
-
-IRToNativeSizeLearning::FunctionFeatures
-IRToNativeSizeLearning::getFunctionFeatures(Function &F,
- FunctionAnalysisManager &FAM) {
- assert(llvm::is_sorted(ImportantInstructionSuccessions) &&
- "expected function features are sorted");
-
- auto &DomTree = FAM.getResult<DominatorTreeAnalysis>(F);
- FunctionFeatures FF;
- size_t InstrCount = getMaxInstructionID() + 1;
- FF.InstructionHistogram.resize(InstrCount);
-
- FF.InstructionPairHistogram.resize(ImportantInstructionSuccessions.size());
-
- int StartID = 0;
- int LastID = StartID;
- auto getPairIndex = [](size_t a, size_t b) {
- auto I = llvm::find(ImportantInstructionSuccessions, std::make_pair(a, b));
- if (I == ImportantInstructionSuccessions.end())
- return -1;
- return static_cast<int>(
- std::distance(ImportantInstructionSuccessions.begin(), I));
- };
-
- // We don't want debug calls, because they'd just add noise.
- for (const auto &BB : F) {
- for (const auto &I : BB.instructionsWithoutDebug()) {
- auto ID = I.getOpcode();
-
- ++FF.InstructionHistogram[ID];
- int PairIndex = getPairIndex(LastID, ID);
- if (PairIndex >= 0)
- ++FF.InstructionPairHistogram[PairIndex];
- LastID = ID;
- if (isa<CallBase>(I))
- ++FF[NamedFeatureIndex::Calls];
- }
- }
-
- FF[NamedFeatureIndex::InitialSize] = getSize(F, FAM);
- FF[NamedFeatureIndex::IsLocal] = F.hasLocalLinkage();
- FF[NamedFeatureIndex::IsLinkOnceODR] = F.hasLinkOnceODRLinkage();
- FF[NamedFeatureIndex::IsLinkOnce] = F.hasLinkOnceLinkage();
- FF[NamedFeatureIndex::Blocks] = F.size();
- auto &LI = FAM.getResult<LoopAnalysis>(F);
- FF[NamedFeatureIndex::Loops] = std::distance(LI.begin(), LI.end());
- for (auto &L : LI)
- FF[NamedFeatureIndex::MaxLoopDepth] =
- std::max(FF[NamedFeatureIndex::MaxLoopDepth],
- static_cast<int32_t>(L->getLoopDepth()));
- FF[NamedFeatureIndex::MaxDomTreeLevel] = getMaxDominatorTreeDepth(F, DomTree);
- return FF;
-}
-
-void IRToNativeSizeLearning::FunctionFeatures::fillTensor(int32_t *Ptr) const {
- std::copy(NamedFeatures.begin(), NamedFeatures.end(), Ptr);
- Ptr += NamedFeatures.size();
- std::copy(InstructionHistogram.begin(), InstructionHistogram.end(), Ptr);
- Ptr += InstructionHistogram.size();
- std::copy(InstructionPairHistogram.begin(), InstructionPairHistogram.end(),
- Ptr);
-}
-
-bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() {
- return !TFIR2NativeModelPath.empty();
-}
-
-InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() {
- if (!isEvaluatorRequested()) {
- return;
- }
- std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
- "serving_default_input_1",
- {1, static_cast<int64_t>(
- IRToNativeSizeLearning::FunctionFeatures::FeatureCount)})};
- std::vector<TensorSpec> OutputSpecs{
- TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
- Evaluator = std::make_unique<TFModelEvaluator>(
- TFIR2NativeModelPath.getValue().c_str(), InputSpecs, OutputSpecs);
- if (!Evaluator || !Evaluator->isValid()) {
- Evaluator.reset();
- return;
- }
-}
-
-InlineSizeEstimatorAnalysis::Result
-InlineSizeEstimatorAnalysis::run(const Function &F,
- FunctionAnalysisManager &FAM) {
- if (!Evaluator)
- return std::nullopt;
- auto Features = IRToNativeSizeLearning::getFunctionFeatures(
- const_cast<Function &>(F), FAM);
- int32_t *V = Evaluator->getInput<int32_t>(0);
- Features.fillTensor(V);
- auto ER = Evaluator->evaluate();
- if (!ER)
- return std::nullopt;
- float Ret = *ER->getTensorValue<float>(0);
- if (Ret < 0.0)
- Ret = 0.0;
- return static_cast<size_t>(Ret);
-}
-
-InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() {}
-InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis(
- InlineSizeEstimatorAnalysis &&Other)
- : Evaluator(std::move(Other.Evaluator)) {}
-
-#else
-namespace llvm {
-class TFModelEvaluator {};
-} // namespace llvm
-InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() = default;
-InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis(
- InlineSizeEstimatorAnalysis &&) {}
-InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() = default;
-InlineSizeEstimatorAnalysis::Result
-InlineSizeEstimatorAnalysis::run(const Function &F,
- FunctionAnalysisManager &FAM) {
- return std::nullopt;
-}
-bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() { return false; }
-#endif
-
-PreservedAnalyses
-InlineSizeEstimatorAnalysisPrinterPass::run(Function &F,
- FunctionAnalysisManager &AM) {
- OS << "[InlineSizeEstimatorAnalysis] size estimate for " << F.getName()
- << ": " << AM.getResult<InlineSizeEstimatorAnalysis>(F) << "\n";
- return PreservedAnalyses::all();
-}
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 74f3a7d..f97abc9 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -15,33 +15,11 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/SystemLibraries.h"
#include "llvm/InitializePasses.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/TargetParser/Triple.h"
using namespace llvm;
-static cl::opt<TargetLibraryInfoImpl::VectorLibrary> ClVectorLibrary(
- "vector-library", cl::Hidden, cl::desc("Vector functions library"),
- cl::init(TargetLibraryInfoImpl::NoLibrary),
- cl::values(clEnumValN(TargetLibraryInfoImpl::NoLibrary, "none",
- "No vector functions library"),
- clEnumValN(TargetLibraryInfoImpl::Accelerate, "Accelerate",
- "Accelerate framework"),
- clEnumValN(TargetLibraryInfoImpl::DarwinLibSystemM,
- "Darwin_libsystem_m", "Darwin libsystem_m"),
- clEnumValN(TargetLibraryInfoImpl::LIBMVEC, "LIBMVEC",
- "GLIBC Vector Math library"),
- clEnumValN(TargetLibraryInfoImpl::MASSV, "MASSV",
- "IBM MASS vector library"),
- clEnumValN(TargetLibraryInfoImpl::SVML, "SVML",
- "Intel SVML library"),
- clEnumValN(TargetLibraryInfoImpl::SLEEFGNUABI, "sleefgnuabi",
- "SIMD Library for Evaluating Elementary Functions"),
- clEnumValN(TargetLibraryInfoImpl::ArmPL, "ArmPL",
- "Arm Performance Libraries"),
- clEnumValN(TargetLibraryInfoImpl::AMDLIBM, "AMDLIBM",
- "AMD vector math library")));
-
StringLiteral const TargetLibraryInfoImpl::StandardNames[LibFunc::NumLibFuncs] =
{
#define TLI_DEFINE_STRING
@@ -1392,15 +1370,15 @@ const VecDesc VecFuncs_AMDLIBM[] = {
void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
enum VectorLibrary VecLib, const llvm::Triple &TargetTriple) {
switch (VecLib) {
- case Accelerate: {
+ case VectorLibrary::Accelerate: {
addVectorizableFunctions(VecFuncs_Accelerate);
break;
}
- case DarwinLibSystemM: {
+ case VectorLibrary::DarwinLibSystemM: {
addVectorizableFunctions(VecFuncs_DarwinLibSystemM);
break;
}
- case LIBMVEC: {
+ case VectorLibrary::LIBMVEC: {
switch (TargetTriple.getArch()) {
default:
break;
@@ -1415,15 +1393,15 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
}
break;
}
- case MASSV: {
+ case VectorLibrary::MASSV: {
addVectorizableFunctions(VecFuncs_MASSV);
break;
}
- case SVML: {
+ case VectorLibrary::SVML: {
addVectorizableFunctions(VecFuncs_SVML);
break;
}
- case SLEEFGNUABI: {
+ case VectorLibrary::SLEEFGNUABI: {
switch (TargetTriple.getArch()) {
default:
break;
@@ -1439,7 +1417,7 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
}
break;
}
- case ArmPL: {
+ case VectorLibrary::ArmPL: {
switch (TargetTriple.getArch()) {
default:
break;
@@ -1450,11 +1428,11 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
}
break;
}
- case AMDLIBM: {
+ case VectorLibrary::AMDLIBM: {
addVectorizableFunctions(VecFuncs_AMDLIBM);
break;
}
- case NoLibrary:
+ case VectorLibrary::NoLibrary:
break;
}
}