Diffstat (limited to 'llvm/include')
-rw-r--r--  llvm/include/llvm/ADT/BitVector.h | 5
-rw-r--r--  llvm/include/llvm/ADT/ConcurrentHashtable.h | 5
-rw-r--r--  llvm/include/llvm/ADT/DirectedGraph.h | 10
-rw-r--r--  llvm/include/llvm/ADT/IntervalTree.h | 3
-rw-r--r--  llvm/include/llvm/ADT/SmallPtrSet.h | 10
-rw-r--r--  llvm/include/llvm/Analysis/IR2Vec.h | 272
-rw-r--r--  llvm/include/llvm/Analysis/MemoryProfileInfo.h | 8
-rw-r--r--  llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h | 23
-rw-r--r--  llvm/include/llvm/Analysis/ValueTracking.h | 6
-rw-r--r--  llvm/include/llvm/BinaryFormat/DXContainer.h | 1
-rw-r--r--  llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h | 4
-rw-r--r--  llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h | 7
-rw-r--r--  llvm/include/llvm/CodeGen/MIRYamlMapping.h | 2
-rw-r--r--  llvm/include/llvm/CodeGen/MachineFrameInfo.h | 13
-rw-r--r--  llvm/include/llvm/CodeGen/TargetFrameLowering.h | 1
-rw-r--r--  llvm/include/llvm/CodeGen/TargetLowering.h | 24
-rw-r--r--  llvm/include/llvm/CodeGen/TargetRegisterInfo.h | 13
-rw-r--r--  llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h | 1
-rw-r--r--  llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h | 7
-rw-r--r--  llvm/include/llvm/Frontend/OpenMP/OMP.td | 3
-rw-r--r--  llvm/include/llvm/IR/Instructions.h | 7
-rw-r--r--  llvm/include/llvm/IR/IntrinsicInst.h | 20
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAArch64.td | 30
-rw-r--r--  llvm/include/llvm/IR/PatternMatch.h | 8
-rw-r--r--  llvm/include/llvm/IR/ProfDataUtils.h | 8
-rw-r--r--  llvm/include/llvm/IR/ValueMap.h | 105
-rw-r--r--  llvm/include/llvm/MC/MCRegisterInfo.h | 4
-rw-r--r--  llvm/include/llvm/Object/ELF.h | 6
-rw-r--r--  llvm/include/llvm/Support/FileSystem.h | 12
-rw-r--r--  llvm/include/llvm/Support/Format.h | 14
-rw-r--r--  llvm/include/llvm/Support/FormatProviders.h | 19
-rw-r--r--  llvm/include/llvm/Support/FormatVariadicDetails.h | 3
-rw-r--r--  llvm/include/llvm/Support/HashBuilder.h | 3
-rw-r--r--  llvm/include/llvm/Support/InstructionCost.h | 16
-rw-r--r--  llvm/include/llvm/Support/Path.h | 12
-rw-r--r--  llvm/include/llvm/Support/TargetOpcodes.def | 3
-rw-r--r--  llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h | 6
-rw-r--r--  llvm/include/llvm/Target/GenericOpcodes.td | 7
-rw-r--r--  llvm/include/llvm/Target/TargetMachine.h | 4
-rw-r--r--  llvm/include/llvm/Transforms/Scalar/GVN.h | 2
-rw-r--r--  llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h | 7
41 files changed, 449 insertions, 265 deletions
diff --git a/llvm/include/llvm/ADT/BitVector.h b/llvm/include/llvm/ADT/BitVector.h
index 83350e6..9e81a4b 100644
--- a/llvm/include/llvm/ADT/BitVector.h
+++ b/llvm/include/llvm/ADT/BitVector.h
@@ -570,10 +570,7 @@ public:
template <class F, class... ArgTys>
static BitVector &apply(F &&f, BitVector &Out, BitVector const &Arg,
ArgTys const &...Args) {
- assert(llvm::all_of(
- std::initializer_list<unsigned>{Args.size()...},
- [&Arg](auto const &BV) { return Arg.size() == BV; }) &&
- "consistent sizes");
+ assert(((Arg.size() == Args.size()) && ...) && "consistent sizes");
Out.resize(Arg.size());
for (size_type I = 0, E = Arg.Bits.size(); I != E; ++I)
Out.Bits[I] = f(Arg.Bits[I], Args.Bits[I]...);
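
Note: the new assert collapses the old initializer_list/llvm::all_of check into a C++17 fold expression. A standalone sketch of the same pattern (plain C++, illustrative names, not the LLVM code itself):

#include <cassert>
#include <vector>

// Every trailing container must have the same size() as the first one; the
// fold expression expands to (Arg.size() == A1.size()) && (Arg.size() == A2.size()) && ...
template <class First, class... Rest>
void checkConsistentSizes(const First &Arg, const Rest &...Args) {
  assert(((Arg.size() == Args.size()) && ...) && "consistent sizes");
}

int main() {
  std::vector<int> A(4), B(4), C(4);
  checkConsistentSizes(A, B, C); // passes; any mismatched size would assert
}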
diff --git a/llvm/include/llvm/ADT/ConcurrentHashtable.h b/llvm/include/llvm/ADT/ConcurrentHashtable.h
index 6de194d..6a943c5 100644
--- a/llvm/include/llvm/ADT/ConcurrentHashtable.h
+++ b/llvm/include/llvm/ADT/ConcurrentHashtable.h
@@ -253,9 +253,8 @@ public:
OS << "\nOverall number of entries = " << OverallNumberOfEntries;
OS << "\nOverall number of non empty buckets = " << NumberOfNonEmptyBuckets;
- for (auto &BucketSize : BucketSizesMap)
- OS << "\n Number of buckets with size " << BucketSize.first << ": "
- << BucketSize.second;
+ for (auto [Size, Count] : BucketSizesMap)
+ OS << "\n Number of buckets with size " << Size << ": " << Count;
std::stringstream stream;
stream << std::fixed << std::setprecision(2)
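
Note: the loop now uses structured bindings instead of .first/.second. A minimal standalone illustration of the pattern:

#include <iostream>
#include <map>

int main() {
  std::map<unsigned, unsigned> BucketSizesMap = {{1, 7}, {2, 3}};
  // Structured bindings name the key/value of each map element directly.
  for (auto [Size, Count] : BucketSizesMap)
    std::cout << "Number of buckets with size " << Size << ": " << Count << '\n';
}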
diff --git a/llvm/include/llvm/ADT/DirectedGraph.h b/llvm/include/llvm/ADT/DirectedGraph.h
index 83c0bea..fb6b180 100644
--- a/llvm/include/llvm/ADT/DirectedGraph.h
+++ b/llvm/include/llvm/ADT/DirectedGraph.h
@@ -181,16 +181,6 @@ public:
DirectedGraph() = default;
explicit DirectedGraph(NodeType &N) : Nodes() { addNode(N); }
- DirectedGraph(const DGraphType &G) : Nodes(G.Nodes) {}
- DirectedGraph(DGraphType &&RHS) : Nodes(std::move(RHS.Nodes)) {}
- DGraphType &operator=(const DGraphType &G) {
- Nodes = G.Nodes;
- return *this;
- }
- DGraphType &operator=(const DGraphType &&G) {
- Nodes = std::move(G.Nodes);
- return *this;
- }
const_iterator begin() const { return Nodes.begin(); }
const_iterator end() const { return Nodes.end(); }
diff --git a/llvm/include/llvm/ADT/IntervalTree.h b/llvm/include/llvm/ADT/IntervalTree.h
index 918c862..d14de06 100644
--- a/llvm/include/llvm/ADT/IntervalTree.h
+++ b/llvm/include/llvm/ADT/IntervalTree.h
@@ -236,8 +236,7 @@ public:
//===----------------------------------------------------------------------===//
// Helper class template that is used by the IntervalTree to ensure that one
// does instantiate using only fundamental and/or pointer types.
-template <typename T>
-using PointTypeIsValid = std::bool_constant<std::is_fundamental<T>::value>;
+template <typename T> using PointTypeIsValid = std::is_fundamental<T>;
template <typename T>
using ValueTypeIsValid = std::bool_constant<std::is_fundamental<T>::value ||
diff --git a/llvm/include/llvm/ADT/SmallPtrSet.h b/llvm/include/llvm/ADT/SmallPtrSet.h
index e24cd641..f588a77 100644
--- a/llvm/include/llvm/ADT/SmallPtrSet.h
+++ b/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -476,18 +476,20 @@ public:
}
[[nodiscard]] iterator begin() const {
- if (shouldReverseIterate())
+ if constexpr (shouldReverseIterate())
return makeIterator(EndPointer() - 1);
- return makeIterator(CurArray);
+ else
+ return makeIterator(CurArray);
}
[[nodiscard]] iterator end() const { return makeIterator(EndPointer()); }
private:
/// Create an iterator that dereferences to same place as the given pointer.
iterator makeIterator(const void *const *P) const {
- if (shouldReverseIterate())
+ if constexpr (shouldReverseIterate())
return iterator(P == EndPointer() ? CurArray : P + 1, CurArray, *this);
- return iterator(P, EndPointer(), *this);
+ else
+ return iterator(P, EndPointer(), *this);
}
};
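
Note: shouldReverseIterate() is evaluable at compile time, so if constexpr discards the untaken branch instead of leaving a runtime test. A hedged standalone sketch of the pattern (the EXPENSIVE_CHECKS macro below is illustrative, not LLVM's actual configuration knob):

#include <cstdio>

constexpr bool shouldReverseIterate() {
#ifdef EXPENSIVE_CHECKS // illustrative build flag for this sketch
  return true;
#else
  return false;
#endif
}

int pickFirstIndex(int Size) {
  // With if constexpr only the taken branch is emitted; the other branch is
  // discarded during compilation rather than tested at run time.
  if constexpr (shouldReverseIterate())
    return Size - 1;
  else
    return 0;
}

int main() { std::printf("%d\n", pickFirstIndex(8)); }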
diff --git a/llvm/include/llvm/Analysis/IR2Vec.h b/llvm/include/llvm/Analysis/IR2Vec.h
index 3671c1c..ed43f19 100644
--- a/llvm/include/llvm/Analysis/IR2Vec.h
+++ b/llvm/include/llvm/Analysis/IR2Vec.h
@@ -36,6 +36,7 @@
#define LLVM_ANALYSIS_IR2VEC_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
@@ -44,6 +45,7 @@
#include "llvm/Support/JSON.h"
#include <array>
#include <map>
+#include <optional>
namespace llvm {
@@ -143,6 +145,80 @@ public:
using InstEmbeddingsMap = DenseMap<const Instruction *, Embedding>;
using BBEmbeddingsMap = DenseMap<const BasicBlock *, Embedding>;
+/// Generic storage class for section-based vocabularies.
+/// VocabStorage provides a generic foundation for storing and accessing
+/// embeddings organized into sections.
+class VocabStorage {
+private:
+ /// Section-based storage
+ std::vector<std::vector<Embedding>> Sections;
+
+ const size_t TotalSize;
+ const unsigned Dimension;
+
+public:
+ /// Default constructor creates empty storage (invalid state)
+ VocabStorage() : Sections(), TotalSize(0), Dimension(0) {}
+
+ /// Create a VocabStorage with pre-organized section data
+ VocabStorage(std::vector<std::vector<Embedding>> &&SectionData);
+
+ VocabStorage(VocabStorage &&) = default;
+ VocabStorage &operator=(VocabStorage &&) = delete;
+
+ VocabStorage(const VocabStorage &) = delete;
+ VocabStorage &operator=(const VocabStorage &) = delete;
+
+ /// Get total number of entries across all sections
+ size_t size() const { return TotalSize; }
+
+ /// Get number of sections
+ unsigned getNumSections() const {
+ return static_cast<unsigned>(Sections.size());
+ }
+
+ /// Section-based access: Storage[sectionId][localIndex]
+ const std::vector<Embedding> &operator[](unsigned SectionId) const {
+ assert(SectionId < Sections.size() && "Invalid section ID");
+ return Sections[SectionId];
+ }
+
+ /// Get vocabulary dimension
+ unsigned getDimension() const { return Dimension; }
+
+ /// Check if vocabulary is valid (has data)
+ bool isValid() const { return TotalSize > 0; }
+
+ /// Iterator support for section-based access
+ class const_iterator {
+ const VocabStorage *Storage;
+ unsigned SectionId = 0;
+ size_t LocalIndex = 0;
+
+ public:
+ const_iterator(const VocabStorage *Storage, unsigned SectionId,
+ size_t LocalIndex)
+ : Storage(Storage), SectionId(SectionId), LocalIndex(LocalIndex) {}
+
+ LLVM_ABI const Embedding &operator*() const;
+ LLVM_ABI const_iterator &operator++();
+ LLVM_ABI bool operator==(const const_iterator &Other) const;
+ LLVM_ABI bool operator!=(const const_iterator &Other) const;
+ };
+
+ const_iterator begin() const { return const_iterator(this, 0, 0); }
+ const_iterator end() const {
+ return const_iterator(this, getNumSections(), 0);
+ }
+
+ using VocabMap = std::map<std::string, Embedding>;
+ /// Parse a vocabulary section from JSON and populate the target vocabulary
+ /// map.
+ static Error parseVocabSection(StringRef Key,
+ const json::Value &ParsedVocabValue,
+ VocabMap &TargetVocab, unsigned &Dim);
+};
+
/// Class for storing and accessing the IR2Vec vocabulary.
/// The Vocabulary class manages seed embeddings for LLVM IR entities. The
/// seed embeddings are the initial learned representations of the entities
@@ -162,15 +238,42 @@ using BBEmbeddingsMap = DenseMap<const BasicBlock *, Embedding>;
/// embeddings.
class Vocabulary {
friend class llvm::IR2VecVocabAnalysis;
- using VocabVector = std::vector<ir2vec::Embedding>;
- VocabVector Vocab;
-public:
- // Slot layout:
- // [0 .. MaxOpcodes-1] => Instruction opcodes
- // [MaxOpcodes .. MaxOpcodes+MaxCanonicalTypeIDs-1] => Canonicalized types
- // [MaxOpcodes+MaxCanonicalTypeIDs .. NumCanonicalEntries-1] => Operand kinds
+ // Vocabulary Layout:
+ // +----------------+------------------------------------------------------+
+ // | Entity Type | Index Range |
+ // +----------------+------------------------------------------------------+
+ // | Opcodes | [0 .. (MaxOpcodes-1)] |
+ // | Canonical Types| [MaxOpcodes .. (MaxOpcodes+MaxCanonicalTypeIDs-1)] |
+ // | Operands | [(MaxOpcodes+MaxCanonicalTypeIDs) .. NumCanEntries] |
+ // +----------------+------------------------------------------------------+
+ // Note: MaxOpcodes is the number of unique opcodes supported by LLVM IR.
+ // MaxCanonicalTypeIDs is the number of canonicalized type IDs.
+ // "Similar" LLVM Types are grouped/canonicalized together. E.g., all
+ // float variants (FloatTy, DoubleTy, HalfTy, etc.) map to
+ // CanonicalTypeID::FloatTy. This helps reduce the vocabulary size
+ // and improves learning. Operands include Comparison predicates
+ // (ICmp/FCmp) along with other operand types. This can be extended to
+ // include other specializations in future.
+ enum class Section : unsigned {
+ Opcodes = 0,
+ CanonicalTypes = 1,
+ Operands = 2,
+ Predicates = 3,
+ MaxSections
+ };
+
+ // Use section-based storage for better organization and efficiency
+ VocabStorage Storage;
+
+ static constexpr unsigned NumICmpPredicates =
+ static_cast<unsigned>(CmpInst::LAST_ICMP_PREDICATE) -
+ static_cast<unsigned>(CmpInst::FIRST_ICMP_PREDICATE) + 1;
+ static constexpr unsigned NumFCmpPredicates =
+ static_cast<unsigned>(CmpInst::LAST_FCMP_PREDICATE) -
+ static_cast<unsigned>(CmpInst::FIRST_FCMP_PREDICATE) + 1;
+public:
/// Canonical type IDs supported by IR2Vec Vocabulary
enum class CanonicalTypeID : unsigned {
FloatTy,
@@ -207,59 +310,114 @@ public:
static_cast<unsigned>(CanonicalTypeID::MaxCanonicalType);
static constexpr unsigned MaxOperandKinds =
static_cast<unsigned>(OperandKind::MaxOperandKind);
+ // CmpInst::Predicate has gaps. We want the vocabulary to be dense without
+ // empty slots.
+ static constexpr unsigned MaxPredicateKinds =
+ NumICmpPredicates + NumFCmpPredicates;
Vocabulary() = default;
- LLVM_ABI Vocabulary(VocabVector &&Vocab) : Vocab(std::move(Vocab)) {}
+ LLVM_ABI Vocabulary(VocabStorage &&Storage) : Storage(std::move(Storage)) {}
+
+ Vocabulary(const Vocabulary &) = delete;
+ Vocabulary &operator=(const Vocabulary &) = delete;
- LLVM_ABI bool isValid() const { return Vocab.size() == NumCanonicalEntries; };
- LLVM_ABI unsigned getDimension() const;
- /// Total number of entries (opcodes + canonicalized types + operand kinds)
+ Vocabulary(Vocabulary &&) = default;
+ Vocabulary &operator=(Vocabulary &&Other) = delete;
+
+ LLVM_ABI bool isValid() const {
+ return Storage.size() == NumCanonicalEntries;
+ }
+
+ LLVM_ABI unsigned getDimension() const {
+ assert(isValid() && "IR2Vec Vocabulary is invalid");
+ return Storage.getDimension();
+ }
+
+ /// Total number of entries (opcodes + canonicalized types + operand kinds +
+ /// predicates)
static constexpr size_t getCanonicalSize() { return NumCanonicalEntries; }
/// Function to get vocabulary key for a given Opcode
LLVM_ABI static StringRef getVocabKeyForOpcode(unsigned Opcode);
/// Function to get vocabulary key for a given TypeID
- LLVM_ABI static StringRef getVocabKeyForTypeID(Type::TypeID TypeID);
+ LLVM_ABI static StringRef getVocabKeyForTypeID(Type::TypeID TypeID) {
+ return getVocabKeyForCanonicalTypeID(getCanonicalTypeID(TypeID));
+ }
/// Function to get vocabulary key for a given OperandKind
- LLVM_ABI static StringRef getVocabKeyForOperandKind(OperandKind Kind);
+ LLVM_ABI static StringRef getVocabKeyForOperandKind(OperandKind Kind) {
+ unsigned Index = static_cast<unsigned>(Kind);
+ assert(Index < MaxOperandKinds && "Invalid OperandKind");
+ return OperandKindNames[Index];
+ }
/// Function to classify an operand into OperandKind
LLVM_ABI static OperandKind getOperandKind(const Value *Op);
- /// Functions to return the slot index or position of a given Opcode, TypeID,
- /// or OperandKind in the vocabulary.
- LLVM_ABI static unsigned getSlotIndex(unsigned Opcode);
- LLVM_ABI static unsigned getSlotIndex(Type::TypeID TypeID);
- LLVM_ABI static unsigned getSlotIndex(const Value &Op);
+ /// Function to get vocabulary key for a given predicate
+ LLVM_ABI static StringRef getVocabKeyForPredicate(CmpInst::Predicate P);
+
+ /// Functions to return flat index
+ LLVM_ABI static unsigned getIndex(unsigned Opcode) {
+ assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode");
+ return Opcode - 1; // Convert to zero-based index
+ }
+
+ LLVM_ABI static unsigned getIndex(Type::TypeID TypeID) {
+ assert(static_cast<unsigned>(TypeID) < MaxTypeIDs && "Invalid type ID");
+ return MaxOpcodes + static_cast<unsigned>(getCanonicalTypeID(TypeID));
+ }
+
+ LLVM_ABI static unsigned getIndex(const Value &Op) {
+ unsigned Index = static_cast<unsigned>(getOperandKind(&Op));
+ assert(Index < MaxOperandKinds && "Invalid OperandKind");
+ return OperandBaseOffset + Index;
+ }
+
+ LLVM_ABI static unsigned getIndex(CmpInst::Predicate P) {
+ return PredicateBaseOffset + getPredicateLocalIndex(P);
+ }
/// Accessors to get the embedding for a given entity.
- LLVM_ABI const ir2vec::Embedding &operator[](unsigned Opcode) const;
- LLVM_ABI const ir2vec::Embedding &operator[](Type::TypeID TypeId) const;
- LLVM_ABI const ir2vec::Embedding &operator[](const Value &Arg) const;
+ LLVM_ABI const ir2vec::Embedding &operator[](unsigned Opcode) const {
+ assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode");
+ return Storage[static_cast<unsigned>(Section::Opcodes)][Opcode - 1];
+ }
+
+ LLVM_ABI const ir2vec::Embedding &operator[](Type::TypeID TypeID) const {
+ assert(static_cast<unsigned>(TypeID) < MaxTypeIDs && "Invalid type ID");
+ unsigned LocalIndex = static_cast<unsigned>(getCanonicalTypeID(TypeID));
+ return Storage[static_cast<unsigned>(Section::CanonicalTypes)][LocalIndex];
+ }
+
+ LLVM_ABI const ir2vec::Embedding &operator[](const Value &Arg) const {
+ unsigned LocalIndex = static_cast<unsigned>(getOperandKind(&Arg));
+ assert(LocalIndex < MaxOperandKinds && "Invalid OperandKind");
+ return Storage[static_cast<unsigned>(Section::Operands)][LocalIndex];
+ }
+
+ LLVM_ABI const ir2vec::Embedding &operator[](CmpInst::Predicate P) const {
+ unsigned LocalIndex = getPredicateLocalIndex(P);
+ return Storage[static_cast<unsigned>(Section::Predicates)][LocalIndex];
+ }
/// Const Iterator type aliases
- using const_iterator = VocabVector::const_iterator;
+ using const_iterator = VocabStorage::const_iterator;
+
const_iterator begin() const {
assert(isValid() && "IR2Vec Vocabulary is invalid");
- return Vocab.begin();
+ return Storage.begin();
}
- const_iterator cbegin() const {
- assert(isValid() && "IR2Vec Vocabulary is invalid");
- return Vocab.cbegin();
- }
+ const_iterator cbegin() const { return begin(); }
const_iterator end() const {
assert(isValid() && "IR2Vec Vocabulary is invalid");
- return Vocab.end();
+ return Storage.end();
}
- const_iterator cend() const {
- assert(isValid() && "IR2Vec Vocabulary is invalid");
- return Vocab.cend();
- }
+ const_iterator cend() const { return end(); }
/// Returns the string key for a given index position in the vocabulary.
/// This is useful for debugging or printing the vocabulary. Do not use this
@@ -267,14 +425,24 @@ public:
LLVM_ABI static StringRef getStringKey(unsigned Pos);
/// Create a dummy vocabulary for testing purposes.
- LLVM_ABI static VocabVector createDummyVocabForTest(unsigned Dim = 1);
+ LLVM_ABI static VocabStorage createDummyVocabForTest(unsigned Dim = 1);
LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
ModuleAnalysisManager::Invalidator &Inv) const;
private:
constexpr static unsigned NumCanonicalEntries =
- MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds;
+ MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds + MaxPredicateKinds;
+
+ // Base offsets for flat index computation
+ constexpr static unsigned OperandBaseOffset =
+ MaxOpcodes + MaxCanonicalTypeIDs;
+ constexpr static unsigned PredicateBaseOffset =
+ OperandBaseOffset + MaxOperandKinds;
+
+ /// Functions for predicate index calculations
+ static unsigned getPredicateLocalIndex(CmpInst::Predicate P);
+ static CmpInst::Predicate getPredicateFromLocalIndex(unsigned LocalIndex);
/// String mappings for CanonicalTypeID values
static constexpr StringLiteral CanonicalTypeNames[] = {
@@ -322,10 +490,26 @@ private:
/// Function to get vocabulary key for canonical type by enum
LLVM_ABI static StringRef
- getVocabKeyForCanonicalTypeID(CanonicalTypeID CType);
+ getVocabKeyForCanonicalTypeID(CanonicalTypeID CType) {
+ unsigned Index = static_cast<unsigned>(CType);
+ assert(Index < MaxCanonicalTypeIDs && "Invalid CanonicalTypeID");
+ return CanonicalTypeNames[Index];
+ }
/// Function to convert TypeID to CanonicalTypeID
- LLVM_ABI static CanonicalTypeID getCanonicalTypeID(Type::TypeID TypeID);
+ LLVM_ABI static CanonicalTypeID getCanonicalTypeID(Type::TypeID TypeID) {
+ unsigned Index = static_cast<unsigned>(TypeID);
+ assert(Index < MaxTypeIDs && "Invalid TypeID");
+ return TypeIDMapping[Index];
+ }
+
+ /// Function to get the predicate enum value for a given index. Index is
+ /// relative to the predicates section of the vocabulary. E.g., Index 0
+ /// corresponds to the first predicate.
+ LLVM_ABI static CmpInst::Predicate getPredicate(unsigned Index) {
+ assert(Index < MaxPredicateKinds && "Invalid predicate index");
+ return getPredicateFromLocalIndex(Index);
+ }
};
/// Embedder provides the interface to generate embeddings (vector
@@ -418,22 +602,20 @@ public:
/// mapping between an entity of the IR (like opcode, type, argument, etc.) and
/// its corresponding embedding.
class IR2VecVocabAnalysis : public AnalysisInfoMixin<IR2VecVocabAnalysis> {
- using VocabVector = std::vector<ir2vec::Embedding>;
using VocabMap = std::map<std::string, ir2vec::Embedding>;
- VocabMap OpcVocab, TypeVocab, ArgVocab;
- VocabVector Vocab;
+ std::optional<ir2vec::VocabStorage> Vocab;
- Error readVocabulary();
- Error parseVocabSection(StringRef Key, const json::Value &ParsedVocabValue,
- VocabMap &TargetVocab, unsigned &Dim);
- void generateNumMappedVocab();
+ Error readVocabulary(VocabMap &OpcVocab, VocabMap &TypeVocab,
+ VocabMap &ArgVocab);
+ void generateVocabStorage(VocabMap &OpcVocab, VocabMap &TypeVocab,
+ VocabMap &ArgVocab);
void emitError(Error Err, LLVMContext &Ctx);
public:
LLVM_ABI static AnalysisKey Key;
IR2VecVocabAnalysis() = default;
- LLVM_ABI explicit IR2VecVocabAnalysis(const VocabVector &Vocab);
- LLVM_ABI explicit IR2VecVocabAnalysis(VocabVector &&Vocab);
+ LLVM_ABI explicit IR2VecVocabAnalysis(ir2vec::VocabStorage &&Vocab)
+ : Vocab(std::move(Vocab)) {}
using Result = ir2vec::Vocabulary;
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &MAM);
};
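
Note: the reworked Vocabulary stores embeddings per section and derives a flat index from per-section base offsets. A sketch of the offset arithmetic under assumed section sizes (the constants below are illustrative, not the real LLVM counts):

#include <cstdio>

// Illustrative section sizes; the real values come from LLVM's opcode,
// canonical-type, operand-kind and predicate counts.
constexpr unsigned MaxOpcodes = 68;
constexpr unsigned MaxCanonicalTypeIDs = 8;
constexpr unsigned MaxOperandKinds = 4;
constexpr unsigned MaxPredicateKinds = 26;

// Base offset of each section = sum of the sizes of the sections before it.
constexpr unsigned OperandBaseOffset = MaxOpcodes + MaxCanonicalTypeIDs;
constexpr unsigned PredicateBaseOffset = OperandBaseOffset + MaxOperandKinds;
constexpr unsigned NumCanonicalEntries = PredicateBaseOffset + MaxPredicateKinds;

// Flat index = base offset of the entity's section + its local index.
constexpr unsigned flatOpcodeIndex(unsigned Opcode) { return Opcode - 1; }
constexpr unsigned flatOperandIndex(unsigned Kind) { return OperandBaseOffset + Kind; }
constexpr unsigned flatPredicateIndex(unsigned Local) { return PredicateBaseOffset + Local; }

int main() {
  static_assert(flatPredicateIndex(0) ==
                MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds);
  std::printf("opcode 1 -> %u, total entries: %u\n", flatOpcodeIndex(1),
              NumCanonicalEntries);
}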
diff --git a/llvm/include/llvm/Analysis/MemoryProfileInfo.h b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
index be690a4..571caf9 100644
--- a/llvm/include/llvm/Analysis/MemoryProfileInfo.h
+++ b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
@@ -59,14 +59,6 @@ LLVM_ABI std::string getAllocTypeAttributeString(AllocationType Type);
/// True if the AllocTypes bitmask contains just a single type.
LLVM_ABI bool hasSingleAllocType(uint8_t AllocTypes);
-/// Removes any existing "ambiguous" memprof attribute. Called before we apply a
-/// specific allocation type such as "cold", "notcold", or "hot".
-LLVM_ABI void removeAnyExistingAmbiguousAttribute(CallBase *CB);
-
-/// Adds an "ambiguous" memprof attribute to call with a matched allocation
-/// profile but that we haven't yet been able to disambiguate.
-LLVM_ABI void addAmbiguousAttribute(CallBase *CB);
-
/// Class to build a trie of call stack contexts for a particular profiled
/// allocation call, along with their associated allocation types.
/// The allocation will be at the root of the trie, which is then used to
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
index 7a45ae9..164b46b 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
@@ -184,6 +184,7 @@ m_scev_PtrToInt(const Op0_t &Op0) {
/// Match a binary SCEV.
template <typename SCEVTy, typename Op0_t, typename Op1_t,
+ SCEV::NoWrapFlags WrapFlags = SCEV::FlagAnyWrap,
bool Commutable = false>
struct SCEVBinaryExpr_match {
Op0_t Op0;
@@ -192,6 +193,10 @@ struct SCEVBinaryExpr_match {
SCEVBinaryExpr_match(Op0_t Op0, Op1_t Op1) : Op0(Op0), Op1(Op1) {}
bool match(const SCEV *S) const {
+ if (auto WrappingS = dyn_cast<SCEVNAryExpr>(S))
+ if (WrappingS->getNoWrapFlags(WrapFlags) != WrapFlags)
+ return false;
+
auto *E = dyn_cast<SCEVTy>(S);
return E && E->getNumOperands() == 2 &&
((Op0.match(E->getOperand(0)) && Op1.match(E->getOperand(1))) ||
@@ -201,10 +206,12 @@ struct SCEVBinaryExpr_match {
};
template <typename SCEVTy, typename Op0_t, typename Op1_t,
+ SCEV::NoWrapFlags WrapFlags = SCEV::FlagAnyWrap,
bool Commutable = false>
-inline SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, Commutable>
+inline SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, WrapFlags, Commutable>
m_scev_Binary(const Op0_t &Op0, const Op1_t &Op1) {
- return SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, Commutable>(Op0, Op1);
+ return SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, WrapFlags, Commutable>(Op0,
+ Op1);
}
template <typename Op0_t, typename Op1_t>
@@ -220,9 +227,17 @@ m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1) {
}
template <typename Op0_t, typename Op1_t>
-inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, true>
+inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true>
m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1) {
- return m_scev_Binary<SCEVMulExpr, Op0_t, Op1_t, true>(Op0, Op1);
+ return m_scev_Binary<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true>(Op0,
+ Op1);
+}
+
+template <typename Op0_t, typename Op1_t>
+inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true>
+m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_scev_Binary<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true>(Op0,
+ Op1);
}
template <typename Op0_t, typename Op1_t>
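
Note: the matcher now carries the required no-wrap flags as a defaulted non-type template parameter, so existing matchers stay permissive while new ones such as m_scev_c_NUWMul can demand NUW. A standalone sketch of that technique (simplified types, not the SCEV classes):

#include <cstdio>

enum NoWrapFlags : unsigned { FlagAnyWrap = 0, FlagNUW = 1 << 0, FlagNSW = 1 << 1 };

struct Expr { unsigned Flags; };

// The required flags default to the permissive FlagAnyWrap (mask 0), so a
// matcher only rejects expressions when a caller explicitly demands flags.
template <NoWrapFlags Required = FlagAnyWrap>
bool matchExpr(const Expr &E) {
  return (E.Flags & Required) == Required;
}

int main() {
  Expr E{FlagNUW};
  std::printf("%d %d\n", matchExpr(E), matchExpr<FlagNSW>(E)); // prints "1 0"
}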
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 15ff129..af218ba 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -613,6 +613,12 @@ LLVM_ABI bool isValidAssumeForContext(const Instruction *I,
const DominatorTree *DT = nullptr,
bool AllowEphemerals = false);
+/// Returns true, if no instruction between \p Assume and \p CtxI may free
+/// memory and the function is marked as NoSync. The latter ensures the current
+/// function cannot arrange for another thread to free on its behalf.
+LLVM_ABI bool willNotFreeBetween(const Instruction *Assume,
+ const Instruction *CtxI);
+
enum class OverflowResult {
/// Always overflows in the direction of signed/unsigned min value.
AlwaysOverflowsLow,
diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h
index 08a7ddb..8944e736 100644
--- a/llvm/include/llvm/BinaryFormat/DXContainer.h
+++ b/llvm/include/llvm/BinaryFormat/DXContainer.h
@@ -844,6 +844,7 @@ struct StaticSampler : public v1::StaticSampler {
enum class RootSignatureVersion {
V1_0 = 0x1,
V1_1 = 0x2,
+ V1_2 = 0x3,
};
} // namespace dxbc
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 22569aa..c0e426c 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -300,6 +300,10 @@ private:
Type *OpType,
LostDebugLocObserver &LocObserver);
+ LegalizeResult emitModfLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
+ unsigned Size, Type *OpType,
+ LostDebugLocObserver &LocObserver);
+
public:
/// Return the alignment to use for a stack temporary object with the given
/// type.
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 0b6033b..40c7792 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -2184,6 +2184,13 @@ public:
return buildInstr(TargetOpcode::G_FSINCOS, {Sin, Cos}, {Src}, Flags);
}
+ /// Build and insert \p Fract, \p Int = G_FMODF \p Src
+ MachineInstrBuilder buildModf(const DstOp &Fract, const DstOp &Int,
+ const SrcOp &Src,
+ std::optional<unsigned> Flags = std::nullopt) {
+ return buildInstr(TargetOpcode::G_FMODF, {Fract, Int}, {Src}, Flags);
+ }
+
/// Build and insert \p Res = G_FCOPYSIGN \p Op0, \p Op1
MachineInstrBuilder buildFCopysign(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1) {
diff --git a/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
index c7304e3..e80c138 100644
--- a/llvm/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -378,6 +378,8 @@ struct ScalarEnumerationTraits<TargetStackID::Value> {
IO.enumCase(ID, "default", TargetStackID::Default);
IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill);
IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
+ IO.enumCase(ID, "scalable-predicate-vector",
+ TargetStackID::ScalablePredicateVector);
IO.enumCase(ID, "wasm-local", TargetStackID::WasmLocal);
IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc);
}
diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index 00c7343..50ce931 100644
--- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -497,7 +497,18 @@ public:
/// Should this stack ID be considered in MaxAlignment.
bool contributesToMaxAlignment(uint8_t StackID) {
return StackID == TargetStackID::Default ||
- StackID == TargetStackID::ScalableVector;
+ StackID == TargetStackID::ScalableVector ||
+ StackID == TargetStackID::ScalablePredicateVector;
+ }
+
+ bool hasScalableStackID(int ObjectIdx) const {
+ uint8_t StackID = getStackID(ObjectIdx);
+ return isScalableStackID(StackID);
+ }
+
+ bool isScalableStackID(uint8_t StackID) const {
+ return StackID == TargetStackID::ScalableVector ||
+ StackID == TargetStackID::ScalablePredicateVector;
}
/// setObjectAlignment - Change the alignment of the specified stack object.
diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
index 0e29e45..75696faf 100644
--- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -32,6 +32,7 @@ enum Value {
SGPRSpill = 1,
ScalableVector = 2,
WasmLocal = 3,
+ ScalablePredicateVector = 4,
NoAlloc = 255
};
}
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index c45e03a..88691b9 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -480,13 +480,6 @@ public:
return true;
}
- /// Return true if the @llvm.vector.partial.reduce.* intrinsic
- /// should be expanded using generic code in SelectionDAGBuilder.
- virtual bool
- shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const {
- return true;
- }
-
/// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded
/// using generic code in SelectionDAGBuilder.
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
@@ -4661,23 +4654,6 @@ public:
return false;
}
- /// Allows the target to handle physreg-carried dependency
- /// in target-specific way. Used from the ScheduleDAGSDNodes to decide whether
- /// to add the edge to the dependency graph.
- /// Def - input: Selection DAG node defininfg physical register
- /// User - input: Selection DAG node using physical register
- /// Op - input: Number of User operand
- /// PhysReg - inout: set to the physical register if the edge is
- /// necessary, unchanged otherwise
- /// Cost - inout: physical register copy cost.
- /// Returns 'true' is the edge is necessary, 'false' otherwise
- virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
- const TargetRegisterInfo *TRI,
- const TargetInstrInfo *TII,
- MCRegister &PhysReg, int &Cost) const {
- return false;
- }
-
/// Target-specific combining of register parts into its original value
virtual SDValue
joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index bf133f0..822245f 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -109,10 +109,15 @@ public:
return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
}
- /// Return the cost of copying a value between two registers in this class.
- /// A negative number means the register class is very expensive
- /// to copy e.g. status flag register classes.
- int getCopyCost() const { return MC->getCopyCost(); }
+ /// Return the cost of copying a value between two registers in this class. If
+ /// this is the maximum value, the register may be impossible to copy.
+ uint8_t getCopyCost() const { return MC->getCopyCost(); }
+
+ /// \return true if register class is very expensive to copy e.g. status flag
+ /// register classes.
+ bool expensiveOrImpossibleToCopy() const {
+ return MC->getCopyCost() == std::numeric_limits<uint8_t>::max();
+ }
/// Return true if this register class may be used to create virtual
/// registers.
diff --git a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h
index 52af205..ffe0b50 100644
--- a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h
+++ b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h
@@ -179,6 +179,7 @@ public:
class DWARFDataExtractorSimple
: public DWARFDataExtractorBase<DWARFDataExtractorSimple> {
+public:
using DWARFDataExtractorBase::DWARFDataExtractorBase;
LLVM_ABI uint64_t getRelocatedValueImpl(uint32_t Size, uint64_t *Off,
diff --git a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h
index 87777fd..edee6a7 100644
--- a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h
+++ b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h
@@ -56,7 +56,8 @@ struct RootDescriptor {
return;
}
- assert(Version == llvm::dxbc::RootSignatureVersion::V1_1 &&
+ assert((Version == llvm::dxbc::RootSignatureVersion::V1_1 ||
+ Version == llvm::dxbc::RootSignatureVersion::V1_2) &&
"Specified an invalid root signature version");
switch (Type) {
case dxil::ResourceClass::CBuffer:
@@ -100,7 +101,8 @@ struct DescriptorTableClause {
return;
}
- assert(Version == dxbc::RootSignatureVersion::V1_1 &&
+ assert((Version == dxbc::RootSignatureVersion::V1_1 ||
+ Version == dxbc::RootSignatureVersion::V1_2) &&
"Specified an invalid root signature version");
switch (Type) {
case dxil::ResourceClass::CBuffer:
@@ -131,6 +133,7 @@ struct StaticSampler {
float MaxLOD = std::numeric_limits<float>::max();
uint32_t Space = 0;
dxbc::ShaderVisibility Visibility = dxbc::ShaderVisibility::All;
+ dxbc::StaticSamplerFlags Flags = dxbc::StaticSamplerFlags::None;
};
/// Models RootElement : RootFlags | RootConstants | RootParam
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td
index 38f95a1..bba0d6e 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -1333,6 +1333,9 @@ def OMP_Tile : Directive<[Spelling<"tile">]> {
let allowedOnceClauses = [
VersionedClause<OMPC_Sizes, 51>,
];
+ let requiredClauses = [
+ VersionedClause<OMPC_Sizes, 51>,
+ ];
let association = AS_Loop;
let category = CA_Executable;
}
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 95a0a7f..de7a237 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -32,6 +32,7 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
@@ -3536,8 +3537,6 @@ class SwitchInstProfUpdateWrapper {
bool Changed = false;
protected:
- LLVM_ABI MDNode *buildProfBranchWeightsMD();
-
LLVM_ABI void init();
public:
@@ -3549,8 +3548,8 @@ public:
SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
~SwitchInstProfUpdateWrapper() {
- if (Changed)
- SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
+ if (Changed && Weights.has_value() && Weights->size() >= 2)
+ setBranchWeights(SI, Weights.value(), /*IsExpected=*/false);
}
/// Delegate the call to the underlying SwitchInst::removeCase() and remove
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index eb0440f..0622bfa 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -810,6 +810,26 @@ public:
/// Whether the intrinsic is signed or unsigned.
bool isSigned() const { return isSigned(getIntrinsicID()); };
+ /// Whether the intrinsic is a smin or umin.
+ static bool isMin(Intrinsic::ID ID) {
+ switch (ID) {
+ case Intrinsic::umin:
+ case Intrinsic::smin:
+ return true;
+ case Intrinsic::umax:
+ case Intrinsic::smax:
+ return false;
+ default:
+ llvm_unreachable("Invalid intrinsic");
+ }
+ }
+
+ /// Whether the intrinsic is a smin or a umin.
+ bool isMin() const { return isMin(getIntrinsicID()); }
+
+ /// Whether the intrinsic is a smax or a umax.
+ bool isMax() const { return !isMin(getIntrinsicID()); }
+
/// Min/max intrinsics are monotonic, they operate on a fixed-bitwidth values,
/// so there is a certain threshold value, upon reaching which,
/// their value can no longer change. Return said threshold.
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 7c9aef5..fbc92d7 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -130,8 +130,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
class AdvSIMD_1VectorArg_Expand_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
- class AdvSIMD_1VectorArg_Long_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
class AdvSIMD_1IntArg_Narrow_Intrinsic
: DefaultAttrsIntrinsic<[llvm_any_ty], [llvm_any_ty], [IntrNoMem]>;
class AdvSIMD_1VectorArg_Narrow_Intrinsic
@@ -150,9 +148,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class AdvSIMD_2VectorArg_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Compare_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
- [IntrNoMem]>;
class AdvSIMD_2Arg_FloatCompare_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
[IntrNoMem]>;
@@ -160,10 +155,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMTruncatedType<0>, LLVMTruncatedType<0>],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Wide_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMTruncatedType<0>],
- [IntrNoMem]>;
class AdvSIMD_2VectorArg_Narrow_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMExtendedType<0>, LLVMExtendedType<0>],
@@ -172,10 +163,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyint_ty],
[LLVMExtendedType<0>, llvm_i32_ty],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMTruncatedType<0>],
@@ -184,10 +171,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMTruncatedType<0>, llvm_i32_ty],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty],
- [IntrNoMem]>;
class AdvSIMD_2VectorArg_Lane_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty],
@@ -205,14 +188,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
[IntrNoMem]>;
- class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty,
- LLVMMatchType<1>], [IntrNoMem]>;
- class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty, llvm_i32_ty],
- [IntrNoMem]>;
class AdvSIMD_CvtFxToFP_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
[IntrNoMem]>;
@@ -238,11 +213,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
[IntrNoMem]>;
- class AdvSIMD_FML_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
- [IntrNoMem]>;
-
class AdvSIMD_BF16FML_Intrinsic
: DefaultAttrsIntrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index 6168e24..2e31fe5 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -2773,6 +2773,14 @@ m_MaskedLoad(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2,
return m_Intrinsic<Intrinsic::masked_load>(Op0, Op1, Op2, Op3);
}
+/// Matches MaskedStore Intrinsic.
+template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3>
+inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty
+m_MaskedStore(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2,
+ const Opnd3 &Op3) {
+ return m_Intrinsic<Intrinsic::masked_store>(Op0, Op1, Op2, Op3);
+}
+
/// Matches MaskedGather Intrinsic.
template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty
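
Note: a hedged usage sketch of the new m_MaskedStore matcher, assuming the usual llvm.masked.store operand order (value, pointer, alignment, mask); the helper name is illustrative:

// Sketch only: match a masked store and pull out the stored value and pointer.
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

bool isMaskedStoreOfValue(Instruction *I, Value *&StoredVal, Value *&Ptr) {
  Value *Align, *Mask;
  return match(I, m_MaskedStore(m_Value(StoredVal), m_Value(Ptr),
                                m_Value(Align), m_Value(Mask)));
}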
diff --git a/llvm/include/llvm/IR/ProfDataUtils.h b/llvm/include/llvm/IR/ProfDataUtils.h
index e97160e..a0876b1 100644
--- a/llvm/include/llvm/IR/ProfDataUtils.h
+++ b/llvm/include/llvm/IR/ProfDataUtils.h
@@ -145,7 +145,13 @@ LLVM_ABI bool extractProfTotalWeight(const Instruction &I,
/// \param Weights an array of weights to set on instruction I.
/// \param IsExpected were these weights added from an llvm.expect* intrinsic.
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights,
- bool IsExpected);
+ bool IsExpected, bool ElideAllZero = false);
+
+/// Variant of `setBranchWeights` where the `Weights` will be fit first to
+/// uint32_t by shifting right.
+LLVM_ABI void setFittedBranchWeights(Instruction &I, ArrayRef<uint64_t> Weights,
+ bool IsExpected,
+ bool ElideAllZero = false);
/// downscale the given weights preserving the ratio. If the maximum value is
/// not already known and not provided via \param KnownMaxCount , it will be
diff --git a/llvm/include/llvm/IR/ValueMap.h b/llvm/include/llvm/IR/ValueMap.h
index 1a11718..97653c2 100644
--- a/llvm/include/llvm/IR/ValueMap.h
+++ b/llvm/include/llvm/IR/ValueMap.h
@@ -42,18 +42,15 @@
namespace llvm {
-template<typename KeyT, typename ValueT, typename Config>
+template <typename KeyT, typename ValueT, typename Config>
class ValueMapCallbackVH;
-template<typename DenseMapT, typename KeyT>
-class ValueMapIterator;
-template<typename DenseMapT, typename KeyT>
-class ValueMapConstIterator;
+template <typename DenseMapT, typename KeyT> class ValueMapIterator;
+template <typename DenseMapT, typename KeyT> class ValueMapConstIterator;
/// This class defines the default behavior for configurable aspects of
/// ValueMap<>. User Configs should inherit from this class to be as compatible
/// as possible with future versions of ValueMap.
-template<typename KeyT, typename MutexT = sys::Mutex>
-struct ValueMapConfig {
+template <typename KeyT, typename MutexT = sys::Mutex> struct ValueMapConfig {
using mutex_type = MutexT;
/// If FollowRAUW is true, the ValueMap will update mappings on RAUW. If it's
@@ -66,21 +63,24 @@ struct ValueMapConfig {
// override all the defaults.
struct ExtraData {};
- template<typename ExtraDataT>
+ template <typename ExtraDataT>
static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
- template<typename ExtraDataT>
- static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}
+ template <typename ExtraDataT>
+ static void onDelete(const ExtraDataT & /*Data*/, KeyT /*Old*/) {}
/// Returns a mutex that should be acquired around any changes to the map.
/// This is only acquired from the CallbackVH (and held around calls to onRAUW
/// and onDelete) and not inside other ValueMap methods. NULL means that no
/// mutex is necessary.
- template<typename ExtraDataT>
- static mutex_type *getMutex(const ExtraDataT &/*Data*/) { return nullptr; }
+ template <typename ExtraDataT>
+ static mutex_type *getMutex(const ExtraDataT & /*Data*/) {
+ return nullptr;
+ }
};
/// See the file comment.
-template<typename KeyT, typename ValueT, typename Config =ValueMapConfig<KeyT>>
+template <typename KeyT, typename ValueT,
+ typename Config = ValueMapConfig<KeyT>>
class ValueMap {
friend class ValueMapCallbackVH<KeyT, ValueT, Config>;
@@ -157,9 +157,7 @@ public:
return Map.find_as(Val) == Map.end() ? 0 : 1;
}
- iterator find(const KeyT &Val) {
- return iterator(Map.find_as(Val));
- }
+ iterator find(const KeyT &Val) { return iterator(Map.find_as(Val)); }
const_iterator find(const KeyT &Val) const {
return const_iterator(Map.find_as(Val));
}
@@ -186,8 +184,7 @@ public:
}
/// insert - Range insertion of pairs.
- template<typename InputIt>
- void insert(InputIt I, InputIt E) {
+ template <typename InputIt> void insert(InputIt I, InputIt E) {
for (; I != E; ++I)
insert(*I);
}
@@ -200,17 +197,13 @@ public:
Map.erase(I);
return true;
}
- void erase(iterator I) {
- return Map.erase(I.base());
- }
+ void erase(iterator I) { return Map.erase(I.base()); }
- value_type& FindAndConstruct(const KeyT &Key) {
+ value_type &FindAndConstruct(const KeyT &Key) {
return Map.FindAndConstruct(Wrap(Key));
}
- ValueT &operator[](const KeyT &Key) {
- return Map[Wrap(Key)];
- }
+ ValueT &operator[](const KeyT &Key) { return Map[Wrap(Key)]; }
/// isPointerIntoBucketsArray - Return true if the specified pointer points
/// somewhere into the ValueMap's array of buckets (i.e. either to a key or
@@ -235,7 +228,7 @@ private:
// the const_cast incorrect) is if it gets inserted into the map. But then
// this function must have been called from a non-const method, making the
// const_cast ok.
- return ValueMapCVH(key, const_cast<ValueMap*>(this));
+ return ValueMapCVH(key, const_cast<ValueMap *>(this));
}
};
@@ -252,7 +245,7 @@ class ValueMapCallbackVH final : public CallbackVH {
ValueMapT *Map;
ValueMapCallbackVH(KeyT Key, ValueMapT *Map)
- : CallbackVH(const_cast<Value*>(static_cast<const Value*>(Key))),
+ : CallbackVH(const_cast<Value *>(static_cast<const Value *>(Key))),
Map(Map) {}
// Private constructor used to create empty/tombstone DenseMap keys.
@@ -268,8 +261,8 @@ public:
std::unique_lock<typename Config::mutex_type> Guard;
if (M)
Guard = std::unique_lock<typename Config::mutex_type>(*M);
- Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this.
- Copy.Map->Map.erase(Copy); // Definitely destroys *this.
+ Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this.
+ Copy.Map->Map.erase(Copy); // Definitely destroys *this.
}
void allUsesReplacedWith(Value *new_key) override {
@@ -291,14 +284,14 @@ public:
// removed the old mapping.
if (I != Copy.Map->Map.end()) {
ValueT Target(std::move(I->second));
- Copy.Map->Map.erase(I); // Definitely destroys *this.
+ Copy.Map->Map.erase(I); // Definitely destroys *this.
Copy.Map->insert(std::make_pair(typed_new_key, std::move(Target)));
}
}
}
};
-template<typename KeyT, typename ValueT, typename Config>
+template <typename KeyT, typename ValueT, typename Config>
struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config>> {
using VH = ValueMapCallbackVH<KeyT, ValueT, Config>;
@@ -318,9 +311,7 @@ struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config>> {
return DenseMapInfo<KeyT>::getHashValue(Val);
}
- static bool isEqual(const VH &LHS, const VH &RHS) {
- return LHS == RHS;
- }
+ static bool isEqual(const VH &LHS, const VH &RHS) { return LHS == RHS; }
static bool isEqual(const KeyT &LHS, const VH &RHS) {
return LHS == RHS.getValPtr();
@@ -347,7 +338,7 @@ public:
struct ValueTypeProxy {
const KeyT first;
- ValueT& second;
+ ValueT &second;
ValueTypeProxy *operator->() { return this; }
@@ -361,23 +352,19 @@ public:
return Result;
}
- ValueTypeProxy operator->() const {
- return operator*();
- }
+ ValueTypeProxy operator->() const { return operator*(); }
- bool operator==(const ValueMapIterator &RHS) const {
- return I == RHS.I;
- }
- bool operator!=(const ValueMapIterator &RHS) const {
- return I != RHS.I;
- }
+ bool operator==(const ValueMapIterator &RHS) const { return I == RHS.I; }
+ bool operator!=(const ValueMapIterator &RHS) const { return I != RHS.I; }
- inline ValueMapIterator& operator++() { // Preincrement
+ inline ValueMapIterator &operator++() { // Preincrement
++I;
return *this;
}
- ValueMapIterator operator++(int) { // Postincrement
- ValueMapIterator tmp = *this; ++*this; return tmp;
+ ValueMapIterator operator++(int) { // Postincrement
+ ValueMapIterator tmp = *this;
+ ++*this;
+ return tmp;
}
};
@@ -397,13 +384,13 @@ public:
ValueMapConstIterator() : I() {}
ValueMapConstIterator(BaseT I) : I(I) {}
ValueMapConstIterator(ValueMapIterator<DenseMapT, KeyT> Other)
- : I(Other.base()) {}
+ : I(Other.base()) {}
BaseT base() const { return I; }
struct ValueTypeProxy {
const KeyT first;
- const ValueT& second;
+ const ValueT &second;
ValueTypeProxy *operator->() { return this; }
operator std::pair<KeyT, ValueT>() const {
return std::make_pair(first, second);
@@ -415,23 +402,19 @@ public:
return Result;
}
- ValueTypeProxy operator->() const {
- return operator*();
- }
+ ValueTypeProxy operator->() const { return operator*(); }
- bool operator==(const ValueMapConstIterator &RHS) const {
- return I == RHS.I;
- }
- bool operator!=(const ValueMapConstIterator &RHS) const {
- return I != RHS.I;
- }
+ bool operator==(const ValueMapConstIterator &RHS) const { return I == RHS.I; }
+ bool operator!=(const ValueMapConstIterator &RHS) const { return I != RHS.I; }
- inline ValueMapConstIterator& operator++() { // Preincrement
+ inline ValueMapConstIterator &operator++() { // Preincrement
++I;
return *this;
}
- ValueMapConstIterator operator++(int) { // Postincrement
- ValueMapConstIterator tmp = *this; ++*this; return tmp;
+ ValueMapConstIterator operator++(int) { // Postincrement
+ ValueMapConstIterator tmp = *this;
+ ++*this;
+ return tmp;
}
};
diff --git a/llvm/include/llvm/MC/MCRegisterInfo.h b/llvm/include/llvm/MC/MCRegisterInfo.h
index aad3792..e6fc707 100644
--- a/llvm/include/llvm/MC/MCRegisterInfo.h
+++ b/llvm/include/llvm/MC/MCRegisterInfo.h
@@ -45,7 +45,7 @@ public:
const uint16_t RegSetSize;
const uint16_t ID;
const uint16_t RegSizeInBits;
- const int8_t CopyCost;
+ const uint8_t CopyCost;
const bool Allocatable;
const bool BaseClass;
@@ -94,7 +94,7 @@ public:
/// getCopyCost - Return the cost of copying a value between two registers in
/// this class. A negative number means the register class is very expensive
/// to copy e.g. status flag register classes.
- int getCopyCost() const { return CopyCost; }
+ uint8_t getCopyCost() const { return CopyCost; }
/// isAllocatable - Return true if this register class may be used to create
/// virtual registers.
diff --git a/llvm/include/llvm/Object/ELF.h b/llvm/include/llvm/Object/ELF.h
index 0b362d3..59f63eb 100644
--- a/llvm/include/llvm/Object/ELF.h
+++ b/llvm/include/llvm/Object/ELF.h
@@ -407,7 +407,8 @@ public:
Elf_Note_Iterator notes_begin(const Elf_Phdr &Phdr, Error &Err) const {
assert(Phdr.p_type == ELF::PT_NOTE && "Phdr is not of type PT_NOTE");
ErrorAsOutParameter ErrAsOutParam(Err);
- if (Phdr.p_offset + Phdr.p_filesz > getBufSize()) {
+ if (Phdr.p_offset + Phdr.p_filesz > getBufSize() ||
+ Phdr.p_offset + Phdr.p_filesz < Phdr.p_offset) {
Err =
createError("invalid offset (0x" + Twine::utohexstr(Phdr.p_offset) +
") or size (0x" + Twine::utohexstr(Phdr.p_filesz) + ")");
@@ -435,7 +436,8 @@ public:
Elf_Note_Iterator notes_begin(const Elf_Shdr &Shdr, Error &Err) const {
assert(Shdr.sh_type == ELF::SHT_NOTE && "Shdr is not of type SHT_NOTE");
ErrorAsOutParameter ErrAsOutParam(Err);
- if (Shdr.sh_offset + Shdr.sh_size > getBufSize()) {
+ if (Shdr.sh_offset + Shdr.sh_size > getBufSize() ||
+ Shdr.sh_offset + Shdr.sh_size < Shdr.sh_offset) {
Err =
createError("invalid offset (0x" + Twine::utohexstr(Shdr.sh_offset) +
") or size (0x" + Twine::utohexstr(Shdr.sh_size) + ")");
diff --git a/llvm/include/llvm/Support/FileSystem.h b/llvm/include/llvm/Support/FileSystem.h
index c203779..cf2a810 100644
--- a/llvm/include/llvm/Support/FileSystem.h
+++ b/llvm/include/llvm/Support/FileSystem.h
@@ -268,18 +268,6 @@ public:
/// Make \a path an absolute path.
///
-/// Makes \a path absolute using the \a current_directory if it is not already.
-/// An empty \a path will result in the \a current_directory.
-///
-/// /absolute/path => /absolute/path
-/// relative/../path => <current-directory>/relative/../path
-///
-/// @param path A path that is modified to be an absolute path.
-LLVM_ABI void make_absolute(const Twine &current_directory,
- SmallVectorImpl<char> &path);
-
-/// Make \a path an absolute path.
-///
/// Makes \a path absolute using the current directory if it is not already. An
/// empty \a path will result in the current directory.
///
diff --git a/llvm/include/llvm/Support/Format.h b/llvm/include/llvm/Support/Format.h
index 2553002..34b224d 100644
--- a/llvm/include/llvm/Support/Format.h
+++ b/llvm/include/llvm/Support/Format.h
@@ -78,16 +78,6 @@ public:
/// printed, this synthesizes the string into a temporary buffer provided and
/// returns whether or not it is big enough.
-// Helper to validate that format() parameters are scalars or pointers.
-template <typename... Args> struct validate_format_parameters;
-template <typename Arg, typename... Args>
-struct validate_format_parameters<Arg, Args...> {
- static_assert(std::is_scalar_v<Arg>,
- "format can't be used with non fundamental / non pointer type");
- validate_format_parameters() { validate_format_parameters<Args...>(); }
-};
-template <> struct validate_format_parameters<> {};
-
template <typename... Ts>
class format_object final : public format_object_base {
std::tuple<Ts...> Vals;
@@ -105,7 +95,9 @@ class format_object final : public format_object_base {
public:
format_object(const char *fmt, const Ts &... vals)
: format_object_base(fmt), Vals(vals...) {
- validate_format_parameters<Ts...>();
+ static_assert(
+ (std::is_scalar_v<Ts> && ...),
+ "format can't be used with non fundamental / non pointer type");
}
int snprint(char *Buffer, unsigned BufferSize) const override {
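
Note: the recursive validate_format_parameters helper is replaced by a single fold expression inside a static_assert. A standalone sketch of the same compile-time check (the wrapper name is illustrative):

#include <cstdio>
#include <type_traits>

// Every argument to the printf-style wrapper must be a scalar (arithmetic,
// enum, or pointer); the fold expression checks the whole pack at once.
template <typename... Ts>
void checkedFormat(const char *Fmt, const Ts &...Vals) {
  static_assert((std::is_scalar_v<Ts> && ...),
                "format can't be used with non fundamental / non pointer type");
  std::printf(Fmt, Vals...);
}

int main() { checkedFormat("%d %s\n", 42, "ok"); }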
diff --git a/llvm/include/llvm/Support/FormatProviders.h b/llvm/include/llvm/Support/FormatProviders.h
index 9147782..8eaa5e38 100644
--- a/llvm/include/llvm/Support/FormatProviders.h
+++ b/llvm/include/llvm/Support/FormatProviders.h
@@ -29,22 +29,18 @@ namespace support {
namespace detail {
template <typename T>
struct use_integral_formatter
- : public std::bool_constant<
- is_one_of<T, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t,
- uint64_t, int, unsigned, long, unsigned long, long long,
- unsigned long long>::value> {};
+ : public is_one_of<T, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
+ int64_t, uint64_t, int, unsigned, long, unsigned long,
+ long long, unsigned long long> {};
template <typename T>
-struct use_char_formatter : public std::bool_constant<std::is_same_v<T, char>> {
-};
+struct use_char_formatter : public std::is_same<T, char> {};
template <typename T>
-struct is_cstring
- : public std::bool_constant<is_one_of<T, char *, const char *>::value> {};
+struct is_cstring : public is_one_of<T, char *, const char *> {};
template <typename T>
-struct use_string_formatter
- : public std::bool_constant<std::is_convertible_v<T, llvm::StringRef>> {};
+struct use_string_formatter : public std::is_convertible<T, llvm::StringRef> {};
template <typename T>
struct use_pointer_formatter
@@ -52,8 +48,7 @@ struct use_pointer_formatter
};
template <typename T>
-struct use_double_formatter
- : public std::bool_constant<std::is_floating_point_v<T>> {};
+struct use_double_formatter : public std::is_floating_point<T> {};
class HelperFunctions {
protected:
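
Note: the detail traits now inherit directly from the underlying standard trait instead of re-wrapping its ::value in std::bool_constant; both forms expose the same ::value member. A minimal comparison:

#include <type_traits>

template <typename T>
struct use_char_formatter_old : std::bool_constant<std::is_same_v<T, char>> {};

// Inheriting from the trait itself yields the identical ::value with less noise.
template <typename T>
struct use_char_formatter_new : std::is_same<T, char> {};

static_assert(use_char_formatter_old<char>::value == use_char_formatter_new<char>::value);
static_assert(use_char_formatter_old<int>::value == use_char_formatter_new<int>::value);

int main() {}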
diff --git a/llvm/include/llvm/Support/FormatVariadicDetails.h b/llvm/include/llvm/Support/FormatVariadicDetails.h
index 4002caf..0fdc7b6 100644
--- a/llvm/include/llvm/Support/FormatVariadicDetails.h
+++ b/llvm/include/llvm/Support/FormatVariadicDetails.h
@@ -92,8 +92,7 @@ public:
// based format() invocation.
template <typename T>
struct uses_format_member
- : public std::bool_constant<
- std::is_base_of_v<format_adapter, std::remove_reference_t<T>>> {};
+ : public std::is_base_of<format_adapter, std::remove_reference_t<T>> {};
// Simple template that decides whether a type T should use the format_provider
// based format() invocation. The member function takes priority, so this test
diff --git a/llvm/include/llvm/Support/HashBuilder.h b/llvm/include/llvm/Support/HashBuilder.h
index ae266d3..d0130d6 100644
--- a/llvm/include/llvm/Support/HashBuilder.h
+++ b/llvm/include/llvm/Support/HashBuilder.h
@@ -31,8 +31,7 @@ namespace llvm {
namespace hashbuilder_detail {
/// Trait to indicate whether a type's bits can be hashed directly (after
/// endianness correction).
-template <typename U>
-struct IsHashableData : std::bool_constant<is_integral_or_enum<U>::value> {};
+template <typename U> struct IsHashableData : is_integral_or_enum<U> {};
} // namespace hashbuilder_detail
diff --git a/llvm/include/llvm/Support/InstructionCost.h b/llvm/include/llvm/Support/InstructionCost.h
index ab1c8eb..507c166 100644
--- a/llvm/include/llvm/Support/InstructionCost.h
+++ b/llvm/include/llvm/Support/InstructionCost.h
@@ -59,8 +59,8 @@ private:
State = Invalid;
}
- static CostType getMaxValue() { return std::numeric_limits<CostType>::max(); }
- static CostType getMinValue() { return std::numeric_limits<CostType>::min(); }
+ static constexpr CostType MaxValue = std::numeric_limits<CostType>::max();
+ static constexpr CostType MinValue = std::numeric_limits<CostType>::min();
public:
// A default constructed InstructionCost is a valid zero cost
@@ -69,8 +69,8 @@ public:
InstructionCost(CostState) = delete;
InstructionCost(CostType Val) : Value(Val), State(Valid) {}
- static InstructionCost getMax() { return getMaxValue(); }
- static InstructionCost getMin() { return getMinValue(); }
+ static InstructionCost getMax() { return MaxValue; }
+ static InstructionCost getMin() { return MinValue; }
static InstructionCost getInvalid(CostType Val = 0) {
InstructionCost Tmp(Val);
Tmp.setInvalid();
@@ -102,7 +102,7 @@ public:
// Saturating addition.
InstructionCost::CostType Result;
if (AddOverflow(Value, RHS.Value, Result))
- Result = RHS.Value > 0 ? getMaxValue() : getMinValue();
+ Result = RHS.Value > 0 ? MaxValue : MinValue;
Value = Result;
return *this;
@@ -120,7 +120,7 @@ public:
// Saturating subtract.
InstructionCost::CostType Result;
if (SubOverflow(Value, RHS.Value, Result))
- Result = RHS.Value > 0 ? getMinValue() : getMaxValue();
+ Result = RHS.Value > 0 ? MinValue : MaxValue;
Value = Result;
return *this;
}
@@ -138,9 +138,9 @@ public:
InstructionCost::CostType Result;
if (MulOverflow(Value, RHS.Value, Result)) {
if ((Value > 0 && RHS.Value > 0) || (Value < 0 && RHS.Value < 0))
- Result = getMaxValue();
+ Result = MaxValue;
else
- Result = getMinValue();
+ Result = MinValue;
}
Value = Result;
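
Note: the saturating operators now clamp to the MaxValue/MinValue constants on overflow. A standalone sketch of the same saturating addition, using the GCC/Clang __builtin_add_overflow builtin in place of LLVM's AddOverflow:

#include <cstdio>
#include <limits>

using CostType = long long;
constexpr CostType MaxValue = std::numeric_limits<CostType>::max();
constexpr CostType MinValue = std::numeric_limits<CostType>::min();

// Saturating addition in the style of InstructionCost::operator+=: on signed
// overflow, clamp toward the sign of the right-hand operand.
CostType saturatingAdd(CostType LHS, CostType RHS) {
  CostType Result;
  if (__builtin_add_overflow(LHS, RHS, &Result))
    Result = RHS > 0 ? MaxValue : MinValue;
  return Result;
}

int main() {
  std::printf("%lld\n", saturatingAdd(MaxValue, 1)); // clamps to MaxValue
  std::printf("%lld\n", saturatingAdd(-5, 7));       // 2
}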
diff --git a/llvm/include/llvm/Support/Path.h b/llvm/include/llvm/Support/Path.h
index 0cb5171..a8e0f33 100644
--- a/llvm/include/llvm/Support/Path.h
+++ b/llvm/include/llvm/Support/Path.h
@@ -566,6 +566,18 @@ LLVM_ABI bool is_absolute_gnu(const Twine &path, Style style = Style::native);
/// @result True if the path is relative, false if it is not.
LLVM_ABI bool is_relative(const Twine &path, Style style = Style::native);
+/// Make \a path an absolute path.
+///
+/// Makes \a path absolute using the \a current_directory if it is not already.
+/// An empty \a path will result in the \a current_directory.
+///
+/// /absolute/path => /absolute/path
+/// relative/../path => <current-directory>/relative/../path
+///
+/// @param path A path that is modified to be an absolute path.
+LLVM_ABI void make_absolute(const Twine &current_directory,
+ SmallVectorImpl<char> &path);
+
} // end namespace path
} // end namespace sys
} // end namespace llvm
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 7710e2f..e5531456 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -650,6 +650,9 @@ HANDLE_TARGET_OPCODE(G_FDIV)
/// Generic FP remainder.
HANDLE_TARGET_OPCODE(G_FREM)
+/// Generic FP modf
+HANDLE_TARGET_OPCODE(G_FMODF)
+
/// Generic FP exponentiation.
HANDLE_TARGET_OPCODE(G_FPOW)
diff --git a/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h b/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h
index 1e07fbe..faaff4a 100644
--- a/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h
+++ b/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h
@@ -18,8 +18,7 @@
#include "llvm/Support/DataTypes.h"
-namespace llvm {
-namespace X86Disassembler {
+namespace llvm::X86Disassembler {
#define INSTRUCTIONS_SYM x86DisassemblerInstrSpecifiers
#define CONTEXTS_SYM x86DisassemblerContexts
@@ -541,7 +540,6 @@ static const unsigned X86_MAX_OPERANDS = 6;
/// respectively.
enum DisassemblerMode { MODE_16BIT, MODE_32BIT, MODE_64BIT };
-} // namespace X86Disassembler
-} // namespace llvm
+} // namespace llvm::X86Disassembler
#endif
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 733d10b..faf7788 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -981,6 +981,13 @@ def G_FREM : GenericInstruction {
let hasSideEffects = false;
}
+/// Generic FP modf
+def G_FMODF : GenericInstruction {
+ let OutOperandList = (outs type0:$dst1, type0:$dst2);
+ let InOperandList = (ins type0:$src1);
+ let hasSideEffects = false;
+}
+
// Floating point exponentiation.
def G_FPOW : GenericInstruction {
let OutOperandList = (outs type0:$dst);
diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h
index bf4e490..d0fd483 100644
--- a/llvm/include/llvm/Target/TargetMachine.h
+++ b/llvm/include/llvm/Target/TargetMachine.h
@@ -29,10 +29,10 @@
#include <string>
#include <utility>
-LLVM_ABI extern llvm::cl::opt<bool> NoKernelInfoEndLTO;
-
namespace llvm {
+LLVM_ABI extern llvm::cl::opt<bool> NoKernelInfoEndLTO;
+
class AAManager;
using ModulePassManager = PassManager<Module>;
diff --git a/llvm/include/llvm/Transforms/Scalar/GVN.h b/llvm/include/llvm/Transforms/Scalar/GVN.h
index 2454149..74a4d6c 100644
--- a/llvm/include/llvm/Transforms/Scalar/GVN.h
+++ b/llvm/include/llvm/Transforms/Scalar/GVN.h
@@ -56,6 +56,7 @@ class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class Value;
+class IntrinsicInst;
/// A private "module" namespace for types and utilities used by GVN. These
/// are implementation details and should not be used by clients.
namespace LLVM_LIBRARY_VISIBILITY_NAMESPACE gvn {
@@ -349,6 +350,7 @@ private:
// Helper functions of redundant load elimination.
bool processLoad(LoadInst *L);
+ bool processMaskedLoad(IntrinsicInst *I);
bool processNonLocalLoad(LoadInst *L);
bool processAssumeIntrinsic(AssumeInst *II);
diff --git a/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h b/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h
index 6178622..dfd6e2f 100644
--- a/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h
+++ b/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h
@@ -15,7 +15,12 @@ namespace llvm {
class Function;
-struct JumpTableToSwitchPass : PassInfoMixin<JumpTableToSwitchPass> {
+class JumpTableToSwitchPass : public PassInfoMixin<JumpTableToSwitchPass> {
+ // Necessary until we switch to GUIDs as metadata, after which we can drop it.
+ const bool InLTO;
+
+public:
+ explicit JumpTableToSwitchPass(bool InLTO = false) : InLTO(InLTO) {}
/// Run the pass over the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};