Diffstat (limited to 'llvm/include')
-rw-r--r--  llvm/include/llvm/ADT/FoldingSet.h  32
-rw-r--r--  llvm/include/llvm/ADT/IndexedMap.h  17
-rw-r--r--  llvm/include/llvm/ADT/SmallVector.h  12
-rw-r--r--  llvm/include/llvm/ADT/identity.h  31
-rw-r--r--  llvm/include/llvm/Analysis/TargetTransformInfo.h  3
-rw-r--r--  llvm/include/llvm/CodeGen/AtomicExpand.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/CodeGenPrepare.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/ComplexDeinterleavingPass.h  4
-rw-r--r--  llvm/include/llvm/CodeGen/DwarfEHPrepare.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/ExpandFp.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/ExpandLargeDivRem.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/ExpandMemCmp.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/IndirectBrExpand.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/InterleavedAccess.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/InterleavedLoadCombine.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/MIR2Vec.h  38
-rw-r--r--  llvm/include/llvm/CodeGen/MachineBlockHashInfo.h  114
-rw-r--r--  llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/Passes.h  3
-rw-r--r--  llvm/include/llvm/CodeGen/SafeStack.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/SelectOptimize.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/SelectionDAGNodes.h  15
-rw-r--r--  llvm/include/llvm/CodeGen/StackProtector.h  2
-rw-r--r--  llvm/include/llvm/CodeGen/TypePromotion.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/Core.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/Layer.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h  2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h  4
-rw-r--r--  llvm/include/llvm/IR/Intrinsics.td  12
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAMDGPU.td  14
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsNVVM.td  100
-rw-r--r--  llvm/include/llvm/IR/ModuleSummaryIndex.h  4
-rw-r--r--  llvm/include/llvm/IR/RuntimeLibcalls.td  4
-rw-r--r--  llvm/include/llvm/InitializePasses.h  1
-rw-r--r--  llvm/include/llvm/LTO/LTO.h  3
-rw-r--r--  llvm/include/llvm/MC/MCContext.h  2
-rw-r--r--  llvm/include/llvm/Passes/CodeGenPassBuilder.h  18
-rw-r--r--  llvm/include/llvm/Support/GlobPattern.h  21
49 files changed, 348 insertions, 160 deletions
diff --git a/llvm/include/llvm/ADT/FoldingSet.h b/llvm/include/llvm/ADT/FoldingSet.h
index 82a88c4..675b5c6 100644
--- a/llvm/include/llvm/ADT/FoldingSet.h
+++ b/llvm/include/llvm/ADT/FoldingSet.h
@@ -332,6 +332,14 @@ class FoldingSetNodeID {
/// Use a SmallVector to avoid a heap allocation in the common case.
SmallVector<unsigned, 32> Bits;
+ template <typename T> void AddIntegerImpl(T I) {
+ static_assert(std::is_integral_v<T> && sizeof(T) <= sizeof(unsigned) * 2,
+ "T must be an integer type no wider than 64 bits");
+ Bits.push_back(static_cast<unsigned>(I));
+ if constexpr (sizeof(unsigned) < sizeof(T))
+ Bits.push_back(static_cast<unsigned long long>(I) >> 32);
+ }
+
public:
FoldingSetNodeID() = default;
@@ -348,24 +356,12 @@ public:
"unexpected pointer size");
AddInteger(reinterpret_cast<uintptr_t>(Ptr));
}
- void AddInteger(signed I) { Bits.push_back(I); }
- void AddInteger(unsigned I) { Bits.push_back(I); }
- void AddInteger(long I) { AddInteger((unsigned long)I); }
- void AddInteger(unsigned long I) {
- if (sizeof(long) == sizeof(int))
- AddInteger(unsigned(I));
- else if (sizeof(long) == sizeof(long long)) {
- AddInteger((unsigned long long)I);
- } else {
- llvm_unreachable("unexpected sizeof(long)");
- }
- }
- void AddInteger(long long I) { AddInteger((unsigned long long)I); }
- void AddInteger(unsigned long long I) {
- AddInteger(unsigned(I));
- AddInteger(unsigned(I >> 32));
- }
-
+ void AddInteger(signed I) { AddIntegerImpl(I); }
+ void AddInteger(unsigned I) { AddIntegerImpl(I); }
+ void AddInteger(long I) { AddIntegerImpl(I); }
+ void AddInteger(unsigned long I) { AddIntegerImpl(I); }
+ void AddInteger(long long I) { AddIntegerImpl(I); }
+ void AddInteger(unsigned long long I) { AddIntegerImpl(I); }
void AddBoolean(bool B) { AddInteger(B ? 1U : 0U); }
LLVM_ABI void AddString(StringRef String);
LLVM_ABI void AddNodeID(const FoldingSetNodeID &ID);
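The change above folds the six AddInteger overloads into one AddIntegerImpl template that pushes the low 32 bits of the value and, for types wider than unsigned, a second word holding the high bits. A minimal standalone sketch of that splitting behavior, assuming a typical target with 32-bit unsigned (hypothetical names, not part of the patch):

#include <cstdint>
#include <type_traits>
#include <vector>

// Mirrors the AddIntegerImpl logic: every integer contributes one 32-bit
// word; 64-bit integers contribute a second word holding the high bits.
template <typename T> void addIntegerWords(std::vector<unsigned> &Bits, T I) {
  static_assert(std::is_integral_v<T> && sizeof(T) <= sizeof(unsigned) * 2,
                "T must be an integer type no wider than 64 bits");
  Bits.push_back(static_cast<unsigned>(I));
  if constexpr (sizeof(unsigned) < sizeof(T))
    Bits.push_back(static_cast<unsigned long long>(I) >> 32);
}

int main() {
  std::vector<unsigned> Bits;
  addIntegerWords(Bits, 7);               // one word
  addIntegerWords(Bits, 0x1234567890ULL); // two words: low bits, then high bits
  return Bits.size() == 3 ? 0 : 1;
}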
diff --git a/llvm/include/llvm/ADT/IndexedMap.h b/llvm/include/llvm/ADT/IndexedMap.h
index cda0316..55935a7 100644
--- a/llvm/include/llvm/ADT/IndexedMap.h
+++ b/llvm/include/llvm/ADT/IndexedMap.h
@@ -22,12 +22,21 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/identity.h"
#include <cassert>
namespace llvm {
-template <typename T, typename ToIndexT = identity<unsigned>> class IndexedMap {
+namespace detail {
+template <class Ty> struct IdentityIndex {
+ using argument_type = Ty;
+
+ Ty &operator()(Ty &self) const { return self; }
+ const Ty &operator()(const Ty &self) const { return self; }
+};
+} // namespace detail
+
+template <typename T, typename ToIndexT = detail::IdentityIndex<unsigned>>
+class IndexedMap {
using IndexT = typename ToIndexT::argument_type;
// Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
// can grow very large and SmallVector grows more efficiently as long as T
@@ -35,11 +44,11 @@ template <typename T, typename ToIndexT = identity<unsigned>> class IndexedMap {
using StorageT = SmallVector<T, 0>;
StorageT storage_;
- T nullVal_;
+ T nullVal_ = T();
ToIndexT toIndex_;
public:
- IndexedMap() : nullVal_(T()) {}
+ IndexedMap() = default;
explicit IndexedMap(const T &val) : nullVal_(val) {}
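With identity.h removed, the default index functor now lives in detail::IdentityIndex, which simply returns its argument, so a plain unsigned key is used directly as the storage index. A small self-contained sketch of that default behavior (local stand-ins, not the LLVM classes):

#include <cassert>
#include <vector>

// Local stand-in for detail::IdentityIndex<unsigned>: the "index conversion"
// is a no-op, so the map key doubles as the vector index.
struct IdentityIndex {
  using argument_type = unsigned;
  unsigned &operator()(unsigned &self) const { return self; }
  const unsigned &operator()(const unsigned &self) const { return self; }
};

int main() {
  IdentityIndex ToIndex;
  std::vector<int> Storage(8, 0); // plays the role of IndexedMap's storage
  unsigned Key = 5;
  Storage[ToIndex(Key)] = 42;     // the key maps to itself
  assert(Storage[5] == 42);
  return 0;
}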
diff --git a/llvm/include/llvm/ADT/SmallVector.h b/llvm/include/llvm/ADT/SmallVector.h
index ca0b918..51109d1 100644
--- a/llvm/include/llvm/ADT/SmallVector.h
+++ b/llvm/include/llvm/ADT/SmallVector.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H
+#include "llvm/ADT/ADL.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
@@ -1295,28 +1296,27 @@ inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
template <typename RangeType>
using ValueTypeFromRangeType =
- std::remove_const_t<std::remove_reference_t<decltype(*std::begin(
- std::declval<RangeType &>()))>>;
+ std::remove_const_t<detail::ValueOfRange<RangeType>>;
/// Given a range of type R, iterate the entire range and return a
/// SmallVector with elements of the vector. This is useful, for example,
/// when you want to iterate a range and then sort the results.
template <unsigned Size, typename R>
SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R &&Range) {
- return {std::begin(Range), std::end(Range)};
+ return {adl_begin(Range), adl_end(Range)};
}
template <typename R>
SmallVector<ValueTypeFromRangeType<R>> to_vector(R &&Range) {
- return {std::begin(Range), std::end(Range)};
+ return {adl_begin(Range), adl_end(Range)};
}
template <typename Out, unsigned Size, typename R>
SmallVector<Out, Size> to_vector_of(R &&Range) {
- return {std::begin(Range), std::end(Range)};
+ return {adl_begin(Range), adl_end(Range)};
}
template <typename Out, typename R> SmallVector<Out> to_vector_of(R &&Range) {
- return {std::begin(Range), std::end(Range)};
+ return {adl_begin(Range), adl_end(Range)};
}
// Explicit instantiations
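The to_vector helpers now call adl_begin/adl_end, so ranges whose begin/end are free functions found only via argument-dependent lookup work as well. A standalone sketch of that ADL dispatch idea (toy names, not LLVM's adl_begin):

#include <iterator>
#include <vector>

namespace toy {
struct Range {
  int Data[3] = {1, 2, 3};
};
// begin/end are free functions in the range's namespace, found via ADL only.
inline const int *begin(const Range &R) { return R.Data; }
inline const int *end(const Range &R) { return R.Data + 3; }
} // namespace toy

// Mimics to_vector: an unqualified call (plus using-declarations) lets ADL
// pick up toy::begin/toy::end; a qualified std::begin call would not.
template <typename R> auto toVector(R &&Range) {
  using std::begin;
  using std::end;
  return std::vector(begin(Range), end(Range));
}

int main() {
  toy::Range R;
  auto V = toVector(R); // std::vector<int>{1, 2, 3}
  return V.size() == 3 ? 0 : 1;
}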
diff --git a/llvm/include/llvm/ADT/identity.h b/llvm/include/llvm/ADT/identity.h
deleted file mode 100644
index 88d033f..0000000
--- a/llvm/include/llvm/ADT/identity.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===- llvm/ADT/Identity.h - Provide std::identity from C++20 ---*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides an implementation of std::identity from C++20.
-//
-// No library is required when using these functions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ADT_IDENTITY_H
-#define LLVM_ADT_IDENTITY_H
-
-namespace llvm {
-
-// Similar to `std::identity` from C++20.
-template <class Ty> struct identity {
- using is_transparent = void;
- using argument_type = Ty;
-
- Ty &operator()(Ty &self) const { return self; }
- const Ty &operator()(const Ty &self) const { return self; }
-};
-
-} // namespace llvm
-
-#endif // LLVM_ADT_IDENTITY_H
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 5d3b233..7b7dc1b 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -227,6 +227,9 @@ public:
/// Get the kind of extension that an instruction represents.
LLVM_ABI static PartialReductionExtendKind
getPartialReductionExtendKind(Instruction *I);
+ /// Get the kind of extension that a cast opcode represents.
+ LLVM_ABI static PartialReductionExtendKind
+ getPartialReductionExtendKind(Instruction::CastOps CastOpc);
/// Construct a TTI object using a type implementing the \c Concept
/// API below.
diff --git a/llvm/include/llvm/CodeGen/AtomicExpand.h b/llvm/include/llvm/CodeGen/AtomicExpand.h
index 1b8a988..34f520f 100644
--- a/llvm/include/llvm/CodeGen/AtomicExpand.h
+++ b/llvm/include/llvm/CodeGen/AtomicExpand.h
@@ -21,7 +21,7 @@ private:
const TargetMachine *TM;
public:
- AtomicExpandPass(const TargetMachine *TM) : TM(TM) {}
+ AtomicExpandPass(const TargetMachine &TM) : TM(&TM) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
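This hunk, like the many similar ones below, switches the pass constructor from taking const TargetMachine * to const TargetMachine &, while still storing a pointer member; the reference signature makes the "must not be null" contract explicit at every call site. A minimal sketch of the pattern with placeholder types (not the real LLVM classes):

// Placeholder for llvm::TargetMachine.
struct TargetMachine {};

// Before: Pass(const TargetMachine *TM) : TM(TM) {} -- callers could pass
// nullptr. After: a reference parameter rules that out, and the constructor
// stores the address of the referenced target machine.
class AtomicExpandPassSketch {
  const TargetMachine *TM;

public:
  explicit AtomicExpandPassSketch(const TargetMachine &TM) : TM(&TM) {}
  const TargetMachine &getTM() const { return *TM; }
};

int main() {
  TargetMachine TM;
  AtomicExpandPassSketch P(TM); // callers now pass a reference, not a pointer
  (void)P.getTM();
  return 0;
}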
diff --git a/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h b/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h
index 82dd5fe..48650a6 100644
--- a/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h
+++ b/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h
@@ -155,7 +155,7 @@ class BasicBlockSectionsProfileReaderAnalysis
public:
static AnalysisKey Key;
typedef BasicBlockSectionsProfileReader Result;
- BasicBlockSectionsProfileReaderAnalysis(const TargetMachine *TM) : TM(TM) {}
+ BasicBlockSectionsProfileReaderAnalysis(const TargetMachine &TM) : TM(&TM) {}
Result run(Function &F, FunctionAnalysisManager &AM);
diff --git a/llvm/include/llvm/CodeGen/CodeGenPrepare.h b/llvm/include/llvm/CodeGen/CodeGenPrepare.h
index dee3a9e..e673d0f 100644
--- a/llvm/include/llvm/CodeGen/CodeGenPrepare.h
+++ b/llvm/include/llvm/CodeGen/CodeGenPrepare.h
@@ -26,7 +26,7 @@ private:
const TargetMachine *TM;
public:
- CodeGenPreparePass(const TargetMachine *TM) : TM(TM) {}
+ CodeGenPreparePass(const TargetMachine &TM) : TM(&TM) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
diff --git a/llvm/include/llvm/CodeGen/ComplexDeinterleavingPass.h b/llvm/include/llvm/CodeGen/ComplexDeinterleavingPass.h
index 4383249..7b74f2a 100644
--- a/llvm/include/llvm/CodeGen/ComplexDeinterleavingPass.h
+++ b/llvm/include/llvm/CodeGen/ComplexDeinterleavingPass.h
@@ -24,10 +24,10 @@ class TargetMachine;
struct ComplexDeinterleavingPass
: public PassInfoMixin<ComplexDeinterleavingPass> {
private:
- TargetMachine *TM;
+ const TargetMachine *TM;
public:
- ComplexDeinterleavingPass(TargetMachine *TM) : TM(TM) {}
+ ComplexDeinterleavingPass(const TargetMachine &TM) : TM(&TM) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
diff --git a/llvm/include/llvm/CodeGen/DwarfEHPrepare.h b/llvm/include/llvm/CodeGen/DwarfEHPrepare.h
index 3f625cd..5b68b8c 100644
--- a/llvm/include/llvm/CodeGen/DwarfEHPrepare.h
+++ b/llvm/include/llvm/CodeGen/DwarfEHPrepare.h
@@ -24,7 +24,7 @@ class DwarfEHPreparePass : public PassInfoMixin<DwarfEHPreparePass> {
const TargetMachine *TM;
public:
- explicit DwarfEHPreparePass(const TargetMachine *TM_) : TM(TM_) {}
+ explicit DwarfEHPreparePass(const TargetMachine &TM_) : TM(&TM_) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
diff --git a/llvm/include/llvm/CodeGen/ExpandFp.h b/llvm/include/llvm/CodeGen/ExpandFp.h
index f1f441b..28e6aec 100644
--- a/llvm/include/llvm/CodeGen/ExpandFp.h
+++ b/llvm/include/llvm/CodeGen/ExpandFp.h
@@ -22,7 +22,7 @@ private:
CodeGenOptLevel OptLevel;
public:
- explicit ExpandFpPass(const TargetMachine *TM, CodeGenOptLevel OptLevel);
+ explicit ExpandFpPass(const TargetMachine &TM, CodeGenOptLevel OptLevel);
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
static bool isRequired() { return true; }
diff --git a/llvm/include/llvm/CodeGen/ExpandLargeDivRem.h b/llvm/include/llvm/CodeGen/ExpandLargeDivRem.h
index 6fc4409..b73a382 100644
--- a/llvm/include/llvm/CodeGen/ExpandLargeDivRem.h
+++ b/llvm/include/llvm/CodeGen/ExpandLargeDivRem.h
@@ -20,7 +20,7 @@ private:
const TargetMachine *TM;
public:
- explicit ExpandLargeDivRemPass(const TargetMachine *TM_) : TM(TM_) {}
+ explicit ExpandLargeDivRemPass(const TargetMachine &TM_) : TM(&TM_) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
diff --git a/llvm/include/llvm/CodeGen/ExpandMemCmp.h b/llvm/include/llvm/CodeGen/ExpandMemCmp.h
index 94a8778..0b845e4 100644
--- a/llvm/include/llvm/CodeGen/ExpandMemCmp.h
+++ b/llvm/include/llvm/CodeGen/ExpandMemCmp.h
@@ -19,7 +19,7 @@ class ExpandMemCmpPass : public PassInfoMixin<ExpandMemCmpPass> {
const TargetMachine *TM;
public:
- explicit ExpandMemCmpPass(const TargetMachine *TM_) : TM(TM_) {}
+ explicit ExpandMemCmpPass(const TargetMachine &TM_) : TM(&TM_) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
diff --git a/llvm/include/llvm/CodeGen/IndirectBrExpand.h b/llvm/include/llvm/CodeGen/IndirectBrExpand.h
index f7d9d5d..572a712 100644
--- a/llvm/include/llvm/CodeGen/IndirectBrExpand.h
+++ b/llvm/include/llvm/CodeGen/IndirectBrExpand.h
@@ -19,7 +19,7 @@ class IndirectBrExpandPass : public PassInfoMixin<IndirectBrExpandPass> {
const TargetMachine *TM;
public:
- IndirectBrExpandPass(const TargetMachine *TM) : TM(TM) {}
+ IndirectBrExpandPass(const TargetMachine &TM) : TM(&TM) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
diff --git a/llvm/include/llvm/CodeGen/InterleavedAccess.h b/llvm/include/llvm/CodeGen/InterleavedAccess.h
index 31bd19a..42bfa84 100644
--- a/llvm/include/llvm/CodeGen/InterleavedAccess.h
+++ b/llvm/include/llvm/CodeGen/InterleavedAccess.h
@@ -25,7 +25,7 @@ class InterleavedAccessPass : public PassInfoMixin<InterleavedAccessPass> {
const TargetMachine *TM;
public:
- explicit InterleavedAccessPass(const TargetMachine *TM) : TM(TM) {}
+ explicit InterleavedAccessPass(const TargetMachine &TM) : TM(&TM) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
diff --git a/llvm/include/llvm/CodeGen/InterleavedLoadCombine.h b/llvm/include/llvm/CodeGen/InterleavedLoadCombine.h
index fa99aa3..2750fd4 100644
--- a/llvm/include/llvm/CodeGen/InterleavedLoadCombine.h
+++ b/llvm/include/llvm/CodeGen/InterleavedLoadCombine.h
@@ -20,7 +20,7 @@ class InterleavedLoadCombinePass
const TargetMachine *TM;
public:
- explicit InterleavedLoadCombinePass(const TargetMachine *TM) : TM(TM) {}
+ explicit InterleavedLoadCombinePass(const TargetMachine &TM) : TM(&TM) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
diff --git a/llvm/include/llvm/CodeGen/MIR2Vec.h b/llvm/include/llvm/CodeGen/MIR2Vec.h
index 48bb0a5..44f009c 100644
--- a/llvm/include/llvm/CodeGen/MIR2Vec.h
+++ b/llvm/include/llvm/CodeGen/MIR2Vec.h
@@ -111,6 +111,11 @@ class MIRVocabulary {
size_t TotalEntries = 0;
} Layout;
+ // TODO: See if we can have only one reg classes section instead of physical
+ // and virtual separate sections in the vocabulary. This would reduce the
+ // number of vocabulary entities significantly.
+ // We can potentially distinguish physical and virtual registers by
+ // considering them as a separate feature.
enum class Section : unsigned {
Opcodes = 0,
CommonOperands = 1,
@@ -185,6 +190,25 @@ class MIRVocabulary {
return Storage[static_cast<unsigned>(SectionID)][LocalIndex];
}
+ /// Get entity ID (flat index) for a common operand type
+ /// This is used for triplet generation
+ unsigned getEntityIDForCommonOperand(
+ MachineOperand::MachineOperandType OperandType) const {
+ return Layout.CommonOperandBase + getCommonOperandIndex(OperandType);
+ }
+
+ /// Get entity ID (flat index) for a register
+ /// This is used for triplet generation
+ unsigned getEntityIDForRegister(Register Reg) const {
+ if (!Reg.isValid() || Reg.isStack())
+ return Layout
+ .VirtRegBase; // Return VirtRegBase for invalid/stack registers
+ unsigned LocalIndex = getRegisterOperandIndex(Reg);
+ size_t BaseOffset =
+ Reg.isPhysical() ? Layout.PhyRegBase : Layout.VirtRegBase;
+ return BaseOffset + LocalIndex;
+ }
+
public:
/// Static method for extracting base opcode names (public for testing)
static std::string extractBaseOpcodeName(StringRef InstrName);
@@ -201,6 +225,20 @@ public:
unsigned getDimension() const { return Storage.getDimension(); }
+ /// Get entity ID (flat index) for an opcode
+ /// This is used for triplet generation
+ unsigned getEntityIDForOpcode(unsigned Opcode) const {
+ return Layout.OpcodeBase + getCanonicalOpcodeIndex(Opcode);
+ }
+
+ /// Get entity ID (flat index) for a machine operand
+ /// This is used for triplet generation
+ unsigned getEntityIDForMachineOperand(const MachineOperand &MO) const {
+ if (MO.getType() == MachineOperand::MO_Register)
+ return getEntityIDForRegister(MO.getReg());
+ return getEntityIDForCommonOperand(MO.getType());
+ }
+
// Accessor methods
const Embedding &operator[](unsigned Opcode) const {
unsigned LocalIndex = getCanonicalOpcodeIndex(Opcode);
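The new getEntityIDFor* helpers all follow one scheme: the vocabulary is a concatenation of sections (opcodes, common operands, physical registers, virtual registers), and an entity's flat ID is its section's base offset plus its local index. A toy sketch of that flat-index layout, with made-up section sizes rather than the real MIRVocabulary layout:

#include <cassert>

// Toy layout: four consecutive sections sharing one flat ID space.
struct Layout {
  unsigned OpcodeBase = 0;
  unsigned CommonOperandBase = 100; // assumes 100 opcode entries
  unsigned PhyRegBase = 110;        // assumes 10 common operand kinds
  unsigned VirtRegBase = 150;       // assumes 40 physical register classes
};

// Entity ID = section base + local index within the section, mirroring
// getEntityIDForOpcode / getEntityIDForRegister above.
unsigned entityIDForOpcode(const Layout &L, unsigned LocalOpcodeIndex) {
  return L.OpcodeBase + LocalOpcodeIndex;
}
unsigned entityIDForPhysReg(const Layout &L, unsigned LocalRegClassIndex) {
  return L.PhyRegBase + LocalRegClassIndex;
}

int main() {
  Layout L;
  assert(entityIDForOpcode(L, 42) == 42);
  assert(entityIDForPhysReg(L, 3) == 113);
  return 0;
}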
diff --git a/llvm/include/llvm/CodeGen/MachineBlockHashInfo.h b/llvm/include/llvm/CodeGen/MachineBlockHashInfo.h
new file mode 100644
index 0000000..d044d5f
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/MachineBlockHashInfo.h
@@ -0,0 +1,114 @@
+//===- llvm/CodeGen/MachineBlockHashInfo.h ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Compute the hashes of basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBLOCKHASHINFO_H
+#define LLVM_CODEGEN_MACHINEBLOCKHASHINFO_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+/// An object wrapping several components of a basic block hash. The combined
+/// (blended) hash is represented and stored as one uint64_t, while individual
+/// components are of smaller size (e.g., uint16_t or uint8_t).
+struct BlendedBlockHash {
+public:
+ explicit BlendedBlockHash(uint16_t Offset, uint16_t OpcodeHash,
+ uint16_t InstrHash, uint16_t NeighborHash)
+ : Offset(Offset), OpcodeHash(OpcodeHash), InstrHash(InstrHash),
+ NeighborHash(NeighborHash) {}
+
+ explicit BlendedBlockHash(uint64_t CombinedHash) {
+ Offset = CombinedHash & 0xffff;
+ CombinedHash >>= 16;
+ OpcodeHash = CombinedHash & 0xffff;
+ CombinedHash >>= 16;
+ InstrHash = CombinedHash & 0xffff;
+ CombinedHash >>= 16;
+ NeighborHash = CombinedHash & 0xffff;
+ }
+
+ /// Combine the blended hash into uint64_t.
+ uint64_t combine() const {
+ uint64_t Hash = 0;
+ Hash |= uint64_t(NeighborHash);
+ Hash <<= 16;
+ Hash |= uint64_t(InstrHash);
+ Hash <<= 16;
+ Hash |= uint64_t(OpcodeHash);
+ Hash <<= 16;
+ Hash |= uint64_t(Offset);
+ return Hash;
+ }
+
+ /// Compute a distance between two given blended hashes. The smaller the
+ /// distance, the more similar two blocks are. For identical basic blocks,
+ /// the distance is zero.
+ /// Since OpcodeHash is highly stable, we consider a match good only if
+ /// the OpcodeHashes are identical. Mismatched OpcodeHashes lead to low
+ /// matching accuracy, and poor matches undermine the quality of final
+ /// inference. Notably, during inference, we also consider the matching
+ /// ratio of basic blocks. For MachineFunctions with a low matching
+ /// ratio, we directly skip optimization to reduce the impact of
+ /// mismatches. This ensures even very poor profiles won’t cause negative
+ /// optimization.
+ /// In the context of matching, we consider NeighborHash to be more
+ /// important. This is especially true when accounting for inlining
+ /// scenarios, where the position of a basic block in the control
+ /// flow graph is more critical.
+ uint64_t distance(const BlendedBlockHash &BBH) const {
+ assert(OpcodeHash == BBH.OpcodeHash &&
+ "incorrect blended hash distance computation");
+ uint64_t Dist = 0;
+ // Account for NeighborHash
+ Dist += NeighborHash == BBH.NeighborHash ? 0 : 1;
+ Dist <<= 16;
+ // Account for InstrHash
+ Dist += InstrHash == BBH.InstrHash ? 0 : 1;
+ Dist <<= 16;
+ // Account for Offset
+ Dist += (Offset >= BBH.Offset ? Offset - BBH.Offset : BBH.Offset - Offset);
+ return Dist;
+ }
+
+private:
+ /// The offset of the basic block from the function start.
+ uint16_t Offset{0};
+ /// Hash of the basic block instructions, excluding operands.
+ uint16_t OpcodeHash{0};
+ /// Hash of the basic block instructions, including opcodes and
+ /// operands.
+ uint16_t InstrHash{0};
+ /// OpcodeHash of the basic block together with OpcodeHashes of its
+ /// successors and predecessors.
+ uint16_t NeighborHash{0};
+};
+
+class MachineBlockHashInfo : public MachineFunctionPass {
+ DenseMap<const MachineBasicBlock *, uint64_t> MBBHashInfo;
+
+public:
+ static char ID;
+ MachineBlockHashInfo();
+
+ StringRef getPassName() const override { return "Basic Block Hash Compute"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ uint64_t getMBBHash(const MachineBasicBlock &MBB);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEBLOCKHASHINFO_H
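BlendedBlockHash packs four 16-bit components into one uint64_t, with NeighborHash in the most significant bits so that, in distance(), a neighbor mismatch always outweighs an instruction mismatch, which in turn outweighs any offset delta. A standalone round-trip sketch of that packing (plain functions, not the class itself):

#include <cassert>
#include <cstdint>

// Same packing order as BlendedBlockHash::combine():
// [NeighborHash | InstrHash | OpcodeHash | Offset], 16 bits each.
uint64_t combine(uint16_t Offset, uint16_t OpcodeHash, uint16_t InstrHash,
                 uint16_t NeighborHash) {
  uint64_t Hash = NeighborHash;
  Hash = (Hash << 16) | InstrHash;
  Hash = (Hash << 16) | OpcodeHash;
  Hash = (Hash << 16) | Offset;
  return Hash;
}

int main() {
  uint64_t H = combine(/*Offset=*/0x0010, /*OpcodeHash=*/0xaaaa,
                       /*InstrHash=*/0xbbbb, /*NeighborHash=*/0xcccc);
  // Unpack the way BlendedBlockHash(uint64_t) does and check the round trip.
  assert((H & 0xffff) == 0x0010);         // Offset
  assert(((H >> 16) & 0xffff) == 0xaaaa); // OpcodeHash
  assert(((H >> 32) & 0xffff) == 0xbbbb); // InstrHash
  assert(((H >> 48) & 0xffff) == 0xcccc); // NeighborHash
  return 0;
}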
diff --git a/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h b/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
index cd00e5f..bea1c78 100644
--- a/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
+++ b/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
@@ -42,7 +42,7 @@ public:
FunctionAnalysisManager::Invalidator &);
};
- MachineFunctionAnalysis(const TargetMachine *TM) : TM(TM) {};
+ MachineFunctionAnalysis(const TargetMachine &TM) : TM(&TM) {};
LLVM_ABI Result run(Function &F, FunctionAnalysisManager &FAM);
};
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index 7fae550..9fddd47 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -69,6 +69,9 @@ LLVM_ABI MachineFunctionPass *createBasicBlockSectionsPass();
LLVM_ABI MachineFunctionPass *createBasicBlockPathCloningPass();
+/// createMachineBlockHashInfoPass - This pass computes basic block hashes.
+LLVM_ABI MachineFunctionPass *createMachineBlockHashInfoPass();
+
/// createMachineFunctionSplitterPass - This pass splits machine functions
/// using profile information.
LLVM_ABI MachineFunctionPass *createMachineFunctionSplitterPass();
diff --git a/llvm/include/llvm/CodeGen/SafeStack.h b/llvm/include/llvm/CodeGen/SafeStack.h
index e8f0d14..05ad40e 100644
--- a/llvm/include/llvm/CodeGen/SafeStack.h
+++ b/llvm/include/llvm/CodeGen/SafeStack.h
@@ -19,7 +19,7 @@ class SafeStackPass : public PassInfoMixin<SafeStackPass> {
const TargetMachine *TM;
public:
- explicit SafeStackPass(const TargetMachine *TM_) : TM(TM_) {}
+ explicit SafeStackPass(const TargetMachine &TM_) : TM(&TM_) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
diff --git a/llvm/include/llvm/CodeGen/SelectOptimize.h b/llvm/include/llvm/CodeGen/SelectOptimize.h
index 37024a15..33f66bb 100644
--- a/llvm/include/llvm/CodeGen/SelectOptimize.h
+++ b/llvm/include/llvm/CodeGen/SelectOptimize.h
@@ -25,7 +25,7 @@ class SelectOptimizePass : public PassInfoMixin<SelectOptimizePass> {
const TargetMachine *TM;
public:
- explicit SelectOptimizePass(const TargetMachine *TM) : TM(TM) {}
+ explicit SelectOptimizePass(const TargetMachine &TM) : TM(&TM) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 69713d0..1759463 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -418,12 +418,21 @@ public:
Unpredictable = 1 << 13,
// Compare instructions which may carry the samesign flag.
SameSign = 1 << 14,
+ // ISD::PTRADD operations that remain in bounds, i.e., the left operand is
+ // an address in a memory object in which the result of the operation also
+ // lies. WARNING: Since SDAG generally uses integers instead of pointer
+ // types, a PTRADD's pointer operand is effectively the result of an
+ // implicit inttoptr cast. Therefore, when an inbounds PTRADD uses a
+ // pointer P, transformations cannot assume that P has the provenance
+ // implied by its producer as, e.g, operations between producer and PTRADD
+ // that affect the provenance may have been optimized away.
+ InBounds = 1 << 15,
// NOTE: Please update LargestValue in LLVM_DECLARE_ENUM_AS_BITMASK below
// the class definition when adding new flags.
PoisonGeneratingFlags = NoUnsignedWrap | NoSignedWrap | Exact | Disjoint |
- NonNeg | NoNaNs | NoInfs | SameSign,
+ NonNeg | NoNaNs | NoInfs | SameSign | InBounds,
FastMathFlags = NoNaNs | NoInfs | NoSignedZeros | AllowReciprocal |
AllowContract | ApproximateFuncs | AllowReassociation,
};
@@ -458,6 +467,7 @@ public:
void setAllowReassociation(bool b) { setFlag<AllowReassociation>(b); }
void setNoFPExcept(bool b) { setFlag<NoFPExcept>(b); }
void setUnpredictable(bool b) { setFlag<Unpredictable>(b); }
+ void setInBounds(bool b) { setFlag<InBounds>(b); }
// These are accessors for each flag.
bool hasNoUnsignedWrap() const { return Flags & NoUnsignedWrap; }
@@ -475,6 +485,7 @@ public:
bool hasAllowReassociation() const { return Flags & AllowReassociation; }
bool hasNoFPExcept() const { return Flags & NoFPExcept; }
bool hasUnpredictable() const { return Flags & Unpredictable; }
+ bool hasInBounds() const { return Flags & InBounds; }
bool operator==(const SDNodeFlags &Other) const {
return Flags == Other.Flags;
@@ -484,7 +495,7 @@ public:
};
LLVM_DECLARE_ENUM_AS_BITMASK(decltype(SDNodeFlags::None),
- SDNodeFlags::SameSign);
+ SDNodeFlags::InBounds);
inline SDNodeFlags operator|(SDNodeFlags LHS, SDNodeFlags RHS) {
LHS |= RHS;
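Adding InBounds also requires two pieces of bookkeeping in this hunk: it joins PoisonGeneratingFlags, and it replaces SameSign as the largest value passed to LLVM_DECLARE_ENUM_AS_BITMASK, which bounds the set of valid flag bits. A small self-contained sketch of why that largest-value bound must track the newest flag (toy trait, not LLVM's actual macro):

#include <cstdint>

enum Flags : uint16_t {
  None = 0,
  SameSign = 1 << 14,
  InBounds = 1 << 15, // new flag; must become the declared largest value
};

// Toy stand-in for the trait LLVM_DECLARE_ENUM_AS_BITMASK sets up: all bits
// up to and including the declared largest enumerator are considered valid.
template <uint16_t LargestValue> struct BitmaskTraits {
  static constexpr uint16_t ValidBits =
      static_cast<uint16_t>(LargestValue | (LargestValue - 1));
};

// Declared against SameSign, the mask would not cover the new bit; declared
// against InBounds, it does.
static_assert(!(BitmaskTraits<SameSign>::ValidBits & InBounds));
static_assert(BitmaskTraits<InBounds>::ValidBits & InBounds);

int main() { return 0; }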
diff --git a/llvm/include/llvm/CodeGen/StackProtector.h b/llvm/include/llvm/CodeGen/StackProtector.h
index dfafc78..fbe7935 100644
--- a/llvm/include/llvm/CodeGen/StackProtector.h
+++ b/llvm/include/llvm/CodeGen/StackProtector.h
@@ -86,7 +86,7 @@ class StackProtectorPass : public PassInfoMixin<StackProtectorPass> {
const TargetMachine *TM;
public:
- explicit StackProtectorPass(const TargetMachine *TM) : TM(TM) {}
+ explicit StackProtectorPass(const TargetMachine &TM) : TM(&TM) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
diff --git a/llvm/include/llvm/CodeGen/TypePromotion.h b/llvm/include/llvm/CodeGen/TypePromotion.h
index efe5823..ba32a21 100644
--- a/llvm/include/llvm/CodeGen/TypePromotion.h
+++ b/llvm/include/llvm/CodeGen/TypePromotion.h
@@ -26,7 +26,7 @@ private:
const TargetMachine *TM;
public:
- TypePromotionPass(const TargetMachine *TM): TM(TM) { }
+ TypePromotionPass(const TargetMachine &TM) : TM(&TM) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/Core.h b/llvm/include/llvm/ExecutionEngine/Orc/Core.h
index 8613ddd..f05febf 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/Core.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/Core.h
@@ -448,7 +448,7 @@ public:
FailedToMaterialize(std::shared_ptr<SymbolStringPool> SSP,
std::shared_ptr<SymbolDependenceMap> Symbols);
- ~FailedToMaterialize();
+ ~FailedToMaterialize() override;
std::error_code convertToErrorCode() const override;
void log(raw_ostream &OS) const override;
const SymbolDependenceMap &getSymbols() const { return *Symbols; }
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h b/llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h
index 254b897..e84eb4b 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h
@@ -70,7 +70,7 @@ public:
DebugObjectManagerPlugin(ExecutionSession &ES,
std::unique_ptr<DebugObjectRegistrar> Target,
bool RequireDebugSections, bool AutoRegisterCode);
- ~DebugObjectManagerPlugin();
+ ~DebugObjectManagerPlugin() override;
void notifyMaterializing(MaterializationResponsibility &MR,
jitlink::LinkGraph &G, jitlink::JITLinkContext &Ctx,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h b/llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h
index 179fedc..f9b1d2c 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h
@@ -32,7 +32,7 @@ public:
ExecutorAddr RegisterPerfEndAddr,
ExecutorAddr RegisterPerfImplAddr, bool EmitDebugInfo,
bool EmitUnwindInfo);
- ~PerfSupportPlugin();
+ ~PerfSupportPlugin() override;
void modifyPassConfig(MaterializationResponsibility &MR,
jitlink::LinkGraph &G,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h b/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
index fa48480..031bb27 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
@@ -52,7 +52,7 @@ public:
EPCGenericRTDyldMemoryManager(EPCGenericRTDyldMemoryManager &&) = delete;
EPCGenericRTDyldMemoryManager &
operator=(EPCGenericRTDyldMemoryManager &&) = delete;
- ~EPCGenericRTDyldMemoryManager();
+ ~EPCGenericRTDyldMemoryManager() override;
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h b/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
index fecffc2..fa4bb9d 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
@@ -285,7 +285,7 @@ public:
/// Map type for initializing the manager. See init.
using StubInitsMap = StringMap<std::pair<ExecutorAddr, JITSymbolFlags>>;
- virtual ~IndirectStubsManager() = default;
+ ~IndirectStubsManager() override = default;
/// Create a single stub with the given name, target address and flags.
virtual Error createStub(StringRef StubName, ExecutorAddr StubAddr,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/Layer.h b/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
index 8dfc12e..25df380 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
@@ -136,7 +136,7 @@ public:
static char ID;
ObjectLayer(ExecutionSession &ES);
- virtual ~ObjectLayer();
+ ~ObjectLayer() override;
/// Returns the execution session for this layer.
ExecutionSession &getExecutionSession() { return ES; }
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h b/llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h
index d3643f9..2079a35 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h
@@ -88,7 +88,7 @@ public:
std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr);
/// Destroy the LinkGraphLinkingLayer.
- ~LinkGraphLinkingLayer();
+ ~LinkGraphLinkingLayer() override;
/// Add a plugin.
LinkGraphLinkingLayer &addPlugin(std::shared_ptr<Plugin> P) {
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h b/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
index 1fb472a1..8c6a8f5 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
@@ -58,7 +58,7 @@ public:
RTDyldObjectLinkingLayer(ExecutionSession &ES,
GetMemoryManagerFunction GetMemoryManager);
- ~RTDyldObjectLinkingLayer();
+ ~RTDyldObjectLinkingLayer() override;
/// Emit the object.
void emit(std::unique_ptr<MaterializationResponsibility> R,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h b/llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h
index 7acb6a4..8176183 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h
@@ -69,7 +69,7 @@ public:
SimpleRemoteEPC &operator=(const SimpleRemoteEPC &) = delete;
SimpleRemoteEPC(SimpleRemoteEPC &&) = delete;
SimpleRemoteEPC &operator=(SimpleRemoteEPC &&) = delete;
- ~SimpleRemoteEPC();
+ ~SimpleRemoteEPC() override;
Expected<int32_t> runAsMain(ExecutorAddr MainFnAddr,
ArrayRef<std::string> Args) override;
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
index 85c2d65..2c385de 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
@@ -29,7 +29,7 @@ namespace rt_bootstrap {
class LLVM_ABI ExecutorSharedMemoryMapperService final
: public ExecutorBootstrapService {
public:
- ~ExecutorSharedMemoryMapperService(){};
+ ~ExecutorSharedMemoryMapperService() override {};
Expected<std::pair<ExecutorAddr, std::string>> reserve(uint64_t Size);
Expected<ExecutorAddr> initialize(ExecutorAddr Reservation,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h
index 7526a29d..7ca2ff2 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h
@@ -37,7 +37,7 @@ namespace rt_bootstrap {
/// Simple page-based allocator.
class LLVM_ABI SimpleExecutorDylibManager : public ExecutorBootstrapService {
public:
- virtual ~SimpleExecutorDylibManager();
+ ~SimpleExecutorDylibManager() override;
Expected<tpctypes::DylibHandle> open(const std::string &Path, uint64_t Mode);
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
index 6224e92..45256ec 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
@@ -32,7 +32,7 @@ namespace rt_bootstrap {
/// Simple page-based allocator.
class LLVM_ABI SimpleExecutorMemoryManager : public ExecutorBootstrapService {
public:
- virtual ~SimpleExecutorMemoryManager();
+ ~SimpleExecutorMemoryManager() override;
Expected<ExecutorAddr> reserve(uint64_t Size);
Expected<ExecutorAddr> initialize(tpctypes::FinalizeRequest &FR);
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h b/llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h
index 9cf6e00..b73da19 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h
@@ -37,7 +37,7 @@ class LLVM_ABI Task : public RTTIExtends<Task, RTTIRoot> {
public:
static char ID;
- virtual ~Task() = default;
+ ~Task() override = default;
/// Description of the task to be performed. Used for logging.
virtual void printDescription(raw_ostream &OS) = 0;
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h
index 45834f1..d8506ce 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h
@@ -186,9 +186,7 @@ private:
SmallVector<ElementId> SortedElems(ContainerElems.begin(),
ContainerElems.end());
llvm::sort(SortedElems);
- Hash = hash_combine(
- Hash, Container,
- hash_combine_range(SortedElems.begin(), SortedElems.end()));
+ Hash = hash_combine(Hash, Container, hash_combine_range(SortedElems));
}
return Hash;
}
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index e6cce9a4..4d59ee8 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1487,24 +1487,23 @@ def int_eh_sjlj_setup_dispatch : Intrinsic<[], []>;
//
def int_var_annotation : DefaultAttrsIntrinsic<
[], [llvm_anyptr_ty, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty, LLVMMatchType<1>],
- [IntrInaccessibleMemOnly], "llvm.var.annotation">;
+ [IntrInaccessibleMemOnly]>;
def int_ptr_annotation : DefaultAttrsIntrinsic<
[llvm_anyptr_ty],
[LLVMMatchType<0>, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty, LLVMMatchType<1>],
- [IntrInaccessibleMemOnly], "llvm.ptr.annotation">;
+ [IntrInaccessibleMemOnly]>;
def int_annotation : DefaultAttrsIntrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty],
- [IntrInaccessibleMemOnly], "llvm.annotation">;
+ [IntrInaccessibleMemOnly]>;
// Annotates the current program point with metadata strings which are emitted
// as CodeView debug info records. This is expensive, as it disables inlining
// and is modelled as having side effects.
def int_codeview_annotation : DefaultAttrsIntrinsic<[], [llvm_metadata_ty],
- [IntrInaccessibleMemOnly, IntrNoDuplicate],
- "llvm.codeview.annotation">;
+ [IntrInaccessibleMemOnly, IntrNoDuplicate]>;
//===------------------------ Trampoline Intrinsics -----------------------===//
//
@@ -1881,8 +1880,7 @@ def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
// Intrinsic to detect whether its argument is a constant.
def int_is_constant : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_any_ty],
- [IntrNoMem, IntrConvergent],
- "llvm.is.constant">;
+ [IntrNoMem, IntrConvergent]>;
// Introduce a use of the argument without generating any code.
def int_fake_use : DefaultAttrsIntrinsic<[], [llvm_vararg_ty],
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 9e334d4..8e35109 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3789,6 +3789,20 @@ def int_amdgcn_perm_pk16_b8_u4 : ClangBuiltin<"__builtin_amdgcn_perm_pk16_b8_u4"
DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i64_ty, llvm_v2i32_ty],
[IntrNoMem, IntrSpeculatable]>;
+class AMDGPUAddMinMax<LLVMType Ty, string Name> : ClangBuiltin<"__builtin_amdgcn_"#Name>,
+ DefaultAttrsIntrinsic<[Ty], [Ty, Ty, Ty, llvm_i1_ty /* clamp */],
+ [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]>;
+
+def int_amdgcn_add_max_i32 : AMDGPUAddMinMax<llvm_i32_ty, "add_max_i32">;
+def int_amdgcn_add_max_u32 : AMDGPUAddMinMax<llvm_i32_ty, "add_max_u32">;
+def int_amdgcn_add_min_i32 : AMDGPUAddMinMax<llvm_i32_ty, "add_min_i32">;
+def int_amdgcn_add_min_u32 : AMDGPUAddMinMax<llvm_i32_ty, "add_min_u32">;
+
+def int_amdgcn_pk_add_max_i16 : AMDGPUAddMinMax<llvm_v2i16_ty, "pk_add_max_i16">;
+def int_amdgcn_pk_add_max_u16 : AMDGPUAddMinMax<llvm_v2i16_ty, "pk_add_max_u16">;
+def int_amdgcn_pk_add_min_i16 : AMDGPUAddMinMax<llvm_v2i16_ty, "pk_add_min_i16">;
+def int_amdgcn_pk_add_min_u16 : AMDGPUAddMinMax<llvm_v2i16_ty, "pk_add_min_u16">;
+
class AMDGPUCooperativeAtomicStore<LLVMType Ty> : Intrinsic <
[],
[llvm_anyptr_ty, // pointer to store to
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 3af1750..c9df6c4 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -456,7 +456,7 @@ class WMMA_REGS<string Geom, string Frag, string PtxEltType, bit IsSparse = fals
}
class WMMA_NAME_LDST<string Op, WMMA_REGS Frag, string Layout, int WithStride> {
- string intr = "llvm.nvvm.wmma."
+ string intr_name = "llvm.nvvm.wmma."
# Frag.geom
# "." # Op
# "." # Frag.frag
@@ -467,7 +467,7 @@ class WMMA_NAME_LDST<string Op, WMMA_REGS Frag, string Layout, int WithStride> {
// TODO(tra): record name should ideally use the same field order as the intrinsic.
// E.g. string record = !subst("llvm", "int",
// !subst(".", "_", llvm));
- string record = "int_nvvm_wmma_"
+ string record_name = "int_nvvm_wmma_"
# Frag.geom
# "_" # Op
# "_" # Frag.frag
@@ -496,7 +496,7 @@ class MMA_SIGNATURE<WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
class WMMA_NAME<string ALayout, string BLayout, int Satfinite, string Rnd, string b1op,
WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
string signature = MMA_SIGNATURE<A, B, C, D>.ret;
- string record = "int_nvvm_wmma_"
+ string record_name = "int_nvvm_wmma_"
# A.geom
# "_mma"
# !subst(".", "_", b1op)
@@ -510,7 +510,7 @@ class WMMA_NAME<string ALayout, string BLayout, int Satfinite, string Rnd, strin
class MMA_NAME<string ALayout, string BLayout, int Satfinite, string b1op, string Kind,
WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
string signature = MMA_SIGNATURE<A, B, C, D>.ret;
- string record = "int_nvvm_mma"
+ string record_name = "int_nvvm_mma"
# !subst(".", "_", b1op)
# "_" # A.geom
# "_" # ALayout
@@ -524,7 +524,7 @@ class MMA_SP_NAME<string Metadata, string Kind, int Satfinite,
WMMA_REGS A, WMMA_REGS B,
WMMA_REGS C, WMMA_REGS D> {
string signature = MMA_SIGNATURE<A, B, C, D>.ret;
- string record = "int_nvvm_mma"
+ string record_name = "int_nvvm_mma"
# "_" # !subst("::", "_", Metadata)
# "_" # A.geom
# "_row_col"
@@ -533,26 +533,37 @@ class MMA_SP_NAME<string Metadata, string Kind, int Satfinite,
# signature;
}
+// Helper class that takes an intrinsic name and construct a record name.
+// Additionally, sets `intr_name` to be non-empty if the default name assigned
+// to this intrinsic will not match the name given.
+class IntrinsicName<string name> {
+ string record_name = !subst(".", "_",
+ !subst("llvm.", "int_", name));
+ // Use explicit intrinsic name if it has an _ in it, else rely on LLVM
+ // assigned default name.
+ string intr_name = !if(!ne(!find(name, "_"), -1), name, "");
+}
+
class LDMATRIX_NAME<WMMA_REGS Frag, int Trans> {
- string intr = "llvm.nvvm.ldmatrix.sync.aligned"
+ defvar name = "llvm.nvvm.ldmatrix.sync.aligned"
# "." # Frag.geom
# "." # Frag.frag
# !if(Trans, ".trans", "")
# "." # Frag.ptx_elt_type
;
- string record = !subst(".", "_",
- !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class STMATRIX_NAME<WMMA_REGS Frag, int Trans> {
- string intr = "llvm.nvvm.stmatrix.sync.aligned"
+ defvar name = "llvm.nvvm.stmatrix.sync.aligned"
# "." # Frag.geom
# "." # Frag.frag
# !if(Trans, ".trans", "")
# "." # Frag.ptx_elt_type
;
- string record = !subst(".", "_",
- !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
// Generates list of 4-tuples of WMMA_REGS representing a valid MMA op.
@@ -1042,45 +1053,49 @@ class NVVM_TCGEN05_MMA_BASE<string Space, bit Sp> {
class NVVM_TCGEN05_MMA<bit Sp, string Space,
bit AShift, bit ScaleInputD>:
NVVM_TCGEN05_MMA_BASE<Space, Sp> {
- string intr = "llvm.nvvm.tcgen05.mma"
+ string name = "llvm.nvvm.tcgen05.mma"
# !if(!eq(Sp, 1), ".sp", "")
# "." # Space
# !if(!eq(ScaleInputD, 1), ".scale_d", "")
# !if(!eq(AShift, 1), ".ashift", "");
- string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class NVVM_TCGEN05_MMA_BLOCKSCALE<bit Sp, string Space,
string Kind, string ScaleVecSize>:
NVVM_TCGEN05_MMA_BASE<Space, Sp> {
- string intr = "llvm.nvvm.tcgen05.mma"
+ string name = "llvm.nvvm.tcgen05.mma"
# !if(!eq(Sp, 1), ".sp", "")
# "." # Space
# "." # Kind
# ".block_scale" # ScaleVecSize;
- string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class NVVM_TCGEN05_MMA_WS<bit Sp, string Space, bit ZeroColMask>:
NVVM_TCGEN05_MMA_BASE<Space, Sp> {
- string intr = "llvm.nvvm.tcgen05.mma.ws"
+ string name = "llvm.nvvm.tcgen05.mma.ws"
# !if(!eq(Sp, 1), ".sp", "")
# "." # Space
# !if(!eq(ZeroColMask, 1), ".zero_col_mask", "");
- string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE<bit Sp, string Space,
int CtaGroup, bit AShift,
bit ScaleInputD>:
NVVM_TCGEN05_MMA_BASE<Space, Sp> {
- string intr = "llvm.nvvm.tcgen05.mma"
+ string name = "llvm.nvvm.tcgen05.mma"
# !if(!eq(Sp, 1), ".sp", "")
# "." # Space
# !if(!eq(ScaleInputD, 1), ".scale_d", "")
# ".disable_output_lane.cg" # CtaGroup
# !if(!eq(AShift, 1), ".ashift", "");
- string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED<string Kind, string ScaleVecSize> {
@@ -2273,7 +2288,7 @@ class NVVM_WMMA_LD<WMMA_REGS Frag, string Layout, int WithStride>
: Intrinsic<Frag.regs,
!if(WithStride, [llvm_anyptr_ty, llvm_i32_ty], [llvm_anyptr_ty]),
[IntrWillReturn, IntrReadMem, IntrArgMemOnly, IntrNoCallback, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
- WMMA_NAME_LDST<"load", Frag, Layout, WithStride>.intr>;
+ WMMA_NAME_LDST<"load", Frag, Layout, WithStride>.intr_name>;
// WMMA.STORE.D
class NVVM_WMMA_ST<WMMA_REGS Frag, string Layout, int WithStride>
@@ -2283,18 +2298,18 @@ class NVVM_WMMA_ST<WMMA_REGS Frag, string Layout, int WithStride>
Frag.regs,
!if(WithStride, [llvm_i32_ty], [])),
[IntrWriteMem, IntrArgMemOnly, IntrNoCallback, WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
- WMMA_NAME_LDST<"store", Frag, Layout, WithStride>.intr>;
+ WMMA_NAME_LDST<"store", Frag, Layout, WithStride>.intr_name>;
// Create all load/store variants
foreach layout = ["row", "col"] in {
foreach stride = [0, 1] in {
foreach frag = NVVM_MMA_OPS.all_ld_ops in
if NVVM_WMMA_LDST_SUPPORTED<frag, layout>.ret then
- def WMMA_NAME_LDST<"load", frag, layout, stride>.record
+ def WMMA_NAME_LDST<"load", frag, layout, stride>.record_name
: NVVM_WMMA_LD<frag, layout, stride>;
foreach frag = NVVM_MMA_OPS.all_st_ops in
if NVVM_WMMA_LDST_SUPPORTED<frag, layout>.ret then
- def WMMA_NAME_LDST<"store", frag, layout, stride>.record
+ def WMMA_NAME_LDST<"store", frag, layout, stride>.record_name
: NVVM_WMMA_ST<frag, layout, stride>;
}
}
@@ -2313,7 +2328,7 @@ foreach layout_a = ["row", "col"] in {
foreach b1op = NVVM_MMA_B1OPS<op>.ret in {
if NVVM_WMMA_SUPPORTED<op, layout_a, layout_b, satf, rnd>.ret then {
def WMMA_NAME<layout_a, layout_b, satf, rnd, b1op,
- op[0], op[1], op[2], op[3]>.record
+ op[0], op[1], op[2], op[3]>.record_name
: NVVM_MMA<op[0], op[1], op[2], op[3]>;
}
} // b1op
@@ -2330,7 +2345,7 @@ foreach layout_a = ["row", "col"] in {
foreach b1op = NVVM_MMA_B1OPS<op>.ret in {
foreach kind = ["", "kind::f8f6f4"] in {
if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, kind, satf>.ret then {
- def MMA_NAME<layout_a, layout_b, satf, b1op, kind, op[0], op[1], op[2], op[3]>.record
+ def MMA_NAME<layout_a, layout_b, satf, b1op, kind, op[0], op[1], op[2], op[3]>.record_name
: NVVM_MMA<op[0], op[1], op[2], op[3]>;
}
} // kind
@@ -2379,7 +2394,7 @@ foreach metadata = ["sp", "sp::ordered_metadata"] in {
foreach op = NVVM_MMA_OPS.all_mma_sp_ops in {
if NVVM_MMA_SP_SUPPORTED<op, metadata, kind, satf>.ret then {
def MMA_SP_NAME<metadata, kind, satf,
- op[0], op[1], op[2], op[3]>.record
+ op[0], op[1], op[2], op[3]>.record_name
: NVVM_MMA_SP<op[0], op[1], op[2], op[3]>;
}
} // op
@@ -2392,12 +2407,12 @@ class NVVM_LDMATRIX<WMMA_REGS Frag, int Transposed>
: Intrinsic<Frag.regs, [llvm_anyptr_ty],
[IntrReadMem, IntrArgMemOnly, IntrNoCallback, ReadOnly<ArgIndex<0>>,
NoCapture<ArgIndex<0>>],
- LDMATRIX_NAME<Frag, Transposed>.intr>;
+ LDMATRIX_NAME<Frag, Transposed>.intr_name>;
foreach transposed = [0, 1] in {
foreach frag = NVVM_MMA_OPS.all_ldmatrix_ops in {
if NVVM_LDMATRIX_SUPPORTED<frag, transposed>.ret then {
- def LDMATRIX_NAME<frag, transposed>.record
+ def LDMATRIX_NAME<frag, transposed>.record_name
: NVVM_LDMATRIX<frag, transposed>;
}
}
@@ -2409,12 +2424,12 @@ class NVVM_STMATRIX<WMMA_REGS Frag, int Transposed>
!listconcat([llvm_anyptr_ty], Frag.regs),
[IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
- STMATRIX_NAME<Frag, Transposed>.intr>;
+ STMATRIX_NAME<Frag, Transposed>.intr_name>;
foreach transposed = [0, 1] in {
foreach frag = NVVM_MMA_OPS.all_stmatrix_ops in {
if NVVM_STMATRIX_SUPPORTED<frag, transposed>.ret then {
- def STMATRIX_NAME<frag, transposed>.record
+ def STMATRIX_NAME<frag, transposed>.record_name
: NVVM_STMATRIX<frag, transposed>;
}
}
@@ -2767,14 +2782,15 @@ foreach cta_group = ["cg1", "cg2"] in {
"64x128b_warpx2_02_13",
"64x128b_warpx2_01_23",
"32x128b_warpx4"] in {
- defvar intr_suffix = StrJoin<"_", [shape, src_fmt, cta_group]>.ret;
- defvar name_suffix = StrJoin<".", [shape, src_fmt, cta_group]>.ret;
+ defvar name = "llvm.nvvm.tcgen05.cp." #
+ StrJoin<".", [shape, src_fmt, cta_group]>.ret;
- def int_nvvm_tcgen05_cp_ # intr_suffix : Intrinsic<[],
+ defvar intrinsic_name = IntrinsicName<name>;
+ def intrinsic_name.record_name : Intrinsic<[],
[llvm_tmem_ptr_ty, // tmem_addr
llvm_i64_ty], // smem descriptor
[IntrConvergent, IntrInaccessibleMemOrArgMemOnly, NoCapture<ArgIndex<0>>],
- "llvm.nvvm.tcgen05.cp." # name_suffix>;
+ intrinsic_name.intr_name>;
}
}
}
@@ -2881,9 +2897,9 @@ foreach sp = [0, 1] in {
]
);
- def mma.record:
+ def mma.record_name:
DefaultAttrsIntrinsicFlags<[], args, flags, intrinsic_properties,
- mma.intr>;
+ mma.intr_name>;
}
}
}
@@ -2918,8 +2934,8 @@ foreach sp = [0, 1] in {
Range<ArgIndex<!add(nargs, 1)>, 0, !if(!eq(ashift, 1), 2, 4)>]
);
- def mma.record: DefaultAttrsIntrinsicFlags<[], args, flags, intrinsic_properties,
- mma.intr>;
+ def mma.record_name : DefaultAttrsIntrinsicFlags<[], args, flags,
+ intrinsic_properties, mma.intr_name>;
} // ashift
} // scale_d
} // cta_group
@@ -2944,11 +2960,11 @@ foreach sp = [0, 1] in {
defvar collector_usage = ArgIndex<!add(nargs, 1)>;
if NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED<kind, scale_vec_size>.ret then {
- def mma.record: DefaultAttrsIntrinsicFlags<[], args, flags,
+ def mma.record_name : DefaultAttrsIntrinsicFlags<[], args, flags,
!listconcat(mma.common_intr_props,
[Range<cta_group, 1, 3>,
Range<collector_usage, 0, 4>]),
- mma.intr>;
+ mma.intr_name>;
}
}
}
@@ -2977,9 +2993,9 @@ foreach sp = [0, 1] in {
Range<ArgIndex<!add(nargs, 2)>, 0, 4>]
);
- def mma.record:
+ def mma.record_name:
DefaultAttrsIntrinsicFlags<[], args, flags, intrinsic_properties,
- mma.intr>;
+ mma.intr_name>;
}
}
}
diff --git a/llvm/include/llvm/IR/ModuleSummaryIndex.h b/llvm/include/llvm/IR/ModuleSummaryIndex.h
index cdfee72..eb60bee 100644
--- a/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -178,6 +178,8 @@ struct alignas(8) GlobalValueSummaryInfo {
/// only be called prior to index-based internalization and promotion.
inline void verifyLocal() const;
+ bool hasLocal() const { return HasLocal; }
+
private:
/// List of global value summary structures for a particular value held
/// in the GlobalValueMap. Requires a vector in the case of multiple
@@ -239,6 +241,8 @@ struct ValueInfo {
void verifyLocal() const { getRef()->second.verifyLocal(); }
+ bool hasLocal() const { return getRef()->second.hasLocal(); }
+
// Even if the index is built with GVs available, we may not have one for
// summary entries synthesized for profiled indirect call targets.
bool hasName() const { return !haveGVs() || getValue(); }
diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.td b/llvm/include/llvm/IR/RuntimeLibcalls.td
index a8b647c..3dc9055 100644
--- a/llvm/include/llvm/IR/RuntimeLibcalls.td
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.td
@@ -2117,7 +2117,9 @@ defvar MSP430DefaultOptOut = [
__fixdfdi, __fixunsdfsi, __modsi3, __floatunsisf,
__fixunsdfdi, __ltsf2, __floatdisf, __floatdidf,
__lshrsi3, __subsf3, __umodhi3, __floatunsidf,
- __floatundidf
+ __floatundidf, __gtdf2, __eqdf2, __gedf2, __ltdf2, __ledf2,
+ __adddf3, __divdf3, __divdi3, __moddi3,
+ __muldf3, __subdf3, __udivdi3, __umoddi3
];
// EABI Libcalls - EABI Section 6.2
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index d507ba2..581b4ad 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -62,6 +62,7 @@ LLVM_ABI void initializeBasicBlockSectionsPass(PassRegistry &);
LLVM_ABI void initializeBarrierNoopPass(PassRegistry &);
LLVM_ABI void initializeBasicAAWrapperPassPass(PassRegistry &);
LLVM_ABI void initializeBlockFrequencyInfoWrapperPassPass(PassRegistry &);
+LLVM_ABI void initializeMachineBlockHashInfoPass(PassRegistry &);
LLVM_ABI void initializeBranchFolderLegacyPass(PassRegistry &);
LLVM_ABI void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry &);
LLVM_ABI void initializeBranchRelaxationLegacyPass(PassRegistry &);
diff --git a/llvm/include/llvm/LTO/LTO.h b/llvm/include/llvm/LTO/LTO.h
index 000472f..a837cdd 100644
--- a/llvm/include/llvm/LTO/LTO.h
+++ b/llvm/include/llvm/LTO/LTO.h
@@ -317,6 +317,8 @@ LLVM_ABI ThinBackend createInProcessThinBackend(
/// distributor.
/// RemoteCompiler specifies the path to a Clang executable to be invoked for
/// the backend jobs.
+/// RemoteCompilerPrependArgs specifies a list of prepend arguments to be
+/// applied to the backend compilations.
/// RemoteCompilerArgs specifies a list of arguments to be applied to the
/// backend compilations.
/// SaveTemps is a debugging tool that prevents temporary files created by this
@@ -326,6 +328,7 @@ LLVM_ABI ThinBackend createOutOfProcessThinBackend(
bool ShouldEmitIndexFiles, bool ShouldEmitImportsFiles,
StringRef LinkerOutputFile, StringRef Distributor,
ArrayRef<StringRef> DistributorArgs, StringRef RemoteCompiler,
+ ArrayRef<StringRef> RemoteCompilerPrependArgs,
ArrayRef<StringRef> RemoteCompilerArgs, bool SaveTemps);
/// This ThinBackend writes individual module indexes to files, instead of
diff --git a/llvm/include/llvm/MC/MCContext.h b/llvm/include/llvm/MC/MCContext.h
index 4a528ee..74abe34 100644
--- a/llvm/include/llvm/MC/MCContext.h
+++ b/llvm/include/llvm/MC/MCContext.h
@@ -175,7 +175,7 @@ private:
unsigned GetInstance(unsigned LocalLabelVal);
/// SHT_LLVM_BB_ADDR_MAP version to emit.
- uint8_t BBAddrMapVersion = 3;
+ uint8_t BBAddrMapVersion = 4;
/// The file name of the log file from the environment variable
/// AS_SECURE_LOG_FILE. Which must be set before the .secure_log_unique
diff --git a/llvm/include/llvm/Passes/CodeGenPassBuilder.h b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
index 6a235c0..2f207925 100644
--- a/llvm/include/llvm/Passes/CodeGenPassBuilder.h
+++ b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
@@ -736,8 +736,8 @@ void CodeGenPassBuilder<Derived, TargetMachineT>::addISelPasses(
addPass(LowerEmuTLSPass());
addPass(PreISelIntrinsicLoweringPass(&TM));
- addPass(ExpandLargeDivRemPass(&TM));
- addPass(ExpandFpPass(&TM, getOptLevel()));
+ addPass(ExpandLargeDivRemPass(TM));
+ addPass(ExpandFpPass(TM, getOptLevel()));
derived().addIRPasses(addPass);
derived().addCodeGenPrepare(addPass);
@@ -773,7 +773,7 @@ void CodeGenPassBuilder<Derived, TargetMachineT>::addIRPasses(
// target lowering hook.
if (!Opt.DisableMergeICmps)
addPass(MergeICmpsPass());
- addPass(ExpandMemCmpPass(&TM));
+ addPass(ExpandMemCmpPass(TM));
}
// Run GC lowering passes for builtin collectors
@@ -812,7 +812,7 @@ void CodeGenPassBuilder<Derived, TargetMachineT>::addIRPasses(
// Convert conditional moves to conditional jumps when profitable.
if (getOptLevel() != CodeGenOptLevel::None && !Opt.DisableSelectOptimize)
- addPass(SelectOptimizePass(&TM));
+ addPass(SelectOptimizePass(TM));
if (Opt.EnableGlobalMergeFunc)
addPass(GlobalMergeFuncPass());
@@ -839,14 +839,14 @@ void CodeGenPassBuilder<Derived, TargetMachineT>::addPassesToHandleExceptions(
case ExceptionHandling::ARM:
case ExceptionHandling::AIX:
case ExceptionHandling::ZOS:
- addPass(DwarfEHPreparePass(&TM));
+ addPass(DwarfEHPreparePass(TM));
break;
case ExceptionHandling::WinEH:
// We support using both GCC-style and MSVC-style exceptions on Windows, so
// add both preparation passes. Each pass will only actually run if it
// recognizes the personality function.
addPass(WinEHPreparePass());
- addPass(DwarfEHPreparePass(&TM));
+ addPass(DwarfEHPreparePass(TM));
break;
case ExceptionHandling::Wasm:
// Wasm EH uses Windows EH instructions, but it does not need to demote PHIs
@@ -871,7 +871,7 @@ template <typename Derived, typename TargetMachineT>
void CodeGenPassBuilder<Derived, TargetMachineT>::addCodeGenPrepare(
AddIRPass &addPass) const {
if (getOptLevel() != CodeGenOptLevel::None && !Opt.DisableCGP)
- addPass(CodeGenPreparePass(&TM));
+ addPass(CodeGenPreparePass(TM));
// TODO: Default ctor'd RewriteSymbolPass is no-op.
// addPass(RewriteSymbolPass());
}
@@ -892,8 +892,8 @@ void CodeGenPassBuilder<Derived, TargetMachineT>::addISelPrepare(
addPass(CallBrPreparePass());
// Add both the safe stack and the stack protection passes: each of them will
// only protect functions that have corresponding attributes.
- addPass(SafeStackPass(&TM));
- addPass(StackProtectorPass(&TM));
+ addPass(SafeStackPass(TM));
+ addPass(StackProtectorPass(TM));
if (Opt.PrintISelInput)
addPass(PrintFunctionPass(dbgs(),
diff --git a/llvm/include/llvm/Support/GlobPattern.h b/llvm/include/llvm/Support/GlobPattern.h
index c1b4484..8cae6a3 100644
--- a/llvm/include/llvm/Support/GlobPattern.h
+++ b/llvm/include/llvm/Support/GlobPattern.h
@@ -63,21 +63,30 @@ public:
// Returns true for glob pattern "*". Can be used to avoid expensive
// preparation/acquisition of the input for match().
bool isTrivialMatchAll() const {
- if (!Prefix.empty())
+ if (PrefixSize)
return false;
- if (!Suffix.empty())
+ if (SuffixSize)
return false;
if (SubGlobs.size() != 1)
return false;
return SubGlobs[0].getPat() == "*";
}
- StringRef prefix() const { return Prefix; }
- StringRef suffix() const { return Suffix; }
+ // The following functions are just shortcuts for faster matching. They are
+ // conservative to simplify implementations.
+
+ // Returns plain prefix of the pattern.
+ StringRef prefix() const { return Pattern.take_front(PrefixSize); }
+ // Returns plain suffix of the pattern.
+ StringRef suffix() const { return Pattern.take_back(SuffixSize); }
+ // Returns the longest plain substring of the pattern between prefix and
+ // suffix.
+ StringRef longest_substr() const;
private:
- StringRef Prefix;
- StringRef Suffix;
+ StringRef Pattern;
+ size_t PrefixSize = 0;
+ size_t SuffixSize = 0;
struct SubGlobPattern {
/// \param Pat the pattern to match against