author     Mohammed Keyvanzadeh <mohammadkeyvanzade94@gmail.com>  2024-06-21 23:50:53 +0330
committer  GitHub <noreply@github.com>                            2024-06-21 23:50:53 +0330
commit     7b57a1b4018db0c987fb5a67effbef4d7559c4f1 (patch)
tree       49545c4bbf6ac7c575938ccfad2a8ca34a7caa3f /llvm
parent     bf824d98c06099c50413cd6c957a75b894a8ac26 (diff)
[llvm] format and terminate namespaces with closing comment (#94917)
Namespaces are terminated with a closing comment in the majority of the codebase, so do the same here for consistency. Also reformat the code within some of these namespaces to keep clang-format happy.
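For illustration, here is a minimal sketch of the convention this patch applies; the struct and its member are placeholders and do not appear in the diff below. Every namespace, named or anonymous, is closed with a comment naming it, and the code inside sits at the namespace's indentation level (namespaces add no extra indent in LLVM style).

    // Hypothetical illustration of the closing-comment convention; the names
    // are placeholders, not code from this patch. An anonymous namespace gets
    // a bare "// namespace" comment, a named namespace repeats its name.
    namespace llvm {
    namespace {

    struct ExampleTracker {
      bool Captured = false;
    };

    } // namespace
    } // namespace llvm

As far as the tooling goes, clang-format's FixNamespaceComments option, which the LLVM style enables, should add or repair these terminators automatically when a file is reformatted, which is why bare "}" and ad-hoc variants such as "// end llvm namespace" are rewritten here into the uniform "// namespace llvm" form.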
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Analysis/CallGraphSCCPass.cpp                     |   2
-rw-r--r--  llvm/lib/Analysis/CallPrinter.cpp                          |   4
-rw-r--r--  llvm/lib/Analysis/CaptureTracking.cpp                      | 184
-rw-r--r--  llvm/lib/Analysis/CycleAnalysis.cpp                        |   2
-rw-r--r--  llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp           |   2
-rw-r--r--  llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp  |   2
-rw-r--r--  llvm/lib/Analysis/InlineAdvisor.cpp                        |   2
-rw-r--r--  llvm/lib/Analysis/LazyValueInfo.cpp                        | 187
-rw-r--r--  llvm/lib/Analysis/LoopAnalysisManager.cpp                  |   2
-rw-r--r--  llvm/lib/Analysis/LoopPass.cpp                             |   2
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp                      |   2
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionDivision.cpp              |   2
12 files changed, 197 insertions, 196 deletions
diff --git a/llvm/lib/Analysis/CallGraphSCCPass.cpp b/llvm/lib/Analysis/CallGraphSCCPass.cpp
index 307dddd..ccba8b3 100644
--- a/llvm/lib/Analysis/CallGraphSCCPass.cpp
+++ b/llvm/lib/Analysis/CallGraphSCCPass.cpp
@@ -46,7 +46,7 @@ using namespace llvm;
namespace llvm {
cl::opt<unsigned> MaxDevirtIterations("max-devirt-iterations", cl::ReallyHidden,
cl::init(4));
-}
+} // namespace llvm
STATISTIC(MaxSCCIterations, "Maximum CGSCCPassMgr iterations on one SCC");
diff --git a/llvm/lib/Analysis/CallPrinter.cpp b/llvm/lib/Analysis/CallPrinter.cpp
index 65e3184..effa25f 100644
--- a/llvm/lib/Analysis/CallPrinter.cpp
+++ b/llvm/lib/Analysis/CallPrinter.cpp
@@ -29,7 +29,7 @@ using namespace llvm;
namespace llvm {
template <class GraphType> struct GraphTraits;
-}
+} // namespace llvm
// This option shows static (relative) call counts.
// FIXME:
@@ -215,7 +215,7 @@ struct DOTGraphTraits<CallGraphDOTInfo *> : public DefaultDOTGraphTraits {
}
};
-} // end llvm namespace
+} // namespace llvm
namespace {
void doCallGraphDOTPrinting(
diff --git a/llvm/lib/Analysis/CaptureTracking.cpp b/llvm/lib/Analysis/CaptureTracking.cpp
index 7f8f7b2..d8ca4a6 100644
--- a/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/llvm/lib/Analysis/CaptureTracking.cpp
@@ -72,127 +72,127 @@ bool CaptureTracker::isDereferenceableOrNull(Value *O, const DataLayout &DL) {
}
namespace {
- struct SimpleCaptureTracker : public CaptureTracker {
- explicit SimpleCaptureTracker(bool ReturnCaptures)
- : ReturnCaptures(ReturnCaptures) {}
+struct SimpleCaptureTracker : public CaptureTracker {
+ explicit SimpleCaptureTracker(bool ReturnCaptures)
+ : ReturnCaptures(ReturnCaptures) {}
- void tooManyUses() override {
- LLVM_DEBUG(dbgs() << "Captured due to too many uses\n");
- Captured = true;
- }
+ void tooManyUses() override {
+ LLVM_DEBUG(dbgs() << "Captured due to too many uses\n");
+ Captured = true;
+ }
- bool captured(const Use *U) override {
- if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
- return false;
+ bool captured(const Use *U) override {
+ if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
+ return false;
- LLVM_DEBUG(dbgs() << "Captured by: " << *U->getUser() << "\n");
+ LLVM_DEBUG(dbgs() << "Captured by: " << *U->getUser() << "\n");
- Captured = true;
- return true;
- }
+ Captured = true;
+ return true;
+ }
- bool ReturnCaptures;
+ bool ReturnCaptures;
- bool Captured = false;
- };
+ bool Captured = false;
+};
- /// Only find pointer captures which happen before the given instruction. Uses
- /// the dominator tree to determine whether one instruction is before another.
- /// Only support the case where the Value is defined in the same basic block
- /// as the given instruction and the use.
- struct CapturesBefore : public CaptureTracker {
+/// Only find pointer captures which happen before the given instruction. Uses
+/// the dominator tree to determine whether one instruction is before another.
+/// Only support the case where the Value is defined in the same basic block
+/// as the given instruction and the use.
+struct CapturesBefore : public CaptureTracker {
- CapturesBefore(bool ReturnCaptures, const Instruction *I,
- const DominatorTree *DT, bool IncludeI, const LoopInfo *LI)
- : BeforeHere(I), DT(DT), ReturnCaptures(ReturnCaptures),
- IncludeI(IncludeI), LI(LI) {}
+ CapturesBefore(bool ReturnCaptures, const Instruction *I,
+ const DominatorTree *DT, bool IncludeI, const LoopInfo *LI)
+ : BeforeHere(I), DT(DT), ReturnCaptures(ReturnCaptures),
+ IncludeI(IncludeI), LI(LI) {}
- void tooManyUses() override { Captured = true; }
+ void tooManyUses() override { Captured = true; }
- bool isSafeToPrune(Instruction *I) {
- if (BeforeHere == I)
- return !IncludeI;
+ bool isSafeToPrune(Instruction *I) {
+ if (BeforeHere == I)
+ return !IncludeI;
- // We explore this usage only if the usage can reach "BeforeHere".
- // If use is not reachable from entry, there is no need to explore.
- if (!DT->isReachableFromEntry(I->getParent()))
- return true;
+ // We explore this usage only if the usage can reach "BeforeHere".
+ // If use is not reachable from entry, there is no need to explore.
+ if (!DT->isReachableFromEntry(I->getParent()))
+ return true;
- // Check whether there is a path from I to BeforeHere.
- return !isPotentiallyReachable(I, BeforeHere, nullptr, DT, LI);
- }
+ // Check whether there is a path from I to BeforeHere.
+ return !isPotentiallyReachable(I, BeforeHere, nullptr, DT, LI);
+ }
- bool captured(const Use *U) override {
- Instruction *I = cast<Instruction>(U->getUser());
- if (isa<ReturnInst>(I) && !ReturnCaptures)
- return false;
+ bool captured(const Use *U) override {
+ Instruction *I = cast<Instruction>(U->getUser());
+ if (isa<ReturnInst>(I) && !ReturnCaptures)
+ return false;
- // Check isSafeToPrune() here rather than in shouldExplore() to avoid
- // an expensive reachability query for every instruction we look at.
- // Instead we only do one for actual capturing candidates.
- if (isSafeToPrune(I))
- return false;
+ // Check isSafeToPrune() here rather than in shouldExplore() to avoid
+ // an expensive reachability query for every instruction we look at.
+ // Instead we only do one for actual capturing candidates.
+ if (isSafeToPrune(I))
+ return false;
- Captured = true;
- return true;
- }
+ Captured = true;
+ return true;
+ }
- const Instruction *BeforeHere;
- const DominatorTree *DT;
+ const Instruction *BeforeHere;
+ const DominatorTree *DT;
- bool ReturnCaptures;
- bool IncludeI;
+ bool ReturnCaptures;
+ bool IncludeI;
- bool Captured = false;
+ bool Captured = false;
- const LoopInfo *LI;
- };
+ const LoopInfo *LI;
+};
- /// Find the 'earliest' instruction before which the pointer is known not to
- /// be captured. Here an instruction A is considered earlier than instruction
- /// B, if A dominates B. If 2 escapes do not dominate each other, the
- /// terminator of the common dominator is chosen. If not all uses cannot be
- /// analyzed, the earliest escape is set to the first instruction in the
- /// function entry block.
- // NOTE: Users have to make sure instructions compared against the earliest
- // escape are not in a cycle.
- struct EarliestCaptures : public CaptureTracker {
-
- EarliestCaptures(bool ReturnCaptures, Function &F, const DominatorTree &DT)
- : DT(DT), ReturnCaptures(ReturnCaptures), F(F) {}
-
- void tooManyUses() override {
- Captured = true;
- EarliestCapture = &*F.getEntryBlock().begin();
- }
+/// Find the 'earliest' instruction before which the pointer is known not to
+/// be captured. Here an instruction A is considered earlier than instruction
+/// B, if A dominates B. If 2 escapes do not dominate each other, the
+/// terminator of the common dominator is chosen. If not all uses cannot be
+/// analyzed, the earliest escape is set to the first instruction in the
+/// function entry block.
+// NOTE: Users have to make sure instructions compared against the earliest
+// escape are not in a cycle.
+struct EarliestCaptures : public CaptureTracker {
- bool captured(const Use *U) override {
- Instruction *I = cast<Instruction>(U->getUser());
- if (isa<ReturnInst>(I) && !ReturnCaptures)
- return false;
+ EarliestCaptures(bool ReturnCaptures, Function &F, const DominatorTree &DT)
+ : DT(DT), ReturnCaptures(ReturnCaptures), F(F) {}
- if (!EarliestCapture)
- EarliestCapture = I;
- else
- EarliestCapture = DT.findNearestCommonDominator(EarliestCapture, I);
- Captured = true;
+ void tooManyUses() override {
+ Captured = true;
+ EarliestCapture = &*F.getEntryBlock().begin();
+ }
- // Return false to continue analysis; we need to see all potential
- // captures.
+ bool captured(const Use *U) override {
+ Instruction *I = cast<Instruction>(U->getUser());
+ if (isa<ReturnInst>(I) && !ReturnCaptures)
return false;
- }
- Instruction *EarliestCapture = nullptr;
+ if (!EarliestCapture)
+ EarliestCapture = I;
+ else
+ EarliestCapture = DT.findNearestCommonDominator(EarliestCapture, I);
+ Captured = true;
- const DominatorTree &DT;
+ // Return false to continue analysis; we need to see all potential
+ // captures.
+ return false;
+ }
- bool ReturnCaptures;
+ Instruction *EarliestCapture = nullptr;
- bool Captured = false;
+ const DominatorTree &DT;
- Function &F;
- };
-}
+ bool ReturnCaptures;
+
+ bool Captured = false;
+
+ Function &F;
+};
+} // namespace
/// PointerMayBeCaptured - Return true if this pointer value may be captured
/// by the enclosing function (which is required to exist). This routine can
diff --git a/llvm/lib/Analysis/CycleAnalysis.cpp b/llvm/lib/Analysis/CycleAnalysis.cpp
index 41a95a4..4d7980a 100644
--- a/llvm/lib/Analysis/CycleAnalysis.cpp
+++ b/llvm/lib/Analysis/CycleAnalysis.cpp
@@ -15,7 +15,7 @@ using namespace llvm;
namespace llvm {
class Module;
-}
+} // namespace llvm
CycleInfo CycleAnalysis::run(Function &F, FunctionAnalysisManager &) {
CycleInfo CI;
diff --git a/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp b/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
index e27db66..6d6ec6c 100644
--- a/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
+++ b/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
@@ -39,7 +39,7 @@ cl::opt<unsigned> MediumBasicBlockInstructionThreshold(
"medium-basic-block-instruction-threshold", cl::Hidden, cl::init(15),
cl::desc("The minimum number of instructions a basic block should contain "
"before being considered medium-sized."));
-}
+} // namespace llvm
static cl::opt<unsigned> CallWithManyArgumentsThreshold(
"call-with-many-arguments-threshold", cl::Hidden, cl::init(4),
diff --git a/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp b/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp
index 279f76d..6667aff 100644
--- a/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp
+++ b/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp
@@ -32,7 +32,7 @@ cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats(
clEnumValN(InlinerFunctionImportStatsOpts::Verbose, "verbose",
"printing of statistics for each inlined function")),
cl::Hidden, cl::desc("Enable inliner stats for imported functions"));
-}
+} // namespace llvm
ImportedFunctionsInliningStatistics::InlineGraphNode &
ImportedFunctionsInliningStatistics::createInlineGraphNode(const Function &F) {
diff --git a/llvm/lib/Analysis/InlineAdvisor.cpp b/llvm/lib/Analysis/InlineAdvisor.cpp
index e2480d5..71dfcbc 100644
--- a/llvm/lib/Analysis/InlineAdvisor.cpp
+++ b/llvm/lib/Analysis/InlineAdvisor.cpp
@@ -64,7 +64,7 @@ static cl::opt<bool>
namespace llvm {
extern cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats;
-}
+} // namespace llvm
namespace {
using namespace llvm::ore;
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index aaa7baa..f10b2bc 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -60,8 +60,10 @@ INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
"Lazy Value Information Analysis", false, true)
namespace llvm {
- FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
+FunctionPass *createLazyValueInfoPass() {
+ return new LazyValueInfoWrapperPass();
}
+} // namespace llvm
AnalysisKey LazyValueAnalysis::Key;
@@ -151,114 +153,113 @@ namespace {
} // end anonymous namespace
namespace {
- using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
-
- /// This is the cache kept by LazyValueInfo which
- /// maintains information about queries across the clients' queries.
- class LazyValueInfoCache {
- /// This is all of the cached information for one basic block. It contains
- /// the per-value lattice elements, as well as a separate set for
- /// overdefined values to reduce memory usage. Additionally pointers
- /// dereferenced in the block are cached for nullability queries.
- struct BlockCacheEntry {
- SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
- SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
- // std::nullopt indicates that the nonnull pointers for this basic block
- // block have not been computed yet.
- std::optional<NonNullPointerSet> NonNullPointers;
- };
-
- /// Cached information per basic block.
- DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
- BlockCache;
- /// Set of value handles used to erase values from the cache on deletion.
- DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
-
- const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
- auto It = BlockCache.find_as(BB);
- if (It == BlockCache.end())
- return nullptr;
- return It->second.get();
- }
+using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
+
+/// This is the cache kept by LazyValueInfo which
+/// maintains information about queries across the clients' queries.
+class LazyValueInfoCache {
+ /// This is all of the cached information for one basic block. It contains
+ /// the per-value lattice elements, as well as a separate set for
+ /// overdefined values to reduce memory usage. Additionally pointers
+ /// dereferenced in the block are cached for nullability queries.
+ struct BlockCacheEntry {
+ SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
+ SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
+ // std::nullopt indicates that the nonnull pointers for this basic block
+ // block have not been computed yet.
+ std::optional<NonNullPointerSet> NonNullPointers;
+ };
- BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
- auto It = BlockCache.find_as(BB);
- if (It == BlockCache.end())
- It = BlockCache.insert({ BB, std::make_unique<BlockCacheEntry>() })
- .first;
+ /// Cached information per basic block.
+ DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
+ BlockCache;
+ /// Set of value handles used to erase values from the cache on deletion.
+ DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
+
+ const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
+ auto It = BlockCache.find_as(BB);
+ if (It == BlockCache.end())
+ return nullptr;
+ return It->second.get();
+ }
- return It->second.get();
- }
+ BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
+ auto It = BlockCache.find_as(BB);
+ if (It == BlockCache.end())
+ It = BlockCache.insert({BB, std::make_unique<BlockCacheEntry>()}).first;
- void addValueHandle(Value *Val) {
- auto HandleIt = ValueHandles.find_as(Val);
- if (HandleIt == ValueHandles.end())
- ValueHandles.insert({ Val, this });
- }
+ return It->second.get();
+ }
- public:
- void insertResult(Value *Val, BasicBlock *BB,
- const ValueLatticeElement &Result) {
- BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
+ void addValueHandle(Value *Val) {
+ auto HandleIt = ValueHandles.find_as(Val);
+ if (HandleIt == ValueHandles.end())
+ ValueHandles.insert({Val, this});
+ }
- // Insert over-defined values into their own cache to reduce memory
- // overhead.
- if (Result.isOverdefined())
- Entry->OverDefined.insert(Val);
- else
- Entry->LatticeElements.insert({ Val, Result });
+public:
+ void insertResult(Value *Val, BasicBlock *BB,
+ const ValueLatticeElement &Result) {
+ BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
+
+ // Insert over-defined values into their own cache to reduce memory
+ // overhead.
+ if (Result.isOverdefined())
+ Entry->OverDefined.insert(Val);
+ else
+ Entry->LatticeElements.insert({Val, Result});
+
+ addValueHandle(Val);
+ }
- addValueHandle(Val);
- }
+ std::optional<ValueLatticeElement> getCachedValueInfo(Value *V,
+ BasicBlock *BB) const {
+ const BlockCacheEntry *Entry = getBlockEntry(BB);
+ if (!Entry)
+ return std::nullopt;
- std::optional<ValueLatticeElement>
- getCachedValueInfo(Value *V, BasicBlock *BB) const {
- const BlockCacheEntry *Entry = getBlockEntry(BB);
- if (!Entry)
- return std::nullopt;
+ if (Entry->OverDefined.count(V))
+ return ValueLatticeElement::getOverdefined();
- if (Entry->OverDefined.count(V))
- return ValueLatticeElement::getOverdefined();
+ auto LatticeIt = Entry->LatticeElements.find_as(V);
+ if (LatticeIt == Entry->LatticeElements.end())
+ return std::nullopt;
- auto LatticeIt = Entry->LatticeElements.find_as(V);
- if (LatticeIt == Entry->LatticeElements.end())
- return std::nullopt;
+ return LatticeIt->second;
+ }
- return LatticeIt->second;
+ bool
+ isNonNullAtEndOfBlock(Value *V, BasicBlock *BB,
+ function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
+ BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
+ if (!Entry->NonNullPointers) {
+ Entry->NonNullPointers = InitFn(BB);
+ for (Value *V : *Entry->NonNullPointers)
+ addValueHandle(V);
}
- bool isNonNullAtEndOfBlock(
- Value *V, BasicBlock *BB,
- function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
- BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
- if (!Entry->NonNullPointers) {
- Entry->NonNullPointers = InitFn(BB);
- for (Value *V : *Entry->NonNullPointers)
- addValueHandle(V);
- }
-
- return Entry->NonNullPointers->count(V);
- }
+ return Entry->NonNullPointers->count(V);
+ }
- /// clear - Empty the cache.
- void clear() {
- BlockCache.clear();
- ValueHandles.clear();
- }
+ /// clear - Empty the cache.
+ void clear() {
+ BlockCache.clear();
+ ValueHandles.clear();
+ }
- /// Inform the cache that a given value has been deleted.
- void eraseValue(Value *V);
+ /// Inform the cache that a given value has been deleted.
+ void eraseValue(Value *V);
- /// This is part of the update interface to inform the cache
- /// that a block has been deleted.
- void eraseBlock(BasicBlock *BB);
+ /// This is part of the update interface to inform the cache
+ /// that a block has been deleted.
+ void eraseBlock(BasicBlock *BB);
- /// Updates the cache to remove any influence an overdefined value in
- /// OldSucc might have (unless also overdefined in NewSucc). This just
- /// flushes elements from the cache and does not add any.
- void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
- };
-}
+ /// Updates the cache to remove any influence an overdefined value in
+ /// OldSucc might have (unless also overdefined in NewSucc). This just
+ /// flushes elements from the cache and does not add any.
+ void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
+};
+} // namespace
void LazyValueInfoCache::eraseValue(Value *V) {
for (auto &Pair : BlockCache) {
diff --git a/llvm/lib/Analysis/LoopAnalysisManager.cpp b/llvm/lib/Analysis/LoopAnalysisManager.cpp
index 74b1da8..74c318e 100644
--- a/llvm/lib/Analysis/LoopAnalysisManager.cpp
+++ b/llvm/lib/Analysis/LoopAnalysisManager.cpp
@@ -133,7 +133,7 @@ LoopAnalysisManagerFunctionProxy::run(Function &F,
FunctionAnalysisManager &AM) {
return Result(*InnerAM, AM.getResult<LoopAnalysis>(F));
}
-}
+} // namespace llvm
PreservedAnalyses llvm::getLoopPassPreservedAnalyses() {
PreservedAnalyses PA;
diff --git a/llvm/lib/Analysis/LoopPass.cpp b/llvm/lib/Analysis/LoopPass.cpp
index 61d3a27..2159c45 100644
--- a/llvm/lib/Analysis/LoopPass.cpp
+++ b/llvm/lib/Analysis/LoopPass.cpp
@@ -59,7 +59,7 @@ public:
};
char PrintLoopPassWrapper::ID = 0;
-}
+} // namespace
//===----------------------------------------------------------------------===//
// LPPassManager
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 2802de6..b2cb672 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -13689,7 +13689,7 @@ raw_ostream &operator<<(raw_ostream &OS, ScalarEvolution::BlockDisposition BD) {
}
return OS;
}
-}
+} // namespace llvm
void ScalarEvolution::print(raw_ostream &OS) const {
// ScalarEvolution's implementation of the print method is to print
diff --git a/llvm/lib/Analysis/ScalarEvolutionDivision.cpp b/llvm/lib/Analysis/ScalarEvolutionDivision.cpp
index e1dd834..d03930d 100644
--- a/llvm/lib/Analysis/ScalarEvolutionDivision.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionDivision.cpp
@@ -21,7 +21,7 @@
namespace llvm {
class Type;
-}
+} // namespace llvm
using namespace llvm;