author     Kazu Hirata <kazu@google.com>  2022-06-25 11:55:57 -0700
committer  Kazu Hirata <kazu@google.com>  2022-06-25 11:55:57 -0700
commit     aa8feeefd3ac6c78ee8f67bf033976fc7d68bc6d (patch)
tree       d207b35cfb445636f41204bcfe51f6ca3a94a3ba /llvm/lib
parent     b8df4093e4d82c67a419911a46b63482043643e5 (diff)
Don't use Optional::hasValue (NFC)
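This commit is part of the migration from llvm::Optional's hasValue()/getValue()
accessors to the std::optional-compatible spellings: has_value(), contextual
conversion to bool, and operator*/operator->. A minimal sketch of the pattern
applied throughout the diff below (demo() and consume() are hypothetical names,
not taken from the patch):

    #include "llvm/ADT/Optional.h"

    static void consume(int);  // hypothetical sink, not from the patch

    static void demo(llvm::Optional<int> MaybeN) {
      // Before: explicit accessors, removed by this commit.
      //   if (MaybeN.hasValue())
      //     consume(MaybeN.getValue());

      // After: std::optional-compatible spellings.
      if (MaybeN)          // contextual conversion to bool
        consume(*MaybeN);  // operator* dereferences the contained value

      bool IsSet = MaybeN.has_value();  // where the predicate itself is needed
      (void)IsSet;
    }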
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/BranchProbabilityInfo.cpp | 5
-rw-r--r--  llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp | 4
-rw-r--r--  llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp | 10
-rw-r--r--  llvm/lib/Analysis/IRSimilarityIdentifier.cpp | 6
-rw-r--r--  llvm/lib/Analysis/InlineCost.cpp | 14
-rw-r--r--  llvm/lib/Analysis/InstructionSimplify.cpp | 41
-rw-r--r--  llvm/lib/Analysis/LazyValueInfo.cpp | 8
-rw-r--r--  llvm/lib/Analysis/LoopCacheAnalysis.cpp | 4
-rw-r--r--  llvm/lib/Analysis/MemoryBuiltins.cpp | 29
-rw-r--r--  llvm/lib/Analysis/MemorySSA.cpp | 4
-rw-r--r--  llvm/lib/Analysis/MustExecute.cpp | 4
-rw-r--r--  llvm/lib/Analysis/ProfileSummaryInfo.cpp | 12
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 28
-rw-r--r--  llvm/lib/Analysis/StratifiedSets.h | 4
-rw-r--r--  llvm/lib/Analysis/VectorUtils.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/BasicBlockSections.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MIParser.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/MachineBasicBlock.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/MachineFunctionSplitter.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/ModuloSchedule.cpp | 16
-rw-r--r--  llvm/lib/CodeGen/SelectOptimize.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 9
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 28
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp | 14
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 8
-rw-r--r--  llvm/lib/DebugInfo/CodeView/CodeViewRecordIO.cpp | 6
-rw-r--r--  llvm/lib/DebugInfo/CodeView/TypeRecordMapping.cpp | 16
-rw-r--r--  llvm/lib/DebugInfo/DWARF/DWARFContext.cpp | 5
-rw-r--r--  llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp | 10
-rw-r--r--  llvm/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp | 4
-rw-r--r--  llvm/lib/Frontend/OpenMP/OMPContext.cpp | 10
-rw-r--r--  llvm/lib/IR/Instructions.cpp | 13
-rw-r--r--  llvm/lib/IR/IntrinsicInst.cpp | 17
-rw-r--r--  llvm/lib/IR/LLVMContextImpl.cpp | 6
-rw-r--r--  llvm/lib/IR/VectorBuilder.cpp | 4
-rw-r--r--  llvm/lib/IR/Verifier.cpp | 4
-rw-r--r--  llvm/lib/InterfaceStub/IFSHandler.cpp | 23
-rw-r--r--  llvm/lib/MC/MCContext.cpp | 16
-rw-r--r--  llvm/lib/MC/MCDisassembler/MCDisassembler.cpp | 11
-rw-r--r--  llvm/lib/MC/MCParser/MasmParser.cpp | 11
-rw-r--r--  llvm/lib/MC/MCSchedule.cpp | 12
-rw-r--r--  llvm/lib/MC/MCSectionXCOFF.cpp | 4
-rw-r--r--  llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp | 14
-rw-r--r--  llvm/lib/Object/ELFObjectFile.cpp | 36
-rw-r--r--  llvm/lib/ObjectYAML/DXContainerEmitter.cpp | 6
-rw-r--r--  llvm/lib/Support/Process.cpp | 4
-rw-r--r--  llvm/lib/Support/VirtualFileSystem.cpp | 16
-rw-r--r--  llvm/lib/Support/raw_ostream.cpp | 4
-rw-r--r--  llvm/lib/TableGen/Record.cpp | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp | 8
-rw-r--r--  llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp | 8
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp | 8
-rw-r--r--  llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp | 8
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/PowerPC/PPCMacroFusion.cpp | 4
-rw-r--r--  llvm/lib/Target/PowerPC/PPCTargetMachine.cpp | 4
-rw-r--r--  llvm/lib/Target/VE/VVPISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp | 17
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp | 4
-rw-r--r--  llvm/lib/Transforms/IPO/Attributor.cpp | 18
-rw-r--r--  llvm/lib/Transforms/IPO/AttributorAttributes.cpp | 175
-rw-r--r--  llvm/lib/Transforms/IPO/IROutliner.cpp | 65
-rw-r--r--  llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 18
-rw-r--r--  llvm/lib/Transforms/IPO/SampleContextTracker.cpp | 4
-rw-r--r--  llvm/lib/Transforms/IPO/SampleProfile.cpp | 4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 5
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/ConstantHoisting.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/GVN.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp | 31
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopDistribute.cpp | 14
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 9
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopRotation.cpp | 9
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp | 16
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp | 30
-rw-r--r--  llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Utils/CodeExtractor.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Utils/LoopPeel.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Utils/LoopUtils.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Utils/MisExpect.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Utils/ModuleUtils.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 16
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 4
-rw-r--r--  llvm/lib/WindowsDriver/MSVCPaths.cpp | 12
91 files changed, 536 insertions(+), 580 deletions(-)
diff --git a/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index 1d88042..2bca424 100644
--- a/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -826,9 +826,8 @@ void BranchProbabilityInfo::computeEestimateBlockWeight(
if (auto BBWeight = getInitialEstimatedBlockWeight(BB))
// If we were able to find estimated weight for the block set it to this
// block and propagate up the IR.
- propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT,
- BBWeight.getValue(), BlockWorkList,
- LoopWorkList);
+ propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT, *BBWeight,
+ BlockWorkList, LoopWorkList);
// BlockWorklist/LoopWorkList contains blocks/loops with at least one
// successor/exit having estimated weight. Try to propagate weight to such
diff --git a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
index 1216d03..602a018 100644
--- a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
@@ -831,14 +831,14 @@ CFLAndersAAResult::ensureCached(const Function &Fn) {
scan(Fn);
Iter = Cache.find(&Fn);
assert(Iter != Cache.end());
- assert(Iter->second.hasValue());
+ assert(Iter->second);
}
return Iter->second;
}
const AliasSummary *CFLAndersAAResult::getAliasSummary(const Function &Fn) {
auto &FunInfo = ensureCached(Fn);
- if (FunInfo.hasValue())
+ if (FunInfo)
return &FunInfo->getAliasSummary();
else
return nullptr;
diff --git a/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp b/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp
index b831a59..f92869c 100644
--- a/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp
@@ -250,14 +250,14 @@ CFLSteensAAResult::ensureCached(Function *Fn) {
scan(Fn);
Iter = Cache.find(Fn);
assert(Iter != Cache.end());
- assert(Iter->second.hasValue());
+ assert(Iter->second);
}
return Iter->second;
}
const AliasSummary *CFLSteensAAResult::getAliasSummary(Function &Fn) {
auto &FunInfo = ensureCached(&Fn);
- if (FunInfo.hasValue())
+ if (FunInfo)
return &FunInfo->getAliasSummary();
else
return nullptr;
@@ -293,15 +293,15 @@ AliasResult CFLSteensAAResult::query(const MemoryLocation &LocA,
assert(Fn != nullptr);
auto &MaybeInfo = ensureCached(Fn);
- assert(MaybeInfo.hasValue());
+ assert(MaybeInfo);
auto &Sets = MaybeInfo->getStratifiedSets();
auto MaybeA = Sets.find(InstantiatedValue{ValA, 0});
- if (!MaybeA.hasValue())
+ if (!MaybeA)
return AliasResult::MayAlias;
auto MaybeB = Sets.find(InstantiatedValue{ValB, 0});
- if (!MaybeB.hasValue())
+ if (!MaybeB)
return AliasResult::MayAlias;
auto SetA = *MaybeA;
diff --git a/llvm/lib/Analysis/IRSimilarityIdentifier.cpp b/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
index c945050..81aa854 100644
--- a/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
+++ b/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
@@ -183,9 +183,9 @@ CmpInst::Predicate IRInstructionData::getPredicate() const {
assert(isa<CmpInst>(Inst) &&
"Can only get a predicate from a compare instruction");
- if (RevisedPredicate.hasValue())
- return RevisedPredicate.getValue();
-
+ if (RevisedPredicate)
+ return *RevisedPredicate;
+
return cast<CmpInst>(Inst)->getPredicate();
}
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 63fe651..f2dcaa8 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -703,8 +703,8 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
BlockFrequencyInfo *BFI = &(GetBFI(F));
assert(BFI && "BFI must be available");
auto ProfileCount = BFI->getBlockProfileCount(BB);
- assert(ProfileCount.hasValue());
- if (ProfileCount.getValue() == 0)
+ assert(ProfileCount);
+ if (*ProfileCount == 0)
ColdSize += Cost - CostAtBBStart;
}
@@ -828,14 +828,14 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
}
auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
- assert(ProfileCount.hasValue());
- CurrentSavings *= ProfileCount.getValue();
+ assert(ProfileCount);
+ CurrentSavings *= *ProfileCount;
CycleSavings += CurrentSavings;
}
// Compute the cycle savings per call.
auto EntryProfileCount = F.getEntryCount();
- assert(EntryProfileCount.hasValue() && EntryProfileCount->getCount());
+ assert(EntryProfileCount && EntryProfileCount->getCount());
auto EntryCount = EntryProfileCount->getCount();
CycleSavings += EntryCount / 2;
CycleSavings = CycleSavings.udiv(EntryCount);
@@ -1800,12 +1800,12 @@ void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
// return min(A, B) if B is valid.
auto MinIfValid = [](int A, Optional<int> B) {
- return B ? std::min(A, B.getValue()) : A;
+ return B ? std::min(A, *B) : A;
};
// return max(A, B) if B is valid.
auto MaxIfValid = [](int A, Optional<int> B) {
- return B ? std::max(A, B.getValue()) : A;
+ return B ? std::max(A, *B) : A;
};
// Various bonus percentages. These are multiplied by Threshold to get the
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 07e3392..a7aec39 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -6123,9 +6123,9 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
Value *Op1 = Call->getArgOperand(1);
Value *Op2 = Call->getArgOperand(2);
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue()))
+ if (Value *V =
+ simplifyFPOp({Op0, Op1, Op2}, {}, Q, *FPI->getExceptionBehavior(),
+ *FPI->getRoundingMode()))
return V;
return nullptr;
}
@@ -6189,38 +6189,33 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
}
case Intrinsic::experimental_constrained_fadd: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFAddInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFAddInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
}
case Intrinsic::experimental_constrained_fsub: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFSubInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFSubInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
}
case Intrinsic::experimental_constrained_fmul: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFMulInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFMulInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
}
case Intrinsic::experimental_constrained_fdiv: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFDivInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFDivInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
}
case Intrinsic::experimental_constrained_frem: {
auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
- return simplifyFRemInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
- FPI->getFastMathFlags(), Q,
- FPI->getExceptionBehavior().getValue(),
- FPI->getRoundingMode().getValue());
+ return simplifyFRemInst(
+ FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
+ Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
}
default:
return nullptr;
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 5803c3d..e82c3ae 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -918,10 +918,10 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueCast(
// transfer rule on the full set since we may be able to locally infer
// interesting facts.
Optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
- if (!LHSRes.hasValue())
+ if (!LHSRes)
// More work to do before applying this transfer rule.
return None;
- const ConstantRange &LHSRange = LHSRes.getValue();
+ const ConstantRange &LHSRange = *LHSRes;
const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
@@ -946,8 +946,8 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
// More work to do before applying this transfer rule.
return None;
- const ConstantRange &LHSRange = LHSRes.getValue();
- const ConstantRange &RHSRange = RHSRes.getValue();
+ const ConstantRange &LHSRange = *LHSRes;
+ const ConstantRange &RHSRange = *RHSRes;
return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
}
diff --git a/llvm/lib/Analysis/LoopCacheAnalysis.cpp b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
index 002e993..2cbf1f7 100644
--- a/llvm/lib/Analysis/LoopCacheAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
@@ -645,8 +645,8 @@ bool CacheCost::populateReferenceGroups(ReferenceGroupsTy &RefGroups) const {
Optional<bool> HasSpacialReuse =
R->hasSpacialReuse(Representative, CLS, AA);
- if ((HasTemporalReuse.hasValue() && *HasTemporalReuse) ||
- (HasSpacialReuse.hasValue() && *HasSpacialReuse)) {
+ if ((HasTemporalReuse && *HasTemporalReuse) ||
+ (HasSpacialReuse && *HasSpacialReuse)) {
RefGroup.push_back(std::move(R));
Added = true;
break;
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 3c0e494..ff303ce 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -270,54 +270,53 @@ static Optional<AllocFnsTy> getAllocationSize(const Value *V,
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
- return getAllocationData(V, AnyAlloc, TLI).hasValue();
+ return getAllocationData(V, AnyAlloc, TLI).has_value();
}
bool llvm::isAllocationFn(
const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
- return getAllocationData(V, AnyAlloc, GetTLI).hasValue();
+ return getAllocationData(V, AnyAlloc, GetTLI).has_value();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
static bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
- return getAllocationData(V, MallocOrOpNewLike, TLI).hasValue();
+ return getAllocationData(V, MallocOrOpNewLike, TLI).has_value();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
static bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
- return getAllocationData(V, AlignedAllocLike, TLI)
- .hasValue();
+ return getAllocationData(V, AlignedAllocLike, TLI).has_value();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
static bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
- return getAllocationData(V, CallocLike, TLI).hasValue();
+ return getAllocationData(V, CallocLike, TLI).has_value();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
- return getAllocationData(V, MallocOrCallocLike, TLI).hasValue();
+ return getAllocationData(V, MallocOrCallocLike, TLI).has_value();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
- return getAllocationData(V, AllocLike, TLI).hasValue();
+ return getAllocationData(V, AllocLike, TLI).has_value();
}
/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
- return getAllocationData(V, ReallocLike, TLI).hasValue();
+ return getAllocationData(V, ReallocLike, TLI).has_value();
}
/// Tests if a functions is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
- return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue();
+ return getAllocationDataForFunction(F, ReallocLike, TLI).has_value();
}
bool llvm::isAllocRemovable(const CallBase *CB, const TargetLibraryInfo *TLI) {
@@ -501,18 +500,18 @@ Optional<StringRef> llvm::getAllocationFamily(const Value *I,
if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
return None;
const auto AllocData = getAllocationDataForFunction(Callee, AnyAlloc, TLI);
- if (AllocData.hasValue())
- return mangledNameForMallocFamily(AllocData.getValue().Family);
+ if (AllocData)
+ return mangledNameForMallocFamily(AllocData->Family);
const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn);
- if (FreeData.hasValue())
- return mangledNameForMallocFamily(FreeData.getValue().Family);
+ if (FreeData)
+ return mangledNameForMallocFamily(FreeData->Family);
return None;
}
/// isLibFreeFunction - Returns true if the function is a builtin free()
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
Optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn);
- if (!FnData.hasValue())
+ if (!FnData)
return false;
// Check free prototype.
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index f63898b..76371b8 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -749,9 +749,9 @@ template <class AliasAnalysisType> class ClobberWalker {
}
bool operator==(const generic_def_path_iterator &O) const {
- if (N.hasValue() != O.N.hasValue())
+ if (N.has_value() != O.N.has_value())
return false;
- return !N.hasValue() || *N == *O.N;
+ return !N || *N == *O.N;
}
private:
diff --git a/llvm/lib/Analysis/MustExecute.cpp b/llvm/lib/Analysis/MustExecute.cpp
index c785b50..8dbce26 100644
--- a/llvm/lib/Analysis/MustExecute.cpp
+++ b/llvm/lib/Analysis/MustExecute.cpp
@@ -491,9 +491,9 @@ template <typename K, typename V, typename FnTy, typename... ArgsTy>
static V getOrCreateCachedOptional(K Key, DenseMap<K, Optional<V>> &Map,
FnTy &&Fn, ArgsTy&&... args) {
Optional<V> &OptVal = Map[Key];
- if (!OptVal.hasValue())
+ if (!OptVal)
OptVal = Fn(std::forward<ArgsTy>(args)...);
- return OptVal.getValue();
+ return *OptVal;
}
const BasicBlock *
diff --git a/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
index 9d5fa6d..b564dbe 100644
--- a/llvm/lib/Analysis/ProfileSummaryInfo.cpp
+++ b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
@@ -279,19 +279,19 @@ ProfileSummaryInfo::computeThreshold(int PercentileCutoff) const {
}
bool ProfileSummaryInfo::hasHugeWorkingSetSize() const {
- return HasHugeWorkingSetSize && HasHugeWorkingSetSize.getValue();
+ return HasHugeWorkingSetSize && *HasHugeWorkingSetSize;
}
bool ProfileSummaryInfo::hasLargeWorkingSetSize() const {
- return HasLargeWorkingSetSize && HasLargeWorkingSetSize.getValue();
+ return HasLargeWorkingSetSize && *HasLargeWorkingSetSize;
}
bool ProfileSummaryInfo::isHotCount(uint64_t C) const {
- return HotCountThreshold && C >= HotCountThreshold.getValue();
+ return HotCountThreshold && C >= *HotCountThreshold;
}
bool ProfileSummaryInfo::isColdCount(uint64_t C) const {
- return ColdCountThreshold && C <= ColdCountThreshold.getValue();
+ return ColdCountThreshold && C <= *ColdCountThreshold;
}
template <bool isHot>
@@ -299,9 +299,9 @@ bool ProfileSummaryInfo::isHotOrColdCountNthPercentile(int PercentileCutoff,
uint64_t C) const {
auto CountThreshold = computeThreshold(PercentileCutoff);
if (isHot)
- return CountThreshold && C >= CountThreshold.getValue();
+ return CountThreshold && C >= *CountThreshold;
else
- return CountThreshold && C <= CountThreshold.getValue();
+ return CountThreshold && C <= *CountThreshold;
}
bool ProfileSummaryInfo::isHotCountNthPercentile(int PercentileCutoff,
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 5eb5c6b..7951180 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -4847,16 +4847,16 @@ public:
SelectInst *SI = cast<SelectInst>(I);
Optional<const SCEV *> Res =
compareWithBackedgeCondition(SI->getCondition());
- if (Res.hasValue()) {
- bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
+ if (Res) {
+ bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne();
Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
}
break;
}
default: {
Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
- if (Res.hasValue())
- Result = Res.getValue();
+ if (Res)
+ Result = *Res;
break;
}
}
@@ -6596,9 +6596,9 @@ ScalarEvolution::getRangeRef(const SCEV *S,
// Check if the IR explicitly contains !range metadata.
Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
- if (MDRange.hasValue())
- ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
- RangeType);
+ if (MDRange)
+ ConservativeResult =
+ ConservativeResult.intersectWith(*MDRange, RangeType);
// Use facts about recurrences in the underlying IR. Note that add
// recurrences are AddRecExprs and thus don't hit this path. This
@@ -9710,15 +9710,15 @@ GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
/// (b) if neither X nor Y exist, return None,
/// (c) if exactly one of X and Y exists, return that value.
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
- if (X.hasValue() && Y.hasValue()) {
+ if (X && Y) {
unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
APInt XW = X->sext(W);
APInt YW = Y->sext(W);
return XW.slt(YW) ? *X : *Y;
}
- if (!X.hasValue() && !Y.hasValue())
+ if (!X && !Y)
return None;
- return X.hasValue() ? *X : *Y;
+ return X ? *X : *Y;
}
/// Helper function to truncate an optional APInt to a given BitWidth.
@@ -9760,13 +9760,13 @@ SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
APInt A, B, C, M;
unsigned BitWidth;
auto T = GetQuadraticEquation(AddRec);
- if (!T.hasValue())
+ if (!T)
return None;
std::tie(A, B, C, M, BitWidth) = *T;
LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
- if (!X.hasValue())
+ if (!X)
return None;
ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
@@ -10471,8 +10471,8 @@ ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
auto ResultSwapped =
getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
- assert(ResultSwapped.hasValue() && "should be able to analyze both!");
- assert(ResultSwapped.getValue() != Result.getValue() &&
+ assert(ResultSwapped && "should be able to analyze both!");
+ assert(*ResultSwapped != *Result &&
"monotonicity should flip as we flip the predicate");
}
#endif
diff --git a/llvm/lib/Analysis/StratifiedSets.h b/llvm/lib/Analysis/StratifiedSets.h
index 8468f2b..883ebd2 100644
--- a/llvm/lib/Analysis/StratifiedSets.h
+++ b/llvm/lib/Analysis/StratifiedSets.h
@@ -340,10 +340,10 @@ public:
return StratifiedSets<T>(std::move(Values), std::move(StratLinks));
}
- bool has(const T &Elem) const { return get(Elem).hasValue(); }
+ bool has(const T &Elem) const { return get(Elem).has_value(); }
bool add(const T &Main) {
- if (get(Main).hasValue())
+ if (get(Main))
return false;
auto NewIndex = getNewUnlinkedIndex();
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index beb14c4..56a5983 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -1501,8 +1501,8 @@ void VFABI::getVectorVariantNames(
#ifndef NDEBUG
LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
- assert(Info.hasValue() && "Invalid name for a VFABI variant.");
- assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
+ assert(Info && "Invalid name for a VFABI variant.");
+ assert(CI.getModule()->getFunction(Info->VectorName) &&
"Vector function is missing.");
#endif
VariantMappings.push_back(std::string(S));
diff --git a/llvm/lib/CodeGen/BasicBlockSections.cpp b/llvm/lib/CodeGen/BasicBlockSections.cpp
index e6f4e39..b6b018c 100644
--- a/llvm/lib/CodeGen/BasicBlockSections.cpp
+++ b/llvm/lib/CodeGen/BasicBlockSections.cpp
@@ -234,9 +234,8 @@ assignSections(MachineFunction &MF,
// If we already have one cluster containing eh_pads, this must be updated
// to ExceptionSectionID. Otherwise, we set it equal to the current
// section ID.
- EHPadsSectionID = EHPadsSectionID.hasValue()
- ? MBBSectionID::ExceptionSectionID
- : MBB.getSectionID();
+ EHPadsSectionID = EHPadsSectionID ? MBBSectionID::ExceptionSectionID
+ : MBB.getSectionID();
}
}
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 674649c..2c94f87 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1291,12 +1291,12 @@ bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI,
Register SrcReg = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(DstReg);
Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI);
- return Cst.hasValue();
+ return Cst.has_value();
}
void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
Optional<APFloat> &Cst) {
- assert(Cst.hasValue() && "Optional is unexpectedly empty!");
+ assert(Cst && "Optional is unexpectedly empty!");
Builder.setInstrAndDebugLoc(MI);
MachineFunction &MF = Builder.getMF();
auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst);
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 879ec00..5ddfc69 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -741,8 +741,8 @@ bool MIParser::parseBasicBlockDefinition(
MBB->setIsEHPad(IsLandingPad);
MBB->setIsInlineAsmBrIndirectTarget(IsInlineAsmBrIndirectTarget);
MBB->setIsEHFuncletEntry(IsEHFuncletEntry);
- if (SectionID.hasValue()) {
- MBB->setSectionID(SectionID.getValue());
+ if (SectionID) {
+ MBB->setSectionID(*SectionID);
MF.setBBSectionsType(BasicBlockSection::List);
}
return false;
diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
index c186d0b..a50e188a 100644
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -450,8 +450,8 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
if (IrrLoopHeaderWeight && IsStandalone) {
if (Indexes) OS << '\t';
- OS.indent(2) << "; Irreducible loop header weight: "
- << IrrLoopHeaderWeight.getValue() << '\n';
+ OS.indent(2) << "; Irreducible loop header weight: " << *IrrLoopHeaderWeight
+ << '\n';
}
}
diff --git a/llvm/lib/CodeGen/MachineFunctionSplitter.cpp b/llvm/lib/CodeGen/MachineFunctionSplitter.cpp
index 7d0f294..3f55a82 100644
--- a/llvm/lib/CodeGen/MachineFunctionSplitter.cpp
+++ b/llvm/lib/CodeGen/MachineFunctionSplitter.cpp
@@ -106,9 +106,8 @@ bool MachineFunctionSplitter::runOnMachineFunction(MachineFunction &MF) {
// We don't want to proceed further for cold functions
// or functions of unknown hotness. Lukewarm functions have no prefix.
Optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
- if (SectionPrefix.hasValue() &&
- (SectionPrefix.getValue().equals("unlikely") ||
- SectionPrefix.getValue().equals("unknown"))) {
+ if (SectionPrefix &&
+ (SectionPrefix->equals("unlikely") || SectionPrefix->equals("unknown"))) {
return false;
}
diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp
index 8e8cdb2..37b851f 100644
--- a/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -1447,8 +1447,8 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) {
Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg,
const TargetRegisterClass *RC) {
// If the init register is not undef, try and find an existing phi.
- if (InitReg.hasValue()) {
- auto I = Phis.find({LoopReg, InitReg.getValue()});
+ if (InitReg) {
+ auto I = Phis.find({LoopReg, *InitReg});
if (I != Phis.end())
return I->second;
} else {
@@ -1469,10 +1469,10 @@ Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg,
return R;
// Found a phi taking undef as input, so rewrite it to take InitReg.
MachineInstr *MI = MRI.getVRegDef(R);
- MI->getOperand(1).setReg(InitReg.getValue());
- Phis.insert({{LoopReg, InitReg.getValue()}, R});
+ MI->getOperand(1).setReg(*InitReg);
+ Phis.insert({{LoopReg, *InitReg}, R});
const TargetRegisterClass *ConstrainRegClass =
- MRI.constrainRegClass(R, MRI.getRegClass(InitReg.getValue()));
+ MRI.constrainRegClass(R, MRI.getRegClass(*InitReg));
assert(ConstrainRegClass && "Expected a valid constrained register class!");
(void)ConstrainRegClass;
UndefPhis.erase(I);
@@ -1483,18 +1483,18 @@ Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg,
if (!RC)
RC = MRI.getRegClass(LoopReg);
Register R = MRI.createVirtualRegister(RC);
- if (InitReg.hasValue()) {
+ if (InitReg) {
const TargetRegisterClass *ConstrainRegClass =
MRI.constrainRegClass(R, MRI.getRegClass(*InitReg));
assert(ConstrainRegClass && "Expected a valid constrained register class!");
(void)ConstrainRegClass;
}
BuildMI(*BB, BB->getFirstNonPHI(), DebugLoc(), TII->get(TargetOpcode::PHI), R)
- .addReg(InitReg.hasValue() ? *InitReg : undef(RC))
+ .addReg(InitReg ? *InitReg : undef(RC))
.addMBB(PreheaderBB)
.addReg(LoopReg)
.addMBB(BB);
- if (!InitReg.hasValue())
+ if (!InitReg)
UndefPhis[LoopReg] = R;
else
Phis[{LoopReg, *InitReg}] = R;
diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp
index c199b6a..4032227 100644
--- a/llvm/lib/CodeGen/SelectOptimize.cpp
+++ b/llvm/lib/CodeGen/SelectOptimize.cpp
@@ -870,8 +870,8 @@ bool SelectOptimize::computeLoopCosts(
ORE->emit(ORmissL);
return false;
}
- IPredCost += Scaled64::get(ILatency.getValue());
- INonPredCost += Scaled64::get(ILatency.getValue());
+ IPredCost += Scaled64::get(*ILatency);
+ INonPredCost += Scaled64::get(*ILatency);
// For a select that can be converted to branch,
// compute its cost as a branch (non-predicated cost).
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 38f440a..aa688d9d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -24514,9 +24514,8 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
auto &Size0 = MUC0.NumBytes;
auto &Size1 = MUC1.NumBytes;
if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
- Size0.hasValue() && Size1.hasValue() && *Size0 == *Size1 &&
- OrigAlignment0 > *Size0 && SrcValOffset0 % *Size0 == 0 &&
- SrcValOffset1 % *Size1 == 0) {
+ Size0 && Size1 && *Size0 == *Size1 && OrigAlignment0 > *Size0 &&
+ SrcValOffset0 % *Size0 == 0 && SrcValOffset1 % *Size1 == 0) {
int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0.value();
int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1.value();
@@ -24535,8 +24534,8 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
UseAA = false;
#endif
- if (UseAA && AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() &&
- Size0.hasValue() && Size1.hasValue()) {
+ if (UseAA && AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() && Size0 &&
+ Size1) {
// Use alias analysis information.
int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
int64_t Overlap0 = *Size0 + SrcValOffset0 - MinOffset;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 15455eb..103da92 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -703,8 +703,8 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
unsigned NumRegs;
if (IsABIRegCopy) {
NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
- *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
- NumIntermediates, RegisterVT);
+ *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
+ RegisterVT);
} else {
NumRegs =
TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
@@ -800,11 +800,11 @@ RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
for (EVT ValueVT : ValueVTs) {
unsigned NumRegs =
isABIMangled()
- ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
+ ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
: TLI.getNumRegisters(Context, ValueVT);
MVT RegisterVT =
isABIMangled()
- ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
+ ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
: TLI.getRegisterType(Context, ValueVT);
for (unsigned i = 0; i != NumRegs; ++i)
Regs.push_back(Reg + i);
@@ -831,10 +831,10 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
// Copy the legal parts from the registers.
EVT ValueVT = ValueVTs[Value];
unsigned NumRegs = RegCount[Value];
- MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
- *DAG.getContext(),
- CallConv.getValue(), RegVTs[Value])
- : RegVTs[Value];
+ MVT RegisterVT = isABIMangled()
+ ? TLI.getRegisterTypeForCallingConv(
+ *DAG.getContext(), *CallConv, RegVTs[Value])
+ : RegVTs[Value];
Parts.resize(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
@@ -914,10 +914,10 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
unsigned NumParts = RegCount[Value];
- MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
- *DAG.getContext(),
- CallConv.getValue(), RegVTs[Value])
- : RegVTs[Value];
+ MVT RegisterVT = isABIMangled()
+ ? TLI.getRegisterTypeForCallingConv(
+ *DAG.getContext(), *CallConv, RegVTs[Value])
+ : RegVTs[Value];
if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
ExtendKind = ISD::ZERO_EXTEND;
@@ -8867,10 +8867,10 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
: OpInfo;
const auto RegError =
getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
- if (RegError.hasValue()) {
+ if (RegError) {
const MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const char *RegName = TRI.getName(RegError.getValue());
+ const char *RegName = TRI.getName(*RegError);
emitInlineAsmError(Call, "register '" + Twine(RegName) +
"' allocated for constraint '" +
Twine(OpInfo.ConstraintCode) +
diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index b318c2a..68c53fd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -196,10 +196,10 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
for (auto &IncomingValue : Phi->incoming_values()) {
Optional<int> SpillSlot =
findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
- if (!SpillSlot.hasValue())
+ if (!SpillSlot)
return None;
- if (MergedResult.hasValue() && *MergedResult != *SpillSlot)
+ if (MergedResult && *MergedResult != *SpillSlot)
return None;
MergedResult = SpillSlot;
@@ -530,16 +530,14 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
GCStrategy &S = GFI->getStrategy();
for (const Value *V : SI.Bases) {
auto Opt = S.isGCManagedPointer(V->getType()->getScalarType());
- if (Opt.hasValue()) {
- assert(Opt.getValue() &&
- "non gc managed base pointer found in statepoint");
+ if (Opt) {
+ assert(*Opt && "non gc managed base pointer found in statepoint");
}
}
for (const Value *V : SI.Ptrs) {
auto Opt = S.isGCManagedPointer(V->getType()->getScalarType());
- if (Opt.hasValue()) {
- assert(Opt.getValue() &&
- "non gc managed derived pointer found in statepoint");
+ if (Opt) {
+ assert(*Opt && "non gc managed derived pointer found in statepoint");
}
}
assert(SI.Bases.size() == SI.Ptrs.size() && "Pointer without base!");
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index a6b471e..c286d21 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1972,9 +1972,9 @@ bool TargetLowering::SimplifyDemandedBits(
KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
Known = KnownBits::umin(Known0, Known1);
if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1))
- return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1);
+ return TLO.CombineTo(Op, *IsULE ? Op0 : Op1);
if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1))
- return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1);
+ return TLO.CombineTo(Op, *IsULT ? Op0 : Op1);
break;
}
case ISD::UMAX: {
@@ -1985,9 +1985,9 @@ bool TargetLowering::SimplifyDemandedBits(
KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
Known = KnownBits::umax(Known0, Known1);
if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1))
- return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1);
+ return TLO.CombineTo(Op, *IsUGE ? Op0 : Op1);
if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1))
- return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1);
+ return TLO.CombineTo(Op, *IsUGT ? Op0 : Op1);
break;
}
case ISD::BITREVERSE: {
diff --git a/llvm/lib/DebugInfo/CodeView/CodeViewRecordIO.cpp b/llvm/lib/DebugInfo/CodeView/CodeViewRecordIO.cpp
index c49c2e5..a66f9af 100644
--- a/llvm/lib/DebugInfo/CodeView/CodeViewRecordIO.cpp
+++ b/llvm/lib/DebugInfo/CodeView/CodeViewRecordIO.cpp
@@ -70,10 +70,10 @@ uint32_t CodeViewRecordIO::maxFieldLength() const {
Optional<uint32_t> Min = Limits.front().bytesRemaining(Offset);
for (auto X : makeArrayRef(Limits).drop_front()) {
Optional<uint32_t> ThisMin = X.bytesRemaining(Offset);
- if (ThisMin.hasValue())
- Min = (Min.hasValue()) ? std::min(*Min, *ThisMin) : *ThisMin;
+ if (ThisMin)
+ Min = Min ? std::min(*Min, *ThisMin) : *ThisMin;
}
- assert(Min.hasValue() && "Every field must have a maximum length!");
+ assert(Min && "Every field must have a maximum length!");
return *Min;
}
diff --git a/llvm/lib/DebugInfo/CodeView/TypeRecordMapping.cpp b/llvm/lib/DebugInfo/CodeView/TypeRecordMapping.cpp
index 9b35b23..27f63b9 100644
--- a/llvm/lib/DebugInfo/CodeView/TypeRecordMapping.cpp
+++ b/llvm/lib/DebugInfo/CodeView/TypeRecordMapping.cpp
@@ -228,8 +228,8 @@ static Error mapNameAndUniqueName(CodeViewRecordIO &IO, StringRef &Name,
}
Error TypeRecordMapping::visitTypeBegin(CVType &CVR) {
- assert(!TypeKind.hasValue() && "Already in a type mapping!");
- assert(!MemberKind.hasValue() && "Already in a member mapping!");
+ assert(!TypeKind && "Already in a type mapping!");
+ assert(!MemberKind && "Already in a member mapping!");
// FieldList and MethodList records can be any length because they can be
// split with continuation records. All other record types cannot be
@@ -260,8 +260,8 @@ Error TypeRecordMapping::visitTypeBegin(CVType &CVR, TypeIndex Index) {
}
Error TypeRecordMapping::visitTypeEnd(CVType &Record) {
- assert(TypeKind.hasValue() && "Not in a type mapping!");
- assert(!MemberKind.hasValue() && "Still in a member mapping!");
+ assert(TypeKind && "Not in a type mapping!");
+ assert(!MemberKind && "Still in a member mapping!");
error(IO.endRecord());
@@ -270,8 +270,8 @@ Error TypeRecordMapping::visitTypeEnd(CVType &Record) {
}
Error TypeRecordMapping::visitMemberBegin(CVMemberRecord &Record) {
- assert(TypeKind.hasValue() && "Not in a type mapping!");
- assert(!MemberKind.hasValue() && "Already in a member mapping!");
+ assert(TypeKind && "Not in a type mapping!");
+ assert(!MemberKind && "Already in a member mapping!");
// The largest possible subrecord is one in which there is a record prefix,
// followed by the subrecord, followed by a continuation, and that entire
@@ -296,8 +296,8 @@ Error TypeRecordMapping::visitMemberBegin(CVMemberRecord &Record) {
}
Error TypeRecordMapping::visitMemberEnd(CVMemberRecord &Record) {
- assert(TypeKind.hasValue() && "Not in a type mapping!");
- assert(MemberKind.hasValue() && "Not in a member mapping!");
+ assert(TypeKind && "Not in a type mapping!");
+ assert(MemberKind && "Not in a member mapping!");
if (IO.isReading()) {
if (auto EC = IO.skipPadding())
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp b/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
index c785026..dd49ef8 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -1205,13 +1205,12 @@ void DWARFContext::addLocalsForDie(DWARFCompileUnit *CU, DWARFDie Subprogram,
if (auto DeclFileAttr = Die.find(DW_AT_decl_file)) {
if (const auto *LT = CU->getContext().getLineTableForUnit(CU))
LT->getFileNameByIndex(
- DeclFileAttr->getAsUnsignedConstant().getValue(),
- CU->getCompilationDir(),
+ *DeclFileAttr->getAsUnsignedConstant(), CU->getCompilationDir(),
DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath,
Local.DeclFile);
}
if (auto DeclLineAttr = Die.find(DW_AT_decl_line))
- Local.DeclLine = DeclLineAttr->getAsUnsignedConstant().getValue();
+ Local.DeclLine = *DeclLineAttr->getAsUnsignedConstant();
Result.push_back(Local);
return;
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
index 2e0780e..25d0b9f 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
@@ -327,20 +327,20 @@ parseV5DirFileTables(const DWARFDataExtractor &DebugLineData,
FileEntry.Source = Value;
break;
case DW_LNCT_directory_index:
- FileEntry.DirIdx = Value.getAsUnsignedConstant().getValue();
+ FileEntry.DirIdx = *Value.getAsUnsignedConstant();
break;
case DW_LNCT_timestamp:
- FileEntry.ModTime = Value.getAsUnsignedConstant().getValue();
+ FileEntry.ModTime = *Value.getAsUnsignedConstant();
break;
case DW_LNCT_size:
- FileEntry.Length = Value.getAsUnsignedConstant().getValue();
+ FileEntry.Length = *Value.getAsUnsignedConstant();
break;
case DW_LNCT_MD5:
- if (!Value.getAsBlock() || Value.getAsBlock().getValue().size() != 16)
+ if (!Value.getAsBlock() || Value.getAsBlock()->size() != 16)
return createStringError(
errc::invalid_argument,
"failed to parse file entry because the MD5 hash is invalid");
- std::uninitialized_copy_n(Value.getAsBlock().getValue().begin(), 16,
+ std::uninitialized_copy_n(Value.getAsBlock()->begin(), 16,
FileEntry.Checksum.begin());
break;
default:
diff --git a/llvm/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp b/llvm/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
index 1cb332c..3a719bd 100644
--- a/llvm/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
+++ b/llvm/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
@@ -427,14 +427,14 @@ Error DbiStreamBuilder::commit(const msf::MSFLayout &Layout,
for (auto &Stream : DbgStreams) {
uint16_t StreamNumber = kInvalidStreamIndex;
- if (Stream.hasValue())
+ if (Stream)
StreamNumber = Stream->StreamNumber;
if (auto EC = Writer.writeInteger(StreamNumber))
return EC;
}
for (auto &Stream : DbgStreams) {
- if (!Stream.hasValue())
+ if (!Stream)
continue;
assert(Stream->StreamNumber != kInvalidStreamIndex);
diff --git a/llvm/lib/Frontend/OpenMP/OMPContext.cpp b/llvm/lib/Frontend/OpenMP/OMPContext.cpp
index eea08b2..017548f 100644
--- a/llvm/lib/Frontend/OpenMP/OMPContext.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPContext.cpp
@@ -212,9 +212,8 @@ static int isVariantApplicableInContextHelper(
return Ctx.matchesISATrait(RawString);
});
- Optional<bool> Result = HandleTrait(Property, IsActiveTrait);
- if (Result.hasValue())
- return Result.getValue();
+ if (Optional<bool> Result = HandleTrait(Property, IsActiveTrait))
+ return *Result;
}
if (!DeviceSetOnly) {
@@ -233,9 +232,8 @@ static int isVariantApplicableInContextHelper(
if (ConstructMatches)
ConstructMatches->push_back(ConstructIdx - 1);
- Optional<bool> Result = HandleTrait(Property, FoundInOrder);
- if (Result.hasValue())
- return Result.getValue();
+ if (Optional<bool> Result = HandleTrait(Property, FoundInOrder))
+ return *Result;
if (!FoundInOrder) {
LLVM_DEBUG(dbgs() << "[" << DEBUG_TYPE << "] Construct property "
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 22e8ca7..7e718ac 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -4423,10 +4423,9 @@ MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
assert(SI.getNumSuccessors() == Weights->size() &&
"num of prof branch_weights must accord with num of successors");
- bool AllZeroes =
- all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });
+ bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
- if (AllZeroes || Weights.getValue().size() < 2)
+ if (AllZeroes || Weights->size() < 2)
return nullptr;
return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
@@ -4460,8 +4459,8 @@ SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
// Copy the last case to the place of the removed one and shrink.
// This is tightly coupled with the way SwitchInst::removeCase() removes
// the cases in SwitchInst::removeCase(CaseIt).
- Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back();
- Weights.getValue().pop_back();
+ Weights.value()[I->getCaseIndex() + 1] = Weights->back();
+ Weights->pop_back();
}
return SI.removeCase(I);
}
@@ -4474,10 +4473,10 @@ void SwitchInstProfUpdateWrapper::addCase(
if (!Weights && W && *W) {
Changed = true;
Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
- Weights.getValue()[SI.getNumSuccessors() - 1] = *W;
+ Weights.value()[SI.getNumSuccessors() - 1] = *W;
} else if (Weights) {
Changed = true;
- Weights.getValue().push_back(W.value_or(0));
+ Weights->push_back(W.value_or(0));
}
if (Weights)
assert(SI.getNumSuccessors() == Weights->size() &&
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index ac03b14..953090f 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -223,13 +223,13 @@ ConstrainedFPIntrinsic::getExceptionBehavior() const {
bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
Optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
if (Except) {
- if (Except.getValue() != fp::ebIgnore)
+ if (*Except != fp::ebIgnore)
return false;
}
Optional<RoundingMode> Rounding = getRoundingMode();
if (Rounding) {
- if (Rounding.getValue() != RoundingMode::NearestTiesToEven)
+ if (*Rounding != RoundingMode::NearestTiesToEven)
return false;
}
@@ -363,14 +363,14 @@ VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
/// scatter.
MaybeAlign VPIntrinsic::getPointerAlignment() const {
Optional<unsigned> PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID());
- assert(PtrParamOpt.hasValue() && "no pointer argument!");
- return getParamAlign(PtrParamOpt.getValue());
+ assert(PtrParamOpt && "no pointer argument!");
+ return getParamAlign(*PtrParamOpt);
}
/// \return The pointer operand of this load,store, gather or scatter.
Value *VPIntrinsic::getMemoryPointerParam() const {
if (auto PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID()))
- return getArgOperand(PtrParamOpt.getValue());
+ return getArgOperand(*PtrParamOpt);
return nullptr;
}
@@ -388,10 +388,9 @@ Optional<unsigned> VPIntrinsic::getMemoryPointerParamPos(Intrinsic::ID VPID) {
/// \return The data (payload) operand of this store or scatter.
Value *VPIntrinsic::getMemoryDataParam() const {
- auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID());
- if (!DataParamOpt.hasValue())
- return nullptr;
- return getArgOperand(DataParamOpt.getValue());
+ if (auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID()))
+ return getArgOperand(*DataParamOpt);
+ return nullptr;
}
Optional<unsigned> VPIntrinsic::getMemoryDataParamPos(Intrinsic::ID VPID) {
diff --git a/llvm/lib/IR/LLVMContextImpl.cpp b/llvm/lib/IR/LLVMContextImpl.cpp
index dc44a34..7e005b3 100644
--- a/llvm/lib/IR/LLVMContextImpl.cpp
+++ b/llvm/lib/IR/LLVMContextImpl.cpp
@@ -250,17 +250,17 @@ void LLVMContextImpl::setOptPassGate(OptPassGate& OPG) {
}
bool LLVMContextImpl::hasOpaquePointersValue() {
- return OpaquePointers.hasValue();
+ return OpaquePointers.has_value();
}
bool LLVMContextImpl::getOpaquePointers() {
- if (LLVM_UNLIKELY(!(OpaquePointers.hasValue())))
+ if (LLVM_UNLIKELY(!OpaquePointers))
OpaquePointers = OpaquePointersCL;
return *OpaquePointers;
}
void LLVMContextImpl::setOpaquePointers(bool OP) {
- assert((!OpaquePointers.hasValue() || OpaquePointers.getValue() == OP) &&
+ assert((!OpaquePointers || *OpaquePointers == OP) &&
"Cannot change opaque pointers mode once set");
OpaquePointers = OP;
}
diff --git a/llvm/lib/IR/VectorBuilder.cpp b/llvm/lib/IR/VectorBuilder.cpp
index 82995ce..e7be7a9 100644
--- a/llvm/lib/IR/VectorBuilder.cpp
+++ b/llvm/lib/IR/VectorBuilder.cpp
@@ -90,9 +90,9 @@ Value *VectorBuilder::createVectorInstruction(unsigned Opcode, Type *ReturnTy,
}
}
- if (MaskPosOpt.hasValue())
+ if (MaskPosOpt)
IntrinParams[*MaskPosOpt] = &requestMask();
- if (VLenPosOpt.hasValue())
+ if (VLenPosOpt)
IntrinParams[*VLenPosOpt] = &requestEVL();
auto *VPDecl = VPIntrinsic::getDeclarationForParams(&getModule(), VPID,
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 8f857933..cbdf1d1 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5844,10 +5844,10 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
// match the specification in the intrinsic call table. Thus, no
// argument type check is needed here.
- Check(FPI.getExceptionBehavior().hasValue(),
+ Check(FPI.getExceptionBehavior().has_value(),
"invalid exception behavior argument", &FPI);
if (HasRoundingMD) {
- Check(FPI.getRoundingMode().hasValue(), "invalid rounding mode argument",
+ Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
&FPI);
}
}
diff --git a/llvm/lib/InterfaceStub/IFSHandler.cpp b/llvm/lib/InterfaceStub/IFSHandler.cpp
index 71189e7..c999387 100644
--- a/llvm/lib/InterfaceStub/IFSHandler.cpp
+++ b/llvm/lib/InterfaceStub/IFSHandler.cpp
@@ -202,8 +202,8 @@ Error ifs::writeIFSToOutputStream(raw_ostream &OS, const IFSStub &Stub) {
yaml::Output YamlOut(OS, nullptr, /*WrapColumn =*/0);
std::unique_ptr<IFSStubTriple> CopyStub(new IFSStubTriple(Stub));
if (Stub.Target.Arch) {
- CopyStub->Target.ArchString = std::string(
- ELF::convertEMachineToArchName(Stub.Target.Arch.getValue()));
+ CopyStub->Target.ArchString =
+ std::string(ELF::convertEMachineToArchName(*Stub.Target.Arch));
}
IFSTarget Target = Stub.Target;
@@ -222,36 +222,33 @@ Error ifs::overrideIFSTarget(IFSStub &Stub, Optional<IFSArch> OverrideArch,
Optional<std::string> OverrideTriple) {
std::error_code OverrideEC(1, std::generic_category());
if (OverrideArch) {
- if (Stub.Target.Arch &&
- Stub.Target.Arch.getValue() != OverrideArch.getValue()) {
+ if (Stub.Target.Arch && *Stub.Target.Arch != *OverrideArch) {
return make_error<StringError>(
"Supplied Arch conflicts with the text stub", OverrideEC);
}
- Stub.Target.Arch = OverrideArch.getValue();
+ Stub.Target.Arch = *OverrideArch;
}
if (OverrideEndianness) {
if (Stub.Target.Endianness &&
- Stub.Target.Endianness.getValue() != OverrideEndianness.getValue()) {
+ *Stub.Target.Endianness != *OverrideEndianness) {
return make_error<StringError>(
"Supplied Endianness conflicts with the text stub", OverrideEC);
}
- Stub.Target.Endianness = OverrideEndianness.getValue();
+ Stub.Target.Endianness = *OverrideEndianness;
}
if (OverrideBitWidth) {
- if (Stub.Target.BitWidth &&
- Stub.Target.BitWidth.getValue() != OverrideBitWidth.getValue()) {
+ if (Stub.Target.BitWidth && *Stub.Target.BitWidth != *OverrideBitWidth) {
return make_error<StringError>(
"Supplied BitWidth conflicts with the text stub", OverrideEC);
}
- Stub.Target.BitWidth = OverrideBitWidth.getValue();
+ Stub.Target.BitWidth = *OverrideBitWidth;
}
if (OverrideTriple) {
- if (Stub.Target.Triple &&
- Stub.Target.Triple.getValue() != OverrideTriple.getValue()) {
+ if (Stub.Target.Triple && *Stub.Target.Triple != *OverrideTriple) {
return make_error<StringError>(
"Supplied Triple conflicts with the text stub", OverrideEC);
}
- Stub.Target.Triple = OverrideTriple.getValue();
+ Stub.Target.Triple = *OverrideTriple;
}
return Error::success();
}
diff --git a/llvm/lib/MC/MCContext.cpp b/llvm/lib/MC/MCContext.cpp
index 1c0c711..e8f0987 100644
--- a/llvm/lib/MC/MCContext.cpp
+++ b/llvm/lib/MC/MCContext.cpp
@@ -767,14 +767,13 @@ MCSectionXCOFF *MCContext::getXCOFFSection(
Optional<XCOFF::CsectProperties> CsectProp, bool MultiSymbolsAllowed,
const char *BeginSymName,
Optional<XCOFF::DwarfSectionSubtypeFlags> DwarfSectionSubtypeFlags) {
- bool IsDwarfSec = DwarfSectionSubtypeFlags.hasValue();
- assert((IsDwarfSec != CsectProp.hasValue()) && "Invalid XCOFF section!");
+ bool IsDwarfSec = DwarfSectionSubtypeFlags.has_value();
+ assert((IsDwarfSec != CsectProp.has_value()) && "Invalid XCOFF section!");
// Do the lookup. If we have a hit, return it.
auto IterBool = XCOFFUniquingMap.insert(std::make_pair(
- IsDwarfSec
- ? XCOFFSectionKey(Section.str(), DwarfSectionSubtypeFlags.getValue())
- : XCOFFSectionKey(Section.str(), CsectProp->MappingClass),
+ IsDwarfSec ? XCOFFSectionKey(Section.str(), *DwarfSectionSubtypeFlags)
+ : XCOFFSectionKey(Section.str(), CsectProp->MappingClass),
nullptr));
auto &Entry = *IterBool.first;
if (!IterBool.second) {
@@ -804,10 +803,9 @@ MCSectionXCOFF *MCContext::getXCOFFSection(
// CachedName contains invalid character(s) such as '$' for an XCOFF symbol.
MCSectionXCOFF *Result = nullptr;
if (IsDwarfSec)
- Result = new (XCOFFAllocator.Allocate())
- MCSectionXCOFF(QualName->getUnqualifiedName(), Kind, QualName,
- DwarfSectionSubtypeFlags.getValue(), Begin, CachedName,
- MultiSymbolsAllowed);
+ Result = new (XCOFFAllocator.Allocate()) MCSectionXCOFF(
+ QualName->getUnqualifiedName(), Kind, QualName,
+ *DwarfSectionSubtypeFlags, Begin, CachedName, MultiSymbolsAllowed);
else
Result = new (XCOFFAllocator.Allocate())
MCSectionXCOFF(QualName->getUnqualifiedName(), CsectProp->MappingClass,
diff --git a/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp b/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp
index c6035dc..af52c34 100644
--- a/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp
+++ b/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp
@@ -83,12 +83,13 @@ bool XCOFFSymbolInfo::operator<(const XCOFFSymbolInfo &SymInfo) const {
return SymInfo.IsLabel;
// Symbols with a StorageMappingClass have higher priority than those without.
- if (StorageMappingClass.hasValue() != SymInfo.StorageMappingClass.hasValue())
- return SymInfo.StorageMappingClass.hasValue();
+ if (StorageMappingClass.has_value() !=
+ SymInfo.StorageMappingClass.has_value())
+ return SymInfo.StorageMappingClass.has_value();
- if (StorageMappingClass.hasValue()) {
- return getSMCPriority(StorageMappingClass.getValue()) <
- getSMCPriority(SymInfo.StorageMappingClass.getValue());
+ if (StorageMappingClass) {
+ return getSMCPriority(*StorageMappingClass) <
+ getSMCPriority(*SymInfo.StorageMappingClass);
}
return false;
diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp
index 7ed37b0..c4240ca 100644
--- a/llvm/lib/MC/MCParser/MasmParser.cpp
+++ b/llvm/lib/MC/MCParser/MasmParser.cpp
@@ -4239,10 +4239,9 @@ bool MasmParser::parseStructInitializer(const StructInfo &Structure,
auto &FieldInitializers = Initializer.FieldInitializers;
size_t FieldIndex = 0;
- if (EndToken.hasValue()) {
+ if (EndToken) {
// Initialize all fields with given initializers.
- while (getTok().isNot(EndToken.getValue()) &&
- FieldIndex < Structure.Fields.size()) {
+ while (getTok().isNot(*EndToken) && FieldIndex < Structure.Fields.size()) {
const FieldInfo &Field = Structure.Fields[FieldIndex++];
if (parseOptionalToken(AsmToken::Comma)) {
// Empty initializer; use the default and continue. (Also, allow line
@@ -4272,11 +4271,11 @@ bool MasmParser::parseStructInitializer(const StructInfo &Structure,
FieldInitializers.push_back(Field.Contents);
}
- if (EndToken.hasValue()) {
- if (EndToken.getValue() == AsmToken::Greater)
+ if (EndToken) {
+ if (*EndToken == AsmToken::Greater)
return parseAngleBracketClose();
- return parseToken(EndToken.getValue());
+ return parseToken(*EndToken);
}
return false;
diff --git a/llvm/lib/MC/MCSchedule.cpp b/llvm/lib/MC/MCSchedule.cpp
index db08e20..dd1ecce 100644
--- a/llvm/lib/MC/MCSchedule.cpp
+++ b/llvm/lib/MC/MCSchedule.cpp
@@ -96,10 +96,10 @@ MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
continue;
unsigned NumUnits = SM.getProcResource(I->ProcResourceIdx)->NumUnits;
double Temp = NumUnits * 1.0 / I->Cycles;
- Throughput = Throughput ? std::min(Throughput.getValue(), Temp) : Temp;
+ Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
}
- if (Throughput.hasValue())
- return 1.0 / Throughput.getValue();
+ if (Throughput)
+ return 1.0 / *Throughput;
// If no throughput value was calculated, assume that we can execute at the
// maximum issue width scaled by number of micro-ops for the schedule class.
@@ -140,10 +140,10 @@ MCSchedModel::getReciprocalThroughput(unsigned SchedClass,
if (!I->getCycles())
continue;
double Temp = countPopulation(I->getUnits()) * 1.0 / I->getCycles();
- Throughput = Throughput ? std::min(Throughput.getValue(), Temp) : Temp;
+ Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
}
- if (Throughput.hasValue())
- return 1.0 / Throughput.getValue();
+ if (Throughput)
+ return 1.0 / *Throughput;
// If there are no execution resources specified for this class, then assume
// that it can execute at the maximum default issue width.
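
Both MCSchedule.cpp overloads keep the same accumulation idiom: fold a running minimum into an Optional that starts empty, then invert it only if a value was ever set. A compilable sketch of the idiom under std::optional, with made-up inputs:

#include <algorithm>
#include <iostream>
#include <optional>
#include <vector>

// Fold a running minimum over per-resource throughputs; an empty input
// yields no value, so the caller can fall back to a default.
std::optional<double> minThroughput(const std::vector<double> &PerUnit) {
  std::optional<double> Throughput;
  for (double Temp : PerUnit)
    Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
  return Throughput;
}

int main() {
  if (auto T = minThroughput({4.0, 2.0, 3.0}))
    std::cout << 1.0 / *T << '\n';  // reciprocal throughput: 0.5
}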
diff --git a/llvm/lib/MC/MCSectionXCOFF.cpp b/llvm/lib/MC/MCSectionXCOFF.cpp
index ee8fa04..0f6d5d5 100644
--- a/llvm/lib/MC/MCSectionXCOFF.cpp
+++ b/llvm/lib/MC/MCSectionXCOFF.cpp
@@ -110,8 +110,8 @@ void MCSectionXCOFF::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
// XCOFF debug sections.
if (getKind().isMetadata() && isDwarfSect()) {
- OS << "\n\t.dwsect "
- << format("0x%" PRIx32, getDwarfSubtypeFlags().getValue()) << '\n';
+ OS << "\n\t.dwsect " << format("0x%" PRIx32, *getDwarfSubtypeFlags())
+ << '\n';
OS << MAI.getPrivateLabelPrefix() << getName() << ':' << '\n';
return;
}
diff --git a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp
index a7b7a47..01c0ae7 100644
--- a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp
+++ b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp
@@ -601,8 +601,8 @@ handleUserSection(const NewSectionInfo &NewSection,
static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig,
Object &Obj) {
if (Config.OutputArch) {
- Obj.Machine = Config.OutputArch.getValue().EMachine;
- Obj.OSABI = Config.OutputArch.getValue().OSABI;
+ Obj.Machine = Config.OutputArch->EMachine;
+ Obj.OSABI = Config.OutputArch->OSABI;
}
if (!Config.SplitDWO.empty() && Config.ExtractDWO) {
@@ -639,8 +639,8 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig,
if (Iter != Config.SectionsToRename.end()) {
const SectionRename &SR = Iter->second;
Sec.Name = std::string(SR.NewName);
- if (SR.NewFlags.hasValue())
- setSectionFlagsAndType(Sec, SR.NewFlags.getValue());
+ if (SR.NewFlags)
+ setSectionFlagsAndType(Sec, *SR.NewFlags);
RenamedSections.insert(&Sec);
} else if (RelocSec && !(Sec.Flags & SHF_ALLOC))
// Postpone processing relocation sections which are not specified in
@@ -808,9 +808,9 @@ Error objcopy::elf::executeObjcopyOnBinary(const CommonConfig &Config,
if (!Obj)
return Obj.takeError();
// Prefer OutputArch (-O<format>) if set, otherwise infer it from the input.
- const ElfType OutputElfType =
- Config.OutputArch ? getOutputElfType(Config.OutputArch.getValue())
- : getOutputElfType(In);
+ const ElfType OutputElfType = Config.OutputArch
+ ? getOutputElfType(*Config.OutputArch)
+ : getOutputElfType(In);
if (Error E = handleArgs(Config, ELFConfig, **Obj))
return createFileError(Config.InputFilename, std::move(E));
diff --git a/llvm/lib/Object/ELFObjectFile.cpp b/llvm/lib/Object/ELFObjectFile.cpp
index 9bac454..9ab0357 100644
--- a/llvm/lib/Object/ELFObjectFile.cpp
+++ b/llvm/lib/Object/ELFObjectFile.cpp
@@ -167,12 +167,12 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
bool isV7 = false;
Optional<unsigned> Attr =
Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch);
- if (Attr.hasValue())
- isV7 = Attr.getValue() == ARMBuildAttrs::v7;
+ if (Attr)
+ isV7 = *Attr == ARMBuildAttrs::v7;
Attr = Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch_profile);
- if (Attr.hasValue()) {
- switch (Attr.getValue()) {
+ if (Attr) {
+ switch (*Attr) {
case ARMBuildAttrs::ApplicationProfile:
Features.AddFeature("aclass");
break;
@@ -190,8 +190,8 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
}
Attr = Attributes.getAttributeValue(ARMBuildAttrs::THUMB_ISA_use);
- if (Attr.hasValue()) {
- switch (Attr.getValue()) {
+ if (Attr) {
+ switch (*Attr) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
@@ -205,8 +205,8 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
}
Attr = Attributes.getAttributeValue(ARMBuildAttrs::FP_arch);
- if (Attr.hasValue()) {
- switch (Attr.getValue()) {
+ if (Attr) {
+ switch (*Attr) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
@@ -229,8 +229,8 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
}
Attr = Attributes.getAttributeValue(ARMBuildAttrs::Advanced_SIMD_arch);
- if (Attr.hasValue()) {
- switch (Attr.getValue()) {
+ if (Attr) {
+ switch (*Attr) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
@@ -248,8 +248,8 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
}
Attr = Attributes.getAttributeValue(ARMBuildAttrs::MVE_arch);
- if (Attr.hasValue()) {
- switch (Attr.getValue()) {
+ if (Attr) {
+ switch (*Attr) {
default:
break;
case ARMBuildAttrs::Not_Allowed:
@@ -267,8 +267,8 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
}
Attr = Attributes.getAttributeValue(ARMBuildAttrs::DIV_use);
- if (Attr.hasValue()) {
- switch (Attr.getValue()) {
+ if (Attr) {
+ switch (*Attr) {
default:
break;
case ARMBuildAttrs::DisallowDIV:
@@ -521,8 +521,8 @@ void ELFObjectFileBase::setARMSubArch(Triple &TheTriple) const {
Optional<unsigned> Attr =
Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch);
- if (Attr.hasValue()) {
- switch (Attr.getValue()) {
+ if (Attr) {
+ switch (*Attr) {
case ARMBuildAttrs::v4:
Triple += "v4";
break;
@@ -553,8 +553,8 @@ void ELFObjectFileBase::setARMSubArch(Triple &TheTriple) const {
case ARMBuildAttrs::v7: {
Optional<unsigned> ArchProfileAttr =
Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch_profile);
- if (ArchProfileAttr.hasValue() &&
- ArchProfileAttr.getValue() == ARMBuildAttrs::MicroControllerProfile)
+ if (ArchProfileAttr &&
+ *ArchProfileAttr == ARMBuildAttrs::MicroControllerProfile)
Triple += "v7m";
else
Triple += "v7";
diff --git a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp
index 9834b03..d51120b 100644
--- a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp
+++ b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp
@@ -133,17 +133,17 @@ void DXContainerWriter::writeParts(raw_ostream &OS) {
// Compute the optional fields if needed...
if (P.Program->DXILOffset)
- Header.Bitcode.Offset = P.Program->DXILOffset.getValue();
+ Header.Bitcode.Offset = *P.Program->DXILOffset;
else
Header.Bitcode.Offset = sizeof(dxbc::BitcodeHeader);
if (P.Program->DXILSize)
- Header.Bitcode.Size = P.Program->DXILSize.getValue();
+ Header.Bitcode.Size = *P.Program->DXILSize;
else
Header.Bitcode.Size = P.Program->DXIL ? P.Program->DXIL->size() : 0;
if (P.Program->Size)
- Header.Size = P.Program->Size.getValue();
+ Header.Size = *P.Program->Size;
else
Header.Size = sizeof(dxbc::ProgramHeader) + Header.Bitcode.Size;
diff --git a/llvm/lib/Support/Process.cpp b/llvm/lib/Support/Process.cpp
index 547b3b7..d93b7de 100644
--- a/llvm/lib/Support/Process.cpp
+++ b/llvm/lib/Support/Process.cpp
@@ -42,12 +42,12 @@ Optional<std::string> Process::FindInEnvPath(StringRef EnvName,
assert(!path::is_absolute(FileName));
Optional<std::string> FoundPath;
Optional<std::string> OptPath = Process::GetEnv(EnvName);
- if (!OptPath.hasValue())
+ if (!OptPath)
return FoundPath;
const char EnvPathSeparatorStr[] = {Separator, '\0'};
SmallVector<StringRef, 8> Dirs;
- SplitString(OptPath.getValue(), Dirs, EnvPathSeparatorStr);
+ SplitString(*OptPath, Dirs, EnvPathSeparatorStr);
for (StringRef Dir : Dirs) {
if (Dir.empty())
diff --git a/llvm/lib/Support/VirtualFileSystem.cpp b/llvm/lib/Support/VirtualFileSystem.cpp
index 9c6a0c0..9f6ad7e 100644
--- a/llvm/lib/Support/VirtualFileSystem.cpp
+++ b/llvm/lib/Support/VirtualFileSystem.cpp
@@ -2667,15 +2667,15 @@ void JSONWriter::write(ArrayRef<YAMLVFSEntry> Entries,
OS << "{\n"
" 'version': 0,\n";
- if (IsCaseSensitive.hasValue())
- OS << " 'case-sensitive': '"
- << (IsCaseSensitive.getValue() ? "true" : "false") << "',\n";
- if (UseExternalNames.hasValue())
- OS << " 'use-external-names': '"
- << (UseExternalNames.getValue() ? "true" : "false") << "',\n";
+ if (IsCaseSensitive)
+ OS << " 'case-sensitive': '" << (*IsCaseSensitive ? "true" : "false")
+ << "',\n";
+ if (UseExternalNames)
+ OS << " 'use-external-names': '" << (*UseExternalNames ? "true" : "false")
+ << "',\n";
bool UseOverlayRelative = false;
- if (IsOverlayRelative.hasValue()) {
- UseOverlayRelative = IsOverlayRelative.getValue();
+ if (IsOverlayRelative) {
+ UseOverlayRelative = *IsOverlayRelative;
OS << " 'overlay-relative': '" << (UseOverlayRelative ? "true" : "false")
<< "',\n";
}
diff --git a/llvm/lib/Support/raw_ostream.cpp b/llvm/lib/Support/raw_ostream.cpp
index 7648dac..0c83259 100644
--- a/llvm/lib/Support/raw_ostream.cpp
+++ b/llvm/lib/Support/raw_ostream.cpp
@@ -428,8 +428,8 @@ raw_ostream &raw_ostream::operator<<(const FormattedBytes &FB) {
while (!Bytes.empty()) {
indent(FB.IndentLevel);
- if (FB.FirstByteOffset.hasValue()) {
- uint64_t Offset = FB.FirstByteOffset.getValue();
+ if (FB.FirstByteOffset) {
+ uint64_t Offset = *FB.FirstByteOffset;
llvm::write_hex(*this, Offset + LineIndex, HPS, OffsetWidth);
*this << ": ";
}
diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp
index e100251..64f0953 100644
--- a/llvm/lib/TableGen/Record.cpp
+++ b/llvm/lib/TableGen/Record.cpp
@@ -2598,10 +2598,10 @@ Init *Record::getValueInit(StringRef FieldName) const {
StringRef Record::getValueAsString(StringRef FieldName) const {
llvm::Optional<StringRef> S = getValueAsOptionalString(FieldName);
- if (!S.hasValue())
+ if (!S)
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName + "'!\n");
- return S.getValue();
+ return *S;
}
llvm::Optional<StringRef>
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 76a39d0..4e2dfc6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1171,8 +1171,8 @@ bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
Optional<ValueAndVReg> Arg =
getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
- if (Arg.hasValue()) {
- const int64_t Value = Arg.getValue().Value.getSExtValue();
+ if (Arg) {
+ const int64_t Value = Arg->Value.getSExtValue();
if (Value == 0) {
unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
@@ -4201,8 +4201,8 @@ AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
MIB.addReg(Info->getScratchRSrcReg());
},
[=](MachineInstrBuilder &MIB) { // vaddr
- if (FI.hasValue())
- MIB.addFrameIndex(FI.getValue());
+ if (FI)
+ MIB.addFrameIndex(*FI);
else
MIB.addReg(VAddr);
},
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
index 1b513c4..2572b04 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
@@ -131,8 +131,8 @@ public:
bool IsAOneAddressSpace = isOneAddressSpace(A);
bool IsBOneAddressSpace = isOneAddressSpace(B);
- return AIO.getValue() >= BIO.getValue() &&
- (IsAOneAddressSpace == IsBOneAddressSpace || !IsAOneAddressSpace);
+ return *AIO >= *BIO &&
+ (IsAOneAddressSpace == IsBOneAddressSpace || !IsAOneAddressSpace);
}
};
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 8a66213..19989a3 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -2329,13 +2329,13 @@ bool SIMemoryLegalizer::runOnMachineFunction(MachineFunction &MF) {
continue;
if (const auto &MOI = MOA.getLoadInfo(MI))
- Changed |= expandLoad(MOI.getValue(), MI);
+ Changed |= expandLoad(*MOI, MI);
else if (const auto &MOI = MOA.getStoreInfo(MI))
- Changed |= expandStore(MOI.getValue(), MI);
+ Changed |= expandStore(*MOI, MI);
else if (const auto &MOI = MOA.getAtomicFenceInfo(MI))
- Changed |= expandAtomicFence(MOI.getValue(), MI);
+ Changed |= expandAtomicFence(*MOI, MI);
else if (const auto &MOI = MOA.getAtomicCmpxchgOrRmwInfo(MI))
- Changed |= expandAtomicCmpxchgOrRmw(MOI.getValue(), MI);
+ Changed |= expandAtomicCmpxchgOrRmw(*MOI, MI);
}
}
diff --git a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
index 3078534..310d3ed 100644
--- a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
+++ b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
@@ -351,13 +351,13 @@ Optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) {
if (!Op0 || !Op1)
return Optional<int64_t>{};
if (I->getOpcode() == Instruction::Add)
- return Optional<int64_t>{Op0.getValue() + Op1.getValue()};
+ return Optional<int64_t>{*Op0 + *Op1};
if (I->getOpcode() == Instruction::Mul)
- return Optional<int64_t>{Op0.getValue() * Op1.getValue()};
+ return Optional<int64_t>{*Op0 * *Op1};
if (I->getOpcode() == Instruction::Shl)
- return Optional<int64_t>{Op0.getValue() << Op1.getValue()};
+ return Optional<int64_t>{*Op0 << *Op1};
if (I->getOpcode() == Instruction::Or)
- return Optional<int64_t>{Op0.getValue() | Op1.getValue()};
+ return Optional<int64_t>{*Op0 | *Op1};
}
return Optional<int64_t>{};
}
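
getIfConst combines two recursive results only when both are present; *Op0 + *Op1 is the direct replacement for the old paired getValue() calls. A self-contained sketch of that combination rule (std::optional stands in for llvm::Optional, and addIfConst is a made-up name):

#include <cstdint>
#include <iostream>
#include <optional>

// Propagate "unknown": if either operand has no constant value,
// neither does the sum.
std::optional<int64_t> addIfConst(std::optional<int64_t> Op0,
                                  std::optional<int64_t> Op1) {
  if (!Op0 || !Op1)
    return std::nullopt;
  return *Op0 + *Op1;
}

int main() {
  std::cout << addIfConst(2, 3).value_or(-1) << '\n';  // prints 5
}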
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 56d471a..9641791 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1021,11 +1021,9 @@ findCFILocation(MachineBasicBlock &B) {
}
void HexagonFrameLowering::insertCFIInstructions(MachineFunction &MF) const {
- for (auto &B : MF) {
- auto At = findCFILocation(B);
- if (At.hasValue())
- insertCFIInstructionsAt(B, At.getValue());
- }
+ for (auto &B : MF)
+ if (auto At = findCFILocation(B))
+ insertCFIInstructionsAt(B, *At);
}
void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
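
The Hexagon change goes a step beyond substituting operators: declaring the Optional inside the if-condition scopes it tightly and tests it implicitly, so the hasValue() call disappears entirely. A sketch of the idiom with a hypothetical lookup function standing in for findCFILocation:

#include <iostream>
#include <optional>

// Hypothetical lookup that may fail.
std::optional<int> findSlot(int Key) {
  if (Key > 0)
    return Key * 2;
  return std::nullopt;
}

int main() {
  // Init-statement form: Slot exists only where it is known to be set.
  if (auto Slot = findSlot(21))
    std::cout << *Slot << '\n';  // prints 42
}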
diff --git a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
index 660215c..5ef28cc 100644
--- a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
+++ b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
@@ -704,15 +704,15 @@ LanaiAsmParser::parseRegister(bool RestoreOnFailure) {
if (Lexer.getKind() == AsmToken::Identifier) {
RegNum = MatchRegisterName(Lexer.getTok().getIdentifier());
if (RegNum == 0) {
- if (PercentTok.hasValue() && RestoreOnFailure)
- Lexer.UnLex(PercentTok.getValue());
+ if (PercentTok && RestoreOnFailure)
+ Lexer.UnLex(*PercentTok);
return nullptr;
}
Parser.Lex(); // Eat identifier token
return LanaiOperand::createReg(RegNum, Start, End);
}
- if (PercentTok.hasValue() && RestoreOnFailure)
- Lexer.UnLex(PercentTok.getValue());
+ if (PercentTok && RestoreOnFailure)
+ Lexer.UnLex(*PercentTok);
return nullptr;
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index e52b49e..312ab0a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1860,8 +1860,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Chain = Ret.getValue(1);
InFlag = Ret.getValue(2);
- if (ProxyRegTruncates[i].hasValue()) {
- Ret = DAG.getNode(ISD::TRUNCATE, dl, ProxyRegTruncates[i].getValue(), Ret);
+ if (ProxyRegTruncates[i]) {
+ Ret = DAG.getNode(ISD::TRUNCATE, dl, *ProxyRegTruncates[i], Ret);
}
InVals.push_back(Ret);
diff --git a/llvm/lib/Target/PowerPC/PPCMacroFusion.cpp b/llvm/lib/Target/PowerPC/PPCMacroFusion.cpp
index caf1457..58b74c6 100644
--- a/llvm/lib/Target/PowerPC/PPCMacroFusion.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMacroFusion.cpp
@@ -267,13 +267,13 @@ static bool shouldScheduleAdjacent(const TargetInstrInfo &TII,
continue;
auto DepOpIdx = Feature.depOpIdx();
- if (DepOpIdx.hasValue()) {
+ if (DepOpIdx) {
// Checking if the result of the FirstMI is the desired operand of the
// SecondMI if the DepOpIdx is set. Otherwise, ignore it.
if (!matchingRegOps(*FirstMI, 0, SecondMI, *DepOpIdx))
return false;
}
-
+
// Checking more on the instruction operands.
if (checkOpConstraints(Feature.getKind(), *FirstMI, SecondMI))
return true;
diff --git a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index dd7a9fe..fe396cb 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -246,10 +246,10 @@ static PPCTargetMachine::PPCABI computeTargetABI(const Triple &TT,
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
Optional<Reloc::Model> RM) {
- assert((!TT.isOSAIX() || !RM.hasValue() || *RM == Reloc::PIC_) &&
+ assert((!TT.isOSAIX() || !RM || *RM == Reloc::PIC_) &&
"Invalid relocation model for AIX.");
- if (RM.hasValue())
+ if (RM)
return *RM;
// Big Endian PPC and AIX default to PIC.
diff --git a/llvm/lib/Target/VE/VVPISelLowering.cpp b/llvm/lib/Target/VE/VVPISelLowering.cpp
index cd67a0f..e4c35da 100644
--- a/llvm/lib/Target/VE/VVPISelLowering.cpp
+++ b/llvm/lib/Target/VE/VVPISelLowering.cpp
@@ -39,9 +39,9 @@ SDValue VETargetLowering::lowerToVVP(SDValue Op, SelectionDAG &DAG) const {
// Can we represent this as a VVP node?
const unsigned Opcode = Op->getOpcode();
auto VVPOpcodeOpt = getVVPOpcode(Opcode);
- if (!VVPOpcodeOpt.hasValue())
+ if (!VVPOpcodeOpt)
return SDValue();
- unsigned VVPOpcode = VVPOpcodeOpt.getValue();
+ unsigned VVPOpcode = *VVPOpcodeOpt;
const bool FromVP = ISD::isVPOpcode(Opcode);
// The representative and legalized vector type of this operation.
diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
index 61097e1..abc6115 100644
--- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
+++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
@@ -86,18 +86,15 @@ bool WebAssemblyAsmTypeCheck::popType(SMLoc ErrorLoc,
Optional<wasm::ValType> EVT) {
if (Stack.empty()) {
return typeError(ErrorLoc,
- EVT.hasValue()
- ? StringRef("empty stack while popping ") +
- WebAssembly::typeToString(EVT.getValue())
- : StringRef(
- "empty stack while popping value"));
+ EVT ? StringRef("empty stack while popping ") +
+ WebAssembly::typeToString(*EVT)
+ : StringRef("empty stack while popping value"));
}
auto PVT = Stack.pop_back_val();
- if (EVT.hasValue() && EVT.getValue() != PVT) {
- return typeError(
- ErrorLoc, StringRef("popped ") + WebAssembly::typeToString(PVT) +
- ", expected " +
- WebAssembly::typeToString(EVT.getValue()));
+ if (EVT && *EVT != PVT) {
+ return typeError(ErrorLoc,
+ StringRef("popped ") + WebAssembly::typeToString(PVT) +
+ ", expected " + WebAssembly::typeToString(*EVT));
}
return false;
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index f09758a..65aa709 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -552,8 +552,8 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallBase *CI) {
Optional<unsigned> NEltArg;
std::tie(SizeArg, NEltArg) = FnAttrs.getAllocSizeArgs();
SizeArg += 1;
- if (NEltArg.hasValue())
- NEltArg = NEltArg.getValue() + 1;
+ if (NEltArg)
+ NEltArg = *NEltArg + 1;
FnAttrs.addAllocSizeAttr(SizeArg, NEltArg);
}
// In case the callee has 'noreturn' attribute, We need to remove it, because
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index 769d79b..284b9b3 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -297,11 +297,11 @@ AA::combineOptionalValuesInAAValueLatice(const Optional<Value *> &A,
const Optional<Value *> &B, Type *Ty) {
if (A == B)
return A;
- if (!B.hasValue())
+ if (!B)
return A;
if (*B == nullptr)
return nullptr;
- if (!A.hasValue())
+ if (!A)
return Ty ? getWithType(**B, *Ty) : nullptr;
if (*A == nullptr)
return nullptr;
@@ -718,8 +718,8 @@ Argument *IRPosition::getAssociatedArgument() const {
}
// If we found a unique callback candidate argument, return it.
- if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
- return CBCandidateArg.getValue();
+ if (CBCandidateArg && *CBCandidateArg)
+ return *CBCandidateArg;
// If no callbacks were found, or none used the underlying call site operand
// exclusively, use the direct callee argument if available.
@@ -1048,11 +1048,11 @@ Attributor::getAssumedConstant(const IRPosition &IRP,
recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
return llvm::None;
}
- if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue())) {
+ if (isa_and_nonnull<UndefValue>(*SimplifiedV)) {
recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
return UndefValue::get(IRP.getAssociatedType());
}
- Constant *CI = dyn_cast_or_null<Constant>(SimplifiedV.getValue());
+ Constant *CI = dyn_cast_or_null<Constant>(*SimplifiedV);
if (CI)
CI = dyn_cast_or_null<Constant>(
AA::getWithType(*CI, *IRP.getAssociatedType()));
@@ -2695,10 +2695,10 @@ void InformationCache::initializeInformationCache(const Function &CF,
while (!Worklist.empty()) {
const Instruction *I = Worklist.pop_back_val();
Optional<short> &NumUses = AssumeUsesMap[I];
- if (!NumUses.hasValue())
+ if (!NumUses)
NumUses = I->getNumUses();
- NumUses = NumUses.getValue() - /* this assume */ 1;
- if (NumUses.getValue() != 0)
+ NumUses = *NumUses - /* this assume */ 1;
+ if (*NumUses != 0)
continue;
AssumeOnlyValues.insert(I);
for (const Value *Op : I->operands())
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index b36c712..5483e42 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -395,9 +395,9 @@ static bool genericValueTraversal(
if (UseValueSimplify && !isa<Constant>(V)) {
Optional<Value *> SimpleV =
A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
- if (!SimpleV.hasValue())
+ if (!SimpleV)
continue;
- Value *NewV = SimpleV.getValue();
+ Value *NewV = *SimpleV;
if (NewV && NewV != V) {
if ((VS & AA::Interprocedural) || !CtxI ||
AA::isValidInScope(*NewV, CtxI->getFunction())) {
@@ -1851,14 +1851,14 @@ ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
// Check if we have an assumed unique return value that we could manifest.
Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
- if (!UniqueRV.hasValue() || !UniqueRV.getValue())
+ if (!UniqueRV || !*UniqueRV)
return Changed;
// Bookkeeping.
STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
"Number of function with unique return");
// If the assumed unique return value is an argument, annotate it.
- if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
+ if (auto *UniqueRVArg = dyn_cast<Argument>(*UniqueRV)) {
if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
getAssociatedFunction()->getReturnType())) {
getIRPosition() = IRPosition::argument(*UniqueRVArg);
@@ -2626,9 +2626,9 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
// Either we stopped and the appropriate action was taken,
// or we got back a simplified value to continue.
Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
- if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
+ if (!SimplifiedPtrOp || !*SimplifiedPtrOp)
return true;
- const Value *PtrOpVal = SimplifiedPtrOp.getValue();
+ const Value *PtrOpVal = *SimplifiedPtrOp;
// A memory access through a pointer is considered UB
// only if the pointer has constant null value.
@@ -2717,15 +2717,14 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
if (UsedAssumedInformation)
continue;
- if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
+ if (SimplifiedVal && !*SimplifiedVal)
return true;
- if (!SimplifiedVal.hasValue() ||
- isa<UndefValue>(*SimplifiedVal.getValue())) {
+ if (!SimplifiedVal || isa<UndefValue>(**SimplifiedVal)) {
KnownUBInsts.insert(&I);
continue;
}
if (!ArgVal->getType()->isPointerTy() ||
- !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
+ !isa<ConstantPointerNull>(**SimplifiedVal))
continue;
auto &NonNullAA =
A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
@@ -4062,11 +4061,11 @@ identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
bool UsedAssumedInformation = false;
Optional<Constant *> C =
A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
- if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
+ if (!C || isa_and_nonnull<UndefValue>(*C)) {
// No value yet, assume all edges are dead.
- } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
+ } else if (isa_and_nonnull<ConstantInt>(*C)) {
for (auto &CaseIt : SI.cases()) {
- if (CaseIt.getCaseValue() == C.getValue()) {
+ if (CaseIt.getCaseValue() == *C) {
AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
return UsedAssumedInformation;
}
@@ -5481,11 +5480,11 @@ struct AAValueSimplifyImpl : AAValueSimplify {
bool UsedAssumedInformation = false;
Optional<Value *> SimpleV =
A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation);
- if (!SimpleV.hasValue())
+ if (!SimpleV)
return PoisonValue::get(&Ty);
Value *EffectiveV = &V;
- if (SimpleV.getValue())
- EffectiveV = SimpleV.getValue();
+ if (*SimpleV)
+ EffectiveV = *SimpleV;
if (auto *C = dyn_cast<Constant>(EffectiveV))
if (!C->canTrap())
return C;
@@ -5501,8 +5500,8 @@ struct AAValueSimplifyImpl : AAValueSimplify {
/// Return a value we can use as replacement for the associated one, or
/// nullptr if we don't have one that makes sense.
Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
- Value *NewV = SimplifiedAssociatedValue.hasValue()
- ? SimplifiedAssociatedValue.getValue()
+ Value *NewV = SimplifiedAssociatedValue
+ ? *SimplifiedAssociatedValue
: UndefValue::get(getAssociatedType());
if (NewV && NewV != &getAssociatedValue()) {
ValueToValueMapTy VMap;
@@ -5631,9 +5630,9 @@ struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
bool UsedAssumedInformation = false;
Optional<Constant *> SimpleArgOp =
A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
- if (!SimpleArgOp.hasValue())
+ if (!SimpleArgOp)
return true;
- if (!SimpleArgOp.getValue())
+ if (!*SimpleArgOp)
return false;
if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
return false;
@@ -5746,18 +5745,18 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
const auto &SimplifiedLHS =
A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedLHS.hasValue())
+ if (!SimplifiedLHS)
return true;
- if (!SimplifiedLHS.getValue())
+ if (!*SimplifiedLHS)
return false;
LHS = *SimplifiedLHS;
const auto &SimplifiedRHS =
A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedRHS.hasValue())
+ if (!SimplifiedRHS)
return true;
- if (!SimplifiedRHS.getValue())
+ if (!*SimplifiedRHS)
return false;
RHS = *SimplifiedRHS;
@@ -5826,11 +5825,11 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
*this, UsedAssumedInformation);
// If we are not sure about any operand we are not sure about the entire
// instruction, we'll wait.
- if (!SimplifiedOp.hasValue())
+ if (!SimplifiedOp)
return true;
- if (SimplifiedOp.getValue())
- NewOps[Idx] = SimplifiedOp.getValue();
+ if (*SimplifiedOp)
+ NewOps[Idx] = *SimplifiedOp;
else
NewOps[Idx] = Op;
@@ -6249,11 +6248,10 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
Alignment = std::max(Alignment, *RetAlign);
if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
- assert(AlignmentAPI.hasValue() &&
- AlignmentAPI.getValue().getZExtValue() > 0 &&
+ assert(AlignmentAPI && AlignmentAPI->getZExtValue() > 0 &&
"Expected an alignment during manifest!");
- Alignment = std::max(
- Alignment, assumeAligned(AlignmentAPI.getValue().getZExtValue()));
+ Alignment =
+ std::max(Alignment, assumeAligned(AlignmentAPI->getZExtValue()));
}
// TODO: Hoist the alloca towards the function entry.
@@ -6299,9 +6297,9 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
bool UsedAssumedInformation = false;
Optional<Constant *> SimpleV =
A.getAssumedConstant(V, AA, UsedAssumedInformation);
- if (!SimpleV.hasValue())
+ if (!SimpleV)
return APInt(64, 0);
- if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
+ if (auto *CI = dyn_cast_or_null<ConstantInt>(*SimpleV))
return CI->getValue();
return llvm::None;
}
@@ -6578,9 +6576,9 @@ ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
if (MaxHeapToStackSize != -1) {
Optional<APInt> Size = getSize(A, *this, AI);
- if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
+ if (!Size || Size->ugt(MaxHeapToStackSize)) {
LLVM_DEBUG({
- if (!Size.hasValue())
+ if (!Size)
dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
else
dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
@@ -6633,9 +6631,9 @@ struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
/// Return a privatizable type that encloses both T0 and T1.
/// TODO: This is merely a stub for now as we should manage a mapping as well.
Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
- if (!T0.hasValue())
+ if (!T0)
return T1;
- if (!T1.hasValue())
+ if (!T1)
return T0;
if (T0 == T1)
return T0;
@@ -6695,9 +6693,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
LLVM_DEBUG({
dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
- if (CSTy.hasValue() && CSTy.getValue())
- CSTy.getValue()->print(dbgs());
- else if (CSTy.hasValue())
+ if (CSTy && *CSTy)
+ CSTy.value()->print(dbgs());
+ else if (CSTy)
dbgs() << "<nullptr>";
else
dbgs() << "<none>";
@@ -6707,16 +6705,16 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
LLVM_DEBUG({
dbgs() << " : New Type: ";
- if (Ty.hasValue() && Ty.getValue())
- Ty.getValue()->print(dbgs());
- else if (Ty.hasValue())
+ if (Ty && *Ty)
+ (*Ty)->print(dbgs());
+ else if (Ty)
dbgs() << "<nullptr>";
else
dbgs() << "<none>";
dbgs() << "\n";
});
- return !Ty.hasValue() || Ty.getValue();
+ return !Ty || *Ty;
};
if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
@@ -6728,9 +6726,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
PrivatizableType = identifyPrivatizableType(A);
- if (!PrivatizableType.hasValue())
+ if (!PrivatizableType)
return ChangeStatus::UNCHANGED;
- if (!PrivatizableType.getValue())
+ if (!*PrivatizableType)
return indicatePessimisticFixpoint();
// The dependence is optional so we don't give up once we give up on the
@@ -6817,9 +6815,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
*this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
if (CBArgPrivAA.isValidState()) {
auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
- if (!CBArgPrivTy.hasValue())
+ if (!CBArgPrivTy)
continue;
- if (CBArgPrivTy.getValue() == PrivatizableType)
+ if (*CBArgPrivTy == PrivatizableType)
continue;
}
@@ -6864,9 +6862,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
DepClassTy::REQUIRED);
if (DCArgPrivAA.isValidState()) {
auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
- if (!DCArgPrivTy.hasValue())
+ if (!DCArgPrivTy)
return true;
- if (DCArgPrivTy.getValue() == PrivatizableType)
+ if (*DCArgPrivTy == PrivatizableType)
return true;
}
}
@@ -7006,9 +7004,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
/// See AbstractAttribute::manifest(...)
ChangeStatus manifest(Attributor &A) override {
- if (!PrivatizableType.hasValue())
+ if (!PrivatizableType)
return ChangeStatus::UNCHANGED;
- assert(PrivatizableType.getValue() && "Expected privatizable type!");
+ assert(*PrivatizableType && "Expected privatizable type!");
// Collect all tail calls in the function as we cannot allow new allocas to
// escape into tail recursion.
@@ -7041,9 +7039,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
Instruction *IP = &*EntryBB.getFirstInsertionPt();
const DataLayout &DL = IP->getModule()->getDataLayout();
unsigned AS = DL.getAllocaAddrSpace();
- Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
+ Instruction *AI = new AllocaInst(*PrivatizableType, AS,
Arg->getName() + ".priv", IP);
- createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
+ createInitialization(*PrivatizableType, *AI, ReplacementFn,
ArgIt->getArgNo(), *IP);
if (AI->getType() != Arg->getType())
@@ -7149,9 +7147,9 @@ struct AAPrivatizablePtrCallSiteArgument final
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
PrivatizableType = identifyPrivatizableType(A);
- if (!PrivatizableType.hasValue())
+ if (!PrivatizableType)
return ChangeStatus::UNCHANGED;
- if (!PrivatizableType.getValue())
+ if (!*PrivatizableType)
return indicatePessimisticFixpoint();
const IRPosition &IRP = getIRPosition();
@@ -8610,18 +8608,18 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
const auto &SimplifiedLHS =
A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedLHS.hasValue())
+ if (!SimplifiedLHS)
return true;
- if (!SimplifiedLHS.getValue())
+ if (!*SimplifiedLHS)
return false;
LHS = *SimplifiedLHS;
const auto &SimplifiedRHS =
A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedRHS.hasValue())
+ if (!SimplifiedRHS)
return true;
- if (!SimplifiedRHS.getValue())
+ if (!*SimplifiedRHS)
return false;
RHS = *SimplifiedRHS;
@@ -8663,9 +8661,9 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
const auto &SimplifiedOpV =
A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedOpV.hasValue())
+ if (!SimplifiedOpV)
return true;
- if (!SimplifiedOpV.getValue())
+ if (!*SimplifiedOpV)
return false;
OpV = *SimplifiedOpV;
@@ -8693,18 +8691,18 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
const auto &SimplifiedLHS =
A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedLHS.hasValue())
+ if (!SimplifiedLHS)
return true;
- if (!SimplifiedLHS.getValue())
+ if (!*SimplifiedLHS)
return false;
LHS = *SimplifiedLHS;
const auto &SimplifiedRHS =
A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedRHS.hasValue())
+ if (!SimplifiedRHS)
return true;
- if (!SimplifiedRHS.getValue())
+ if (!*SimplifiedRHS)
return false;
RHS = *SimplifiedRHS;
@@ -8767,9 +8765,9 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
const auto &SimplifiedOpV =
A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedOpV.hasValue())
+ if (!SimplifiedOpV)
return true;
- if (!SimplifiedOpV.getValue())
+ if (!*SimplifiedOpV)
return false;
Value *VPtr = *SimplifiedOpV;
@@ -9128,18 +9126,18 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
const auto &SimplifiedLHS =
A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedLHS.hasValue())
+ if (!SimplifiedLHS)
return ChangeStatus::UNCHANGED;
- if (!SimplifiedLHS.getValue())
+ if (!*SimplifiedLHS)
return indicatePessimisticFixpoint();
LHS = *SimplifiedLHS;
const auto &SimplifiedRHS =
A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedRHS.hasValue())
+ if (!SimplifiedRHS)
return ChangeStatus::UNCHANGED;
- if (!SimplifiedRHS.getValue())
+ if (!*SimplifiedRHS)
return indicatePessimisticFixpoint();
RHS = *SimplifiedRHS;
@@ -9211,18 +9209,18 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
const auto &SimplifiedLHS =
A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedLHS.hasValue())
+ if (!SimplifiedLHS)
return ChangeStatus::UNCHANGED;
- if (!SimplifiedLHS.getValue())
+ if (!*SimplifiedLHS)
return indicatePessimisticFixpoint();
LHS = *SimplifiedLHS;
const auto &SimplifiedRHS =
A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedRHS.hasValue())
+ if (!SimplifiedRHS)
return ChangeStatus::UNCHANGED;
- if (!SimplifiedRHS.getValue())
+ if (!*SimplifiedRHS)
return indicatePessimisticFixpoint();
RHS = *SimplifiedRHS;
@@ -9234,9 +9232,9 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
// Check if we only need one operand.
bool OnlyLeft = false, OnlyRight = false;
- if (C.hasValue() && *C && (*C)->isOneValue())
+ if (C && *C && (*C)->isOneValue())
OnlyLeft = true;
- else if (C.hasValue() && *C && (*C)->isZeroValue())
+ else if (C && *C && (*C)->isZeroValue())
OnlyRight = true;
const AAPotentialConstantValues *LHSAA = nullptr, *RHSAA = nullptr;
@@ -9286,9 +9284,9 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
const auto &SimplifiedSrc =
A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedSrc.hasValue())
+ if (!SimplifiedSrc)
return ChangeStatus::UNCHANGED;
- if (!SimplifiedSrc.getValue())
+ if (!*SimplifiedSrc)
return indicatePessimisticFixpoint();
Src = *SimplifiedSrc;
@@ -9319,18 +9317,18 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
const auto &SimplifiedLHS =
A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedLHS.hasValue())
+ if (!SimplifiedLHS)
return ChangeStatus::UNCHANGED;
- if (!SimplifiedLHS.getValue())
+ if (!*SimplifiedLHS)
return indicatePessimisticFixpoint();
LHS = *SimplifiedLHS;
const auto &SimplifiedRHS =
A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
*this, UsedAssumedInformation);
- if (!SimplifiedRHS.hasValue())
+ if (!SimplifiedRHS)
return ChangeStatus::UNCHANGED;
- if (!SimplifiedRHS.getValue())
+ if (!*SimplifiedRHS)
return indicatePessimisticFixpoint();
RHS = *SimplifiedRHS;
@@ -9387,9 +9385,9 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
UsedAssumedInformation);
- if (!SimplifiedIncomingValue.hasValue())
+ if (!SimplifiedIncomingValue)
continue;
- if (!SimplifiedIncomingValue.getValue())
+ if (!*SimplifiedIncomingValue)
return indicatePessimisticFixpoint();
IncomingValue = *SimplifiedIncomingValue;
@@ -9876,9 +9874,8 @@ private:
bool isReachable(Attributor &A, AAFunctionReachability &AA,
ArrayRef<const AACallEdges *> AAEdgesList,
const Function &Fn) {
- Optional<bool> Cached = isCachedReachable(Fn);
- if (Cached.hasValue())
- return Cached.getValue();
+ if (Optional<bool> Cached = isCachedReachable(Fn))
+ return *Cached;
// The query was not cached, thus it is new. We need to request an update
// explicitly to make sure the information is properly run to a
diff --git a/llvm/lib/Transforms/IPO/IROutliner.cpp b/llvm/lib/Transforms/IPO/IROutliner.cpp
index 89ae575..9ed21de 100644
--- a/llvm/lib/Transforms/IPO/IROutliner.cpp
+++ b/llvm/lib/Transforms/IPO/IROutliner.cpp
@@ -554,8 +554,8 @@ collectRegionsConstants(OutlinableRegion &Region,
// the number has been found not to be the same value in each instance.
for (Value *V : ID.OperVals) {
Optional<unsigned> GVNOpt = C.getGVN(V);
- assert(GVNOpt.hasValue() && "Expected a GVN for operand?");
- unsigned GVN = GVNOpt.getValue();
+ assert(GVNOpt && "Expected a GVN for operand?");
+ unsigned GVN = *GVNOpt;
// Check if this global value has been found to not be the same already.
if (NotSame.contains(GVN)) {
@@ -569,8 +569,8 @@ collectRegionsConstants(OutlinableRegion &Region,
// global value number. If the global value does not map to a Constant,
// it is considered to not be the same value.
Optional<bool> ConstantMatches = constantMatches(V, GVN, GVNToConstant);
- if (ConstantMatches.hasValue()) {
- if (ConstantMatches.getValue())
+ if (ConstantMatches) {
+ if (*ConstantMatches)
continue;
else
ConstantsTheSame = false;
@@ -650,8 +650,8 @@ Function *IROutliner::createFunction(Module &M, OutlinableGroup &Group,
"outlined_ir_func_" + std::to_string(FunctionNameSuffix), M);
// Transfer the swifterr attribute to the correct function parameter.
- if (Group.SwiftErrorArgument.hasValue())
- Group.OutlinedFunction->addParamAttr(Group.SwiftErrorArgument.getValue(),
+ if (Group.SwiftErrorArgument)
+ Group.OutlinedFunction->addParamAttr(*Group.SwiftErrorArgument,
Attribute::SwiftError);
Group.OutlinedFunction->addFnAttr(Attribute::OptimizeForSize);
@@ -808,9 +808,8 @@ static void mapInputsToGVNs(IRSimilarityCandidate &C,
assert(Input && "Have a nullptr as an input");
if (OutputMappings.find(Input) != OutputMappings.end())
Input = OutputMappings.find(Input)->second;
- assert(C.getGVN(Input).hasValue() &&
- "Could not find a numbering for the given input");
- EndInputNumbers.push_back(C.getGVN(Input).getValue());
+ assert(C.getGVN(Input) && "Could not find a numbering for the given input");
+ EndInputNumbers.push_back(*C.getGVN(Input));
}
}
@@ -948,12 +947,12 @@ findExtractedInputToOverallInputMapping(OutlinableRegion &Region,
// numbering overrides any discovered location for the extracted code.
for (unsigned InputVal : InputGVNs) {
Optional<unsigned> CanonicalNumberOpt = C.getCanonicalNum(InputVal);
- assert(CanonicalNumberOpt.hasValue() && "Canonical number not found?");
- unsigned CanonicalNumber = CanonicalNumberOpt.getValue();
+ assert(CanonicalNumberOpt && "Canonical number not found?");
+ unsigned CanonicalNumber = *CanonicalNumberOpt;
Optional<Value *> InputOpt = C.fromGVN(InputVal);
- assert(InputOpt.hasValue() && "Global value number not found?");
- Value *Input = InputOpt.getValue();
+ assert(InputOpt && "Global value number not found?");
+ Value *Input = *InputOpt;
DenseMap<unsigned, unsigned>::iterator AggArgIt =
Group.CanonicalNumberToAggArg.find(CanonicalNumber);
@@ -1235,16 +1234,15 @@ static Optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
DenseMap<hash_code, unsigned>::iterator GVNToPHIIt;
DenseMap<unsigned, PHINodeData>::iterator PHIToGVNIt;
Optional<unsigned> BBGVN = Cand.getGVN(PHIBB);
- assert(BBGVN.hasValue() && "Could not find GVN for the incoming block!");
+ assert(BBGVN && "Could not find GVN for the incoming block!");
- BBGVN = Cand.getCanonicalNum(BBGVN.getValue());
- assert(BBGVN.hasValue() &&
- "Could not find canonical number for the incoming block!");
+ BBGVN = Cand.getCanonicalNum(*BBGVN);
+ assert(BBGVN && "Could not find canonical number for the incoming block!");
// Create a pair of the exit block canonical value, and the aggregate
// argument location, connected to the canonical numbers stored in the
// PHINode.
PHINodeData TemporaryPair =
- std::make_pair(std::make_pair(BBGVN.getValue(), AggArgIdx), PHIGVNs);
+ std::make_pair(std::make_pair(*BBGVN, AggArgIdx), PHIGVNs);
hash_code PHINodeDataHash = encodePHINodeData(TemporaryPair);
// Look for and create a new entry in our connection between canonical
@@ -1517,9 +1515,8 @@ CallInst *replaceCalledFunction(Module &M, OutlinableRegion &Region) {
// Make sure that the argument in the new function has the SwiftError
// argument.
- if (Group.SwiftErrorArgument.hasValue())
- Call->addParamAttr(Group.SwiftErrorArgument.getValue(),
- Attribute::SwiftError);
+ if (Group.SwiftErrorArgument)
+ Call->addParamAttr(*Group.SwiftErrorArgument, Attribute::SwiftError);
return Call;
}
@@ -1650,9 +1647,9 @@ static void findCanonNumsForPHI(
// Find and add the canonical number for the incoming value.
Optional<unsigned> GVN = Region.Candidate->getGVN(IVal);
- assert(GVN.hasValue() && "No GVN for incoming value");
+ assert(GVN && "No GVN for incoming value");
Optional<unsigned> CanonNum = Region.Candidate->getCanonicalNum(*GVN);
- assert(CanonNum.hasValue() && "No Canonical Number for GVN");
+ assert(CanonNum && "No Canonical Number for GVN");
CanonNums.push_back(std::make_pair(*CanonNum, IBlock));
}
}
@@ -2081,12 +2078,11 @@ static void alignOutputBlockWithAggFunc(
// If there is, we remove the new output blocks. If it does not,
// we add it to our list of sets of output blocks.
- if (MatchingBB.hasValue()) {
+ if (MatchingBB) {
LLVM_DEBUG(dbgs() << "Set output block for region in function"
- << Region.ExtractedFunction << " to "
- << MatchingBB.getValue());
+ << Region.ExtractedFunction << " to " << *MatchingBB);
- Region.OutputBlockNum = MatchingBB.getValue();
+ Region.OutputBlockNum = *MatchingBB;
for (std::pair<Value *, BasicBlock *> &VtoBB : OutputBBs)
VtoBB.second->eraseFromParent();
return;
@@ -2504,9 +2500,9 @@ static Value *findOutputValueInRegion(OutlinableRegion &Region,
OutputCanon = *It->second.second.begin();
}
Optional<unsigned> OGVN = Region.Candidate->fromCanonicalNum(OutputCanon);
- assert(OGVN.hasValue() && "Could not find GVN for Canonical Number?");
+ assert(OGVN && "Could not find GVN for Canonical Number?");
Optional<Value *> OV = Region.Candidate->fromGVN(*OGVN);
- assert(OV.hasValue() && "Could not find value for GVN?");
+ assert(OV && "Could not find value for GVN?");
return *OV;
}
@@ -2681,15 +2677,14 @@ void IROutliner::updateOutputMapping(OutlinableRegion &Region,
if (!OutputIdx)
return;
- if (OutputMappings.find(Outputs[OutputIdx.getValue()]) ==
- OutputMappings.end()) {
+ if (OutputMappings.find(Outputs[*OutputIdx]) == OutputMappings.end()) {
LLVM_DEBUG(dbgs() << "Mapping extracted output " << *LI << " to "
- << *Outputs[OutputIdx.getValue()] << "\n");
- OutputMappings.insert(std::make_pair(LI, Outputs[OutputIdx.getValue()]));
+ << *Outputs[*OutputIdx] << "\n");
+ OutputMappings.insert(std::make_pair(LI, Outputs[OutputIdx.value()]));
} else {
- Value *Orig = OutputMappings.find(Outputs[OutputIdx.getValue()])->second;
+ Value *Orig = OutputMappings.find(Outputs[OutputIdx.value()])->second;
LLVM_DEBUG(dbgs() << "Mapping extracted output " << *Orig << " to "
- << *Outputs[OutputIdx.getValue()] << "\n");
+ << *Outputs[*OutputIdx] << "\n");
OutputMappings.insert(std::make_pair(LI, Orig));
}
}
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index f458afa..20555af 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -2514,13 +2514,13 @@ struct AAICVTrackerFunction : public AAICVTracker {
if (ValuesMap.count(CurrInst)) {
Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst);
// Unknown value, track new.
- if (!ReplVal.hasValue()) {
+ if (!ReplVal) {
ReplVal = NewReplVal;
break;
}
// If we found a new value, we can't know the icv value anymore.
- if (NewReplVal.hasValue())
+ if (NewReplVal)
if (ReplVal != NewReplVal)
return nullptr;
@@ -2528,11 +2528,11 @@ struct AAICVTrackerFunction : public AAICVTracker {
}
Optional<Value *> NewReplVal = getValueForCall(A, *CurrInst, ICV);
- if (!NewReplVal.hasValue())
+ if (!NewReplVal)
continue;
// Unknown value, track new.
- if (!ReplVal.hasValue()) {
+ if (!ReplVal) {
ReplVal = NewReplVal;
break;
}
@@ -4422,13 +4422,13 @@ struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
std::string Str("simplified value: ");
- if (!SimplifiedValue.hasValue())
+ if (!SimplifiedValue)
return Str + std::string("none");
- if (!SimplifiedValue.getValue())
+ if (!SimplifiedValue.value())
return Str + std::string("nullptr");
- if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.getValue()))
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.value()))
return Str + std::to_string(CI->getSExtValue());
return Str + std::string("unknown");
@@ -4452,8 +4452,8 @@ struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
IRPosition::callsite_returned(CB),
[&](const IRPosition &IRP, const AbstractAttribute *AA,
bool &UsedAssumedInformation) -> Optional<Value *> {
- assert((isValidState() || (SimplifiedValue.hasValue() &&
- SimplifiedValue.getValue() == nullptr)) &&
+ assert((isValidState() ||
+ (SimplifiedValue && *SimplifiedValue == nullptr)) &&
"Unexpected invalid state!");
if (!isAtFixpoint()) {
diff --git a/llvm/lib/Transforms/IPO/SampleContextTracker.cpp b/llvm/lib/Transforms/IPO/SampleContextTracker.cpp
index 9cb558b..caeddae 100644
--- a/llvm/lib/Transforms/IPO/SampleContextTracker.cpp
+++ b/llvm/lib/Transforms/IPO/SampleContextTracker.cpp
@@ -132,10 +132,10 @@ void ContextTrieNode::setFunctionSamples(FunctionSamples *FSamples) {
Optional<uint32_t> ContextTrieNode::getFunctionSize() const { return FuncSize; }
void ContextTrieNode::addFunctionSize(uint32_t FSize) {
- if (!FuncSize.hasValue())
+ if (!FuncSize)
FuncSize = 0;
- FuncSize = FuncSize.getValue() + FSize;
+ FuncSize = *FuncSize + FSize;
}
LineLocation ContextTrieNode::getCallSiteLoc() const { return CallSiteLoc; }
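
addFunctionSize shows the lazily-initialized accumulator variant: treat "no value yet" as zero on first use, then add in place. Sketched standalone with std::optional:

#include <cstdint>
#include <iostream>
#include <optional>

// First call establishes the value; later calls accumulate into it.
void addSize(std::optional<uint32_t> &FuncSize, uint32_t FSize) {
  if (!FuncSize)
    FuncSize = 0;
  FuncSize = *FuncSize + FSize;
}

int main() {
  std::optional<uint32_t> Size;
  addSize(Size, 16);
  addSize(Size, 8);
  std::cout << *Size << '\n';  // prints 24
}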
diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp
index 8e45347..e5d6e26 100644
--- a/llvm/lib/Transforms/IPO/SampleProfile.cpp
+++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp
@@ -1351,14 +1351,14 @@ SampleProfileLoader::getExternalInlineAdvisorCost(CallBase &CB) {
bool SampleProfileLoader::getExternalInlineAdvisorShouldInline(CallBase &CB) {
Optional<InlineCost> Cost = getExternalInlineAdvisorCost(CB);
- return Cost ? !!Cost.getValue() : false;
+ return Cost ? !!*Cost : false;
}
InlineCost
SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
if (Optional<InlineCost> ReplayCost =
getExternalInlineAdvisorCost(*Candidate.CallInstr))
- return ReplayCost.getValue();
+ return *ReplayCost;
// Adjust threshold based on call site hotness, only do this for callsite
// prioritized inliner because otherwise cost-benefit check is done earlier.
int SampleThreshold = SampleColdCallSiteThreshold;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 449c0f1..120f991 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2677,9 +2677,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
default: {
// Handle target specific intrinsics
- Optional<Instruction *> V = targetInstCombineIntrinsic(*II);
- if (V.hasValue())
- return V.getValue();
+ if (Optional<Instruction *> V = targetInstCombineIntrinsic(*II))
+ return *V;
break;
}
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index d801f4d..d8a2a96 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -924,8 +924,8 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// Handle target specific intrinsics
Optional<Value *> V = targetSimplifyDemandedUseBitsIntrinsic(
*II, DemandedMask, Known, KnownBitsComputed);
- if (V.hasValue())
- return V.getValue();
+ if (V)
+ return *V;
break;
}
}
@@ -1635,8 +1635,8 @@ Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
Optional<Value *> V = targetSimplifyDemandedVectorEltsIntrinsic(
*II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
simplifyAndSetOp);
- if (V.hasValue())
- return V.getValue();
+ if (V)
+ return *V;
break;
}
} // switch on IntrinsicID
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 5cc5804..284a4c2 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -483,10 +483,10 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
static bool isTsanAtomic(const Instruction *I) {
// TODO: Ask TTI whether synchronization scope is between threads.
auto SSID = getAtomicSyncScopeID(I);
- if (!SSID.hasValue())
+ if (!SSID)
return false;
if (isa<LoadInst>(I) || isa<StoreInst>(I))
- return SSID.getValue() != SyncScope::SingleThread;
+ return *SSID != SyncScope::SingleThread;
return true;
}
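
isTsanAtomic is the plain test-then-dereference case: contextual conversion to bool replaces hasValue(), and operator* replaces getValue() on the path where presence has been established. A runnable sketch with a stand-in scope enum (not LLVM's actual SyncScope type):

#include <iostream>
#include <optional>

enum class SyncScopeID { SingleThread, System };

// No sync scope means not atomic in the cross-thread sense.
bool isCrossThread(std::optional<SyncScopeID> SSID) {
  if (!SSID)
    return false;
  return *SSID != SyncScopeID::SingleThread;
}

int main() {
  std::cout << isCrossThread(SyncScopeID::System) << '\n';  // prints 1
}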
diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index 8a17615..b698503 100644
--- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -611,9 +611,9 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S,
ConstCand->ConstInt->getValue());
if (Diff) {
const InstructionCost ImmCosts =
- TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, Diff.getValue(), Ty);
+ TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, *Diff, Ty);
Cost -= ImmCosts;
- LLVM_DEBUG(dbgs() << "Offset " << Diff.getValue() << " "
+ LLVM_DEBUG(dbgs() << "Offset " << *Diff << " "
<< "has penalty: " << ImmCosts << "\n"
<< "Adjusted cost: " << Cost << "\n");
}
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 783301f..af2b48c 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -748,14 +748,14 @@ void GVNPass::printPipeline(
OS << "<";
if (Options.AllowPRE != None)
- OS << (Options.AllowPRE.getValue() ? "" : "no-") << "pre;";
+ OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
if (Options.AllowLoadPRE != None)
- OS << (Options.AllowLoadPRE.getValue() ? "" : "no-") << "load-pre;";
+ OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
if (Options.AllowLoadPRESplitBackedge != None)
- OS << (Options.AllowLoadPRESplitBackedge.getValue() ? "" : "no-")
+ OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
<< "split-backedge-load-pre;";
if (Options.AllowMemDep != None)
- OS << (Options.AllowMemDep.getValue() ? "" : "no-") << "memdep";
+ OS << (*Options.AllowMemDep ? "" : "no-") << "memdep";
OS << ">";
}
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 0e27c85..94ed288 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -1427,9 +1427,9 @@ bool LoopConstrainer::run() {
// constructor.
ClonedLoop PreLoop, PostLoop;
bool NeedsPreLoop =
- Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue();
+ Increasing ? SR.LowLimit.has_value() : SR.HighLimit.has_value();
bool NeedsPostLoop =
- Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue();
+ Increasing ? SR.HighLimit.has_value() : SR.LowLimit.has_value();
Value *ExitPreLoopAt = nullptr;
Value *ExitMainLoopAt = nullptr;
@@ -1708,9 +1708,9 @@ IntersectSignedRange(ScalarEvolution &SE,
const InductiveRangeCheck::Range &R2) {
if (R2.isEmpty(SE, /* IsSigned */ true))
return None;
- if (!R1.hasValue())
+ if (!R1)
return R2;
- auto &R1Value = R1.getValue();
+ auto &R1Value = *R1;
// We never return empty ranges from this function, and R1 is supposed to be
// a result of intersection. Thus, R1 is never empty.
assert(!R1Value.isEmpty(SE, /* IsSigned */ true) &&
@@ -1737,9 +1737,9 @@ IntersectUnsignedRange(ScalarEvolution &SE,
const InductiveRangeCheck::Range &R2) {
if (R2.isEmpty(SE, /* IsSigned */ false))
return None;
- if (!R1.hasValue())
+ if (!R1)
return R2;
- auto &R1Value = R1.getValue();
+ auto &R1Value = *R1;
// We never return empty ranges from this function, and R1 is supposed to be
// a result of intersection. Thus, R1 is never empty.
assert(!R1Value.isEmpty(SE, /* IsSigned */ false) &&
@@ -1948,24 +1948,21 @@ bool InductiveRangeCheckElimination::run(
for (InductiveRangeCheck &IRC : RangeChecks) {
auto Result = IRC.computeSafeIterationSpace(SE, IndVar,
LS.IsSignedPredicate);
- if (Result.hasValue()) {
- auto MaybeSafeIterRange =
- IntersectRange(SE, SafeIterRange, Result.getValue());
- if (MaybeSafeIterRange.hasValue()) {
- assert(
- !MaybeSafeIterRange.getValue().isEmpty(SE, LS.IsSignedPredicate) &&
- "We should never return empty ranges!");
+ if (Result) {
+ auto MaybeSafeIterRange = IntersectRange(SE, SafeIterRange, *Result);
+ if (MaybeSafeIterRange) {
+ assert(!MaybeSafeIterRange->isEmpty(SE, LS.IsSignedPredicate) &&
+ "We should never return empty ranges!");
RangeChecksToEliminate.push_back(IRC);
- SafeIterRange = MaybeSafeIterRange.getValue();
+ SafeIterRange = *MaybeSafeIterRange;
}
}
}
- if (!SafeIterRange.hasValue())
+ if (!SafeIterRange)
return false;
- LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT,
- SafeIterRange.getValue());
+ LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT, *SafeIterRange);
bool Changed = LC.run();
if (Changed) {
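The IRCE hunks show two further facets. First, operator-> reaches members of the payload directly, as in MaybeSafeIterRange->isEmpty(...). Second, the NeedsPreLoop/NeedsPostLoop lines switch to has_value() rather than a bare name: the ternary arms are value positions, not condition contexts, so the explicit-bool contextual conversion does not apply there and the std-style spelling is needed. A sketch of both, with a hypothetical Range type:

    #include <cassert>
    #include <optional>

    struct Range {
      int Lo, Hi;
      bool isEmpty() const { return Lo >= Hi; }
    };

    // operator-> reaches members of the payload without a getValue() step.
    void narrow(std::optional<Range> &Safe, std::optional<Range> Next) {
      if (Next) {
        assert(!Next->isEmpty() && "We should never return empty ranges!");
        Safe = *Next;
      }
    }

    // Value positions (the ternary arms) are not condition contexts, so the
    // explicit has_value() spelling is required here.
    bool needsPreLoop(bool Increasing, std::optional<int> Low,
                      std::optional<int> High) {
      return Increasing ? Low.has_value() : High.has_value();
    }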
diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index f606e9b..f70ab0d 100644
--- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -600,9 +600,9 @@ private:
{LLVMLoopDistributeFollowupAll,
Part->hasDepCycle() ? LLVMLoopDistributeFollowupSequential
: LLVMLoopDistributeFollowupCoincident});
- if (PartitionID.hasValue()) {
+ if (PartitionID) {
Loop *NewLoop = Part->getDistributedLoop();
- NewLoop->setLoopID(PartitionID.getValue());
+ NewLoop->setLoopID(*PartitionID);
}
}
};
@@ -821,12 +821,10 @@ public:
// The unversioned loop will not be changed, so we inherit all attributes
// from the original loop, but remove the loop distribution metadata to
  // avoid distributing it again.
- MDNode *UnversionedLoopID =
- makeFollowupLoopID(OrigLoopID,
- {LLVMLoopDistributeFollowupAll,
- LLVMLoopDistributeFollowupFallback},
- "llvm.loop.distribute.", true)
- .getValue();
+ MDNode *UnversionedLoopID = *makeFollowupLoopID(
+ OrigLoopID,
+ {LLVMLoopDistributeFollowupAll, LLVMLoopDistributeFollowupFallback},
+ "llvm.loop.distribute.", true);
LVer.getNonVersionedLoop()->setLoopID(UnversionedLoopID);
}
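The fallback-ID hunk dereferences the returned Optional in place: a leading * on the call expression replaces the trailing .getValue(). That is only safe when the callee is known to produce a value; here the call site's final `true` argument appears to guarantee one, and the sketch below takes that guarantee as an assumption, with a hypothetical makeID() helper:

    #include <optional>
    #include <string>

    // Hypothetical helper modeled on makeFollowupLoopID: with AlwaysNew set
    // it is assumed to always produce an ID (this sketch's assumption).
    std::optional<std::string> makeID(bool AlwaysNew) {
      if (AlwaysNew)
        return std::string("followup.id");
      return std::nullopt;
    }

    std::string unversionedID() {
      // Dereferencing the temporary in place replaces a trailing .getValue();
      // safe only because the callee is known to return a value here.
      return *makeID(/*AlwaysNew=*/true);
    }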
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 6d4c675..4bcf102 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1481,9 +1481,9 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
return Changed;
// We cannot allow unaligned ops for unordered load/store, so reject
// anything where the alignment isn't at least the element size.
- assert((StoreAlign.hasValue() && LoadAlign.hasValue()) &&
+ assert((StoreAlign && LoadAlign) &&
"Expect unordered load/store to have align.");
- if (StoreAlign.getValue() < StoreSize || LoadAlign.getValue() < StoreSize)
+ if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
return Changed;
// If the element.atomic memcpy is not lowered into explicit
@@ -1497,9 +1497,8 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
// Note that unordered atomic loads/stores are *required* by the spec to
// have an alignment but non-atomic loads/stores may not.
NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
- StoreBasePtr, StoreAlign.getValue(), LoadBasePtr, LoadAlign.getValue(),
- NumBytes, StoreSize, AATags.TBAA, AATags.TBAAStruct, AATags.Scope,
- AATags.NoAlias);
+ StoreBasePtr, *StoreAlign, LoadBasePtr, *LoadAlign, NumBytes, StoreSize,
+ AATags.TBAA, AATags.TBAAStruct, AATags.Scope, AATags.NoAlias);
}
NewCall->setDebugLoc(TheStore->getDebugLoc());
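When presence is an invariant rather than a branch, the rewrite keeps the assert and then dereferences unconditionally, as with StoreAlign/LoadAlign above. Keep in mind that in NDEBUG builds the assert vanishes and the dereferences are unchecked, so the invariant really must hold. A sketch with plain integer alignments:

    #include <cassert>
    #include <cstdint>
    #include <optional>

    // Presence is an invariant here, not a branch: assert, then dereference.
    bool alignedEnough(std::optional<uint64_t> StoreAlign,
                       std::optional<uint64_t> LoadAlign, uint64_t StoreSize) {
      assert((StoreAlign && LoadAlign) &&
             "Expect unordered load/store to have align.");
      return *StoreAlign >= StoreSize && *LoadAlign >= StoreSize;
    }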
diff --git a/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 533c853..d9c33b5f 100644
--- a/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -60,8 +60,8 @@ PreservedAnalyses LoopRotatePass::run(Loop &L, LoopAnalysisManager &AM,
MSSAU = MemorySSAUpdater(AR.MSSA);
bool Changed =
LoopRotation(&L, &AR.LI, &AR.TTI, &AR.AC, &AR.DT, &AR.SE,
- MSSAU.hasValue() ? MSSAU.getPointer() : nullptr, SQ, false,
- Threshold, false, PrepareForLTO || PrepareForLTOOption);
+ MSSAU ? MSSAU.getPointer() : nullptr, SQ, false, Threshold,
+ false, PrepareForLTO || PrepareForLTOOption);
if (!Changed)
return PreservedAnalyses::all();
@@ -131,9 +131,8 @@ public:
: MaxHeaderSize;
return LoopRotation(L, LI, TTI, AC, &DT, &SE,
- MSSAU.hasValue() ? MSSAU.getPointer() : nullptr, SQ,
- false, Threshold, false,
- PrepareForLTO || PrepareForLTOOption);
+ MSSAU ? MSSAU.getPointer() : nullptr, SQ, false,
+ Threshold, false, PrepareForLTO || PrepareForLTOOption);
}
};
} // end namespace
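Note that the rotation hunks (and the SimplifyCFG one below) keep getPointer(): the commit only retires hasValue()/getValue(), and `Opt ? Opt.getPointer() : nullptr` remains the usual way to pass an optional object to an API that takes a nullable pointer. A sketch with a hypothetical Updater type; std::optional lacks getPointer(), so &*MSSAU stands in for it:

    #include <optional>

    struct Updater {};

    void runPass(Updater *U) { (void)U; } // accepts nullptr: "no updater"

    void dispatch(bool HaveMSSA) {
      std::optional<Updater> MSSAU;
      if (HaveMSSA)
        MSSAU = Updater();
      // Contextual bool replaces hasValue(); &*MSSAU plays the role of the
      // llvm::Optional-specific getPointer().
      runPass(MSSAU ? &*MSSAU : nullptr);
    }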
diff --git a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
index c2c29d9..51007b9 100644
--- a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
@@ -735,9 +735,9 @@ public:
if (MSSAA && VerifyMemorySSA)
MSSAU->getMemorySSA()->verifyMemorySSA();
bool DeleteCurrentLoop = false;
- bool Changed = simplifyLoopCFG(
- *L, DT, LI, SE, MSSAU.hasValue() ? MSSAU.getPointer() : nullptr,
- DeleteCurrentLoop);
+ bool Changed =
+ simplifyLoopCFG(*L, DT, LI, SE, MSSAU ? MSSAU.getPointer() : nullptr,
+ DeleteCurrentLoop);
if (DeleteCurrentLoop)
LPM.markLoopAsDeleted(*L);
return Changed;
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 9959e40..49c3083 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -6406,9 +6406,8 @@ static bool SalvageDVI(llvm::Loop *L, ScalarEvolution &SE,
// less DWARF ops than an iteration count-based expression.
if (Optional<APInt> Offset =
SE.computeConstantDifference(DVIRec.SCEVs[i], SCEVInductionVar)) {
- if (Offset.getValue().getMinSignedBits() <= 64)
- SalvageExpr->createOffsetExpr(Offset.getValue().getSExtValue(),
- LSRInductionVar);
+ if (Offset->getMinSignedBits() <= 64)
+ SalvageExpr->createOffsetExpr(Offset->getSExtValue(), LSRInductionVar);
} else if (!SalvageExpr->createIterCountExpr(DVIRec.SCEVs[i], IterCountExpr,
SE))
return false;
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp
index a33e5cc..8ea8f28 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp
@@ -372,8 +372,8 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
Optional<MDNode *> NewInnerEpilogueLoopID = makeFollowupLoopID(
OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll,
LLVMLoopUnrollAndJamFollowupRemainderInner});
- if (NewInnerEpilogueLoopID.hasValue())
- SubLoop->setLoopID(NewInnerEpilogueLoopID.getValue());
+ if (NewInnerEpilogueLoopID)
+ SubLoop->setLoopID(*NewInnerEpilogueLoopID);
// Find trip count and trip multiple
BasicBlock *Latch = L->getLoopLatch();
@@ -402,15 +402,15 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
Optional<MDNode *> NewOuterEpilogueLoopID = makeFollowupLoopID(
OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll,
LLVMLoopUnrollAndJamFollowupRemainderOuter});
- if (NewOuterEpilogueLoopID.hasValue())
- EpilogueOuterLoop->setLoopID(NewOuterEpilogueLoopID.getValue());
+ if (NewOuterEpilogueLoopID)
+ EpilogueOuterLoop->setLoopID(*NewOuterEpilogueLoopID);
}
Optional<MDNode *> NewInnerLoopID =
makeFollowupLoopID(OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll,
LLVMLoopUnrollAndJamFollowupInner});
- if (NewInnerLoopID.hasValue())
- SubLoop->setLoopID(NewInnerLoopID.getValue());
+ if (NewInnerLoopID)
+ SubLoop->setLoopID(*NewInnerLoopID);
else
SubLoop->setLoopID(OrigSubLoopID);
@@ -418,8 +418,8 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
Optional<MDNode *> NewOuterLoopID = makeFollowupLoopID(
OrigOuterLoopID,
{LLVMLoopUnrollAndJamFollowupAll, LLVMLoopUnrollAndJamFollowupOuter});
- if (NewOuterLoopID.hasValue()) {
- L->setLoopID(NewOuterLoopID.getValue());
+ if (NewOuterLoopID) {
+ L->setLoopID(*NewOuterLoopID);
// Do not setLoopAlreadyUnrolled if a followup was given.
return UnrollResult;
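The followup-metadata shape recurs across the unroll, unroll-and-jam, and vectorize passes: the ID builder returns None when no followup attribute exists, and only an engaged result overwrites the loop ID. A condensed sketch with a hypothetical string-valued ID:

    #include <optional>
    #include <string>

    // Hypothetical followup lookup: disengaged means "no followup attribute".
    std::optional<std::string> makeFollowup(bool HasAttr) {
      if (HasAttr)
        return std::string("llvm.loop.followup");
      return std::nullopt;
    }

    // Only an engaged result replaces the loop ID; otherwise keep the old one.
    std::string chooseLoopID(const std::string &Orig, bool HasAttr) {
      std::optional<std::string> NewID = makeFollowup(HasAttr);
      return NewID ? *NewID : Orig;
    }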
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 1969513..08f21be 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -253,19 +253,19 @@ TargetTransformInfo::UnrollingPreferences llvm::gatherUnrollingPreferences(
UP.MaxIterationsCountToAnalyze = UnrollMaxIterationsCountToAnalyze;
// Apply user values provided by argument
- if (UserThreshold.hasValue()) {
+ if (UserThreshold) {
UP.Threshold = *UserThreshold;
UP.PartialThreshold = *UserThreshold;
}
- if (UserCount.hasValue())
+ if (UserCount)
UP.Count = *UserCount;
- if (UserAllowPartial.hasValue())
+ if (UserAllowPartial)
UP.Partial = *UserAllowPartial;
- if (UserRuntime.hasValue())
+ if (UserRuntime)
UP.Runtime = *UserRuntime;
- if (UserUpperBound.hasValue())
+ if (UserUpperBound)
UP.UpperBound = *UserUpperBound;
- if (UserFullUnrollMaxCount.hasValue())
+ if (UserFullUnrollMaxCount)
UP.FullUnrollMaxCount = *UserFullUnrollMaxCount;
return UP;
@@ -1323,16 +1323,16 @@ static LoopUnrollResult tryToUnrollLoop(
Optional<MDNode *> RemainderLoopID =
makeFollowupLoopID(OrigLoopID, {LLVMLoopUnrollFollowupAll,
LLVMLoopUnrollFollowupRemainder});
- if (RemainderLoopID.hasValue())
- RemainderLoop->setLoopID(RemainderLoopID.getValue());
+ if (RemainderLoopID)
+ RemainderLoop->setLoopID(*RemainderLoopID);
}
if (UnrollResult != LoopUnrollResult::FullyUnrolled) {
Optional<MDNode *> NewLoopID =
makeFollowupLoopID(OrigLoopID, {LLVMLoopUnrollFollowupAll,
LLVMLoopUnrollFollowupUnrolled});
- if (NewLoopID.hasValue()) {
- L->setLoopID(NewLoopID.getValue());
+ if (NewLoopID) {
+ L->setLoopID(*NewLoopID);
// Do not setLoopAlreadyUnrolled if loop attributes have been specified
// explicitly.
@@ -1645,15 +1645,15 @@ void LoopUnrollPass::printPipeline(
OS, MapClassName2PassName);
OS << "<";
if (UnrollOpts.AllowPartial != None)
- OS << (UnrollOpts.AllowPartial.getValue() ? "" : "no-") << "partial;";
+ OS << (*UnrollOpts.AllowPartial ? "" : "no-") << "partial;";
if (UnrollOpts.AllowPeeling != None)
- OS << (UnrollOpts.AllowPeeling.getValue() ? "" : "no-") << "peeling;";
+ OS << (*UnrollOpts.AllowPeeling ? "" : "no-") << "peeling;";
if (UnrollOpts.AllowRuntime != None)
- OS << (UnrollOpts.AllowRuntime.getValue() ? "" : "no-") << "runtime;";
+ OS << (*UnrollOpts.AllowRuntime ? "" : "no-") << "runtime;";
if (UnrollOpts.AllowUpperBound != None)
- OS << (UnrollOpts.AllowUpperBound.getValue() ? "" : "no-") << "upperbound;";
+ OS << (*UnrollOpts.AllowUpperBound ? "" : "no-") << "upperbound;";
if (UnrollOpts.AllowProfileBasedPeeling != None)
- OS << (UnrollOpts.AllowProfileBasedPeeling.getValue() ? "" : "no-")
+ OS << (*UnrollOpts.AllowProfileBasedPeeling ? "" : "no-")
<< "profile-peeling;";
if (UnrollOpts.FullUnrollMaxCount != None)
OS << "full-unroll-max=" << UnrollOpts.FullUnrollMaxCount << ";";
diff --git a/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp
index a886563..47493b5 100644
--- a/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp
@@ -143,10 +143,10 @@ static bool lowerConstantIntrinsics(Function &F, const TargetLibraryInfo &TLI,
break;
}
HasDeadBlocks |= replaceConditionalBranchesOnConstant(
- II, NewValue, DTU.hasValue() ? DTU.getPointer() : nullptr);
+ II, NewValue, DTU ? DTU.getPointer() : nullptr);
}
if (HasDeadBlocks)
- removeUnreachableBlocks(F, DTU.hasValue() ? DTU.getPointer() : nullptr);
+ removeUnreachableBlocks(F, DTU ? DTU.getPointer() : nullptr);
return !Worklist.empty();
}
diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index c4ef979..8240386 100644
--- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -1775,9 +1775,9 @@ CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC,
// Update the entry count of the function.
if (BFI) {
auto Count = BFI->getProfileCountFromFreq(EntryFreq.getFrequency());
- if (Count.hasValue())
+ if (Count)
newFunction->setEntryCount(
- ProfileCount(Count.getValue(), Function::PCT_Real)); // FIXME
+ ProfileCount(*Count, Function::PCT_Real)); // FIXME
BFI->setBlockFreq(codeReplacer, EntryFreq.getFrequency());
}
diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp
index 7f0852c..f093fea 100644
--- a/llvm/lib/Transforms/Utils/LoopPeel.cpp
+++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp
@@ -719,9 +719,9 @@ TargetTransformInfo::PeelingPreferences llvm::gatherPeelingPreferences(
}
  // User specified values provided by argument.
- if (UserAllowPeeling.hasValue())
+ if (UserAllowPeeling)
PP.AllowPeeling = *UserAllowPeeling;
- if (UserAllowProfileBasedPeeling.hasValue())
+ if (UserAllowProfileBasedPeeling)
PP.PeelProfiledIterations = *UserAllowProfileBasedPeeling;
return PP;
diff --git a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index df24af5..295ae03 100644
--- a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -397,8 +397,8 @@ CloneLoopBlocks(Loop *L, Value *NewIter, const bool UseEpilogRemainder,
Optional<MDNode *> NewLoopID = makeFollowupLoopID(
LoopID, {LLVMLoopUnrollFollowupAll, LLVMLoopUnrollFollowupRemainder});
- if (NewLoopID.hasValue()) {
- NewLoop->setLoopID(NewLoopID.getValue());
+ if (NewLoopID) {
+ NewLoop->setLoopID(*NewLoopID);
// Do not setLoopAlreadyUnrolled if loop attributes have been defined
// explicitly.
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
index b7656f4..7dc94d8 100644
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -358,8 +358,8 @@ TransformationMode llvm::hasUnrollTransformation(const Loop *L) {
Optional<int> Count =
getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
- if (Count.hasValue())
- return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
+ if (Count)
+ return *Count == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable"))
return TM_ForcedByUser;
@@ -379,8 +379,8 @@ TransformationMode llvm::hasUnrollAndJamTransformation(const Loop *L) {
Optional<int> Count =
getOptionalIntLoopAttribute(L, "llvm.loop.unroll_and_jam.count");
- if (Count.hasValue())
- return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
+ if (Count)
+ return *Count == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.enable"))
return TM_ForcedByUser;
diff --git a/llvm/lib/Transforms/Utils/MisExpect.cpp b/llvm/lib/Transforms/Utils/MisExpect.cpp
index 5ace0c6..a7acd8d 100644
--- a/llvm/lib/Transforms/Utils/MisExpect.cpp
+++ b/llvm/lib/Transforms/Utils/MisExpect.cpp
@@ -219,18 +219,18 @@ void verifyMisExpect(Instruction &I, ArrayRef<uint32_t> RealWeights,
void checkBackendInstrumentation(Instruction &I,
const ArrayRef<uint32_t> RealWeights) {
auto ExpectedWeightsOpt = extractWeights(&I, I.getContext());
- if (!ExpectedWeightsOpt.hasValue())
+ if (!ExpectedWeightsOpt)
return;
- auto ExpectedWeights = ExpectedWeightsOpt.getValue();
+ auto ExpectedWeights = *ExpectedWeightsOpt;
verifyMisExpect(I, RealWeights, ExpectedWeights);
}
void checkFrontendInstrumentation(Instruction &I,
const ArrayRef<uint32_t> ExpectedWeights) {
auto RealWeightsOpt = extractWeights(&I, I.getContext());
- if (!RealWeightsOpt.hasValue())
+ if (!RealWeightsOpt)
return;
- auto RealWeights = RealWeightsOpt.getValue();
+ auto RealWeights = *RealWeightsOpt;
verifyMisExpect(I, RealWeights, ExpectedWeights);
}
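One subtlety in the MisExpect hunks: `auto X = *Opt` copies the payload out of the optional (here a weights container), so the local stays valid regardless of what later happens to the optional; `auto &` would alias the payload instead. A sketch with a hypothetical extractWeightsSketch() analogue:

    #include <optional>
    #include <vector>

    // Hypothetical analogue of extractWeights(): may find no metadata.
    std::optional<std::vector<int>> extractWeightsSketch(bool Present) {
      if (!Present)
        return std::nullopt;
      return std::vector<int>{1, 2, 3};
    }

    void checkInstrumentation(bool Present) {
      auto WeightsOpt = extractWeightsSketch(Present);
      if (!WeightsOpt)
        return;
      // `auto` (not `auto &`) copies the payload out of the optional.
      auto Weights = *WeightsOpt;
      (void)Weights;
    }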
diff --git a/llvm/lib/Transforms/Utils/ModuleUtils.cpp b/llvm/lib/Transforms/Utils/ModuleUtils.cpp
index 7388a4b..694837a 100644
--- a/llvm/lib/Transforms/Utils/ModuleUtils.cpp
+++ b/llvm/lib/Transforms/Utils/ModuleUtils.cpp
@@ -254,8 +254,8 @@ void VFABI::setVectorVariantNames(CallInst *CI,
for (const std::string &VariantMapping : VariantMappings) {
LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << VariantMapping << "'\n");
Optional<VFInfo> VI = VFABI::tryDemangleForVFABI(VariantMapping, *M);
- assert(VI.hasValue() && "Cannot add an invalid VFABI name.");
- assert(M->getNamedValue(VI.getValue().VectorName) &&
+ assert(VI && "Cannot add an invalid VFABI name.");
+ assert(M->getNamedValue(VI->VectorName) &&
"Cannot add variant to attribute: "
"vector function declaration is missing.");
}
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 5e7a762..8ab7fc0 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4877,8 +4877,8 @@ LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
MaxVScale =
TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
- MaxScalableVF = ElementCount::getScalable(
- MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
+ MaxScalableVF =
+ ElementCount::getScalable(MaxVScale ? (MaxSafeElements / *MaxVScale) : 0);
if (!MaxScalableVF)
reportVectorizationInfo(
"Max legal vector width too small, scalable vectorization "
@@ -5273,9 +5273,9 @@ bool LoopVectorizationCostModel::isMoreProfitable(
unsigned EstimatedWidthB = B.Width.getKnownMinValue();
if (Optional<unsigned> VScale = getVScaleForTuning()) {
if (A.Width.isScalable())
- EstimatedWidthA *= VScale.getValue();
+ EstimatedWidthA *= *VScale;
if (B.Width.isScalable())
- EstimatedWidthB *= VScale.getValue();
+ EstimatedWidthB *= *VScale;
}
// Assume vscale may be larger than 1 (or the value being tuned for),
@@ -7612,8 +7612,8 @@ void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
VPBasicBlock *HeaderVPBB =
BestVPlan.getVectorLoopRegion()->getEntryBasicBlock();
Loop *L = LI->getLoopFor(State.CFG.VPBB2IRBB[HeaderVPBB]);
- if (VectorizedLoopID.hasValue())
- L->setLoopID(VectorizedLoopID.getValue());
+ if (VectorizedLoopID)
+ L->setLoopID(*VectorizedLoopID);
else {
// Keep all loop hints from the original loop on the vector loop (we'll
// replace the vectorizer-specific hints below).
@@ -10622,8 +10622,8 @@ bool LoopVectorizePass::processLoop(Loop *L) {
Optional<MDNode *> RemainderLoopID =
makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
LLVMLoopVectorizeFollowupEpilogue});
- if (RemainderLoopID.hasValue()) {
- L->setLoopID(RemainderLoopID.getValue());
+ if (RemainderLoopID) {
+ L->setLoopID(*RemainderLoopID);
} else {
if (DisableRuntimeUnroll)
AddRuntimeUnrollDisableMetaData(L);
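The LoopVectorize hunks also show the dereference used inline inside a larger expression: the ternary's condition guards the `*MaxVScale` in its true arm. A one-function sketch:

    #include <optional>

    // The ternary condition guards the dereference in its true arm, so the
    // division only happens when a vscale bound is known.
    unsigned maxScalableVF(std::optional<unsigned> MaxVScale,
                           unsigned MaxSafeElements) {
      return MaxVScale ? MaxSafeElements / *MaxVScale : 0;
    }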
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index d7769ef..a9bccbc 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -2636,8 +2636,8 @@ private:
// First check if the result is already in the cache.
AliasCacheKey key = std::make_pair(Inst1, Inst2);
Optional<bool> &result = AliasCache[key];
- if (result.hasValue()) {
- return result.getValue();
+ if (result) {
+ return *result;
}
bool aliased = true;
if (Loc1.Ptr && isSimple(Inst1))
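The SLP hunk is a memoization idiom worth spelling out: `result` is a reference into AliasCache, so the map's operator[] default-constructs a disengaged Optional on the first query, `if (result) return *result;` is the cache-hit path, and the function fills the slot through the reference before returning. A standalone sketch with a hypothetical expensiveQuery(); std::map is used here because its references are stable across insertions:

    #include <map>
    #include <optional>

    // Hypothetical expensive predicate being memoized (the aliasing query
    // in the hunk above).
    bool expensiveQuery(int Key) { return Key % 2 == 0; }

    std::map<int, std::optional<bool>> Cache;

    bool cachedQuery(int Key) {
      // operator[] default-constructs a *disengaged* optional on first use.
      std::optional<bool> &Result = Cache[Key];
      if (Result)                   // engaged: cache hit
        return *Result;
      Result = expensiveQuery(Key); // fill the slot through the reference
      return *Result;
    }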
diff --git a/llvm/lib/WindowsDriver/MSVCPaths.cpp b/llvm/lib/WindowsDriver/MSVCPaths.cpp
index 46a4426b..0661ed7 100644
--- a/llvm/lib/WindowsDriver/MSVCPaths.cpp
+++ b/llvm/lib/WindowsDriver/MSVCPaths.cpp
@@ -98,14 +98,14 @@ static bool getWindowsSDKDirViaCommandLine(
llvm::Optional<llvm::StringRef> WinSdkVersion,
llvm::Optional<llvm::StringRef> WinSysRoot, std::string &Path, int &Major,
std::string &Version) {
- if (WinSdkDir.hasValue() || WinSysRoot.hasValue()) {
+ if (WinSdkDir || WinSysRoot) {
// Don't validate the input; trust the value supplied by the user.
// The motivation is to prevent unnecessary file and registry access.
llvm::VersionTuple SDKVersion;
- if (WinSdkVersion.hasValue())
+ if (WinSdkVersion)
SDKVersion.tryParse(*WinSdkVersion);
- if (WinSysRoot.hasValue()) {
+ if (WinSysRoot) {
llvm::SmallString<128> SDKPath(*WinSysRoot);
llvm::sys::path::append(SDKPath, "Windows Kits");
if (!SDKVersion.empty())
@@ -479,12 +479,12 @@ bool findVCToolChainViaCommandLine(vfs::FileSystem &VFS,
std::string &Path, ToolsetLayout &VSLayout) {
// Don't validate the input; trust the value supplied by the user.
// The primary motivation is to prevent unnecessary file and registry access.
- if (VCToolsDir.hasValue() || WinSysRoot.hasValue()) {
- if (WinSysRoot.hasValue()) {
+ if (VCToolsDir || WinSysRoot) {
+ if (WinSysRoot) {
SmallString<128> ToolsPath(*WinSysRoot);
sys::path::append(ToolsPath, "VC", "Tools", "MSVC");
std::string ToolsVersion;
- if (VCToolsVersion.hasValue())
+ if (VCToolsVersion)
ToolsVersion = VCToolsVersion->str();
else
ToolsVersion = getHighestNumericTupleInDirectory(VFS, ToolsPath);
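Finally, the MSVCPaths hunks test several user-supplied overrides at once: `||` over two optionals is a condition context, so each operand converts contextually and the test reads as "did the user pass either flag". A closing sketch with hypothetical flag names, where any engaged override short-circuits the slower registry and filesystem probing:

    #include <optional>
    #include <string>

    bool resolveSDKPath(std::optional<std::string> WinSdkDir,
                        std::optional<std::string> WinSysRoot,
                        std::string &Path) {
      if (WinSdkDir || WinSysRoot) { // contextual bool on both optionals
        // Trust the user-supplied value; the outer test guarantees that at
        // least one of the two dereferences below is reachable and engaged.
        Path = WinSysRoot ? *WinSysRoot + "/Windows Kits" : *WinSdkDir;
        return true;
      }
      return false;                  // fall back to registry/file probing
    }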