109 files changed, 483 insertions, 502 deletions
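The change across all 109 files is mechanical: each call to the Optional accessor getValue() is replaced with the std::optional-style value(), with no behavioral difference intended. Below is a minimal before/after sketch of the pattern, using std::optional as a stand-in since llvm::Optional exposes the same value()/has_value() accessors once this migration is in place; the lookupWeight helper is hypothetical and not taken from the patch.

#include <iostream>
#include <optional>

// Hypothetical helper (not from the patch): may or may not produce a weight.
static std::optional<unsigned> lookupWeight(bool HasProfile) {
  if (!HasProfile)
    return std::nullopt;
  return 42u;
}

int main() {
  std::optional<unsigned> Weight = lookupWeight(true);
  if (Weight) {
    // Old spelling on llvm::Optional: Weight.getValue()
    // New spelling, shared with std::optional:
    std::cout << "weight = " << Weight.value() << '\n';
  }
  return 0;
}

Adopting the std::optional spelling tree-wide is what allows llvm::Optional to later be swapped out for std::optional without another mass rename.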
diff --git a/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h index d8e524d7..8addbde 100644 --- a/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h +++ b/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h @@ -1278,9 +1278,9 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) { } LLVM_DEBUG(dbgs() << getBlockName(HeaderNode) << " has irr loop header weight " - << HeaderWeight.getValue() << "\n"); + << HeaderWeight.value() << "\n"); NumHeadersWithWeight++; - uint64_t HeaderWeightValue = HeaderWeight.getValue(); + uint64_t HeaderWeightValue = HeaderWeight.value(); if (!MinHeaderWeight || HeaderWeightValue < MinHeaderWeight) MinHeaderWeight = HeaderWeightValue; if (HeaderWeightValue) { @@ -1732,10 +1732,10 @@ raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const { if (Optional<uint64_t> ProfileCount = BlockFrequencyInfoImplBase::getBlockProfileCount( F->getFunction(), getNode(&BB))) - OS << ", count = " << ProfileCount.getValue(); + OS << ", count = " << ProfileCount.value(); if (Optional<uint64_t> IrrLoopHeaderWeight = BB.getIrrLoopHeaderWeight()) - OS << ", irr_loop_header_weight = " << IrrLoopHeaderWeight.getValue(); + OS << ", irr_loop_header_weight = " << IrrLoopHeaderWeight.value(); OS << "\n"; } diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h index 0005874..fa08927 100644 --- a/llvm/include/llvm/Analysis/VectorUtils.h +++ b/llvm/include/llvm/Analysis/VectorUtils.h @@ -236,10 +236,10 @@ class VFDatabase { // ensuring that the variant described in the attribute has a // corresponding definition or declaration of the vector // function in the Module M. - if (Shape && (Shape.getValue().ScalarName == ScalarName)) { - assert(CI.getModule()->getFunction(Shape.getValue().VectorName) && + if (Shape && (Shape.value().ScalarName == ScalarName)) { + assert(CI.getModule()->getFunction(Shape.value().VectorName) && "Vector function is missing."); - Mappings.push_back(Shape.getValue()); + Mappings.push_back(Shape.value()); } } } diff --git a/llvm/include/llvm/Bitstream/BitstreamWriter.h b/llvm/include/llvm/Bitstream/BitstreamWriter.h index be6bab5..143f9ba 100644 --- a/llvm/include/llvm/Bitstream/BitstreamWriter.h +++ b/llvm/include/llvm/Bitstream/BitstreamWriter.h @@ -386,12 +386,12 @@ private: const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i++); if (Op.isLiteral()) - EmitAbbreviatedLiteral(Op, Code.getValue()); + EmitAbbreviatedLiteral(Op, Code.value()); else { assert(Op.getEncoding() != BitCodeAbbrevOp::Array && Op.getEncoding() != BitCodeAbbrevOp::Blob && "Expected literal or scalar"); - EmitAbbreviatedField(Op, Code.getValue()); + EmitAbbreviatedField(Op, Code.value()); } } diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h index ee17170..1229dfc 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h @@ -694,7 +694,7 @@ bool InstructionSelector::executeMatchTable( (ISel.*ISelInfo.ComplexPredicates[ComplexPredicateID])( State.MIs[InsnID]->getOperand(OpIdx)); if (Renderer) - State.Renderers[RendererID] = Renderer.getValue(); + State.Renderers[RendererID] = Renderer.value(); else if (handleReject() == RejectAndGiveUp) return false; diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h index 5e18b02..cec26e9 100644 --- a/llvm/include/llvm/IR/IRBuilder.h 
+++ b/llvm/include/llvm/IR/IRBuilder.h @@ -1162,11 +1162,11 @@ private: RoundingMode UseRounding = DefaultConstrainedRounding; if (Rounding) - UseRounding = Rounding.getValue(); + UseRounding = Rounding.value(); Optional<StringRef> RoundingStr = convertRoundingModeToStr(UseRounding); assert(RoundingStr && "Garbage strict rounding mode!"); - auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue()); + auto *RoundingMDS = MDString::get(Context, RoundingStr.value()); return MetadataAsValue::get(Context, RoundingMDS); } @@ -1175,11 +1175,11 @@ private: fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept; if (Except) - UseExcept = Except.getValue(); + UseExcept = Except.value(); Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(UseExcept); assert(ExceptStr && "Garbage strict exception behavior!"); - auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue()); + auto *ExceptMDS = MDString::get(Context, ExceptStr.value()); return MetadataAsValue::get(Context, ExceptMDS); } diff --git a/llvm/include/llvm/MC/MCSymbolWasm.h b/llvm/include/llvm/MC/MCSymbolWasm.h index 5eab32c..33ec0db 100644 --- a/llvm/include/llvm/MC/MCSymbolWasm.h +++ b/llvm/include/llvm/MC/MCSymbolWasm.h @@ -89,7 +89,7 @@ public: bool hasImportModule() const { return ImportModule.has_value(); } StringRef getImportModule() const { if (ImportModule) - return ImportModule.getValue(); + return ImportModule.value(); // Use a default module name of "env" for now, for compatibility with // existing tools. // TODO(sbc): Find a way to specify a default value in the object format @@ -101,13 +101,13 @@ public: bool hasImportName() const { return ImportName.has_value(); } StringRef getImportName() const { if (ImportName) - return ImportName.getValue(); + return ImportName.value(); return getName(); } void setImportName(StringRef Name) { ImportName = Name; } bool hasExportName() const { return ExportName.has_value(); } - StringRef getExportName() const { return ExportName.getValue(); } + StringRef getExportName() const { return ExportName.value(); } void setExportName(StringRef Name) { ExportName = Name; } bool isFunctionTable() const { @@ -130,14 +130,14 @@ public: const wasm::WasmGlobalType &getGlobalType() const { assert(GlobalType); - return GlobalType.getValue(); + return GlobalType.value(); } void setGlobalType(wasm::WasmGlobalType GT) { GlobalType = GT; } bool hasTableType() const { return TableType.has_value(); } const wasm::WasmTableType &getTableType() const { assert(hasTableType()); - return TableType.getValue(); + return TableType.value(); } void setTableType(wasm::WasmTableType TT) { TableType = TT; } void setTableType(wasm::ValType VT) { diff --git a/llvm/include/llvm/MC/MCSymbolXCOFF.h b/llvm/include/llvm/MC/MCSymbolXCOFF.h index 2ec265e..cc19f88 100644 --- a/llvm/include/llvm/MC/MCSymbolXCOFF.h +++ b/llvm/include/llvm/MC/MCSymbolXCOFF.h @@ -40,7 +40,7 @@ public: XCOFF::StorageClass getStorageClass() const { assert(StorageClass && "StorageClass not set on XCOFF MCSymbol."); - return StorageClass.getValue(); + return StorageClass.value(); } StringRef getUnqualifiedName() const { return getUnqualifiedName(getName()); } diff --git a/llvm/include/llvm/Support/Casting.h b/llvm/include/llvm/Support/Casting.h index 894c1f4..5444d77 100644 --- a/llvm/include/llvm/Support/Casting.h +++ b/llvm/include/llvm/Support/Casting.h @@ -638,9 +638,7 @@ template <typename T, typename Enable = void> struct ValueIsPresent { template <typename T> struct ValueIsPresent<Optional<T>> { using UnwrappedType = T; static inline 
bool isPresent(const Optional<T> &t) { return t.has_value(); } - static inline decltype(auto) unwrapValue(Optional<T> &t) { - return t.getValue(); - } + static inline decltype(auto) unwrapValue(Optional<T> &t) { return t.value(); } }; // If something is "nullable" then we just compare it to nullptr to see if it diff --git a/llvm/include/llvm/Support/Error.h b/llvm/include/llvm/Support/Error.h index 1a801b6..3c2c2c8 100644 --- a/llvm/include/llvm/Support/Error.h +++ b/llvm/include/llvm/Support/Error.h @@ -1270,7 +1270,7 @@ public: assert(Err && "Trying to log after takeError()."); OS << "'" << FileName << "': "; if (Line) - OS << "line " << Line.getValue() << ": "; + OS << "line " << Line.value() << ": "; Err->log(OS); } diff --git a/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/llvm/lib/Analysis/BranchProbabilityInfo.cpp index 87863db..428ae89 100644 --- a/llvm/lib/Analysis/BranchProbabilityInfo.cpp +++ b/llvm/lib/Analysis/BranchProbabilityInfo.cpp @@ -828,9 +828,8 @@ void BranchProbabilityInfo::computeEestimateBlockWeight( if (auto BBWeight = getInitialEstimatedBlockWeight(BB)) // If we were able to find estimated weight for the block set it to this // block and propagate up the IR. - propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT, - BBWeight.getValue(), BlockWorkList, - LoopWorkList); + propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT, BBWeight.value(), + BlockWorkList, LoopWorkList); // BlockWorklist/LoopWorkList contains blocks/loops with at least one // successor/exit having estimated weight. Try to propagate weight to such diff --git a/llvm/lib/Analysis/IRSimilarityIdentifier.cpp b/llvm/lib/Analysis/IRSimilarityIdentifier.cpp index 3d51042..a681c52 100644 --- a/llvm/lib/Analysis/IRSimilarityIdentifier.cpp +++ b/llvm/lib/Analysis/IRSimilarityIdentifier.cpp @@ -184,8 +184,8 @@ CmpInst::Predicate IRInstructionData::getPredicate() const { "Can only get a predicate from a compare instruction"); if (RevisedPredicate) - return RevisedPredicate.getValue(); - + return RevisedPredicate.value(); + return cast<CmpInst>(Inst)->getPredicate(); } diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp index f814d16..9f8a5e4 100644 --- a/llvm/lib/Analysis/InlineCost.cpp +++ b/llvm/lib/Analysis/InlineCost.cpp @@ -708,7 +708,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer { assert(BFI && "BFI must be available"); auto ProfileCount = BFI->getBlockProfileCount(BB); assert(ProfileCount); - if (ProfileCount.getValue() == 0) + if (ProfileCount.value() == 0) ColdSize += Cost - CostAtBBStart; } @@ -833,7 +833,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer { auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB); assert(ProfileCount); - CurrentSavings *= ProfileCount.getValue(); + CurrentSavings *= ProfileCount.value(); CycleSavings += CurrentSavings; } @@ -1787,12 +1787,12 @@ void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) { // return min(A, B) if B is valid. auto MinIfValid = [](int A, Optional<int> B) { - return B ? std::min(A, B.getValue()) : A; + return B ? std::min(A, B.value()) : A; }; // return max(A, B) if B is valid. auto MaxIfValid = [](int A, Optional<int> B) { - return B ? std::max(A, B.getValue()) : A; + return B ? std::max(A, B.value()) : A; }; // Various bonus percentages. 
These are multiplied by Threshold to get the diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp index 6722c43..4691aeb 100644 --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -6111,8 +6111,8 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) { Value *Op2 = Call->getArgOperand(2); auto *FPI = cast<ConstrainedFPIntrinsic>(Call); if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q, - FPI->getExceptionBehavior().getValue(), - FPI->getRoundingMode().getValue())) + FPI->getExceptionBehavior().value(), + FPI->getRoundingMode().value())) return V; return nullptr; } @@ -6176,38 +6176,33 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) { } case Intrinsic::experimental_constrained_fadd: { auto *FPI = cast<ConstrainedFPIntrinsic>(Call); - return simplifyFAddInst(FPI->getArgOperand(0), FPI->getArgOperand(1), - FPI->getFastMathFlags(), Q, - FPI->getExceptionBehavior().getValue(), - FPI->getRoundingMode().getValue()); + return simplifyFAddInst( + FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(), + Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value()); } case Intrinsic::experimental_constrained_fsub: { auto *FPI = cast<ConstrainedFPIntrinsic>(Call); - return simplifyFSubInst(FPI->getArgOperand(0), FPI->getArgOperand(1), - FPI->getFastMathFlags(), Q, - FPI->getExceptionBehavior().getValue(), - FPI->getRoundingMode().getValue()); + return simplifyFSubInst( + FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(), + Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value()); } case Intrinsic::experimental_constrained_fmul: { auto *FPI = cast<ConstrainedFPIntrinsic>(Call); - return simplifyFMulInst(FPI->getArgOperand(0), FPI->getArgOperand(1), - FPI->getFastMathFlags(), Q, - FPI->getExceptionBehavior().getValue(), - FPI->getRoundingMode().getValue()); + return simplifyFMulInst( + FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(), + Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value()); } case Intrinsic::experimental_constrained_fdiv: { auto *FPI = cast<ConstrainedFPIntrinsic>(Call); - return simplifyFDivInst(FPI->getArgOperand(0), FPI->getArgOperand(1), - FPI->getFastMathFlags(), Q, - FPI->getExceptionBehavior().getValue(), - FPI->getRoundingMode().getValue()); + return simplifyFDivInst( + FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(), + Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value()); } case Intrinsic::experimental_constrained_frem: { auto *FPI = cast<ConstrainedFPIntrinsic>(Call); - return simplifyFRemInst(FPI->getArgOperand(0), FPI->getArgOperand(1), - FPI->getFastMathFlags(), Q, - FPI->getExceptionBehavior().getValue(), - FPI->getRoundingMode().getValue()); + return simplifyFRemInst( + FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(), + Q, FPI->getExceptionBehavior().value(), FPI->getRoundingMode().value()); } default: return nullptr; diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp index 8a8e9e9..d49b207 100644 --- a/llvm/lib/Analysis/LazyValueInfo.cpp +++ b/llvm/lib/Analysis/LazyValueInfo.cpp @@ -921,7 +921,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueCast( if (!LHSRes) // More work to do before applying this transfer rule. 
return None; - const ConstantRange &LHSRange = LHSRes.getValue(); + const ConstantRange &LHSRange = LHSRes.value(); const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth(); @@ -946,8 +946,8 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOpImpl( // More work to do before applying this transfer rule. return None; - const ConstantRange &LHSRange = LHSRes.getValue(); - const ConstantRange &RHSRange = RHSRes.getValue(); + const ConstantRange &LHSRange = LHSRes.value(); + const ConstantRange &RHSRange = RHSRes.value(); return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange)); } diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp index 91501b0..f5b121c 100644 --- a/llvm/lib/Analysis/MemoryBuiltins.cpp +++ b/llvm/lib/Analysis/MemoryBuiltins.cpp @@ -501,10 +501,10 @@ Optional<StringRef> llvm::getAllocationFamily(const Value *I, return None; const auto AllocData = getAllocationDataForFunction(Callee, AnyAlloc, TLI); if (AllocData) - return mangledNameForMallocFamily(AllocData.getValue().Family); + return mangledNameForMallocFamily(AllocData.value().Family); const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn); if (FreeData) - return mangledNameForMallocFamily(FreeData.getValue().Family); + return mangledNameForMallocFamily(FreeData.value().Family); return None; } diff --git a/llvm/lib/Analysis/MustExecute.cpp b/llvm/lib/Analysis/MustExecute.cpp index 5cff986..ad8322d 100644 --- a/llvm/lib/Analysis/MustExecute.cpp +++ b/llvm/lib/Analysis/MustExecute.cpp @@ -493,7 +493,7 @@ static V getOrCreateCachedOptional(K Key, DenseMap<K, Optional<V>> &Map, Optional<V> &OptVal = Map[Key]; if (!OptVal) OptVal = Fn(std::forward<ArgsTy>(args)...); - return OptVal.getValue(); + return OptVal.value(); } const BasicBlock * diff --git a/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/llvm/lib/Analysis/ProfileSummaryInfo.cpp index 9d5fa6d..64844f5 100644 --- a/llvm/lib/Analysis/ProfileSummaryInfo.cpp +++ b/llvm/lib/Analysis/ProfileSummaryInfo.cpp @@ -279,19 +279,19 @@ ProfileSummaryInfo::computeThreshold(int PercentileCutoff) const { } bool ProfileSummaryInfo::hasHugeWorkingSetSize() const { - return HasHugeWorkingSetSize && HasHugeWorkingSetSize.getValue(); + return HasHugeWorkingSetSize && HasHugeWorkingSetSize.value(); } bool ProfileSummaryInfo::hasLargeWorkingSetSize() const { - return HasLargeWorkingSetSize && HasLargeWorkingSetSize.getValue(); + return HasLargeWorkingSetSize && HasLargeWorkingSetSize.value(); } bool ProfileSummaryInfo::isHotCount(uint64_t C) const { - return HotCountThreshold && C >= HotCountThreshold.getValue(); + return HotCountThreshold && C >= HotCountThreshold.value(); } bool ProfileSummaryInfo::isColdCount(uint64_t C) const { - return ColdCountThreshold && C <= ColdCountThreshold.getValue(); + return ColdCountThreshold && C <= ColdCountThreshold.value(); } template <bool isHot> @@ -299,9 +299,9 @@ bool ProfileSummaryInfo::isHotOrColdCountNthPercentile(int PercentileCutoff, uint64_t C) const { auto CountThreshold = computeThreshold(PercentileCutoff); if (isHot) - return CountThreshold && C >= CountThreshold.getValue(); + return CountThreshold && C >= CountThreshold.value(); else - return CountThreshold && C <= CountThreshold.getValue(); + return CountThreshold && C <= CountThreshold.value(); } bool ProfileSummaryInfo::isHotCountNthPercentile(int PercentileCutoff, diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp index f73fd49..f61806b 100644 --- 
a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -4838,7 +4838,7 @@ public: Optional<const SCEV *> Res = compareWithBackedgeCondition(SI->getCondition()); if (Res) { - bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); + bool IsOne = cast<SCEVConstant>(Res.value())->getValue()->isOne(); Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); } break; @@ -4846,7 +4846,7 @@ public: default: { Optional<const SCEV *> Res = compareWithBackedgeCondition(I); if (Res) - Result = Res.getValue(); + Result = Res.value(); break; } } @@ -6586,8 +6586,8 @@ ScalarEvolution::getRangeRef(const SCEV *S, // Check if the IR explicitly contains !range metadata. Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); if (MDRange) - ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(), - RangeType); + ConservativeResult = + ConservativeResult.intersectWith(MDRange.value(), RangeType); // Use facts about recurrences in the underlying IR. Note that add // recurrences are AddRecExprs and thus don't hit this path. This @@ -10632,7 +10632,7 @@ ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS, getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred)); assert(ResultSwapped && "should be able to analyze both!"); - assert(ResultSwapped.getValue() != Result.getValue() && + assert(ResultSwapped.value() != Result.value() && "monotonicity should flip as we flip the predicate"); } #endif diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp index f863a1f..894680c 100644 --- a/llvm/lib/Analysis/VectorUtils.cpp +++ b/llvm/lib/Analysis/VectorUtils.cpp @@ -1502,7 +1502,7 @@ void VFABI::getVectorVariantNames( LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n"); Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule())); assert(Info && "Invalid name for a VFABI variant."); - assert(CI.getModule()->getFunction(Info.getValue().VectorName) && + assert(CI.getModule()->getFunction(Info.value().VectorName) && "Vector function is missing."); #endif VariantMappings.push_back(std::string(S)); diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp index 40ae705..0c94e1f 100644 --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -742,7 +742,7 @@ bool MIParser::parseBasicBlockDefinition( MBB->setIsInlineAsmBrIndirectTarget(IsInlineAsmBrIndirectTarget); MBB->setIsEHFuncletEntry(IsEHFuncletEntry); if (SectionID) { - MBB->setSectionID(SectionID.getValue()); + MBB->setSectionID(SectionID.value()); MF.setBBSectionsType(BasicBlockSection::List); } return false; diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp index c186d0b..02c44fa 100644 --- a/llvm/lib/CodeGen/MachineBasicBlock.cpp +++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp @@ -451,7 +451,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST, if (IrrLoopHeaderWeight && IsStandalone) { if (Indexes) OS << '\t'; OS.indent(2) << "; Irreducible loop header weight: " - << IrrLoopHeaderWeight.getValue() << '\n'; + << IrrLoopHeaderWeight.value() << '\n'; } } diff --git a/llvm/lib/CodeGen/MachineFunctionSplitter.cpp b/llvm/lib/CodeGen/MachineFunctionSplitter.cpp index 81c97ba..867a7ed 100644 --- a/llvm/lib/CodeGen/MachineFunctionSplitter.cpp +++ b/llvm/lib/CodeGen/MachineFunctionSplitter.cpp @@ -106,8 +106,8 @@ bool 
MachineFunctionSplitter::runOnMachineFunction(MachineFunction &MF) { // We don't want to proceed further for cold functions // or functions of unknown hotness. Lukewarm functions have no prefix. Optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix(); - if (SectionPrefix && (SectionPrefix.getValue().equals("unlikely") || - SectionPrefix.getValue().equals("unknown"))) { + if (SectionPrefix && (SectionPrefix.value().equals("unlikely") || + SectionPrefix.value().equals("unknown"))) { return false; } diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp index 3245d96..581168b 100644 --- a/llvm/lib/CodeGen/ModuloSchedule.cpp +++ b/llvm/lib/CodeGen/ModuloSchedule.cpp @@ -1448,7 +1448,7 @@ Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg, const TargetRegisterClass *RC) { // If the init register is not undef, try and find an existing phi. if (InitReg) { - auto I = Phis.find({LoopReg, InitReg.getValue()}); + auto I = Phis.find({LoopReg, InitReg.value()}); if (I != Phis.end()) return I->second; } else { @@ -1469,10 +1469,10 @@ Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg, return R; // Found a phi taking undef as input, so rewrite it to take InitReg. MachineInstr *MI = MRI.getVRegDef(R); - MI->getOperand(1).setReg(InitReg.getValue()); - Phis.insert({{LoopReg, InitReg.getValue()}, R}); + MI->getOperand(1).setReg(InitReg.value()); + Phis.insert({{LoopReg, InitReg.value()}, R}); const TargetRegisterClass *ConstrainRegClass = - MRI.constrainRegClass(R, MRI.getRegClass(InitReg.getValue())); + MRI.constrainRegClass(R, MRI.getRegClass(InitReg.value())); assert(ConstrainRegClass && "Expected a valid constrained register class!"); (void)ConstrainRegClass; UndefPhis.erase(I); diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp index c199b6a..d627519 100644 --- a/llvm/lib/CodeGen/SelectOptimize.cpp +++ b/llvm/lib/CodeGen/SelectOptimize.cpp @@ -870,8 +870,8 @@ bool SelectOptimize::computeLoopCosts( ORE->emit(ORmissL); return false; } - IPredCost += Scaled64::get(ILatency.getValue()); - INonPredCost += Scaled64::get(ILatency.getValue()); + IPredCost += Scaled64::get(ILatency.value()); + INonPredCost += Scaled64::get(ILatency.value()); // For a select that can be converted to branch, // compute its cost as a branch (non-predicated cost). diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 6c985060..fe3c38e 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -703,7 +703,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL, unsigned NumRegs; if (IsABIRegCopy) { NumRegs = TLI.getVectorTypeBreakdownForCallingConv( - *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT, + *DAG.getContext(), CallConv.value(), ValueVT, IntermediateVT, NumIntermediates, RegisterVT); } else { NumRegs = @@ -800,11 +800,11 @@ RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI, for (EVT ValueVT : ValueVTs) { unsigned NumRegs = isABIMangled() - ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT) + ? TLI.getNumRegistersForCallingConv(Context, CC.value(), ValueVT) : TLI.getNumRegisters(Context, ValueVT); MVT RegisterVT = isABIMangled() - ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT) + ? 
TLI.getRegisterTypeForCallingConv(Context, CC.value(), ValueVT) : TLI.getRegisterType(Context, ValueVT); for (unsigned i = 0; i != NumRegs; ++i) Regs.push_back(Reg + i); @@ -831,10 +831,10 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, // Copy the legal parts from the registers. EVT ValueVT = ValueVTs[Value]; unsigned NumRegs = RegCount[Value]; - MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv( - *DAG.getContext(), - CallConv.getValue(), RegVTs[Value]) - : RegVTs[Value]; + MVT RegisterVT = + isABIMangled() ? TLI.getRegisterTypeForCallingConv( + *DAG.getContext(), CallConv.value(), RegVTs[Value]) + : RegVTs[Value]; Parts.resize(NumRegs); for (unsigned i = 0; i != NumRegs; ++i) { @@ -914,10 +914,10 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) { unsigned NumParts = RegCount[Value]; - MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv( - *DAG.getContext(), - CallConv.getValue(), RegVTs[Value]) - : RegVTs[Value]; + MVT RegisterVT = + isABIMangled() ? TLI.getRegisterTypeForCallingConv( + *DAG.getContext(), CallConv.value(), RegVTs[Value]) + : RegVTs[Value]; if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT)) ExtendKind = ISD::ZERO_EXTEND; @@ -8791,7 +8791,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, if (RegError) { const MachineFunction &MF = DAG.getMachineFunction(); const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); - const char *RegName = TRI.getName(RegError.getValue()); + const char *RegName = TRI.getName(RegError.value()); emitInlineAsmError(Call, "register '" + Twine(RegName) + "' allocated for constraint '" + Twine(OpInfo.ConstraintCode) + diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp index 19a52fd..3061158 100644 --- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp @@ -531,14 +531,14 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops, for (const Value *V : SI.Bases) { auto Opt = S.isGCManagedPointer(V->getType()->getScalarType()); if (Opt) { - assert(Opt.getValue() && + assert(Opt.value() && "non gc managed base pointer found in statepoint"); } } for (const Value *V : SI.Ptrs) { auto Opt = S.isGCManagedPointer(V->getType()->getScalarType()); if (Opt) { - assert(Opt.getValue() && + assert(Opt.value() && "non gc managed derived pointer found in statepoint"); } } diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index d31ee5e..66389a5 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -1995,9 +1995,9 @@ bool TargetLowering::SimplifyDemandedBits( KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); Known = KnownBits::umin(Known0, Known1); if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1)) - return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1); + return TLO.CombineTo(Op, IsULE.value() ? Op0 : Op1); if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1)) - return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1); + return TLO.CombineTo(Op, IsULT.value() ? 
Op0 : Op1); break; } case ISD::UMAX: { @@ -2008,9 +2008,9 @@ bool TargetLowering::SimplifyDemandedBits( KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); Known = KnownBits::umax(Known0, Known1); if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1)) - return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1); + return TLO.CombineTo(Op, IsUGE.value() ? Op0 : Op1); if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1)) - return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1); + return TLO.CombineTo(Op, IsUGT.value() ? Op0 : Op1); break; } case ISD::BITREVERSE: { diff --git a/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp b/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp index c785026..2e567d8 100644 --- a/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp @@ -1205,13 +1205,13 @@ void DWARFContext::addLocalsForDie(DWARFCompileUnit *CU, DWARFDie Subprogram, if (auto DeclFileAttr = Die.find(DW_AT_decl_file)) { if (const auto *LT = CU->getContext().getLineTableForUnit(CU)) LT->getFileNameByIndex( - DeclFileAttr->getAsUnsignedConstant().getValue(), + DeclFileAttr->getAsUnsignedConstant().value(), CU->getCompilationDir(), DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath, Local.DeclFile); } if (auto DeclLineAttr = Die.find(DW_AT_decl_line)) - Local.DeclLine = DeclLineAttr->getAsUnsignedConstant().getValue(); + Local.DeclLine = DeclLineAttr->getAsUnsignedConstant().value(); Result.push_back(Local); return; diff --git a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp index 2e0780e..33856c1 100644 --- a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp @@ -327,20 +327,20 @@ parseV5DirFileTables(const DWARFDataExtractor &DebugLineData, FileEntry.Source = Value; break; case DW_LNCT_directory_index: - FileEntry.DirIdx = Value.getAsUnsignedConstant().getValue(); + FileEntry.DirIdx = Value.getAsUnsignedConstant().value(); break; case DW_LNCT_timestamp: - FileEntry.ModTime = Value.getAsUnsignedConstant().getValue(); + FileEntry.ModTime = Value.getAsUnsignedConstant().value(); break; case DW_LNCT_size: - FileEntry.Length = Value.getAsUnsignedConstant().getValue(); + FileEntry.Length = Value.getAsUnsignedConstant().value(); break; case DW_LNCT_MD5: - if (!Value.getAsBlock() || Value.getAsBlock().getValue().size() != 16) + if (!Value.getAsBlock() || Value.getAsBlock().value().size() != 16) return createStringError( errc::invalid_argument, "failed to parse file entry because the MD5 hash is invalid"); - std::uninitialized_copy_n(Value.getAsBlock().getValue().begin(), 16, + std::uninitialized_copy_n(Value.getAsBlock().value().begin(), 16, FileEntry.Checksum.begin()); break; default: diff --git a/llvm/lib/Debuginfod/Debuginfod.cpp b/llvm/lib/Debuginfod/Debuginfod.cpp index bd54d69..ef4e11ca 100644 --- a/llvm/lib/Debuginfod/Debuginfod.cpp +++ b/llvm/lib/Debuginfod/Debuginfod.cpp @@ -373,7 +373,7 @@ Error DebuginfodCollection::findBinaries(StringRef Path) { if (!ID) continue; - std::string IDString = buildIDToString(ID.getValue()); + std::string IDString = buildIDToString(ID.value()); if (isDebugBinary(Object)) { std::lock_guard<sys::RWMutex> DebugBinariesGuard(DebugBinariesMutex); DebugBinaries[IDString] = FilePath; @@ -435,7 +435,7 @@ Expected<std::string> DebuginfodCollection::findBinaryPath(BuildIDRef ID) { } } if (Path) - return Path.getValue(); + return Path.value(); } // Try federation. 
@@ -466,7 +466,7 @@ Expected<std::string> DebuginfodCollection::findDebugBinaryPath(BuildIDRef ID) { } } if (Path) - return Path.getValue(); + return Path.value(); // Try federation. return getCachedOrDownloadDebuginfo(ID); diff --git a/llvm/lib/Frontend/OpenMP/OMPContext.cpp b/llvm/lib/Frontend/OpenMP/OMPContext.cpp index 6e8856f..0f846f7 100644 --- a/llvm/lib/Frontend/OpenMP/OMPContext.cpp +++ b/llvm/lib/Frontend/OpenMP/OMPContext.cpp @@ -214,7 +214,7 @@ static int isVariantApplicableInContextHelper( Optional<bool> Result = HandleTrait(Property, IsActiveTrait); if (Result) - return Result.getValue(); + return Result.value(); } if (!DeviceSetOnly) { @@ -235,7 +235,7 @@ static int isVariantApplicableInContextHelper( Optional<bool> Result = HandleTrait(Property, FoundInOrder); if (Result) - return Result.getValue(); + return Result.value(); if (!FoundInOrder) { LLVM_DEBUG(dbgs() << "[" << DEBUG_TYPE << "] Construct property " diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index 495f86b..b333f40 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -4427,10 +4427,9 @@ MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() { assert(SI.getNumSuccessors() == Weights->size() && "num of prof branch_weights must accord with num of successors"); - bool AllZeroes = - all_of(Weights.getValue(), [](uint32_t W) { return W == 0; }); + bool AllZeroes = all_of(Weights.value(), [](uint32_t W) { return W == 0; }); - if (AllZeroes || Weights.getValue().size() < 2) + if (AllZeroes || Weights.value().size() < 2) return nullptr; return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights); @@ -4464,8 +4463,8 @@ SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) { // Copy the last case to the place of the removed one and shrink. // This is tightly coupled with the way SwitchInst::removeCase() removes // the cases in SwitchInst::removeCase(CaseIt). 
- Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back(); - Weights.getValue().pop_back(); + Weights.value()[I->getCaseIndex() + 1] = Weights.value().back(); + Weights.value().pop_back(); } return SI.removeCase(I); } @@ -4478,10 +4477,10 @@ void SwitchInstProfUpdateWrapper::addCase( if (!Weights && W && *W) { Changed = true; Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0); - Weights.getValue()[SI.getNumSuccessors() - 1] = *W; + Weights.value()[SI.getNumSuccessors() - 1] = *W; } else if (Weights) { Changed = true; - Weights.getValue().push_back(W.value_or(0)); + Weights.value().push_back(W.value_or(0)); } if (Weights) assert(SI.getNumSuccessors() == Weights->size() && diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp index b132a9d..65a9a32 100644 --- a/llvm/lib/IR/IntrinsicInst.cpp +++ b/llvm/lib/IR/IntrinsicInst.cpp @@ -223,13 +223,13 @@ ConstrainedFPIntrinsic::getExceptionBehavior() const { bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const { Optional<fp::ExceptionBehavior> Except = getExceptionBehavior(); if (Except) { - if (Except.getValue() != fp::ebIgnore) + if (Except.value() != fp::ebIgnore) return false; } Optional<RoundingMode> Rounding = getRoundingMode(); if (Rounding) { - if (Rounding.getValue() != RoundingMode::NearestTiesToEven) + if (Rounding.value() != RoundingMode::NearestTiesToEven) return false; } @@ -364,13 +364,13 @@ VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) { MaybeAlign VPIntrinsic::getPointerAlignment() const { Optional<unsigned> PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID()); assert(PtrParamOpt && "no pointer argument!"); - return getParamAlign(PtrParamOpt.getValue()); + return getParamAlign(PtrParamOpt.value()); } /// \return The pointer operand of this load,store, gather or scatter. 
Value *VPIntrinsic::getMemoryPointerParam() const { if (auto PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID())) - return getArgOperand(PtrParamOpt.getValue()); + return getArgOperand(PtrParamOpt.value()); return nullptr; } @@ -391,7 +391,7 @@ Value *VPIntrinsic::getMemoryDataParam() const { auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID()); if (!DataParamOpt) return nullptr; - return getArgOperand(DataParamOpt.getValue()); + return getArgOperand(DataParamOpt.value()); } Optional<unsigned> VPIntrinsic::getMemoryDataParamPos(Intrinsic::ID VPID) { diff --git a/llvm/lib/IR/LLVMContextImpl.cpp b/llvm/lib/IR/LLVMContextImpl.cpp index de970dd..d7aaf00 100644 --- a/llvm/lib/IR/LLVMContextImpl.cpp +++ b/llvm/lib/IR/LLVMContextImpl.cpp @@ -259,7 +259,7 @@ bool LLVMContextImpl::getOpaquePointers() { } void LLVMContextImpl::setOpaquePointers(bool OP) { - assert((!OpaquePointers || OpaquePointers.getValue() == OP) && + assert((!OpaquePointers || OpaquePointers.value() == OP) && "Cannot change opaque pointers mode once set"); OpaquePointers = OP; } diff --git a/llvm/lib/InterfaceStub/IFSHandler.cpp b/llvm/lib/InterfaceStub/IFSHandler.cpp index 71189e7..4edaeb7 100644 --- a/llvm/lib/InterfaceStub/IFSHandler.cpp +++ b/llvm/lib/InterfaceStub/IFSHandler.cpp @@ -202,8 +202,8 @@ Error ifs::writeIFSToOutputStream(raw_ostream &OS, const IFSStub &Stub) { yaml::Output YamlOut(OS, nullptr, /*WrapColumn =*/0); std::unique_ptr<IFSStubTriple> CopyStub(new IFSStubTriple(Stub)); if (Stub.Target.Arch) { - CopyStub->Target.ArchString = std::string( - ELF::convertEMachineToArchName(Stub.Target.Arch.getValue())); + CopyStub->Target.ArchString = + std::string(ELF::convertEMachineToArchName(Stub.Target.Arch.value())); } IFSTarget Target = Stub.Target; @@ -222,36 +222,35 @@ Error ifs::overrideIFSTarget(IFSStub &Stub, Optional<IFSArch> OverrideArch, Optional<std::string> OverrideTriple) { std::error_code OverrideEC(1, std::generic_category()); if (OverrideArch) { - if (Stub.Target.Arch && - Stub.Target.Arch.getValue() != OverrideArch.getValue()) { + if (Stub.Target.Arch && Stub.Target.Arch.value() != OverrideArch.value()) { return make_error<StringError>( "Supplied Arch conflicts with the text stub", OverrideEC); } - Stub.Target.Arch = OverrideArch.getValue(); + Stub.Target.Arch = OverrideArch.value(); } if (OverrideEndianness) { if (Stub.Target.Endianness && - Stub.Target.Endianness.getValue() != OverrideEndianness.getValue()) { + Stub.Target.Endianness.value() != OverrideEndianness.value()) { return make_error<StringError>( "Supplied Endianness conflicts with the text stub", OverrideEC); } - Stub.Target.Endianness = OverrideEndianness.getValue(); + Stub.Target.Endianness = OverrideEndianness.value(); } if (OverrideBitWidth) { if (Stub.Target.BitWidth && - Stub.Target.BitWidth.getValue() != OverrideBitWidth.getValue()) { + Stub.Target.BitWidth.value() != OverrideBitWidth.value()) { return make_error<StringError>( "Supplied BitWidth conflicts with the text stub", OverrideEC); } - Stub.Target.BitWidth = OverrideBitWidth.getValue(); + Stub.Target.BitWidth = OverrideBitWidth.value(); } if (OverrideTriple) { if (Stub.Target.Triple && - Stub.Target.Triple.getValue() != OverrideTriple.getValue()) { + Stub.Target.Triple.value() != OverrideTriple.value()) { return make_error<StringError>( "Supplied Triple conflicts with the text stub", OverrideEC); } - Stub.Target.Triple = OverrideTriple.getValue(); + Stub.Target.Triple = OverrideTriple.value(); } return Error::success(); } diff --git a/llvm/lib/MC/MCContext.cpp 
b/llvm/lib/MC/MCContext.cpp index 4be84ca..d312e35 100644 --- a/llvm/lib/MC/MCContext.cpp +++ b/llvm/lib/MC/MCContext.cpp @@ -773,7 +773,7 @@ MCSectionXCOFF *MCContext::getXCOFFSection( // Do the lookup. If we have a hit, return it. auto IterBool = XCOFFUniquingMap.insert(std::make_pair( IsDwarfSec - ? XCOFFSectionKey(Section.str(), DwarfSectionSubtypeFlags.getValue()) + ? XCOFFSectionKey(Section.str(), DwarfSectionSubtypeFlags.value()) : XCOFFSectionKey(Section.str(), CsectProp->MappingClass), nullptr)); auto &Entry = *IterBool.first; @@ -806,7 +806,7 @@ MCSectionXCOFF *MCContext::getXCOFFSection( if (IsDwarfSec) Result = new (XCOFFAllocator.Allocate()) MCSectionXCOFF(QualName->getUnqualifiedName(), Kind, QualName, - DwarfSectionSubtypeFlags.getValue(), Begin, CachedName, + DwarfSectionSubtypeFlags.value(), Begin, CachedName, MultiSymbolsAllowed); else Result = new (XCOFFAllocator.Allocate()) diff --git a/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp b/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp index 0c04118..cf98cb8 100644 --- a/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp +++ b/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp @@ -88,8 +88,8 @@ bool XCOFFSymbolInfo::operator<(const XCOFFSymbolInfo &SymInfo) const { return SymInfo.StorageMappingClass.has_value(); if (StorageMappingClass) { - return getSMCPriority(StorageMappingClass.getValue()) < - getSMCPriority(SymInfo.StorageMappingClass.getValue()); + return getSMCPriority(StorageMappingClass.value()) < + getSMCPriority(SymInfo.StorageMappingClass.value()); } return false; diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp index 424e9db..694ea39 100644 --- a/llvm/lib/MC/MCParser/MasmParser.cpp +++ b/llvm/lib/MC/MCParser/MasmParser.cpp @@ -4213,7 +4213,7 @@ bool MasmParser::parseStructInitializer(const StructInfo &Structure, size_t FieldIndex = 0; if (EndToken) { // Initialize all fields with given initializers. - while (getTok().isNot(EndToken.getValue()) && + while (getTok().isNot(EndToken.value()) && FieldIndex < Structure.Fields.size()) { const FieldInfo &Field = Structure.Fields[FieldIndex++]; if (parseOptionalToken(AsmToken::Comma)) { @@ -4245,10 +4245,10 @@ bool MasmParser::parseStructInitializer(const StructInfo &Structure, } if (EndToken) { - if (EndToken.getValue() == AsmToken::Greater) + if (EndToken.value() == AsmToken::Greater) return parseAngleBracketClose(); - return parseToken(EndToken.getValue()); + return parseToken(EndToken.value()); } return false; diff --git a/llvm/lib/MC/MCSchedule.cpp b/llvm/lib/MC/MCSchedule.cpp index 98eb7ea..71c8e6f 100644 --- a/llvm/lib/MC/MCSchedule.cpp +++ b/llvm/lib/MC/MCSchedule.cpp @@ -96,10 +96,10 @@ MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI, continue; unsigned NumUnits = SM.getProcResource(I->ProcResourceIdx)->NumUnits; double Temp = NumUnits * 1.0 / I->Cycles; - Throughput = Throughput ? std::min(Throughput.getValue(), Temp) : Temp; + Throughput = Throughput ? std::min(Throughput.value(), Temp) : Temp; } if (Throughput) - return 1.0 / Throughput.getValue(); + return 1.0 / Throughput.value(); // If no throughput value was calculated, assume that we can execute at the // maximum issue width scaled by number of micro-ops for the schedule class. @@ -140,10 +140,10 @@ MCSchedModel::getReciprocalThroughput(unsigned SchedClass, if (!I->getCycles()) continue; double Temp = countPopulation(I->getUnits()) * 1.0 / I->getCycles(); - Throughput = Throughput ? std::min(Throughput.getValue(), Temp) : Temp; + Throughput = Throughput ? 
std::min(Throughput.value(), Temp) : Temp; } if (Throughput) - return 1.0 / Throughput.getValue(); + return 1.0 / Throughput.value(); // If there are no execution resources specified for this class, then assume // that it can execute at the maximum default issue width. diff --git a/llvm/lib/MC/MCSectionXCOFF.cpp b/llvm/lib/MC/MCSectionXCOFF.cpp index ee8fa04..9a35ac6 100644 --- a/llvm/lib/MC/MCSectionXCOFF.cpp +++ b/llvm/lib/MC/MCSectionXCOFF.cpp @@ -110,8 +110,8 @@ void MCSectionXCOFF::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T, // XCOFF debug sections. if (getKind().isMetadata() && isDwarfSect()) { - OS << "\n\t.dwsect " - << format("0x%" PRIx32, getDwarfSubtypeFlags().getValue()) << '\n'; + OS << "\n\t.dwsect " << format("0x%" PRIx32, getDwarfSubtypeFlags().value()) + << '\n'; OS << MAI.getPrivateLabelPrefix() << getName() << ':' << '\n'; return; } diff --git a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp index 36e0d50..781be3d 100644 --- a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp +++ b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp @@ -600,8 +600,8 @@ handleUserSection(const NewSectionInfo &NewSection, static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, Object &Obj) { if (Config.OutputArch) { - Obj.Machine = Config.OutputArch.getValue().EMachine; - Obj.OSABI = Config.OutputArch.getValue().OSABI; + Obj.Machine = Config.OutputArch.value().EMachine; + Obj.OSABI = Config.OutputArch.value().OSABI; } if (!Config.SplitDWO.empty() && Config.ExtractDWO) { @@ -699,7 +699,7 @@ static Error handleArgs(const CommonConfig &Config, const ELFConfig &ELFConfig, const SectionRename &SR = Iter->second; Sec.Name = std::string(SR.NewName); if (SR.NewFlags) - setSectionFlagsAndType(Sec, SR.NewFlags.getValue()); + setSectionFlagsAndType(Sec, SR.NewFlags.value()); RenamedSections.insert(&Sec); } else if (RelocSec && !(Sec.Flags & SHF_ALLOC)) // Postpone processing relocation sections which are not specified in @@ -811,7 +811,7 @@ Error objcopy::elf::executeObjcopyOnBinary(const CommonConfig &Config, return Obj.takeError(); // Prefer OutputArch (-O<format>) if set, otherwise infer it from the input. const ElfType OutputElfType = - Config.OutputArch ? getOutputElfType(Config.OutputArch.getValue()) + Config.OutputArch ? 
getOutputElfType(Config.OutputArch.value()) : getOutputElfType(In); if (Error E = handleArgs(Config, ELFConfig, **Obj)) diff --git a/llvm/lib/Object/ELFObjectFile.cpp b/llvm/lib/Object/ELFObjectFile.cpp index 38de669..1f342e5 100644 --- a/llvm/lib/Object/ELFObjectFile.cpp +++ b/llvm/lib/Object/ELFObjectFile.cpp @@ -168,11 +168,11 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const { Optional<unsigned> Attr = Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch); if (Attr) - isV7 = Attr.getValue() == ARMBuildAttrs::v7; + isV7 = Attr.value() == ARMBuildAttrs::v7; Attr = Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch_profile); if (Attr) { - switch (Attr.getValue()) { + switch (Attr.value()) { case ARMBuildAttrs::ApplicationProfile: Features.AddFeature("aclass"); break; @@ -191,7 +191,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const { Attr = Attributes.getAttributeValue(ARMBuildAttrs::THUMB_ISA_use); if (Attr) { - switch (Attr.getValue()) { + switch (Attr.value()) { default: break; case ARMBuildAttrs::Not_Allowed: @@ -206,7 +206,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const { Attr = Attributes.getAttributeValue(ARMBuildAttrs::FP_arch); if (Attr) { - switch (Attr.getValue()) { + switch (Attr.value()) { default: break; case ARMBuildAttrs::Not_Allowed: @@ -230,7 +230,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const { Attr = Attributes.getAttributeValue(ARMBuildAttrs::Advanced_SIMD_arch); if (Attr) { - switch (Attr.getValue()) { + switch (Attr.value()) { default: break; case ARMBuildAttrs::Not_Allowed: @@ -249,7 +249,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const { Attr = Attributes.getAttributeValue(ARMBuildAttrs::MVE_arch); if (Attr) { - switch (Attr.getValue()) { + switch (Attr.value()) { default: break; case ARMBuildAttrs::Not_Allowed: @@ -268,7 +268,7 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const { Attr = Attributes.getAttributeValue(ARMBuildAttrs::DIV_use); if (Attr) { - switch (Attr.getValue()) { + switch (Attr.value()) { default: break; case ARMBuildAttrs::DisallowDIV: @@ -524,7 +524,7 @@ void ELFObjectFileBase::setARMSubArch(Triple &TheTriple) const { Optional<unsigned> Attr = Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch); if (Attr) { - switch (Attr.getValue()) { + switch (Attr.value()) { case ARMBuildAttrs::v4: Triple += "v4"; break; @@ -556,7 +556,7 @@ void ELFObjectFileBase::setARMSubArch(Triple &TheTriple) const { Optional<unsigned> ArchProfileAttr = Attributes.getAttributeValue(ARMBuildAttrs::CPU_arch_profile); if (ArchProfileAttr && - ArchProfileAttr.getValue() == ARMBuildAttrs::MicroControllerProfile) + ArchProfileAttr.value() == ARMBuildAttrs::MicroControllerProfile) Triple += "v7m"; else Triple += "v7"; diff --git a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp index 9834b03..60870bb 100644 --- a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp +++ b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp @@ -133,17 +133,17 @@ void DXContainerWriter::writeParts(raw_ostream &OS) { // Compute the optional fields if needed... if (P.Program->DXILOffset) - Header.Bitcode.Offset = P.Program->DXILOffset.getValue(); + Header.Bitcode.Offset = P.Program->DXILOffset.value(); else Header.Bitcode.Offset = sizeof(dxbc::BitcodeHeader); if (P.Program->DXILSize) - Header.Bitcode.Size = P.Program->DXILSize.getValue(); + Header.Bitcode.Size = P.Program->DXILSize.value(); else Header.Bitcode.Size = P.Program->DXIL ? 
P.Program->DXIL->size() : 0; if (P.Program->Size) - Header.Size = P.Program->Size.getValue(); + Header.Size = P.Program->Size.value(); else Header.Size = sizeof(dxbc::ProgramHeader) + Header.Bitcode.Size; diff --git a/llvm/lib/Support/Process.cpp b/llvm/lib/Support/Process.cpp index cf3962a..5476bec 100644 --- a/llvm/lib/Support/Process.cpp +++ b/llvm/lib/Support/Process.cpp @@ -47,7 +47,7 @@ Optional<std::string> Process::FindInEnvPath(StringRef EnvName, const char EnvPathSeparatorStr[] = {Separator, '\0'}; SmallVector<StringRef, 8> Dirs; - SplitString(OptPath.getValue(), Dirs, EnvPathSeparatorStr); + SplitString(OptPath.value(), Dirs, EnvPathSeparatorStr); for (StringRef Dir : Dirs) { if (Dir.empty()) diff --git a/llvm/lib/Support/VirtualFileSystem.cpp b/llvm/lib/Support/VirtualFileSystem.cpp index 21f0c39..97d63ff 100644 --- a/llvm/lib/Support/VirtualFileSystem.cpp +++ b/llvm/lib/Support/VirtualFileSystem.cpp @@ -2669,13 +2669,13 @@ void JSONWriter::write(ArrayRef<YAMLVFSEntry> Entries, " 'version': 0,\n"; if (IsCaseSensitive) OS << " 'case-sensitive': '" - << (IsCaseSensitive.getValue() ? "true" : "false") << "',\n"; + << (IsCaseSensitive.value() ? "true" : "false") << "',\n"; if (UseExternalNames) OS << " 'use-external-names': '" - << (UseExternalNames.getValue() ? "true" : "false") << "',\n"; + << (UseExternalNames.value() ? "true" : "false") << "',\n"; bool UseOverlayRelative = false; if (IsOverlayRelative) { - UseOverlayRelative = IsOverlayRelative.getValue(); + UseOverlayRelative = IsOverlayRelative.value(); OS << " 'overlay-relative': '" << (UseOverlayRelative ? "true" : "false") << "',\n"; } diff --git a/llvm/lib/Support/raw_ostream.cpp b/llvm/lib/Support/raw_ostream.cpp index 98ceea3..651949a 100644 --- a/llvm/lib/Support/raw_ostream.cpp +++ b/llvm/lib/Support/raw_ostream.cpp @@ -429,7 +429,7 @@ raw_ostream &raw_ostream::operator<<(const FormattedBytes &FB) { indent(FB.IndentLevel); if (FB.FirstByteOffset) { - uint64_t Offset = FB.FirstByteOffset.getValue(); + uint64_t Offset = FB.FirstByteOffset.value(); llvm::write_hex(*this, Offset + LineIndex, HPS, OffsetWidth); *this << ": "; } diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp index 6c20510..75a99e9 100644 --- a/llvm/lib/TableGen/Record.cpp +++ b/llvm/lib/TableGen/Record.cpp @@ -2601,7 +2601,7 @@ StringRef Record::getValueAsString(StringRef FieldName) const { if (!S) PrintFatalError(getLoc(), "Record `" + getName() + "' does not have a field named `" + FieldName + "'!\n"); - return S.getValue(); + return S.value(); } llvm::Optional<StringRef> diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp index 5b6fcb7..70fae9d 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -1180,7 +1180,7 @@ bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const { getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI); if (Arg) { - const int64_t Value = Arg.getValue().Value.getSExtValue(); + const int64_t Value = Arg.value().Value.getSExtValue(); if (Value == 0) { unsigned Opcode = Is64 ? 
AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0); @@ -4240,7 +4240,7 @@ AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const { }, [=](MachineInstrBuilder &MIB) { // vaddr if (FI) - MIB.addFrameIndex(FI.getValue()); + MIB.addFrameIndex(FI.value()); else MIB.addReg(VAddr); }, diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h index 1b513c4..745734a 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h @@ -131,8 +131,8 @@ public: bool IsAOneAddressSpace = isOneAddressSpace(A); bool IsBOneAddressSpace = isOneAddressSpace(B); - return AIO.getValue() >= BIO.getValue() && - (IsAOneAddressSpace == IsBOneAddressSpace || !IsAOneAddressSpace); + return AIO.value() >= BIO.value() && + (IsAOneAddressSpace == IsBOneAddressSpace || !IsAOneAddressSpace); } }; diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp index 8a66213..6b937699 100644 --- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp +++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp @@ -2329,13 +2329,13 @@ bool SIMemoryLegalizer::runOnMachineFunction(MachineFunction &MF) { continue; if (const auto &MOI = MOA.getLoadInfo(MI)) - Changed |= expandLoad(MOI.getValue(), MI); + Changed |= expandLoad(MOI.value(), MI); else if (const auto &MOI = MOA.getStoreInfo(MI)) - Changed |= expandStore(MOI.getValue(), MI); + Changed |= expandStore(MOI.value(), MI); else if (const auto &MOI = MOA.getAtomicFenceInfo(MI)) - Changed |= expandAtomicFence(MOI.getValue(), MI); + Changed |= expandAtomicFence(MOI.value(), MI); else if (const auto &MOI = MOA.getAtomicCmpxchgOrRmwInfo(MI)) - Changed |= expandAtomicCmpxchgOrRmw(MOI.getValue(), MI); + Changed |= expandAtomicCmpxchgOrRmw(MOI.value(), MI); } } diff --git a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp index 3078534..2968010 100644 --- a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp +++ b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp @@ -351,13 +351,13 @@ Optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) { if (!Op0 || !Op1) return Optional<int64_t>{}; if (I->getOpcode() == Instruction::Add) - return Optional<int64_t>{Op0.getValue() + Op1.getValue()}; + return Optional<int64_t>{Op0.value() + Op1.value()}; if (I->getOpcode() == Instruction::Mul) - return Optional<int64_t>{Op0.getValue() * Op1.getValue()}; + return Optional<int64_t>{Op0.value() * Op1.value()}; if (I->getOpcode() == Instruction::Shl) - return Optional<int64_t>{Op0.getValue() << Op1.getValue()}; + return Optional<int64_t>{Op0.value() << Op1.value()}; if (I->getOpcode() == Instruction::Or) - return Optional<int64_t>{Op0.getValue() | Op1.getValue()}; + return Optional<int64_t>{Op0.value() | Op1.value()}; } return Optional<int64_t>{}; } diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp index 0b4a95b..0150110 100644 --- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -1024,7 +1024,7 @@ void HexagonFrameLowering::insertCFIInstructions(MachineFunction &MF) const { for (auto &B : MF) { auto At = findCFILocation(B); if (At) - insertCFIInstructionsAt(B, At.getValue()); + insertCFIInstructionsAt(B, At.value()); } } diff --git a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp 
b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp index d715ba9..33e7068 100644 --- a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp +++ b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp @@ -705,14 +705,14 @@ LanaiAsmParser::parseRegister(bool RestoreOnFailure) { RegNum = MatchRegisterName(Lexer.getTok().getIdentifier()); if (RegNum == 0) { if (PercentTok && RestoreOnFailure) - Lexer.UnLex(PercentTok.getValue()); + Lexer.UnLex(PercentTok.value()); return nullptr; } Parser.Lex(); // Eat identifier token return LanaiOperand::createReg(RegNum, Start, End); } if (PercentTok && RestoreOnFailure) - Lexer.UnLex(PercentTok.getValue()); + Lexer.UnLex(PercentTok.value()); return nullptr; } diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 746f652..6ad016d 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -1861,7 +1861,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, InFlag = Ret.getValue(2); if (ProxyRegTruncates[i]) { - Ret = DAG.getNode(ISD::TRUNCATE, dl, ProxyRegTruncates[i].getValue(), Ret); + Ret = DAG.getNode(ISD::TRUNCATE, dl, ProxyRegTruncates[i].value(), Ret); } InVals.push_back(Ret); diff --git a/llvm/lib/Target/VE/VVPISelLowering.cpp b/llvm/lib/Target/VE/VVPISelLowering.cpp index 330eef4..f88f298 100644 --- a/llvm/lib/Target/VE/VVPISelLowering.cpp +++ b/llvm/lib/Target/VE/VVPISelLowering.cpp @@ -41,7 +41,7 @@ SDValue VETargetLowering::lowerToVVP(SDValue Op, SelectionDAG &DAG) const { auto VVPOpcodeOpt = getVVPOpcode(Opcode); if (!VVPOpcodeOpt) return SDValue(); - unsigned VVPOpcode = VVPOpcodeOpt.getValue(); + unsigned VVPOpcode = VVPOpcodeOpt.value(); const bool FromVP = ISD::isVPOpcode(Opcode); // The representative and legalized vector type of this operation. diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp index ec72c1d..d31715e 100644 --- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp +++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp @@ -87,15 +87,14 @@ bool WebAssemblyAsmTypeCheck::popType(SMLoc ErrorLoc, if (Stack.empty()) { return typeError(ErrorLoc, EVT ? 
StringRef("empty stack while popping ") + - WebAssembly::typeToString(EVT.getValue()) + WebAssembly::typeToString(EVT.value()) : StringRef("empty stack while popping value")); } auto PVT = Stack.pop_back_val(); - if (EVT && EVT.getValue() != PVT) { + if (EVT && EVT.value() != PVT) { return typeError( ErrorLoc, StringRef("popped ") + WebAssembly::typeToString(PVT) + - ", expected " + - WebAssembly::typeToString(EVT.getValue())); + ", expected " + WebAssembly::typeToString(EVT.value())); } return false; } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp index 2db4bd8..7a1a769 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp @@ -553,7 +553,7 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallBase *CI) { std::tie(SizeArg, NEltArg) = FnAttrs.getAllocSizeArgs(); SizeArg += 1; if (NEltArg) - NEltArg = NEltArg.getValue() + 1; + NEltArg = NEltArg.value() + 1; FnAttrs.addAllocSizeAttr(SizeArg, NEltArg); } // In case the callee has 'noreturn' attribute, We need to remove it, because diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp index b05b799..e5ff98e 100644 --- a/llvm/lib/Transforms/IPO/Attributor.cpp +++ b/llvm/lib/Transforms/IPO/Attributor.cpp @@ -718,8 +718,8 @@ Argument *IRPosition::getAssociatedArgument() const { } // If we found a unique callback candidate argument, return it. - if (CBCandidateArg && CBCandidateArg.getValue()) - return CBCandidateArg.getValue(); + if (CBCandidateArg && CBCandidateArg.value()) + return CBCandidateArg.value(); // If no callbacks were found, or none used the underlying call site operand // exclusively, use the direct callee argument if available. 
@@ -1048,11 +1048,11 @@ Attributor::getAssumedConstant(const IRPosition &IRP, recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL); return llvm::None; } - if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue())) { + if (isa_and_nonnull<UndefValue>(SimplifiedV.value())) { recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL); return UndefValue::get(IRP.getAssociatedType()); } - Constant *CI = dyn_cast_or_null<Constant>(SimplifiedV.getValue()); + Constant *CI = dyn_cast_or_null<Constant>(SimplifiedV.value()); if (CI) CI = dyn_cast_or_null<Constant>( AA::getWithType(*CI, *IRP.getAssociatedType())); @@ -2697,8 +2697,8 @@ void InformationCache::initializeInformationCache(const Function &CF, Optional<short> &NumUses = AssumeUsesMap[I]; if (!NumUses) NumUses = I->getNumUses(); - NumUses = NumUses.getValue() - /* this assume */ 1; - if (NumUses.getValue() != 0) + NumUses = NumUses.value() - /* this assume */ 1; + if (NumUses.value() != 0) continue; AssumeOnlyValues.insert(I); for (const Value *Op : I->operands()) diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp index d1d488f..1ff54b7 100644 --- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp +++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp @@ -437,7 +437,7 @@ static bool genericValueTraversal( A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation); if (!SimpleV) continue; - Value *NewV = SimpleV.getValue(); + Value *NewV = SimpleV.value(); if (NewV && NewV != V) { if ((VS & AA::Interprocedural) || !CtxI || AA::isValidInScope(*NewV, CtxI->getFunction())) { @@ -1891,14 +1891,14 @@ ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) { // Check if we have an assumed unique return value that we could manifest. Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A); - if (!UniqueRV || !UniqueRV.getValue()) + if (!UniqueRV || !UniqueRV.value()) return Changed; // Bookkeeping. STATS_DECLTRACK(UniqueReturnValue, FunctionReturn, "Number of function with unique return"); // If the assumed unique return value is an argument, annotate it. - if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) { + if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.value())) { if (UniqueRVArg->getType()->canLosslesslyBitCastTo( getAssociatedFunction()->getReturnType())) { getIRPosition() = IRPosition::argument(*UniqueRVArg); @@ -2666,9 +2666,9 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior { // Either we stopped and the appropriate action was taken, // or we got back a simplified value to continue. Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I); - if (!SimplifiedPtrOp || !SimplifiedPtrOp.getValue()) + if (!SimplifiedPtrOp || !SimplifiedPtrOp.value()) return true; - const Value *PtrOpVal = SimplifiedPtrOp.getValue(); + const Value *PtrOpVal = SimplifiedPtrOp.value(); // A memory access through a pointer is considered UB // only if the pointer has constant null value. 
@@ -2757,14 +2757,14 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior { IRPosition::value(*ArgVal), *this, UsedAssumedInformation); if (UsedAssumedInformation) continue; - if (SimplifiedVal && !SimplifiedVal.getValue()) + if (SimplifiedVal && !SimplifiedVal.value()) return true; - if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.getValue())) { + if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.value())) { KnownUBInsts.insert(&I); continue; } if (!ArgVal->getType()->isPointerTy() || - !isa<ConstantPointerNull>(*SimplifiedVal.getValue())) + !isa<ConstantPointerNull>(*SimplifiedVal.value())) continue; auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE); @@ -4101,11 +4101,11 @@ identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, bool UsedAssumedInformation = false; Optional<Constant *> C = A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation); - if (!C || isa_and_nonnull<UndefValue>(C.getValue())) { + if (!C || isa_and_nonnull<UndefValue>(C.value())) { // No value yet, assume all edges are dead. - } else if (isa_and_nonnull<ConstantInt>(C.getValue())) { + } else if (isa_and_nonnull<ConstantInt>(C.value())) { for (auto &CaseIt : SI.cases()) { - if (CaseIt.getCaseValue() == C.getValue()) { + if (CaseIt.getCaseValue() == C.value()) { AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); return UsedAssumedInformation; } @@ -5523,8 +5523,8 @@ struct AAValueSimplifyImpl : AAValueSimplify { if (!SimpleV) return PoisonValue::get(&Ty); Value *EffectiveV = &V; - if (SimpleV.getValue()) - EffectiveV = SimpleV.getValue(); + if (SimpleV.value()) + EffectiveV = SimpleV.value(); if (auto *C = dyn_cast<Constant>(EffectiveV)) return C; if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI), @@ -5540,7 +5540,7 @@ struct AAValueSimplifyImpl : AAValueSimplify { /// nullptr if we don't have one that makes sense. Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const { Value *NewV = SimplifiedAssociatedValue - ? SimplifiedAssociatedValue.getValue() + ? 
SimplifiedAssociatedValue.value() : UndefValue::get(getAssociatedType()); if (NewV && NewV != &getAssociatedValue()) { ValueToValueMapTy VMap; @@ -5671,7 +5671,7 @@ struct AAValueSimplifyArgument final : AAValueSimplifyImpl { A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation); if (!SimpleArgOp) return true; - if (!SimpleArgOp.getValue()) + if (!SimpleArgOp.value()) return false; if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp)) return false; @@ -5786,7 +5786,7 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl { *this, UsedAssumedInformation); if (!SimplifiedLHS) return true; - if (!SimplifiedLHS.getValue()) + if (!SimplifiedLHS.value()) return false; LHS = *SimplifiedLHS; @@ -5795,7 +5795,7 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl { *this, UsedAssumedInformation); if (!SimplifiedRHS) return true; - if (!SimplifiedRHS.getValue()) + if (!SimplifiedRHS.value()) return false; RHS = *SimplifiedRHS; @@ -5867,8 +5867,8 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl { if (!SimplifiedOp) return true; - if (SimplifiedOp.getValue()) - NewOps[Idx] = SimplifiedOp.getValue(); + if (SimplifiedOp.value()) + NewOps[Idx] = SimplifiedOp.value(); else NewOps[Idx] = Op; @@ -6294,10 +6294,10 @@ struct AAHeapToStackFunction final : public AAHeapToStack { Alignment = std::max(Alignment, *RetAlign); if (Value *Align = getAllocAlignment(AI.CB, TLI)) { Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align); - assert(AlignmentAPI && AlignmentAPI.getValue().getZExtValue() > 0 && + assert(AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 && "Expected an alignment during manifest!"); Alignment = std::max( - Alignment, assumeAligned(AlignmentAPI.getValue().getZExtValue())); + Alignment, assumeAligned(AlignmentAPI.value().getZExtValue())); } // TODO: Hoist the alloca towards the function entry. 
@@ -6346,7 +6346,7 @@ struct AAHeapToStackFunction final : public AAHeapToStack { A.getAssumedConstant(V, AA, UsedAssumedInformation); if (!SimpleV) return APInt(64, 0); - if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue())) + if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.value())) return CI->getValue(); return llvm::None; } @@ -6637,7 +6637,7 @@ ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) { Optional<APInt> Size = getSize(A, *this, AI); if (MaxHeapToStackSize != -1) { - if (!Size || Size.getValue().ugt(MaxHeapToStackSize)) { + if (!Size || Size.value().ugt(MaxHeapToStackSize)) { LLVM_DEBUG({ if (!Size) dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; @@ -6759,8 +6759,8 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { LLVM_DEBUG({ dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; - if (CSTy && CSTy.getValue()) - CSTy.getValue()->print(dbgs()); + if (CSTy && CSTy.value()) + CSTy.value()->print(dbgs()); else if (CSTy) dbgs() << "<nullptr>"; else @@ -6771,8 +6771,8 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { LLVM_DEBUG({ dbgs() << " : New Type: "; - if (Ty && Ty.getValue()) - Ty.getValue()->print(dbgs()); + if (Ty && Ty.value()) + Ty.value()->print(dbgs()); else if (Ty) dbgs() << "<nullptr>"; else @@ -6780,7 +6780,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { dbgs() << "\n"; }); - return !Ty || Ty.getValue(); + return !Ty || Ty.value(); }; if (!A.checkForAllCallSites(CallSiteCheck, *this, true, @@ -6794,7 +6794,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { PrivatizableType = identifyPrivatizableType(A); if (!PrivatizableType) return ChangeStatus::UNCHANGED; - if (!PrivatizableType.getValue()) + if (!PrivatizableType.value()) return indicatePessimisticFixpoint(); // The dependence is optional so we don't give up once we give up on the @@ -6882,7 +6882,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType(); if (!CBArgPrivTy) continue; - if (CBArgPrivTy.getValue() == PrivatizableType) + if (CBArgPrivTy.value() == PrivatizableType) continue; } @@ -6929,7 +6929,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType(); if (!DCArgPrivTy) return true; - if (DCArgPrivTy.getValue() == PrivatizableType) + if (DCArgPrivTy.value() == PrivatizableType) return true; } } @@ -7071,7 +7071,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { ChangeStatus manifest(Attributor &A) override { if (!PrivatizableType) return ChangeStatus::UNCHANGED; - assert(PrivatizableType.getValue() && "Expected privatizable type!"); + assert(PrivatizableType.value() && "Expected privatizable type!"); // Collect all tail calls in the function as we cannot allow new allocas to // escape into tail recursion. 
@@ -7104,9 +7104,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { Instruction *IP = &*EntryBB.getFirstInsertionPt(); const DataLayout &DL = IP->getModule()->getDataLayout(); unsigned AS = DL.getAllocaAddrSpace(); - Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS, + Instruction *AI = new AllocaInst(PrivatizableType.value(), AS, Arg->getName() + ".priv", IP); - createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, + createInitialization(PrivatizableType.value(), *AI, ReplacementFn, ArgIt->getArgNo(), *IP); if (AI->getType() != Arg->getType()) @@ -7214,7 +7214,7 @@ struct AAPrivatizablePtrCallSiteArgument final PrivatizableType = identifyPrivatizableType(A); if (!PrivatizableType) return ChangeStatus::UNCHANGED; - if (!PrivatizableType.getValue()) + if (!PrivatizableType.value()) return indicatePessimisticFixpoint(); const IRPosition &IRP = getIRPosition(); @@ -8675,7 +8675,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { *this, UsedAssumedInformation); if (!SimplifiedLHS) return true; - if (!SimplifiedLHS.getValue()) + if (!SimplifiedLHS.value()) return false; LHS = *SimplifiedLHS; @@ -8684,7 +8684,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { *this, UsedAssumedInformation); if (!SimplifiedRHS) return true; - if (!SimplifiedRHS.getValue()) + if (!SimplifiedRHS.value()) return false; RHS = *SimplifiedRHS; @@ -8728,7 +8728,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { *this, UsedAssumedInformation); if (!SimplifiedOpV) return true; - if (!SimplifiedOpV.getValue()) + if (!SimplifiedOpV.value()) return false; OpV = *SimplifiedOpV; @@ -8758,7 +8758,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { *this, UsedAssumedInformation); if (!SimplifiedLHS) return true; - if (!SimplifiedLHS.getValue()) + if (!SimplifiedLHS.value()) return false; LHS = *SimplifiedLHS; @@ -8767,7 +8767,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { *this, UsedAssumedInformation); if (!SimplifiedRHS) return true; - if (!SimplifiedRHS.getValue()) + if (!SimplifiedRHS.value()) return false; RHS = *SimplifiedRHS; @@ -8832,7 +8832,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { *this, UsedAssumedInformation); if (!SimplifiedOpV) return true; - if (!SimplifiedOpV.getValue()) + if (!SimplifiedOpV.value()) return false; Value *VPtr = *SimplifiedOpV; @@ -9193,7 +9193,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { *this, UsedAssumedInformation); if (!SimplifiedLHS) return ChangeStatus::UNCHANGED; - if (!SimplifiedLHS.getValue()) + if (!SimplifiedLHS.value()) return indicatePessimisticFixpoint(); LHS = *SimplifiedLHS; @@ -9202,7 +9202,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { *this, UsedAssumedInformation); if (!SimplifiedRHS) return ChangeStatus::UNCHANGED; - if (!SimplifiedRHS.getValue()) + if (!SimplifiedRHS.value()) return indicatePessimisticFixpoint(); RHS = *SimplifiedRHS; @@ -9276,7 +9276,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { *this, UsedAssumedInformation); if (!SimplifiedLHS) return ChangeStatus::UNCHANGED; - if (!SimplifiedLHS.getValue()) + if (!SimplifiedLHS.value()) return indicatePessimisticFixpoint(); LHS = *SimplifiedLHS; @@ -9285,7 +9285,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { *this, UsedAssumedInformation); if (!SimplifiedRHS) return ChangeStatus::UNCHANGED; - if 
(!SimplifiedRHS.getValue()) + if (!SimplifiedRHS.value()) return indicatePessimisticFixpoint(); RHS = *SimplifiedRHS; @@ -9351,7 +9351,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { *this, UsedAssumedInformation); if (!SimplifiedSrc) return ChangeStatus::UNCHANGED; - if (!SimplifiedSrc.getValue()) + if (!SimplifiedSrc.value()) return indicatePessimisticFixpoint(); Src = *SimplifiedSrc; @@ -9384,7 +9384,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { *this, UsedAssumedInformation); if (!SimplifiedLHS) return ChangeStatus::UNCHANGED; - if (!SimplifiedLHS.getValue()) + if (!SimplifiedLHS.value()) return indicatePessimisticFixpoint(); LHS = *SimplifiedLHS; @@ -9393,7 +9393,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { *this, UsedAssumedInformation); if (!SimplifiedRHS) return ChangeStatus::UNCHANGED; - if (!SimplifiedRHS.getValue()) + if (!SimplifiedRHS.value()) return indicatePessimisticFixpoint(); RHS = *SimplifiedRHS; @@ -9452,7 +9452,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { UsedAssumedInformation); if (!SimplifiedIncomingValue) continue; - if (!SimplifiedIncomingValue.getValue()) + if (!SimplifiedIncomingValue.value()) return indicatePessimisticFixpoint(); IncomingValue = *SimplifiedIncomingValue; @@ -9941,7 +9941,7 @@ private: const Function &Fn) { Optional<bool> Cached = isCachedReachable(Fn); if (Cached) - return Cached.getValue(); + return Cached.value(); // The query was not cached, thus it is new. We need to request an update // explicitly to make sure this the information is properly run to a diff --git a/llvm/lib/Transforms/IPO/IROutliner.cpp b/llvm/lib/Transforms/IPO/IROutliner.cpp index d75d99e..28bc43a 100644 --- a/llvm/lib/Transforms/IPO/IROutliner.cpp +++ b/llvm/lib/Transforms/IPO/IROutliner.cpp @@ -555,7 +555,7 @@ collectRegionsConstants(OutlinableRegion &Region, for (Value *V : ID.OperVals) { Optional<unsigned> GVNOpt = C.getGVN(V); assert(GVNOpt && "Expected a GVN for operand?"); - unsigned GVN = GVNOpt.getValue(); + unsigned GVN = GVNOpt.value(); // Check if this global value has been found to not be the same already. if (NotSame.contains(GVN)) { @@ -570,7 +570,7 @@ collectRegionsConstants(OutlinableRegion &Region, // it is considered to not be the same value. Optional<bool> ConstantMatches = constantMatches(V, GVN, GVNToConstant); if (ConstantMatches) { - if (ConstantMatches.getValue()) + if (ConstantMatches.value()) continue; else ConstantsTheSame = false; @@ -651,7 +651,7 @@ Function *IROutliner::createFunction(Module &M, OutlinableGroup &Group, // Transfer the swifterr attribute to the correct function parameter. 
if (Group.SwiftErrorArgument) - Group.OutlinedFunction->addParamAttr(Group.SwiftErrorArgument.getValue(), + Group.OutlinedFunction->addParamAttr(Group.SwiftErrorArgument.value(), Attribute::SwiftError); Group.OutlinedFunction->addFnAttr(Attribute::OptimizeForSize); @@ -809,7 +809,7 @@ static void mapInputsToGVNs(IRSimilarityCandidate &C, if (OutputMappings.find(Input) != OutputMappings.end()) Input = OutputMappings.find(Input)->second; assert(C.getGVN(Input) && "Could not find a numbering for the given input"); - EndInputNumbers.push_back(C.getGVN(Input).getValue()); + EndInputNumbers.push_back(C.getGVN(Input).value()); } } @@ -948,11 +948,11 @@ findExtractedInputToOverallInputMapping(OutlinableRegion &Region, for (unsigned InputVal : InputGVNs) { Optional<unsigned> CanonicalNumberOpt = C.getCanonicalNum(InputVal); assert(CanonicalNumberOpt && "Canonical number not found?"); - unsigned CanonicalNumber = CanonicalNumberOpt.getValue(); + unsigned CanonicalNumber = CanonicalNumberOpt.value(); Optional<Value *> InputOpt = C.fromGVN(InputVal); assert(InputOpt && "Global value number not found?"); - Value *Input = InputOpt.getValue(); + Value *Input = InputOpt.value(); DenseMap<unsigned, unsigned>::iterator AggArgIt = Group.CanonicalNumberToAggArg.find(CanonicalNumber); @@ -1236,13 +1236,13 @@ static Optional<unsigned> getGVNForPHINode(OutlinableRegion &Region, Optional<unsigned> BBGVN = Cand.getGVN(PHIBB); assert(BBGVN && "Could not find GVN for the incoming block!"); - BBGVN = Cand.getCanonicalNum(BBGVN.getValue()); + BBGVN = Cand.getCanonicalNum(BBGVN.value()); assert(BBGVN && "Could not find canonical number for the incoming block!"); // Create a pair of the exit block canonical value, and the aggregate // argument location, connected to the canonical numbers stored in the // PHINode. PHINodeData TemporaryPair = - std::make_pair(std::make_pair(BBGVN.getValue(), AggArgIdx), PHIGVNs); + std::make_pair(std::make_pair(BBGVN.value(), AggArgIdx), PHIGVNs); hash_code PHINodeDataHash = encodePHINodeData(TemporaryPair); // Look for and create a new entry in our connection between canonical @@ -1516,8 +1516,7 @@ CallInst *replaceCalledFunction(Module &M, OutlinableRegion &Region) { // Make sure that the argument in the new function has the SwiftError // argument. 
if (Group.SwiftErrorArgument) - Call->addParamAttr(Group.SwiftErrorArgument.getValue(), - Attribute::SwiftError); + Call->addParamAttr(Group.SwiftErrorArgument.value(), Attribute::SwiftError); return Call; } @@ -2082,9 +2081,9 @@ static void alignOutputBlockWithAggFunc( if (MatchingBB) { LLVM_DEBUG(dbgs() << "Set output block for region in function" << Region.ExtractedFunction << " to " - << MatchingBB.getValue()); + << MatchingBB.value()); - Region.OutputBlockNum = MatchingBB.getValue(); + Region.OutputBlockNum = MatchingBB.value(); for (std::pair<Value *, BasicBlock *> &VtoBB : OutputBBs) VtoBB.second->eraseFromParent(); return; @@ -2679,15 +2678,14 @@ void IROutliner::updateOutputMapping(OutlinableRegion &Region, if (!OutputIdx) return; - if (OutputMappings.find(Outputs[OutputIdx.getValue()]) == - OutputMappings.end()) { + if (OutputMappings.find(Outputs[OutputIdx.value()]) == OutputMappings.end()) { LLVM_DEBUG(dbgs() << "Mapping extracted output " << *LI << " to " - << *Outputs[OutputIdx.getValue()] << "\n"); - OutputMappings.insert(std::make_pair(LI, Outputs[OutputIdx.getValue()])); + << *Outputs[OutputIdx.value()] << "\n"); + OutputMappings.insert(std::make_pair(LI, Outputs[OutputIdx.value()])); } else { - Value *Orig = OutputMappings.find(Outputs[OutputIdx.getValue()])->second; + Value *Orig = OutputMappings.find(Outputs[OutputIdx.value()])->second; LLVM_DEBUG(dbgs() << "Mapping extracted output " << *Orig << " to " - << *Outputs[OutputIdx.getValue()] << "\n"); + << *Outputs[OutputIdx.value()] << "\n"); OutputMappings.insert(std::make_pair(LI, Orig)); } } diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp index a3bdc23..8e0ca8c 100644 --- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp +++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp @@ -4431,10 +4431,10 @@ struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall { if (!SimplifiedValue) return Str + std::string("none"); - if (!SimplifiedValue.getValue()) + if (!SimplifiedValue.value()) return Str + std::string("nullptr"); - if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.getValue())) + if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.value())) return Str + std::to_string(CI->getSExtValue()); return Str + std::string("unknown"); @@ -4459,7 +4459,7 @@ struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall { [&](const IRPosition &IRP, const AbstractAttribute *AA, bool &UsedAssumedInformation) -> Optional<Value *> { assert((isValidState() || - (SimplifiedValue && SimplifiedValue.getValue() == nullptr)) && + (SimplifiedValue && SimplifiedValue.value() == nullptr)) && "Unexpected invalid state!"); if (!isAtFixpoint()) { diff --git a/llvm/lib/Transforms/IPO/SampleContextTracker.cpp b/llvm/lib/Transforms/IPO/SampleContextTracker.cpp index 6859953..764fd57 100644 --- a/llvm/lib/Transforms/IPO/SampleContextTracker.cpp +++ b/llvm/lib/Transforms/IPO/SampleContextTracker.cpp @@ -130,7 +130,7 @@ void ContextTrieNode::addFunctionSize(uint32_t FSize) { if (!FuncSize) FuncSize = 0; - FuncSize = FuncSize.getValue() + FSize; + FuncSize = FuncSize.value() + FSize; } LineLocation ContextTrieNode::getCallSiteLoc() const { return CallSiteLoc; } diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp index 40de69b..55fee21 100644 --- a/llvm/lib/Transforms/IPO/SampleProfile.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp @@ -1350,14 +1350,14 @@ SampleProfileLoader::getExternalInlineAdvisorCost(CallBase &CB) { bool 
SampleProfileLoader::getExternalInlineAdvisorShouldInline(CallBase &CB) { Optional<InlineCost> Cost = getExternalInlineAdvisorCost(CB); - return Cost ? !!Cost.getValue() : false; + return Cost ? !!Cost.value() : false; } InlineCost SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) { if (Optional<InlineCost> ReplayCost = getExternalInlineAdvisorCost(*Candidate.CallInstr)) - return ReplayCost.getValue(); + return ReplayCost.value(); // Adjust threshold based on call site hotness, only do this for callsite // prioritized inliner because otherwise cost-benefit check is done earlier. int SampleThreshold = SampleColdCallSiteThreshold; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 9f4f17b..edfdf70 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -2682,7 +2682,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { // Handle target specific intrinsics Optional<Instruction *> V = targetInstCombineIntrinsic(*II); if (V) - return V.getValue(); + return V.value(); break; } } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp index 9d4c01a..febd0f5 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp @@ -925,7 +925,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, Optional<Value *> V = targetSimplifyDemandedUseBitsIntrinsic( *II, DemandedMask, Known, KnownBitsComputed); if (V) - return V.getValue(); + return V.value(); break; } } @@ -1636,7 +1636,7 @@ Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V, *II, DemandedElts, UndefElts, UndefElts2, UndefElts3, simplifyAndSetOp); if (V) - return V.getValue(); + return V.value(); break; } } // switch on IntrinsicID diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index c33b1b3..d4aa31d 100644 --- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -486,7 +486,7 @@ static bool isTsanAtomic(const Instruction *I) { if (!SSID) return false; if (isa<LoadInst>(I) || isa<StoreInst>(I)) - return SSID.getValue() != SyncScope::SingleThread; + return SSID.value() != SyncScope::SingleThread; return true; } diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp index 8a17615..fe6f948 100644 --- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp +++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp @@ -611,9 +611,9 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S, ConstCand->ConstInt->getValue()); if (Diff) { const InstructionCost ImmCosts = - TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, Diff.getValue(), Ty); + TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, Diff.value(), Ty); Cost -= ImmCosts; - LLVM_DEBUG(dbgs() << "Offset " << Diff.getValue() << " " + LLVM_DEBUG(dbgs() << "Offset " << Diff.value() << " " << "has penalty: " << ImmCosts << "\n" << "Adjusted cost: " << Cost << "\n"); } diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp index a5a33d9..b460637 100644 --- a/llvm/lib/Transforms/Scalar/GVN.cpp +++ b/llvm/lib/Transforms/Scalar/GVN.cpp @@ -748,14 +748,14 @@ void GVNPass::printPipeline( OS << 
"<"; if (Options.AllowPRE != None) - OS << (Options.AllowPRE.getValue() ? "" : "no-") << "pre;"; + OS << (Options.AllowPRE.value() ? "" : "no-") << "pre;"; if (Options.AllowLoadPRE != None) - OS << (Options.AllowLoadPRE.getValue() ? "" : "no-") << "load-pre;"; + OS << (Options.AllowLoadPRE.value() ? "" : "no-") << "load-pre;"; if (Options.AllowLoadPRESplitBackedge != None) - OS << (Options.AllowLoadPRESplitBackedge.getValue() ? "" : "no-") + OS << (Options.AllowLoadPRESplitBackedge.value() ? "" : "no-") << "split-backedge-load-pre;"; if (Options.AllowMemDep != None) - OS << (Options.AllowMemDep.getValue() ? "" : "no-") << "memdep"; + OS << (Options.AllowMemDep.value() ? "" : "no-") << "memdep"; OS << ">"; } diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp index 799669a..b54cf5e 100644 --- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp +++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp @@ -1710,7 +1710,7 @@ IntersectSignedRange(ScalarEvolution &SE, return None; if (!R1) return R2; - auto &R1Value = R1.getValue(); + auto &R1Value = R1.value(); // We never return empty ranges from this function, and R1 is supposed to be // a result of intersection. Thus, R1 is never empty. assert(!R1Value.isEmpty(SE, /* IsSigned */ true) && @@ -1739,7 +1739,7 @@ IntersectUnsignedRange(ScalarEvolution &SE, return None; if (!R1) return R2; - auto &R1Value = R1.getValue(); + auto &R1Value = R1.value(); // We never return empty ranges from this function, and R1 is supposed to be // a result of intersection. Thus, R1 is never empty. assert(!R1Value.isEmpty(SE, /* IsSigned */ false) && @@ -1950,13 +1950,12 @@ bool InductiveRangeCheckElimination::run( LS.IsSignedPredicate); if (Result) { auto MaybeSafeIterRange = - IntersectRange(SE, SafeIterRange, Result.getValue()); + IntersectRange(SE, SafeIterRange, Result.value()); if (MaybeSafeIterRange) { - assert( - !MaybeSafeIterRange.getValue().isEmpty(SE, LS.IsSignedPredicate) && - "We should never return empty ranges!"); + assert(!MaybeSafeIterRange.value().isEmpty(SE, LS.IsSignedPredicate) && + "We should never return empty ranges!"); RangeChecksToEliminate.push_back(IRC); - SafeIterRange = MaybeSafeIterRange.getValue(); + SafeIterRange = MaybeSafeIterRange.value(); } } } @@ -1964,8 +1963,7 @@ bool InductiveRangeCheckElimination::run( if (!SafeIterRange) return false; - LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT, - SafeIterRange.getValue()); + LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT, SafeIterRange.value()); bool Changed = LC.run(); if (Changed) { diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp index 03a10cb..b178bca 100644 --- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp +++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp @@ -602,7 +602,7 @@ private: : LLVMLoopDistributeFollowupCoincident}); if (PartitionID) { Loop *NewLoop = Part->getDistributedLoop(); - NewLoop->setLoopID(PartitionID.getValue()); + NewLoop->setLoopID(PartitionID.value()); } } }; @@ -826,7 +826,7 @@ public: {LLVMLoopDistributeFollowupAll, LLVMLoopDistributeFollowupFallback}, "llvm.loop.distribute.", true) - .getValue(); + .value(); LVer.getNonVersionedLoop()->setLoopID(UnversionedLoopID); } diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp index 88d6a7aff..d908c15 100644 --- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ 
b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -1483,7 +1483,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad( // anything where the alignment isn't at least the element size. assert((StoreAlign && LoadAlign) && "Expect unordered load/store to have align."); - if (StoreAlign.getValue() < StoreSize || LoadAlign.getValue() < StoreSize) + if (StoreAlign.value() < StoreSize || LoadAlign.value() < StoreSize) return Changed; // If the element.atomic memcpy is not lowered into explicit @@ -1497,7 +1497,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad( // Note that unordered atomic loads/stores are *required* by the spec to // have an alignment but non-atomic loads/stores may not. NewCall = Builder.CreateElementUnorderedAtomicMemCpy( - StoreBasePtr, StoreAlign.getValue(), LoadBasePtr, LoadAlign.getValue(), + StoreBasePtr, StoreAlign.value(), LoadBasePtr, LoadAlign.value(), NumBytes, StoreSize, AATags.TBAA, AATags.TBAAStruct, AATags.Scope, AATags.NoAlias); } diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp index ddf7775..4ef7809 100644 --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -6385,8 +6385,8 @@ static bool SalvageDVI(llvm::Loop *L, ScalarEvolution &SE, // less DWARF ops than an iteration count-based expression. if (Optional<APInt> Offset = SE.computeConstantDifference(DVIRec.SCEVs[i], SCEVInductionVar)) { - if (Offset.getValue().getMinSignedBits() <= 64) - SalvageExpr->createOffsetExpr(Offset.getValue().getSExtValue(), + if (Offset.value().getMinSignedBits() <= 64) + SalvageExpr->createOffsetExpr(Offset.value().getSExtValue(), LSRInductionVar); } else if (!SalvageExpr->createIterCountExpr(DVIRec.SCEVs[i], IterCountExpr, SE)) diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp index 8c28685..64fcdfa 100644 --- a/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp +++ b/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp @@ -373,7 +373,7 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI, OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll, LLVMLoopUnrollAndJamFollowupRemainderInner}); if (NewInnerEpilogueLoopID) - SubLoop->setLoopID(NewInnerEpilogueLoopID.getValue()); + SubLoop->setLoopID(NewInnerEpilogueLoopID.value()); // Find trip count and trip multiple BasicBlock *Latch = L->getLoopLatch(); @@ -403,14 +403,14 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI, OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll, LLVMLoopUnrollAndJamFollowupRemainderOuter}); if (NewOuterEpilogueLoopID) - EpilogueOuterLoop->setLoopID(NewOuterEpilogueLoopID.getValue()); + EpilogueOuterLoop->setLoopID(NewOuterEpilogueLoopID.value()); } Optional<MDNode *> NewInnerLoopID = makeFollowupLoopID(OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll, LLVMLoopUnrollAndJamFollowupInner}); if (NewInnerLoopID) - SubLoop->setLoopID(NewInnerLoopID.getValue()); + SubLoop->setLoopID(NewInnerLoopID.value()); else SubLoop->setLoopID(OrigSubLoopID); @@ -419,7 +419,7 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI, OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll, LLVMLoopUnrollAndJamFollowupOuter}); if (NewOuterLoopID) { - L->setLoopID(NewOuterLoopID.getValue()); + L->setLoopID(NewOuterLoopID.value()); // Do not setLoopAlreadyUnrolled if a followup was given. 
return UnrollResult; diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp index fda86af..de5833f 100644 --- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -1324,7 +1324,7 @@ static LoopUnrollResult tryToUnrollLoop( makeFollowupLoopID(OrigLoopID, {LLVMLoopUnrollFollowupAll, LLVMLoopUnrollFollowupRemainder}); if (RemainderLoopID) - RemainderLoop->setLoopID(RemainderLoopID.getValue()); + RemainderLoop->setLoopID(RemainderLoopID.value()); } if (UnrollResult != LoopUnrollResult::FullyUnrolled) { @@ -1332,7 +1332,7 @@ static LoopUnrollResult tryToUnrollLoop( makeFollowupLoopID(OrigLoopID, {LLVMLoopUnrollFollowupAll, LLVMLoopUnrollFollowupUnrolled}); if (NewLoopID) { - L->setLoopID(NewLoopID.getValue()); + L->setLoopID(NewLoopID.value()); // Do not setLoopAlreadyUnrolled if loop attributes have been specified // explicitly. @@ -1645,15 +1645,15 @@ void LoopUnrollPass::printPipeline( OS, MapClassName2PassName); OS << "<"; if (UnrollOpts.AllowPartial != None) - OS << (UnrollOpts.AllowPartial.getValue() ? "" : "no-") << "partial;"; + OS << (UnrollOpts.AllowPartial.value() ? "" : "no-") << "partial;"; if (UnrollOpts.AllowPeeling != None) - OS << (UnrollOpts.AllowPeeling.getValue() ? "" : "no-") << "peeling;"; + OS << (UnrollOpts.AllowPeeling.value() ? "" : "no-") << "peeling;"; if (UnrollOpts.AllowRuntime != None) - OS << (UnrollOpts.AllowRuntime.getValue() ? "" : "no-") << "runtime;"; + OS << (UnrollOpts.AllowRuntime.value() ? "" : "no-") << "runtime;"; if (UnrollOpts.AllowUpperBound != None) - OS << (UnrollOpts.AllowUpperBound.getValue() ? "" : "no-") << "upperbound;"; + OS << (UnrollOpts.AllowUpperBound.value() ? "" : "no-") << "upperbound;"; if (UnrollOpts.AllowProfileBasedPeeling != None) - OS << (UnrollOpts.AllowProfileBasedPeeling.getValue() ? "" : "no-") + OS << (UnrollOpts.AllowProfileBasedPeeling.value() ? "" : "no-") << "profile-peeling;"; if (UnrollOpts.FullUnrollMaxCount != None) OS << "full-unroll-max=" << UnrollOpts.FullUnrollMaxCount << ";"; diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp index c03ded5..421f1f3 100644 --- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp +++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp @@ -1778,7 +1778,7 @@ CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC, auto Count = BFI->getProfileCountFromFreq(EntryFreq.getFrequency()); if (Count) newFunction->setEntryCount( - ProfileCount(Count.getValue(), Function::PCT_Real)); // FIXME + ProfileCount(Count.value(), Function::PCT_Real)); // FIXME BFI->setBlockFreq(codeReplacer, EntryFreq.getFrequency()); } diff --git a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp index cd3b6c1..023a0af 100644 --- a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp +++ b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp @@ -402,7 +402,7 @@ CloneLoopBlocks(Loop *L, Value *NewIter, const bool UseEpilogRemainder, Optional<MDNode *> NewLoopID = makeFollowupLoopID( LoopID, {LLVMLoopUnrollFollowupAll, LLVMLoopUnrollFollowupRemainder}); if (NewLoopID) { - NewLoop->setLoopID(NewLoopID.getValue()); + NewLoop->setLoopID(NewLoopID.value()); // Do not setLoopAlreadyUnrolled if loop attributes have been defined // explicitly. 
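[Illustrative aside, not part of the patch: every hunk in this change applies the same mechanical rename on llvm::Optional, replacing the legacy getValue() accessor with the std::optional-style value(). A minimal sketch of the usage before and after is below; the parseWidth() helper is hypothetical and exists only for this example.]

// Sketch only, assuming an llvm::Optional that already provides the
// std::optional-style accessors (value(), has_value()) alongside getValue(),
// as the hunks above and below rely on.
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"

static llvm::Optional<unsigned> parseWidth(bool Known) {
  if (!Known)
    return llvm::None; // disengaged Optional
  return 128u;         // engaged Optional holding 128
}

int main() {
  llvm::Optional<unsigned> W = parseWidth(true);
  if (W.has_value()) {
    // Before this patch the accessor was spelled W.getValue();
    // the patch switches every such call to the std::optional spelling.
    llvm::outs() << "width = " << W.value() << "\n";
  }
  return 0;
}

[End of aside; the diff resumes below.]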
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp index 29d3cc7..82f993b 100644 --- a/llvm/lib/Transforms/Utils/LoopUtils.cpp +++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp @@ -356,7 +356,7 @@ TransformationMode llvm::hasUnrollTransformation(const Loop *L) { Optional<int> Count = getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count"); if (Count) - return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser; + return Count.value() == 1 ? TM_SuppressedByUser : TM_ForcedByUser; if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable")) return TM_ForcedByUser; @@ -377,7 +377,7 @@ TransformationMode llvm::hasUnrollAndJamTransformation(const Loop *L) { Optional<int> Count = getOptionalIntLoopAttribute(L, "llvm.loop.unroll_and_jam.count"); if (Count) - return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser; + return Count.value() == 1 ? TM_SuppressedByUser : TM_ForcedByUser; if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.enable")) return TM_ForcedByUser; diff --git a/llvm/lib/Transforms/Utils/MisExpect.cpp b/llvm/lib/Transforms/Utils/MisExpect.cpp index b73d68e..4414b04 100644 --- a/llvm/lib/Transforms/Utils/MisExpect.cpp +++ b/llvm/lib/Transforms/Utils/MisExpect.cpp @@ -221,7 +221,7 @@ void checkBackendInstrumentation(Instruction &I, auto ExpectedWeightsOpt = extractWeights(&I, I.getContext()); if (!ExpectedWeightsOpt) return; - auto ExpectedWeights = ExpectedWeightsOpt.getValue(); + auto ExpectedWeights = ExpectedWeightsOpt.value(); verifyMisExpect(I, RealWeights, ExpectedWeights); } @@ -230,7 +230,7 @@ void checkFrontendInstrumentation(Instruction &I, auto RealWeightsOpt = extractWeights(&I, I.getContext()); if (!RealWeightsOpt) return; - auto RealWeights = RealWeightsOpt.getValue(); + auto RealWeights = RealWeightsOpt.value(); verifyMisExpect(I, RealWeights, ExpectedWeights); } diff --git a/llvm/lib/Transforms/Utils/ModuleUtils.cpp b/llvm/lib/Transforms/Utils/ModuleUtils.cpp index 4dcba65..9e1492b 100644 --- a/llvm/lib/Transforms/Utils/ModuleUtils.cpp +++ b/llvm/lib/Transforms/Utils/ModuleUtils.cpp @@ -255,7 +255,7 @@ void VFABI::setVectorVariantNames(CallInst *CI, LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << VariantMapping << "'\n"); Optional<VFInfo> VI = VFABI::tryDemangleForVFABI(VariantMapping, *M); assert(VI && "Cannot add an invalid VFABI name."); - assert(M->getNamedValue(VI.getValue().VectorName) && + assert(M->getNamedValue(VI.value().VectorName) && "Cannot add variant to attribute: " "vector function declaration is missing."); } diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 9ceba51..0777a13 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -4866,7 +4866,7 @@ LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { MaxVScale = TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); MaxScalableVF = ElementCount::getScalable( - MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); + MaxVScale ? 
(MaxSafeElements / MaxVScale.value()) : 0); if (!MaxScalableVF) reportVectorizationInfo( "Max legal vector width too small, scalable vectorization " @@ -5261,9 +5261,9 @@ bool LoopVectorizationCostModel::isMoreProfitable( unsigned EstimatedWidthB = B.Width.getKnownMinValue(); if (Optional<unsigned> VScale = getVScaleForTuning()) { if (A.Width.isScalable()) - EstimatedWidthA *= VScale.getValue(); + EstimatedWidthA *= VScale.value(); if (B.Width.isScalable()) - EstimatedWidthB *= VScale.getValue(); + EstimatedWidthB *= VScale.value(); } // Assume vscale may be larger than 1 (or the value being tuned for), @@ -7625,7 +7625,7 @@ void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, BestVPlan.getVectorLoopRegion()->getEntryBasicBlock(); Loop *L = LI->getLoopFor(State.CFG.VPBB2IRBB[HeaderVPBB]); if (VectorizedLoopID) - L->setLoopID(VectorizedLoopID.getValue()); + L->setLoopID(VectorizedLoopID.value()); else { // Keep all loop hints from the original loop on the vector loop (we'll // replace the vectorizer-specific hints below). @@ -10461,7 +10461,7 @@ bool LoopVectorizePass::processLoop(Loop *L) { makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, LLVMLoopVectorizeFollowupEpilogue}); if (RemainderLoopID) { - L->setLoopID(RemainderLoopID.getValue()); + L->setLoopID(RemainderLoopID.value()); } else { if (DisableRuntimeUnroll) AddRuntimeUnrollDisableMetaData(L); diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 34c6ec1..e136cd9 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -2637,7 +2637,7 @@ private: AliasCacheKey key = std::make_pair(Inst1, Inst2); Optional<bool> &result = AliasCache[key]; if (result) { - return result.getValue(); + return result.value(); } bool aliased = true; if (Loc1.Ptr && isSimple(Inst1)) diff --git a/llvm/tools/llc/llc.cpp b/llvm/tools/llc/llc.cpp index aaee45d..853a0bd 100644 --- a/llvm/tools/llc/llc.cpp +++ b/llvm/tools/llc/llc.cpp @@ -579,7 +579,7 @@ static int compileModule(char **argv, LLVMContext &Context) { Optional<CodeModel::Model> CM_IR = M->getCodeModel(); if (!CM && CM_IR) - Target->setCodeModel(CM_IR.getValue()); + Target->setCodeModel(CM_IR.value()); } else { TheTriple = Triple(Triple::normalize(TargetTriple)); if (TheTriple.getTriple().empty()) diff --git a/llvm/tools/lli/lli.cpp b/llvm/tools/lli/lli.cpp index 7e727d5..42bea1a 100644 --- a/llvm/tools/lli/lli.cpp +++ b/llvm/tools/lli/lli.cpp @@ -535,9 +535,9 @@ int main(int argc, char **argv, char * const *envp) { builder.setMCPU(codegen::getCPUStr()); builder.setMAttrs(codegen::getFeatureList()); if (auto RM = codegen::getExplicitRelocModel()) - builder.setRelocationModel(RM.getValue()); + builder.setRelocationModel(RM.value()); if (auto CM = codegen::getExplicitCodeModel()) - builder.setCodeModel(CM.getValue()); + builder.setCodeModel(CM.value()); builder.setErrorStr(&ErrorMsg); builder.setEngineKind(ForceInterpreter ? 
EngineKind::Interpreter diff --git a/llvm/tools/llvm-cov/CoverageExporterJson.cpp b/llvm/tools/llvm-cov/CoverageExporterJson.cpp index d341abe..2e161f5 100644 --- a/llvm/tools/llvm-cov/CoverageExporterJson.cpp +++ b/llvm/tools/llvm-cov/CoverageExporterJson.cpp @@ -291,8 +291,8 @@ void CoverageExporterJson::renderRoot(ArrayRef<std::string> SourceFiles) { const json::Object *ObjB = B.getAsObject(); assert(ObjA != nullptr && "Value A was not an Object"); assert(ObjB != nullptr && "Value B was not an Object"); - const StringRef FilenameA = ObjA->getString("filename").getValue(); - const StringRef FilenameB = ObjB->getString("filename").getValue(); + const StringRef FilenameA = ObjA->getString("filename").value(); + const StringRef FilenameB = ObjB->getString("filename").value(); return FilenameA.compare(FilenameB) < 0; }); auto Export = json::Object( diff --git a/llvm/tools/llvm-ifs/llvm-ifs.cpp b/llvm/tools/llvm-ifs/llvm-ifs.cpp index f9b6a8c..6082290 100644 --- a/llvm/tools/llvm-ifs/llvm-ifs.cpp +++ b/llvm/tools/llvm-ifs/llvm-ifs.cpp @@ -533,34 +533,33 @@ int main(int argc, char *argv[]) { << "Triple should be defined when output format is TBD"; return -1; } - return writeTbdStub(llvm::Triple(Stub.Target.Triple.getValue()), + return writeTbdStub(llvm::Triple(Stub.Target.Triple.value()), Stub.Symbols, "TBD", Out); } case FileFormat::IFS: { Stub.IfsVersion = IfsVersionCurrent; - if (Config.InputFormat.getValue() == FileFormat::ELF && + if (Config.InputFormat.value() == FileFormat::ELF && Config.HintIfsTarget) { std::error_code HintEC(1, std::generic_category()); IFSTarget HintTarget = parseTriple(*Config.HintIfsTarget); - if (Stub.Target.Arch.getValue() != HintTarget.Arch.getValue()) + if (Stub.Target.Arch.value() != HintTarget.Arch.value()) fatalError(make_error<StringError>( "Triple hint does not match the actual architecture", HintEC)); - if (Stub.Target.Endianness.getValue() != - HintTarget.Endianness.getValue()) + if (Stub.Target.Endianness.value() != HintTarget.Endianness.value()) fatalError(make_error<StringError>( "Triple hint does not match the actual endianness", HintEC)); - if (Stub.Target.BitWidth.getValue() != HintTarget.BitWidth.getValue()) + if (Stub.Target.BitWidth.value() != HintTarget.BitWidth.value()) fatalError(make_error<StringError>( "Triple hint does not match the actual bit width", HintEC)); stripIFSTarget(Stub, true, false, false, false); - Stub.Target.Triple = Config.HintIfsTarget.getValue(); + Stub.Target.Triple = Config.HintIfsTarget.value(); } else { stripIFSTarget(Stub, Config.StripIfsTarget, Config.StripIfsArch, Config.StripIfsEndianness, Config.StripIfsBitwidth); } Error IFSWriteError = - writeIFS(Config.Output.getValue(), Stub, Config.WriteIfChanged); + writeIFS(Config.Output.value(), Stub, Config.WriteIfChanged); if (IFSWriteError) fatalError(std::move(IFSWriteError)); break; @@ -589,29 +588,28 @@ int main(int argc, char *argv[]) { } if (Config.OutputIfs) { Stub.IfsVersion = IfsVersionCurrent; - if (Config.InputFormat.getValue() == FileFormat::ELF && + if (Config.InputFormat.value() == FileFormat::ELF && Config.HintIfsTarget) { std::error_code HintEC(1, std::generic_category()); IFSTarget HintTarget = parseTriple(*Config.HintIfsTarget); - if (Stub.Target.Arch.getValue() != HintTarget.Arch.getValue()) + if (Stub.Target.Arch.value() != HintTarget.Arch.value()) fatalError(make_error<StringError>( "Triple hint does not match the actual architecture", HintEC)); - if (Stub.Target.Endianness.getValue() != - HintTarget.Endianness.getValue()) + if 
(Stub.Target.Endianness.value() != HintTarget.Endianness.value()) fatalError(make_error<StringError>( "Triple hint does not match the actual endianness", HintEC)); - if (Stub.Target.BitWidth.getValue() != HintTarget.BitWidth.getValue()) + if (Stub.Target.BitWidth.value() != HintTarget.BitWidth.value()) fatalError(make_error<StringError>( "Triple hint does not match the actual bit width", HintEC)); stripIFSTarget(Stub, true, false, false, false); - Stub.Target.Triple = Config.HintIfsTarget.getValue(); + Stub.Target.Triple = Config.HintIfsTarget.value(); } else { stripIFSTarget(Stub, Config.StripIfsTarget, Config.StripIfsArch, Config.StripIfsEndianness, Config.StripIfsBitwidth); } Error IFSWriteError = - writeIFS(Config.OutputIfs.getValue(), Stub, Config.WriteIfChanged); + writeIFS(Config.OutputIfs.value(), Stub, Config.WriteIfChanged); if (IFSWriteError) fatalError(std::move(IFSWriteError)); } @@ -628,7 +626,7 @@ int main(int argc, char *argv[]) { << "Triple should be defined when output format is TBD"; return -1; } - return writeTbdStub(llvm::Triple(Stub.Target.Triple.getValue()), + return writeTbdStub(llvm::Triple(Stub.Target.Triple.value()), Stub.Symbols, "TBD", Out); } } diff --git a/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp b/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp index 67b6367..d3f9738 100644 --- a/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp +++ b/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp @@ -71,7 +71,7 @@ void InstructionInfoView::printView(raw_ostream &OS) const { TempStream << ' '; if (IIVDEntry.RThroughput) { - double RT = IIVDEntry.RThroughput.getValue(); + double RT = IIVDEntry.RThroughput.value(); TempStream << format("%.2f", RT) << ' '; if (RT < 10.0) TempStream << " "; diff --git a/llvm/tools/llvm-objdump/XCOFFDump.cpp b/llvm/tools/llvm-objdump/XCOFFDump.cpp index 8597852..dd1570e 100644 --- a/llvm/tools/llvm-objdump/XCOFFDump.cpp +++ b/llvm/tools/llvm-objdump/XCOFFDump.cpp @@ -94,8 +94,8 @@ std::string objdump::getXCOFFSymbolDescription(const SymbolInfoTy &SymbolInfo, std::string Result; // Dummy symbols have no symbol index. if (SymbolInfo.XCOFFSymInfo.Index) - Result = ("(idx: " + Twine(SymbolInfo.XCOFFSymInfo.Index.getValue()) + - ") " + SymbolName) + Result = ("(idx: " + Twine(SymbolInfo.XCOFFSymInfo.Index.value()) + ") " + + SymbolName) .str(); else Result.append(SymbolName.begin(), SymbolName.end()); diff --git a/llvm/tools/llvm-objdump/llvm-objdump.cpp b/llvm/tools/llvm-objdump/llvm-objdump.cpp index 3e59b5c..1245f9e 100644 --- a/llvm/tools/llvm-objdump/llvm-objdump.cpp +++ b/llvm/tools/llvm-objdump/llvm-objdump.cpp @@ -1412,7 +1412,7 @@ static void disassembleObject(const Target *TheTarget, const ObjectFile &Obj, // separately. But WebAssembly decodes preludes for some symbols. 
        //
        if (Status) {
-         if (Status.getValue() == MCDisassembler::Fail) {
+         if (Status.value() == MCDisassembler::Fail) {
            outs() << "// Error in decoding " << SymbolName << " : Decoding failed region as bytes.\n";
            for (uint64_t I = 0; I < Size; ++I) {
@@ -2144,7 +2144,7 @@ void objdump::printSymbol(const ObjectFile &O, const SymbolRef &Symbol,
     if (SymbolDescription)
       SymName = getXCOFFSymbolDescription(
-          createSymbolInfo(O, SymRef.getValue()), SymName);
+          createSymbolInfo(O, SymRef.value()), SymName);
     outs() << ' ' << SymName;
     outs() << ") ";
@@ -2251,8 +2251,8 @@ static void printRawClangAST(const ObjectFile *Obj) {
   if (!ClangASTSection)
     return;
-  StringRef ClangASTContents = unwrapOrError(
-      ClangASTSection.getValue().getContents(), Obj->getFileName());
+  StringRef ClangASTContents =
+      unwrapOrError(ClangASTSection.value().getContents(), Obj->getFileName());
   outs().write(ClangASTContents.data(), ClangASTContents.size());
 }
diff --git a/llvm/tools/llvm-profgen/ProfiledBinary.cpp b/llvm/tools/llvm-profgen/ProfiledBinary.cpp
index 5c054d6..eef5b8e 100644
--- a/llvm/tools/llvm-profgen/ProfiledBinary.cpp
+++ b/llvm/tools/llvm-profgen/ProfiledBinary.cpp
@@ -97,7 +97,7 @@ BinarySizeContextTracker::getFuncSizeForContext(const ContextTrieNode *Node) {
     PrevNode = CurrNode;
     CurrNode = CurrNode->getChildContext(CallSiteLoc, Node->getFuncName());
     if (CurrNode && CurrNode->getFunctionSize())
-      Size = CurrNode->getFunctionSize().getValue();
+      Size = CurrNode->getFunctionSize().value();
     CallSiteLoc = Node->getCallSiteLoc();
     Node = Node->getParentContext();
   }
@@ -111,12 +111,12 @@ BinarySizeContextTracker::getFuncSizeForContext(const ContextTrieNode *Node) {
     while (!Size && CurrNode && !CurrNode->getAllChildContext().empty()) {
       CurrNode = &CurrNode->getAllChildContext().begin()->second;
       if (CurrNode->getFunctionSize())
-        Size = CurrNode->getFunctionSize().getValue();
+        Size = CurrNode->getFunctionSize().value();
     }
   }
   assert(Size && "We should at least find one context size.");
-  return Size.getValue();
+  return Size.value();
 }
 void BinarySizeContextTracker::trackInlineesOptimizedAway(
diff --git a/llvm/tools/llvm-sim/llvm-sim.cpp b/llvm/tools/llvm-sim/llvm-sim.cpp
index 6879d73..2b717d7 100644
--- a/llvm/tools/llvm-sim/llvm-sim.cpp
+++ b/llvm/tools/llvm-sim/llvm-sim.cpp
@@ -90,8 +90,8 @@ exportToFile(const StringRef FilePath,
     assert(End && "Could not find instruction number for last instruction");
     J.object([&] {
-      J.attribute("start", Start.getValue());
-      J.attribute("end", End.getValue());
+      J.attribute("start", Start.value());
+      J.attribute("end", End.value());
     });
   }
   J.arrayEnd();
diff --git a/llvm/tools/obj2yaml/dwarf2yaml.cpp b/llvm/tools/obj2yaml/dwarf2yaml.cpp
index c0c23ea..2426705 100644
--- a/llvm/tools/obj2yaml/dwarf2yaml.cpp
+++ b/llvm/tools/obj2yaml/dwarf2yaml.cpp
@@ -247,15 +247,15 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
       auto FormValue = DIEWrapper.find(AttrSpec.Attr);
       if (!FormValue)
         return;
-      auto Form = FormValue.getValue().getForm();
+      auto Form = FormValue.value().getForm();
       bool indirect = false;
       do {
         indirect = false;
        switch (Form) {
        case dwarf::DW_FORM_addr:
        case dwarf::DW_FORM_GNU_addr_index:
-         if (auto Val = FormValue.getValue().getAsAddress())
-           NewValue.Value = Val.getValue();
+         if (auto Val = FormValue.value().getAsAddress())
+           NewValue.Value = Val.value();
          break;
        case dwarf::DW_FORM_ref_addr:
        case dwarf::DW_FORM_ref1:
@@ -264,16 +264,16 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
        case dwarf::DW_FORM_ref8:
        case dwarf::DW_FORM_ref_udata:
        case dwarf::DW_FORM_ref_sig8:
-         if (auto Val = FormValue.getValue().getAsReferenceUVal())
-           NewValue.Value = Val.getValue();
+         if (auto Val = FormValue.value().getAsReferenceUVal())
+           NewValue.Value = Val.value();
          break;
        case dwarf::DW_FORM_exprloc:
        case dwarf::DW_FORM_block:
        case dwarf::DW_FORM_block1:
        case dwarf::DW_FORM_block2:
        case dwarf::DW_FORM_block4:
-         if (auto Val = FormValue.getValue().getAsBlock()) {
-           auto BlockData = Val.getValue();
+         if (auto Val = FormValue.value().getAsBlock()) {
+           auto BlockData = Val.value();
            std::copy(BlockData.begin(), BlockData.end(), std::back_inserter(NewValue.BlockData));
          }
@@ -288,8 +288,8 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
        case dwarf::DW_FORM_udata:
        case dwarf::DW_FORM_ref_sup4:
        case dwarf::DW_FORM_ref_sup8:
-         if (auto Val = FormValue.getValue().getAsUnsignedConstant())
-           NewValue.Value = Val.getValue();
+         if (auto Val = FormValue.value().getAsUnsignedConstant())
+           NewValue.Value = Val.value();
          break;
        case dwarf::DW_FORM_string:
          if (auto Val = dwarf::toString(FormValue))
@@ -297,10 +297,10 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
          break;
        case dwarf::DW_FORM_indirect:
          indirect = true;
-         if (auto Val = FormValue.getValue().getAsUnsignedConstant()) {
-           NewValue.Value = Val.getValue();
+         if (auto Val = FormValue.value().getAsUnsignedConstant()) {
+           NewValue.Value = Val.value();
            NewEntry.Values.push_back(NewValue);
-           Form = static_cast<dwarf::Form>(Val.getValue());
+           Form = static_cast<dwarf::Form>(Val.value());
          }
          break;
        case dwarf::DW_FORM_strp:
@@ -311,8 +311,8 @@ void dumpDebugInfo(DWARFContext &DCtx, DWARFYAML::Data &Y) {
        case dwarf::DW_FORM_strp_sup:
        case dwarf::DW_FORM_GNU_str_index:
        case dwarf::DW_FORM_strx:
-         if (auto Val = FormValue.getValue().getAsCStringOffset())
-           NewValue.Value = Val.getValue();
+         if (auto Val = FormValue.value().getAsCStringOffset())
+           NewValue.Value = Val.value();
          break;
        case dwarf::DW_FORM_flag_present:
          NewValue.Value = 1;
diff --git a/llvm/unittests/ADT/OptionalTest.cpp b/llvm/unittests/ADT/OptionalTest.cpp
index dc1e073..dac8823 100644
--- a/llvm/unittests/ADT/OptionalTest.cpp
+++ b/llvm/unittests/ADT/OptionalTest.cpp
@@ -35,7 +35,7 @@ void OptionalWorksInConstexpr() {
   constexpr Optional<int> y2{3};
   static_assert(y1.value() == y2.value() && y1.value() == 3,
                 "Construction with value and getValue() are constexpr");
-  static_assert(y1.getValue() == y2.getValue() && y1.getValue() == 3,
+  static_assert(y1.value() == y2.value() && y1.value() == 3,
                 "Construction with value and getValue() are constexpr");
   static_assert(Optional<int>{3} >= 2 && Optional<int>{1} < Optional<int>{2},
                 "Comparisons work in constexpr");
diff --git a/llvm/unittests/Analysis/BlockFrequencyInfoTest.cpp b/llvm/unittests/Analysis/BlockFrequencyInfoTest.cpp
index 5dd3995..91009ab 100644
--- a/llvm/unittests/Analysis/BlockFrequencyInfoTest.cpp
+++ b/llvm/unittests/Analysis/BlockFrequencyInfoTest.cpp
@@ -75,11 +75,11 @@ TEST_F(BlockFrequencyInfoTest, Basic) {
   EXPECT_EQ(BB0Freq, BB1Freq + BB2Freq);
   EXPECT_EQ(BB0Freq, BB3Freq);
-  EXPECT_EQ(BFI.getBlockProfileCount(&BB0).getValue(), UINT64_C(100));
-  EXPECT_EQ(BFI.getBlockProfileCount(BB3).getValue(), UINT64_C(100));
-  EXPECT_EQ(BFI.getBlockProfileCount(BB1).getValue(),
+  EXPECT_EQ(BFI.getBlockProfileCount(&BB0).value(), UINT64_C(100));
+  EXPECT_EQ(BFI.getBlockProfileCount(BB3).value(), UINT64_C(100));
+  EXPECT_EQ(BFI.getBlockProfileCount(BB1).value(),
             (100 * BB1Freq + BB0Freq / 2) / BB0Freq);
-  EXPECT_EQ(BFI.getBlockProfileCount(BB2).getValue(),
+  EXPECT_EQ(BFI.getBlockProfileCount(BB2).value(),
             (100 * BB2Freq + BB0Freq / 2) / BB0Freq);
   // Scale the frequencies of BB0, BB1 and BB2 by a factor of two.
diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp
index 298a8bc..4c8942f 100644
--- a/llvm/unittests/Analysis/MemorySSATest.cpp
+++ b/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -1191,14 +1191,13 @@ TEST_F(MemorySSATest, TestStoreMayAlias) {
     EXPECT_EQ(MemDef->isOptimized(), true) << "Store " << I << " was not optimized";
     if (I == 1 || I == 3 || I == 4)
-      EXPECT_EQ(MemDef->getOptimizedAccessType().getValue(),
-                AliasResult::MayAlias)
+      EXPECT_EQ(MemDef->getOptimizedAccessType().value(), AliasResult::MayAlias)
          << "Store " << I << " doesn't have the correct alias information";
     else if (I == 0 || I == 2)
       EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
          << "Store " << I << " doesn't have the correct alias information";
     else
-      EXPECT_EQ(MemDef->getOptimizedAccessType().getValue(),
+      EXPECT_EQ(MemDef->getOptimizedAccessType().value(),
                AliasResult::MustAlias)
          << "Store " << I << " doesn't have the correct alias information";
     // EXPECT_EQ expands such that if we increment I above, it won't get
diff --git a/llvm/unittests/Analysis/VectorFunctionABITest.cpp b/llvm/unittests/Analysis/VectorFunctionABITest.cpp
index b819bb4..026732c 100644
--- a/llvm/unittests/Analysis/VectorFunctionABITest.cpp
+++ b/llvm/unittests/Analysis/VectorFunctionABITest.cpp
@@ -76,7 +76,7 @@ protected:
    const auto OptInfo = VFABI::tryDemangleForVFABI(MangledName, *(M.get()));
    if (OptInfo) {
-     Info = OptInfo.getValue();
+     Info = OptInfo.value();
      return true;
    }
diff --git a/llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp b/llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp
index d1d10a3..b11835c1 100644
--- a/llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp
@@ -84,156 +84,156 @@ TEST_F(AArch64GISelMITest, FoldBinOp) {
       ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAddInt.has_value());
-  EXPECT_EQ(25ULL, FoldGAddInt.getValue().getLimitedValue());
+  EXPECT_EQ(25ULL, FoldGAddInt.value().getLimitedValue());
   Optional<APInt> FoldGAddMix = ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAddMix.has_value());
-  EXPECT_EQ(1073741840ULL, FoldGAddMix.getValue().getLimitedValue());
+  EXPECT_EQ(1073741840ULL, FoldGAddMix.value().getLimitedValue());
   // Test G_AND folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGAndInt = ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAndInt.has_value());
-  EXPECT_EQ(0ULL, FoldGAndInt.getValue().getLimitedValue());
+  EXPECT_EQ(0ULL, FoldGAndInt.value().getLimitedValue());
   Optional<APInt> FoldGAndMix = ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst2.getReg(0), MIBFCst1.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAndMix.has_value());
-  EXPECT_EQ(1ULL, FoldGAndMix.getValue().getLimitedValue());
+  EXPECT_EQ(1ULL, FoldGAndMix.value().getLimitedValue());
   // Test G_ASHR folding Integer + Mixed cases
   Optional<APInt> FoldGAShrInt = ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAShrInt.has_value());
-  EXPECT_EQ(0ULL, FoldGAShrInt.getValue().getLimitedValue());
+  EXPECT_EQ(0ULL, FoldGAShrInt.value().getLimitedValue());
   Optional<APInt> FoldGAShrMix = ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBFCst2.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAShrMix.has_value());
-  EXPECT_EQ(2097152ULL, FoldGAShrMix.getValue().getLimitedValue());
+  EXPECT_EQ(2097152ULL, FoldGAShrMix.value().getLimitedValue());
   // Test G_LSHR folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGLShrInt = ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGLShrInt.has_value());
-  EXPECT_EQ(0ULL, FoldGLShrInt.getValue().getLimitedValue());
+  EXPECT_EQ(0ULL, FoldGLShrInt.value().getLimitedValue());
   Optional<APInt> FoldGLShrMix = ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBFCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGLShrMix.has_value());
-  EXPECT_EQ(2080768ULL, FoldGLShrMix.getValue().getLimitedValue());
+  EXPECT_EQ(2080768ULL, FoldGLShrMix.value().getLimitedValue());
   // Test G_MUL folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGMulInt = ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGMulInt.has_value());
-  EXPECT_EQ(144ULL, FoldGMulInt.getValue().getLimitedValue());
+  EXPECT_EQ(144ULL, FoldGMulInt.value().getLimitedValue());
   Optional<APInt> FoldGMulMix = ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGMulMix.has_value());
-  EXPECT_EQ(0ULL, FoldGMulMix.getValue().getLimitedValue());
+  EXPECT_EQ(0ULL, FoldGMulMix.value().getLimitedValue());
   // Test G_OR folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGOrInt = ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGOrInt.has_value());
-  EXPECT_EQ(25ULL, FoldGOrInt.getValue().getLimitedValue());
+  EXPECT_EQ(25ULL, FoldGOrInt.value().getLimitedValue());
   Optional<APInt> FoldGOrMix = ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGOrMix.has_value());
-  EXPECT_EQ(1073741840ULL, FoldGOrMix.getValue().getLimitedValue());
+  EXPECT_EQ(1073741840ULL, FoldGOrMix.value().getLimitedValue());
   // Test G_SHL folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGShlInt = ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGShlInt.has_value());
-  EXPECT_EQ(8192ULL, FoldGShlInt.getValue().getLimitedValue());
+  EXPECT_EQ(8192ULL, FoldGShlInt.value().getLimitedValue());
   Optional<APInt> FoldGShlMix = ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGShlMix.has_value());
-  EXPECT_EQ(0ULL, FoldGShlMix.getValue().getLimitedValue());
+  EXPECT_EQ(0ULL, FoldGShlMix.value().getLimitedValue());
   // Test G_SUB folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGSubInt = ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSubInt.has_value());
-  EXPECT_EQ(7ULL, FoldGSubInt.getValue().getLimitedValue());
+  EXPECT_EQ(7ULL, FoldGSubInt.value().getLimitedValue());
   Optional<APInt> FoldGSubMix = ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSubMix.has_value());
-  EXPECT_EQ(3221225488ULL, FoldGSubMix.getValue().getLimitedValue());
+  EXPECT_EQ(3221225488ULL, FoldGSubMix.value().getLimitedValue());
   // Test G_XOR folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGXorInt = ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGXorInt.has_value());
-  EXPECT_EQ(25ULL, FoldGXorInt.getValue().getLimitedValue());
+  EXPECT_EQ(25ULL, FoldGXorInt.value().getLimitedValue());
   Optional<APInt> FoldGXorMix = ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGXorMix.has_value());
-  EXPECT_EQ(1073741840ULL, FoldGXorMix.getValue().getLimitedValue());
+  EXPECT_EQ(1073741840ULL, FoldGXorMix.value().getLimitedValue());
   // Test G_UDIV folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGUdivInt = ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGUdivInt.has_value());
-  EXPECT_EQ(1ULL, FoldGUdivInt.getValue().getLimitedValue());
+  EXPECT_EQ(1ULL, FoldGUdivInt.value().getLimitedValue());
   Optional<APInt> FoldGUdivMix = ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGUdivMix.has_value());
-  EXPECT_EQ(0ULL, FoldGUdivMix.getValue().getLimitedValue());
+  EXPECT_EQ(0ULL, FoldGUdivMix.value().getLimitedValue());
   // Test G_SDIV folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGSdivInt = ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSdivInt.has_value());
-  EXPECT_EQ(1ULL, FoldGSdivInt.getValue().getLimitedValue());
+  EXPECT_EQ(1ULL, FoldGSdivInt.value().getLimitedValue());
   Optional<APInt> FoldGSdivMix = ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSdivMix.has_value());
-  EXPECT_EQ(0ULL, FoldGSdivMix.getValue().getLimitedValue());
+  EXPECT_EQ(0ULL, FoldGSdivMix.value().getLimitedValue());
   // Test G_UREM folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGUremInt = ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGUremInt.has_value());
-  EXPECT_EQ(1ULL, FoldGUremInt.getValue().getLimitedValue());
+  EXPECT_EQ(1ULL, FoldGUremInt.value().getLimitedValue());
   Optional<APInt> FoldGUremMix = ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGUremMix.has_value());
-  EXPECT_EQ(0ULL, FoldGUremMix.getValue().getLimitedValue());
+  EXPECT_EQ(0ULL, FoldGUremMix.value().getLimitedValue());
   // Test G_SREM folding Integer + Mixed Int-Float cases
   Optional<APInt> FoldGSremInt = ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSremInt.has_value());
-  EXPECT_EQ(7ULL, FoldGSremInt.getValue().getLimitedValue());
+  EXPECT_EQ(7ULL, FoldGSremInt.value().getLimitedValue());
   Optional<APInt> FoldGSremMix = ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSremMix.has_value());
-  EXPECT_EQ(16ULL, FoldGSremMix.getValue().getLimitedValue());
+  EXPECT_EQ(16ULL, FoldGSremMix.value().getLimitedValue());
 }
 } // namespace
diff --git a/llvm/unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp b/llvm/unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp
index 3d2e084..ee92049 100644
--- a/llvm/unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp
+++ b/llvm/unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp
@@ -253,7 +253,7 @@ void TestAllForms() {
   EXPECT_TRUE((bool)FormValue);
   BlockDataOpt = FormValue->getAsBlock();
   EXPECT_TRUE(BlockDataOpt.has_value());
-  ExtractedBlockData = BlockDataOpt.getValue();
+  ExtractedBlockData = BlockDataOpt.value();
   EXPECT_EQ(ExtractedBlockData.size(), BlockSize);
   EXPECT_TRUE(memcmp(ExtractedBlockData.data(), BlockData, BlockSize) == 0);
@@ -261,7 +261,7 @@ void TestAllForms() {
   EXPECT_TRUE((bool)FormValue);
   BlockDataOpt = FormValue->getAsBlock();
   EXPECT_TRUE(BlockDataOpt.has_value());
-  ExtractedBlockData = BlockDataOpt.getValue();
+  ExtractedBlockData = BlockDataOpt.value();
   EXPECT_EQ(ExtractedBlockData.size(), BlockSize);
   EXPECT_TRUE(memcmp(ExtractedBlockData.data(), BlockData, BlockSize) == 0);
@@ -269,7 +269,7 @@ void TestAllForms() {
   EXPECT_TRUE((bool)FormValue);
   BlockDataOpt = FormValue->getAsBlock();
   EXPECT_TRUE(BlockDataOpt.has_value());
-  ExtractedBlockData = BlockDataOpt.getValue();
+  ExtractedBlockData = BlockDataOpt.value();
   EXPECT_EQ(ExtractedBlockData.size(), BlockSize);
   EXPECT_TRUE(memcmp(ExtractedBlockData.data(), BlockData, BlockSize) == 0);
@@ -277,7 +277,7 @@ void TestAllForms() {
   EXPECT_TRUE((bool)FormValue);
   BlockDataOpt = FormValue->getAsBlock();
   EXPECT_TRUE(BlockDataOpt.has_value());
-  ExtractedBlockData = BlockDataOpt.getValue();
+  ExtractedBlockData = BlockDataOpt.value();
   EXPECT_EQ(ExtractedBlockData.size(), BlockSize);
   EXPECT_TRUE(memcmp(ExtractedBlockData.data(), BlockData, BlockSize) == 0);
@@ -287,7 +287,7 @@ void TestAllForms() {
     EXPECT_TRUE((bool)FormValue);
     BlockDataOpt = FormValue->getAsBlock();
     EXPECT_TRUE(BlockDataOpt.has_value());
-    ExtractedBlockData = BlockDataOpt.getValue();
+    ExtractedBlockData = BlockDataOpt.value();
     EXPECT_EQ(ExtractedBlockData.size(), 16u);
     EXPECT_TRUE(memcmp(ExtractedBlockData.data(), Data16, 16) == 0);
   }
@@ -989,21 +989,21 @@ template <uint16_t Version, class AddrType> void TestAddresses() {
     EXPECT_FALSE((bool)OptU64);
   } else {
     EXPECT_TRUE((bool)OptU64);
-    EXPECT_EQ(OptU64.getValue(), ActualHighPC);
+    EXPECT_EQ(OptU64.value(), ActualHighPC);
   }
   // Get the high PC as an unsigned constant. This should succeed if the high PC
   // was encoded as an offset and fail if the high PC was encoded as an address.
   OptU64 = toUnsigned(SubprogramDieLowHighPC.find(DW_AT_high_pc));
   if (SupportsHighPCAsOffset) {
     EXPECT_TRUE((bool)OptU64);
-    EXPECT_EQ(OptU64.getValue(), ActualHighPCOffset);
+    EXPECT_EQ(OptU64.value(), ActualHighPCOffset);
   } else {
     EXPECT_FALSE((bool)OptU64);
   }
   OptU64 = SubprogramDieLowHighPC.getHighPC(ActualLowPC);
   EXPECT_TRUE((bool)OptU64);
-  EXPECT_EQ(OptU64.getValue(), ActualHighPC);
+  EXPECT_EQ(OptU64.value(), ActualHighPC);
   EXPECT_TRUE(SubprogramDieLowHighPC.getLowAndHighPC(LowPC, HighPC, SectionIndex));
   EXPECT_EQ(LowPC, ActualLowPC);
diff --git a/llvm/unittests/DebugInfo/DWARF/DWARFFormValueTest.cpp b/llvm/unittests/DebugInfo/DWARF/DWARFFormValueTest.cpp
index 1a89025..c81059c 100644
--- a/llvm/unittests/DebugInfo/DWARF/DWARFFormValueTest.cpp
+++ b/llvm/unittests/DebugInfo/DWARF/DWARFFormValueTest.cpp
@@ -79,16 +79,16 @@ TEST(DWARFFormValue, SignedConstantForms) {
   auto Sign2 = createDataXFormValue<uint16_t>(DW_FORM_data2, -12345);
   auto Sign4 = createDataXFormValue<uint32_t>(DW_FORM_data4, -123456789);
   auto Sign8 = createDataXFormValue<uint64_t>(DW_FORM_data8, -1);
-  EXPECT_EQ(Sign1.getAsSignedConstant().getValue(), -123);
-  EXPECT_EQ(Sign2.getAsSignedConstant().getValue(), -12345);
-  EXPECT_EQ(Sign4.getAsSignedConstant().getValue(), -123456789);
-  EXPECT_EQ(Sign8.getAsSignedConstant().getValue(), -1);
+  EXPECT_EQ(Sign1.getAsSignedConstant().value(), -123);
+  EXPECT_EQ(Sign2.getAsSignedConstant().value(), -12345);
+  EXPECT_EQ(Sign4.getAsSignedConstant().value(), -123456789);
+  EXPECT_EQ(Sign8.getAsSignedConstant().value(), -1);
   // Check that we can handle big positive values, but that we return
   // an error just over the limit.
   auto UMax = createULEBFormValue(LLONG_MAX);
   auto TooBig = createULEBFormValue(uint64_t(LLONG_MAX) + 1);
-  EXPECT_EQ(UMax.getAsSignedConstant().getValue(), LLONG_MAX);
+  EXPECT_EQ(UMax.getAsSignedConstant().value(), LLONG_MAX);
   EXPECT_EQ(TooBig.getAsSignedConstant().has_value(), false);
   // Sanity check some other forms.
@@ -100,14 +100,14 @@ TEST(DWARFFormValue, SignedConstantForms) {
   auto LEBMax = createSLEBFormValue(LLONG_MAX);
   auto LEB1 = createSLEBFormValue(-42);
   auto LEB2 = createSLEBFormValue(42);
-  EXPECT_EQ(Data1.getAsSignedConstant().getValue(), 120);
-  EXPECT_EQ(Data2.getAsSignedConstant().getValue(), 32000);
-  EXPECT_EQ(Data4.getAsSignedConstant().getValue(), 2000000000);
-  EXPECT_EQ(Data8.getAsSignedConstant().getValue(), 0x1234567812345678LL);
-  EXPECT_EQ(LEBMin.getAsSignedConstant().getValue(), LLONG_MIN);
-  EXPECT_EQ(LEBMax.getAsSignedConstant().getValue(), LLONG_MAX);
-  EXPECT_EQ(LEB1.getAsSignedConstant().getValue(), -42);
-  EXPECT_EQ(LEB2.getAsSignedConstant().getValue(), 42);
+  EXPECT_EQ(Data1.getAsSignedConstant().value(), 120);
+  EXPECT_EQ(Data2.getAsSignedConstant().value(), 32000);
+  EXPECT_EQ(Data4.getAsSignedConstant().value(), 2000000000);
+  EXPECT_EQ(Data8.getAsSignedConstant().value(), 0x1234567812345678LL);
+  EXPECT_EQ(LEBMin.getAsSignedConstant().value(), LLONG_MIN);
+  EXPECT_EQ(LEBMax.getAsSignedConstant().value(), LLONG_MAX);
+  EXPECT_EQ(LEB1.getAsSignedConstant().value(), -42);
+  EXPECT_EQ(LEB2.getAsSignedConstant().value(), 42);
   // Data16 is a little tricky.
   char Cksum[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
diff --git a/llvm/unittests/FileCheck/FileCheckTest.cpp b/llvm/unittests/FileCheck/FileCheckTest.cpp
index f3b5d3d..c31c553 100644
--- a/llvm/unittests/FileCheck/FileCheckTest.cpp
+++ b/llvm/unittests/FileCheck/FileCheckTest.cpp
@@ -766,8 +766,8 @@ TEST_F(FileCheckTest, NumericVariable) {
   ASSERT_TRUE(Value);
   EXPECT_EQ(925, cantFail(Value->getSignedValue()));
   // getStringValue should return the same memory not just the same characters.
-  EXPECT_EQ(StringValue.begin(), FooVar.getStringValue().getValue().begin());
-  EXPECT_EQ(StringValue.end(), FooVar.getStringValue().getValue().end());
+  EXPECT_EQ(StringValue.begin(), FooVar.getStringValue().value().begin());
+  EXPECT_EQ(StringValue.end(), FooVar.getStringValue().value().end());
   EvalResult = FooVarUse.eval();
   ASSERT_THAT_EXPECTED(EvalResult, Succeeded());
   EXPECT_EQ(925, cantFail(EvalResult->getSignedValue()));
diff --git a/llvm/unittests/IR/MetadataTest.cpp b/llvm/unittests/IR/MetadataTest.cpp
index 5b64861..9dd49d0 100644
--- a/llvm/unittests/IR/MetadataTest.cpp
+++ b/llvm/unittests/IR/MetadataTest.cpp
@@ -1045,52 +1045,51 @@ TEST_F(DILocationTest, cloneTemporary) {
 }
 TEST_F(DILocationTest, discriminatorEncoding) {
-  EXPECT_EQ(0U, DILocation::encodeDiscriminator(0, 0, 0).getValue());
+  EXPECT_EQ(0U, DILocation::encodeDiscriminator(0, 0, 0).value());
   // Encode base discriminator as a component: lsb is 0, then the value.
   // The other components are all absent, so we leave all the other bits 0.
-  EXPECT_EQ(2U, DILocation::encodeDiscriminator(1, 0, 0).getValue());
+  EXPECT_EQ(2U, DILocation::encodeDiscriminator(1, 0, 0).value());
   // Base discriminator component is empty, so lsb is 1. Next component is not
   // empty, so its lsb is 0, then its value (1). Next component is empty.
   // So the bit pattern is 101.
-  EXPECT_EQ(5U, DILocation::encodeDiscriminator(0, 1, 0).getValue());
+  EXPECT_EQ(5U, DILocation::encodeDiscriminator(0, 1, 0).value());
   // First 2 components are empty, so the bit pattern is 11. Then the
   // next component - ending up with 1011.
-  EXPECT_EQ(0xbU, DILocation::encodeDiscriminator(0, 0, 1).getValue());
+  EXPECT_EQ(0xbU, DILocation::encodeDiscriminator(0, 0, 1).value());
   // The bit pattern for the first 2 components is 11. The next bit is 0,
   // because the last component is not empty. We have 29 bits usable for
   // encoding, but we cap it at 12 bits uniformously for all components. We
   // encode the last component over 14 bits.
-  EXPECT_EQ(0xfffbU, DILocation::encodeDiscriminator(0, 0, 0xfff).getValue());
+  EXPECT_EQ(0xfffbU, DILocation::encodeDiscriminator(0, 0, 0xfff).value());
-  EXPECT_EQ(0x102U, DILocation::encodeDiscriminator(1, 1, 0).getValue());
+  EXPECT_EQ(0x102U, DILocation::encodeDiscriminator(1, 1, 0).value());
-  EXPECT_EQ(0x13eU, DILocation::encodeDiscriminator(0x1f, 1, 0).getValue());
+  EXPECT_EQ(0x13eU, DILocation::encodeDiscriminator(0x1f, 1, 0).value());
-  EXPECT_EQ(0x87feU, DILocation::encodeDiscriminator(0x1ff, 1, 0).getValue());
+  EXPECT_EQ(0x87feU, DILocation::encodeDiscriminator(0x1ff, 1, 0).value());
-  EXPECT_EQ(0x1f3eU, DILocation::encodeDiscriminator(0x1f, 0x1f, 0).getValue());
+  EXPECT_EQ(0x1f3eU, DILocation::encodeDiscriminator(0x1f, 0x1f, 0).value());
-  EXPECT_EQ(0x3ff3eU,
-            DILocation::encodeDiscriminator(0x1f, 0x1ff, 0).getValue());
+  EXPECT_EQ(0x3ff3eU, DILocation::encodeDiscriminator(0x1f, 0x1ff, 0).value());
   EXPECT_EQ(0x1ff87feU,
-            DILocation::encodeDiscriminator(0x1ff, 0x1ff, 0).getValue());
+            DILocation::encodeDiscriminator(0x1ff, 0x1ff, 0).value());
   EXPECT_EQ(0xfff9f3eU,
-            DILocation::encodeDiscriminator(0x1f, 0x1f, 0xfff).getValue());
+            DILocation::encodeDiscriminator(0x1f, 0x1f, 0xfff).value());
   EXPECT_EQ(0xffc3ff3eU,
-            DILocation::encodeDiscriminator(0x1f, 0x1ff, 0x1ff).getValue());
+            DILocation::encodeDiscriminator(0x1f, 0x1ff, 0x1ff).value());
   EXPECT_EQ(0xffcf87feU,
-            DILocation::encodeDiscriminator(0x1ff, 0x1f, 0x1ff).getValue());
+            DILocation::encodeDiscriminator(0x1ff, 0x1f, 0x1ff).value());
   EXPECT_EQ(0xe1ff87feU,
-            DILocation::encodeDiscriminator(0x1ff, 0x1ff, 7).getValue());
+            DILocation::encodeDiscriminator(0x1ff, 0x1ff, 7).value());
 }
 TEST_F(DILocationTest, discriminatorEncodingNegativeTests) {
@@ -1113,36 +1112,36 @@ TEST_F(DILocationTest, discriminatorSpecialCases) {
   EXPECT_EQ(0U, L1->getBaseDiscriminator());
   EXPECT_EQ(1U, L1->getDuplicationFactor());
-  EXPECT_EQ(L1, L1->cloneWithBaseDiscriminator(0).getValue());
-  EXPECT_EQ(L1, L1->cloneByMultiplyingDuplicationFactor(0).getValue());
-  EXPECT_EQ(L1, L1->cloneByMultiplyingDuplicationFactor(1).getValue());
+  EXPECT_EQ(L1, L1->cloneWithBaseDiscriminator(0).value());
+  EXPECT_EQ(L1, L1->cloneByMultiplyingDuplicationFactor(0).value());
+  EXPECT_EQ(L1, L1->cloneByMultiplyingDuplicationFactor(1).value());
-  auto L2 = L1->cloneWithBaseDiscriminator(1).getValue();
+  auto L2 = L1->cloneWithBaseDiscriminator(1).value();
   EXPECT_EQ(0U, L1->getBaseDiscriminator());
   EXPECT_EQ(1U, L1->getDuplicationFactor());
   EXPECT_EQ(1U, L2->getBaseDiscriminator());
   EXPECT_EQ(1U, L2->getDuplicationFactor());
-  auto L3 = L2->cloneByMultiplyingDuplicationFactor(2).getValue();
+  auto L3 = L2->cloneByMultiplyingDuplicationFactor(2).value();
   EXPECT_EQ(1U, L3->getBaseDiscriminator());
   EXPECT_EQ(2U, L3->getDuplicationFactor());
-  EXPECT_EQ(L2, L2->cloneByMultiplyingDuplicationFactor(1).getValue());
+  EXPECT_EQ(L2, L2->cloneByMultiplyingDuplicationFactor(1).value());
-  auto L4 = L3->cloneByMultiplyingDuplicationFactor(4).getValue();
+  auto L4 = L3->cloneByMultiplyingDuplicationFactor(4).value();
   EXPECT_EQ(1U, L4->getBaseDiscriminator());
   EXPECT_EQ(8U, L4->getDuplicationFactor());
-  auto L5 = L4->cloneWithBaseDiscriminator(2).getValue();
+  auto L5 = L4->cloneWithBaseDiscriminator(2).value();
   EXPECT_EQ(2U, L5->getBaseDiscriminator());
   EXPECT_EQ(8U, L5->getDuplicationFactor());
   // Check extreme cases
-  auto L6 = L1->cloneWithBaseDiscriminator(0xfff).getValue();
+  auto L6 = L1->cloneWithBaseDiscriminator(0xfff).value();
   EXPECT_EQ(0xfffU, L6->getBaseDiscriminator());
   EXPECT_EQ(0xfffU, L6->cloneByMultiplyingDuplicationFactor(0xfff)
-                        .getValue()
+                        .value()
                         ->getDuplicationFactor());
   // Check we return None for unencodable cases.
diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
index d5f033d..96c9c16 100644
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -132,7 +132,7 @@ TEST_F(VPIntrinsicTest, VPIntrinsicsDefScopes) {
   ScopeVPID = Intrinsic::VPID;
 #define END_REGISTER_VP_INTRINSIC(VPID)                                        \
   ASSERT_TRUE(ScopeVPID.has_value());                                          \
-  ASSERT_EQ(ScopeVPID.getValue(), Intrinsic::VPID);                            \
+  ASSERT_EQ(ScopeVPID.value(), Intrinsic::VPID);                               \
   ScopeVPID = None;
   Optional<ISD::NodeType> ScopeOPC;
@@ -141,7 +141,7 @@ TEST_F(VPIntrinsicTest, VPIntrinsicsDefScopes) {
   ScopeOPC = ISD::SDOPC;
 #define END_REGISTER_VP_SDNODE(SDOPC)                                          \
   ASSERT_TRUE(ScopeOPC.has_value());                                           \
-  ASSERT_EQ(ScopeOPC.getValue(), ISD::SDOPC);                                  \
+  ASSERT_EQ(ScopeOPC.value(), ISD::SDOPC);                                     \
   ScopeOPC = None;
 #include "llvm/IR/VPIntrinsics.def"
@@ -234,7 +234,7 @@ TEST_F(VPIntrinsicTest, GetParamPos) {
     Optional<unsigned> MaskParamPos = VPIntrinsic::getMaskParamPos(F.getIntrinsicID());
     if (MaskParamPos) {
-      Type *MaskParamType = F.getArg(MaskParamPos.getValue())->getType();
+      Type *MaskParamType = F.getArg(MaskParamPos.value())->getType();
       ASSERT_TRUE(MaskParamType->isVectorTy());
      ASSERT_TRUE(cast<VectorType>(MaskParamType)->getElementType()->isIntegerTy(1));
@@ -243,7 +243,7 @@ TEST_F(VPIntrinsicTest, GetParamPos) {
     Optional<unsigned> VecLenParamPos = VPIntrinsic::getVectorLengthParamPos(F.getIntrinsicID());
     if (VecLenParamPos) {
-      Type *VecLenParamType = F.getArg(VecLenParamPos.getValue())->getType();
+      Type *VecLenParamType = F.getArg(VecLenParamPos.value())->getType();
       ASSERT_TRUE(VecLenParamType->isIntegerTy(32));
     }
   }
diff --git a/llvm/unittests/InterfaceStub/ELFYAMLTest.cpp b/llvm/unittests/InterfaceStub/ELFYAMLTest.cpp
index d8c6685..d347e17 100644
--- a/llvm/unittests/InterfaceStub/ELFYAMLTest.cpp
+++ b/llvm/unittests/InterfaceStub/ELFYAMLTest.cpp
@@ -49,7 +49,7 @@ TEST(ElfYamlTextAPI, YAMLReadableTBE) {
   EXPECT_NE(Stub.get(), nullptr);
   EXPECT_FALSE(Stub->SoName.has_value());
   EXPECT_TRUE(Stub->Target.Arch.has_value());
-  EXPECT_EQ(Stub->Target.Arch.getValue(), (uint16_t)llvm::ELF::EM_X86_64);
+  EXPECT_EQ(Stub->Target.Arch.value(), (uint16_t)llvm::ELF::EM_X86_64);
   EXPECT_EQ(Stub->NeededLibs.size(), 3u);
   EXPECT_STREQ(Stub->NeededLibs[0].c_str(), "libc.so");
   EXPECT_STREQ(Stub->NeededLibs[1].c_str(), "libfoo.so");
diff --git a/llvm/unittests/Object/XCOFFObjectFileTest.cpp b/llvm/unittests/Object/XCOFFObjectFileTest.cpp
index 41662be..097cb5e 100644
--- a/llvm/unittests/Object/XCOFFObjectFileTest.cpp
+++ b/llvm/unittests/Object/XCOFFObjectFileTest.cpp
@@ -83,16 +83,16 @@ TEST(XCOFFObjectFileTest, XCOFFTracebackTableAPIGeneral) {
   EXPECT_TRUE(TT.hasParmsOnStack());
   ASSERT_TRUE(TT.getParmsType());
-  EXPECT_EQ(TT.getParmsType().getValue(), "i, f, d");
+  EXPECT_EQ(TT.getParmsType().value(), "i, f, d");
   ASSERT_TRUE(TT.getTraceBackTableOffset());
-  EXPECT_EQ(TT.getTraceBackTableOffset().getValue(), 64u);
+  EXPECT_EQ(TT.getTraceBackTableOffset().value(), 64u);
   EXPECT_FALSE(TT.getHandlerMask());
   ASSERT_TRUE(TT.getFunctionName());
-  EXPECT_EQ(TT.getFunctionName().getValue(), "add_all");
-  EXPECT_EQ(TT.getFunctionName().getValue().size(), 7u);
+  EXPECT_EQ(TT.getFunctionName().value(), "add_all");
+  EXPECT_EQ(TT.getFunctionName().value().size(), 7u);
   EXPECT_FALSE(TT.getAllocaRegister());
   EXPECT_EQ(Size, 25u);
@@ -171,11 +171,11 @@ TEST(XCOFFObjectFileTest, XCOFFTracebackTableAPIControlledStorageInfoDisp) {
   XCOFFTracebackTable TT = *TTOrErr;
   EXPECT_TRUE(TT.hasControlledStorage());
   ASSERT_TRUE(TT.getNumOfCtlAnchors());
-  EXPECT_EQ(TT.getNumOfCtlAnchors().getValue(), 2u);
+  EXPECT_EQ(TT.getNumOfCtlAnchors().value(), 2u);
   ASSERT_TRUE(TT.getControlledStorageInfoDisp());
-  SmallVector<uint32_t, 8> Disp = TT.getControlledStorageInfoDisp().getValue();
+  SmallVector<uint32_t, 8> Disp = TT.getControlledStorageInfoDisp().value();
   ASSERT_EQ(Disp.size(), 2UL);
   EXPECT_EQ(Disp[0], 0x05050000u);
@@ -207,10 +207,10 @@ TEST(XCOFFObjectFileTest, XCOFFTracebackTableAPIHasVectorInfo) {
   EXPECT_TRUE(TT.hasExtensionTable());
   ASSERT_TRUE(TT.getParmsType());
-  EXPECT_EQ(TT.getParmsType().getValue(), "v, i, f, i, d, i, v");
+  EXPECT_EQ(TT.getParmsType().value(), "v, i, f, i, d, i, v");
   ASSERT_TRUE(TT.getVectorExt());
-  TBVectorExt VecExt = TT.getVectorExt().getValue();
+  TBVectorExt VecExt = TT.getVectorExt().value();
   EXPECT_EQ(VecExt.getNumberOfVRSaved(), 0);
   EXPECT_TRUE(VecExt.isVRSavedOnStack());
@@ -240,10 +240,10 @@ TEST(XCOFFObjectFileTest, XCOFFTracebackTableAPIHasVectorInfo1) {
   XCOFFTracebackTable TT = *TTOrErr;
   ASSERT_TRUE(TT.getParmsType());
-  EXPECT_EQ(TT.getParmsType().getValue(), "v, i, f, i, d, i, v, v");
+  EXPECT_EQ(TT.getParmsType().value(), "v, i, f, i, d, i, v, v");
   ASSERT_TRUE(TT.getVectorExt());
-  TBVectorExt VecExt = TT.getVectorExt().getValue();
+  TBVectorExt VecExt = TT.getVectorExt().value();
   EXPECT_EQ(VecExt.getNumberOfVRSaved(), 4);
   EXPECT_FALSE(VecExt.isVRSavedOnStack());
diff --git a/llvm/unittests/ObjectYAML/DWARFYAMLTest.cpp b/llvm/unittests/ObjectYAML/DWARFYAMLTest.cpp
index badbda9..09c9e35 100644
--- a/llvm/unittests/ObjectYAML/DWARFYAMLTest.cpp
+++ b/llvm/unittests/ObjectYAML/DWARFYAMLTest.cpp
@@ -92,7 +92,7 @@ debug_pubtypes:
   ASSERT_THAT_ERROR(parseDWARFYAML(Yaml, Data), Succeeded());
   ASSERT_TRUE(Data.PubNames.has_value());
-  DWARFYAML::PubSection PubNames = Data.PubNames.getValue();
+  DWARFYAML::PubSection PubNames = Data.PubNames.value();
   ASSERT_EQ(PubNames.Entries.size(), 2u);
   EXPECT_EQ((uint32_t)PubNames.Entries[0].DieOffset, 0x1234u);
@@ -101,7 +101,7 @@ debug_pubtypes:
   EXPECT_EQ(PubNames.Entries[1].Name, "def");
   ASSERT_TRUE(Data.PubTypes.has_value());
-  DWARFYAML::PubSection PubTypes = Data.PubTypes.getValue();
+  DWARFYAML::PubSection PubTypes = Data.PubTypes.value();
   ASSERT_EQ(PubTypes.Entries.size(), 2u);
   EXPECT_EQ((uint32_t)PubTypes.Entries[0].DieOffset, 0x1234u);
@@ -158,7 +158,7 @@ debug_gnu_pubtypes:
   ASSERT_THAT_ERROR(parseDWARFYAML(Yaml, Data), Succeeded());
   ASSERT_TRUE(Data.GNUPubNames.has_value());
-  DWARFYAML::PubSection GNUPubNames = Data.GNUPubNames.getValue();
+  DWARFYAML::PubSection GNUPubNames = Data.GNUPubNames.value();
   ASSERT_EQ(GNUPubNames.Entries.size(), 2u);
   EXPECT_EQ((uint32_t)GNUPubNames.Entries[0].DieOffset, 0x1234u);
@@ -169,7 +169,7 @@ debug_gnu_pubtypes:
   EXPECT_EQ(GNUPubNames.Entries[1].Name, "def");
   ASSERT_TRUE(Data.GNUPubTypes.has_value());
-  DWARFYAML::PubSection GNUPubTypes = Data.GNUPubTypes.getValue();
+  DWARFYAML::PubSection GNUPubTypes = Data.GNUPubTypes.value();
   ASSERT_EQ(GNUPubTypes.Entries.size(), 2u);
   EXPECT_EQ((uint32_t)GNUPubTypes.Entries[0].DieOffset, 0x1234u);
diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp
index 8f97a38..290d331 100644
--- a/llvm/unittests/ProfileData/MemProfTest.cpp
+++ b/llvm/unittests/ProfileData/MemProfTest.cpp
@@ -104,9 +104,9 @@ MATCHER_P4(FrameContains, FunctionName, LineOffset, Column, Inline, "") {
     *result_listener << "Hash mismatch";
     return false;
   }
-  if (F.SymbolName && F.SymbolName.getValue() != FunctionName) {
+  if (F.SymbolName && F.SymbolName.value() != FunctionName) {
     *result_listener << "SymbolName mismatch\nWant: " << FunctionName
-                     << "\nGot: " << F.SymbolName.getValue();
+                     << "\nGot: " << F.SymbolName.value();
     return false;
   }
   if (F.LineOffset == LineOffset && F.Column == Column &&
diff --git a/llvm/unittests/Support/AlignmentTest.cpp b/llvm/unittests/Support/AlignmentTest.cpp
index 6bd9b52..4fa16fa 100644
--- a/llvm/unittests/Support/AlignmentTest.cpp
+++ b/llvm/unittests/Support/AlignmentTest.cpp
@@ -150,8 +150,8 @@ TEST(AlignmentTest, isAligned_isAddrAligned) {
     MaybeAlign A(T.alignment);
     // Test Align
     if (A) {
-      EXPECT_EQ(isAligned(A.getValue(), T.offset), T.isAligned);
-      EXPECT_EQ(isAddrAligned(A.getValue(), T.forgedAddr()), T.isAligned);
+      EXPECT_EQ(isAligned(A.value(), T.offset), T.isAligned);
+      EXPECT_EQ(isAddrAligned(A.value(), T.forgedAddr()), T.isAligned);
     }
   }
 }
diff --git a/llvm/unittests/Support/KnownBitsTest.cpp b/llvm/unittests/Support/KnownBitsTest.cpp
index 6dcea6db..04c1d7c 100644
--- a/llvm/unittests/Support/KnownBitsTest.cpp
+++ b/llvm/unittests/Support/KnownBitsTest.cpp
@@ -369,27 +369,27 @@ TEST(KnownBitsTest, ICmpExhaustive) {
       EXPECT_EQ(AllSLT || NoneSLT, KnownSLT.has_value());
      EXPECT_EQ(AllSLE || NoneSLE, KnownSLE.has_value());
-      EXPECT_EQ(AllEQ, KnownEQ.has_value() && KnownEQ.getValue());
-      EXPECT_EQ(AllNE, KnownNE.has_value() && KnownNE.getValue());
-      EXPECT_EQ(AllUGT, KnownUGT.has_value() && KnownUGT.getValue());
-      EXPECT_EQ(AllUGE, KnownUGE.has_value() && KnownUGE.getValue());
-      EXPECT_EQ(AllULT, KnownULT.has_value() && KnownULT.getValue());
-      EXPECT_EQ(AllULE, KnownULE.has_value() && KnownULE.getValue());
-      EXPECT_EQ(AllSGT, KnownSGT.has_value() && KnownSGT.getValue());
-      EXPECT_EQ(AllSGE, KnownSGE.has_value() && KnownSGE.getValue());
-      EXPECT_EQ(AllSLT, KnownSLT.has_value() && KnownSLT.getValue());
-      EXPECT_EQ(AllSLE, KnownSLE.has_value() && KnownSLE.getValue());
-
-      EXPECT_EQ(NoneEQ, KnownEQ.has_value() && !KnownEQ.getValue());
-      EXPECT_EQ(NoneNE, KnownNE.has_value() && !KnownNE.getValue());
-      EXPECT_EQ(NoneUGT, KnownUGT.has_value() && !KnownUGT.getValue());
-      EXPECT_EQ(NoneUGE, KnownUGE.has_value() && !KnownUGE.getValue());
-      EXPECT_EQ(NoneULT, KnownULT.has_value() && !KnownULT.getValue());
-      EXPECT_EQ(NoneULE, KnownULE.has_value() && !KnownULE.getValue());
-      EXPECT_EQ(NoneSGT, KnownSGT.has_value() && !KnownSGT.getValue());
-      EXPECT_EQ(NoneSGE, KnownSGE.has_value() && !KnownSGE.getValue());
-      EXPECT_EQ(NoneSLT, KnownSLT.has_value() && !KnownSLT.getValue());
-      EXPECT_EQ(NoneSLE, KnownSLE.has_value() && !KnownSLE.getValue());
+      EXPECT_EQ(AllEQ, KnownEQ.has_value() && KnownEQ.value());
+      EXPECT_EQ(AllNE, KnownNE.has_value() && KnownNE.value());
+      EXPECT_EQ(AllUGT, KnownUGT.has_value() && KnownUGT.value());
+      EXPECT_EQ(AllUGE, KnownUGE.has_value() && KnownUGE.value());
+      EXPECT_EQ(AllULT, KnownULT.has_value() && KnownULT.value());
+      EXPECT_EQ(AllULE, KnownULE.has_value() && KnownULE.value());
+      EXPECT_EQ(AllSGT, KnownSGT.has_value() && KnownSGT.value());
+      EXPECT_EQ(AllSGE, KnownSGE.has_value() && KnownSGE.value());
+      EXPECT_EQ(AllSLT, KnownSLT.has_value() && KnownSLT.value());
+      EXPECT_EQ(AllSLE, KnownSLE.has_value() && KnownSLE.value());
+
+      EXPECT_EQ(NoneEQ, KnownEQ.has_value() && !KnownEQ.value());
+      EXPECT_EQ(NoneNE, KnownNE.has_value() && !KnownNE.value());
+      EXPECT_EQ(NoneUGT, KnownUGT.has_value() && !KnownUGT.value());
+      EXPECT_EQ(NoneUGE, KnownUGE.has_value() && !KnownUGE.value());
+      EXPECT_EQ(NoneULT, KnownULT.has_value() && !KnownULT.value());
+      EXPECT_EQ(NoneULE, KnownULE.has_value() && !KnownULE.value());
+      EXPECT_EQ(NoneSGT, KnownSGT.has_value() && !KnownSGT.value());
+      EXPECT_EQ(NoneSGE, KnownSGE.has_value() && !KnownSGE.value());
+      EXPECT_EQ(NoneSLT, KnownSLT.has_value() && !KnownSLT.value());
+      EXPECT_EQ(NoneSLE, KnownSLE.has_value() && !KnownSLE.value());
     });
   });
 }
diff --git a/llvm/unittests/TableGen/ParserEntryPointTest.cpp b/llvm/unittests/TableGen/ParserEntryPointTest.cpp
index 63bbc51..a470cc0 100644
--- a/llvm/unittests/TableGen/ParserEntryPointTest.cpp
+++ b/llvm/unittests/TableGen/ParserEntryPointTest.cpp
@@ -36,5 +36,5 @@ TEST(Parser, SanityTest) {
   Record *Foo = Records.getDef("Foo");
   Optional<StringRef> Field = Foo->getValueAsOptionalString("strField");
   EXPECT_TRUE(Field.has_value());
-  EXPECT_EQ(Field.getValue(), "value");
+  EXPECT_EQ(Field.value(), "value");
 }
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 123db21..4b47cda 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -2970,7 +2970,7 @@ public:
           << MatchTable::IntValue(RendererID);
     if (SubOperand)
       Table << MatchTable::Comment("SubOperand")
-            << MatchTable::IntValue(SubOperand.getValue());
+            << MatchTable::IntValue(SubOperand.value());
     Table << MatchTable::Comment(SymbolicName) << MatchTable::LineBreak;
   }
 };
@@ -4986,8 +4986,8 @@ Error GlobalISelEmitter::importDefaultOperandRenderers(
     auto Def = DefaultDefOp->getDef();
     if (Def->getName() == "undef_tied_input") {
       unsigned TempRegID = M.allocateTempRegID();
-      M.insertAction<MakeTempRegisterAction>(
-          InsertPt, OpTyOrNone.getValue(), TempRegID);
+      M.insertAction<MakeTempRegisterAction>(InsertPt, OpTyOrNone.value(),
+                                             TempRegID);
       InsertPt = M.insertAction<BuildMIAction>(
           InsertPt, M.allocateOutputInsnID(),
           &Target.getInstruction(RK.getDef("IMPLICIT_DEF")));
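Every hunk above applies the same mechanical rename to llvm::Optional call sites: getValue() becomes the std::optional-style value() spelling that the class also provides in this tree (the sibling hasValue()/has_value() pair follows the same pattern, and presence-only checks keep using the implicit bool conversion, which is why so many surrounding context lines are untouched). A minimal sketch of the before/after idiom, assuming an LLVM checkout from the same era in which both spellings still compile; the Opt variable and the printed output are illustrative only and are not part of this patch:

    // Hypothetical standalone example, not taken from the patched files.
    #include "llvm/ADT/Optional.h"
    #include <cstdio>

    int main() {
      llvm::Optional<int> Opt = 42;

      // Old spelling, removed throughout the hunks above.
      if (Opt.hasValue())
        std::printf("old: %d\n", Opt.getValue());

      // New spelling, introduced throughout the hunks above.
      if (Opt.has_value())
        std::printf("new: %d\n", Opt.value());

      return 0;
    }

The new names match std::optional, which keeps call sites source-compatible if llvm::Optional is later replaced by the standard type.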