author     Kazu Hirata <kazu@google.com>    2022-12-02 20:36:08 -0800
committer  Kazu Hirata <kazu@google.com>    2022-12-02 20:36:08 -0800
commit     998960ee1f2c8bc3830df4849ab89ec9d6217f26
tree       19bdc0c4beff9b937cc849d7c50f31c2f3fc5742 /llvm/lib
parent     20cde15415d2b2d1b489b4cd5c520c6a8d7f8f54
[CodeGen] Use std::nullopt instead of None (NFC)
This patch mechanically replaces None with std::nullopt where the
compiler would warn if None were deprecated. The intent is to reduce
the amount of manual work required in migrating from Optional to
std::optional.
This is part of an effort to migrate from llvm::Optional to
std::optional:
https://discourse.llvm.org/t/deprecating-llvm-optional-x-hasvalue-getvalue-getvalueor/63716
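The change is purely a spelling substitution at the points where an empty Optional is constructed or returned. The snippet below is not taken from the patch; it is a minimal, self-contained C++17 sketch with a made-up function name (findIndex) that shows the before/after pattern the patch applies throughout CodeGen.

    // Illustrative sketch only: the patch itself edits LLVM call sites such as
    // addUInt() and getConstantVRegVal(), not this code.
    #include <iostream>
    #include <optional>

    // Pre-migration LLVM style (for comparison):
    //   llvm::Optional<unsigned> findIndex(bool Found) { ...; return llvm::None; }
    // Post-migration style using the standard vocabulary types:
    std::optional<unsigned> findIndex(bool Found) {
      if (!Found)
        return std::nullopt; // was: return None;
      return 42u;            // hypothetical result value
    }

    int main() {
      if (auto Idx = findIndex(false))
        std::cout << "index = " << *Idx << '\n';
      else
        std::cout << "no index\n"; // taken here, since std::nullopt was returned
      return 0;
    }

Because llvm::Optional was already convertible from std::nullopt at the time of this commit, the substitution compiles identically and changes no behavior, which is why the patch is marked NFC.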
Diffstat (limited to 'llvm/lib')
39 files changed, 259 insertions, 252 deletions
diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp index c7f23de..b9e44d0 100644 --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -2034,7 +2034,7 @@ TypeIndex CodeViewDebug::lowerTypeFunction(const DISubroutineType *Ty) { ReturnAndArgTypeIndices.back() = TypeIndex::None(); } TypeIndex ReturnTypeIndex = TypeIndex::Void(); - ArrayRef<TypeIndex> ArgTypeIndices = None; + ArrayRef<TypeIndex> ArgTypeIndices = std::nullopt; if (!ReturnAndArgTypeIndices.empty()) { auto ReturnAndArgTypesRef = makeArrayRef(ReturnAndArgTypeIndices); ReturnTypeIndex = ReturnAndArgTypesRef.front(); diff --git a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp index d0c3e5e..d0b2fe7 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp @@ -118,13 +118,13 @@ intersects(const MachineInstr *StartMI, const MachineInstr *EndMI, for (auto RangesI = Ranges.begin(), RangesE = Ranges.end(); RangesI != RangesE; ++RangesI) { if (EndMI && Ordering.isBefore(EndMI, RangesI->first)) - return None; + return std::nullopt; if (EndMI && !Ordering.isBefore(RangesI->second, EndMI)) return RangesI; if (Ordering.isBefore(StartMI, RangesI->second)) return RangesI; } - return None; + return std::nullopt; } void DbgValueHistoryMap::trimLocationRanges( diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp b/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp index f0b1f73..1ad2362 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp @@ -36,9 +36,9 @@ DbgVariableLocation::extractFromMachineInstruction( DbgVariableLocation Location; // Variables calculated from multiple locations can't be represented here. if (Instruction.getNumDebugOperands() != 1) - return None; + return std::nullopt; if (!Instruction.getDebugOperand(0).isReg()) - return None; + return std::nullopt; Location.Register = Instruction.getDebugOperand(0).getReg(); Location.FragmentInfo.reset(); // We only handle expressions generated by DIExpression::appendOffset, @@ -53,7 +53,7 @@ DbgVariableLocation::extractFromMachineInstruction( Op->getOp() == dwarf::DW_OP_LLVM_arg) ++Op; else - return None; + return std::nullopt; } while (Op != DIExpr->expr_op_end()) { switch (Op->getOp()) { @@ -84,7 +84,7 @@ DbgVariableLocation::extractFromMachineInstruction( Offset = 0; break; default: - return None; + return std::nullopt; } ++Op; } diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h index 10019a47..dda12f7 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h +++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h @@ -165,7 +165,7 @@ public: ListBuilder(DebugLocStream &Locs, DwarfCompileUnit &CU, AsmPrinter &Asm, DbgVariable &V, const MachineInstr &MI) : Locs(Locs), Asm(Asm), V(V), MI(MI), ListIndex(Locs.startList(&CU)), - TagOffset(None) {} + TagOffset(std::nullopt) {} void setTagOffset(uint8_t TO) { TagOffset = TO; diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index 021a72c..14701bf 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -122,8 +122,8 @@ unsigned DwarfCompileUnit::getOrCreateSourceID(const DIFile *File) { // extend .file to support this. 
unsigned CUID = Asm->OutStreamer->hasRawTextSupport() ? 0 : getUniqueID(); if (!File) - return Asm->OutStreamer->emitDwarfFileDirective(0, "", "", None, None, - CUID); + return Asm->OutStreamer->emitDwarfFileDirective(0, "", "", std::nullopt, + std::nullopt, CUID); if (LastFile != File) { LastFile = File; @@ -671,13 +671,13 @@ DIE *DwarfCompileUnit::constructInlinedScopeDIE(LexicalScope *Scope, // Add the call site information to the DIE. const DILocation *IA = Scope->getInlinedAt(); - addUInt(*ScopeDIE, dwarf::DW_AT_call_file, None, + addUInt(*ScopeDIE, dwarf::DW_AT_call_file, std::nullopt, getOrCreateSourceID(IA->getFile())); - addUInt(*ScopeDIE, dwarf::DW_AT_call_line, None, IA->getLine()); + addUInt(*ScopeDIE, dwarf::DW_AT_call_line, std::nullopt, IA->getLine()); if (IA->getColumn()) - addUInt(*ScopeDIE, dwarf::DW_AT_call_column, None, IA->getColumn()); + addUInt(*ScopeDIE, dwarf::DW_AT_call_column, std::nullopt, IA->getColumn()); if (IA->getDiscriminator() && DD->getDwarfVersion() >= 4) - addUInt(*ScopeDIE, dwarf::DW_AT_GNU_discriminator, None, + addUInt(*ScopeDIE, dwarf::DW_AT_GNU_discriminator, std::nullopt, IA->getDiscriminator()); // Add name to the name table, we do this here because we're guaranteed @@ -1594,7 +1594,8 @@ void DwarfCompileUnit::createBaseTypeDIEs() { "_" + Twine(Btr.BitSize)).toStringRef(Str)); addUInt(Die, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1, Btr.Encoding); // Round up to smallest number of bytes that contains this number of bits. - addUInt(Die, dwarf::DW_AT_byte_size, None, divideCeil(Btr.BitSize, 8)); + addUInt(Die, dwarf::DW_AT_byte_size, std::nullopt, + divideCeil(Btr.BitSize, 8)); Btr.Die = &Die; } diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index d3e2bef..a06cec5 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -3532,10 +3532,10 @@ void DwarfDebug::insertSectionLabel(const MCSymbol *S) { Optional<MD5::MD5Result> DwarfDebug::getMD5AsBytes(const DIFile *File) const { assert(File); if (getDwarfVersion() < 5) - return None; + return std::nullopt; Optional<DIFile::ChecksumInfo<StringRef>> Checksum = File->getChecksum(); if (!Checksum || Checksum->Kind != DIFile::CSK_MD5) - return None; + return std::nullopt; // Convert the string checksum to an MD5Result for the streamer. // The verifier validates the checksum so we assume it's okay. diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp index ab84a9d..ebe351e 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp @@ -604,7 +604,7 @@ bool DwarfExpression::addExpression( emitLegacySExt(PrevConvertOp->getArg(0)); else if (Encoding == dwarf::DW_ATE_unsigned) emitLegacyZExt(PrevConvertOp->getArg(0)); - PrevConvertOp = None; + PrevConvertOp = std::nullopt; } else { PrevConvertOp = Op; } diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h index e605fe2..36fdf04 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h @@ -55,7 +55,7 @@ public: /// Consume one operation. Optional<DIExpression::ExprOperand> take() { if (Start == End) - return None; + return std::nullopt; return *(Start++); } @@ -65,18 +65,18 @@ public: /// Return the current operation. 
Optional<DIExpression::ExprOperand> peek() const { if (Start == End) - return None; + return std::nullopt; return *(Start); } /// Return the next operation. Optional<DIExpression::ExprOperand> peekNext() const { if (Start == End) - return None; + return std::nullopt; auto Next = Start.getNext(); if (Next == End) - return None; + return std::nullopt; return *Next; } diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp index b1d7c29..22537c8 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp @@ -411,8 +411,8 @@ void DwarfUnit::addSourceLine(DIE &Die, unsigned Line, const DIFile *File) { return; unsigned FileID = getOrCreateSourceID(File); - addUInt(Die, dwarf::DW_AT_decl_file, None, FileID); - addUInt(Die, dwarf::DW_AT_decl_line, None, Line); + addUInt(Die, dwarf::DW_AT_decl_file, std::nullopt, FileID); + addUInt(Die, dwarf::DW_AT_decl_line, std::nullopt, Line); } void DwarfUnit::addSourceLine(DIE &Die, const DILocalVariable *V) { @@ -705,12 +705,12 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIBasicType *BTy) { BTy->getEncoding()); uint64_t Size = BTy->getSizeInBits() >> 3; - addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size); + addUInt(Buffer, dwarf::DW_AT_byte_size, std::nullopt, Size); if (BTy->isBigEndian()) - addUInt(Buffer, dwarf::DW_AT_endianity, None, dwarf::DW_END_big); + addUInt(Buffer, dwarf::DW_AT_endianity, std::nullopt, dwarf::DW_END_big); else if (BTy->isLittleEndian()) - addUInt(Buffer, dwarf::DW_AT_endianity, None, dwarf::DW_END_little); + addUInt(Buffer, dwarf::DW_AT_endianity, std::nullopt, dwarf::DW_END_little); } void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIStringType *STy) { @@ -734,7 +734,7 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIStringType *STy) { addBlock(Buffer, dwarf::DW_AT_string_length, DwarfExpr.finalize()); } else { uint64_t Size = STy->getSizeInBits() >> 3; - addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size); + addUInt(Buffer, dwarf::DW_AT_byte_size, std::nullopt, Size); } if (DIExpression *Expr = STy->getStringLocationExp()) { @@ -785,7 +785,7 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIDerivedType *DTy) { && Tag != dwarf::DW_TAG_ptr_to_member_type && Tag != dwarf::DW_TAG_reference_type && Tag != dwarf::DW_TAG_rvalue_reference_type) - addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size); + addUInt(Buffer, dwarf::DW_AT_byte_size, std::nullopt, Size); if (Tag == dwarf::DW_TAG_ptr_to_member_type) addDIEEntry(Buffer, dwarf::DW_AT_containing_type, @@ -932,9 +932,11 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DICompositeType *CTy) { if (const ConstantInt *CI = dyn_cast_or_null<ConstantInt>(DDTy->getDiscriminantValue())) { if (DD->isUnsignedDIType(Discriminator->getBaseType())) - addUInt(Variant, dwarf::DW_AT_discr_value, None, CI->getZExtValue()); + addUInt(Variant, dwarf::DW_AT_discr_value, std::nullopt, + CI->getZExtValue()); else - addSInt(Variant, dwarf::DW_AT_discr_value, None, CI->getSExtValue()); + addSInt(Variant, dwarf::DW_AT_discr_value, std::nullopt, + CI->getSExtValue()); } constructMemberDIE(Variant, DDTy); } else { @@ -954,7 +956,7 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DICompositeType *CTy) { if (!SetterName.empty()) addString(ElemDie, dwarf::DW_AT_APPLE_property_setter, SetterName); if (unsigned PropertyAttributes = Property->getAttributes()) - addUInt(ElemDie, dwarf::DW_AT_APPLE_property_attribute, None, + addUInt(ElemDie, dwarf::DW_AT_APPLE_property_attribute, 
std::nullopt, PropertyAttributes); } else if (auto *Composite = dyn_cast<DICompositeType>(Element)) { if (Composite->getTag() == dwarf::DW_TAG_variant_part) { @@ -1020,10 +1022,10 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DICompositeType *CTy) { // TODO: Do we care about size for enum forward declarations? if (Size && (!CTy->isForwardDecl() || Tag == dwarf::DW_TAG_enumeration_type)) - addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size); + addUInt(Buffer, dwarf::DW_AT_byte_size, std::nullopt, Size); else if (!CTy->isForwardDecl()) // Add zero size if it is not a forward declaration. - addUInt(Buffer, dwarf::DW_AT_byte_size, None, 0); + addUInt(Buffer, dwarf::DW_AT_byte_size, std::nullopt, 0); // If we're a forward decl, say so. if (CTy->isForwardDecl()) @@ -1142,10 +1144,10 @@ DIE *DwarfUnit::getOrCreateModule(const DIModule *M) { if (!M->getAPINotesFile().empty()) addString(MDie, dwarf::DW_AT_LLVM_apinotes, M->getAPINotesFile()); if (M->getFile()) - addUInt(MDie, dwarf::DW_AT_decl_file, None, + addUInt(MDie, dwarf::DW_AT_decl_file, std::nullopt, getOrCreateSourceID(M->getFile())); if (M->getLineNo()) - addUInt(MDie, dwarf::DW_AT_decl_line, None, M->getLineNo()); + addUInt(MDie, dwarf::DW_AT_decl_line, std::nullopt, M->getLineNo()); if (M->getIsDecl()) addFlag(MDie, dwarf::DW_AT_declaration); @@ -1208,10 +1210,10 @@ bool DwarfUnit::applySubprogramDefinitionAttributes(const DISubprogram *SP, unsigned DeclID = getOrCreateSourceID(SPDecl->getFile()); unsigned DefID = getOrCreateSourceID(SP->getFile()); if (DeclID != DefID) - addUInt(SPDie, dwarf::DW_AT_decl_file, None, DefID); + addUInt(SPDie, dwarf::DW_AT_decl_file, std::nullopt, DefID); if (SP->getLine() != SPDecl->getLine()) - addUInt(SPDie, dwarf::DW_AT_decl_line, None, SP->getLine()); + addUInt(SPDie, dwarf::DW_AT_decl_line, std::nullopt, SP->getLine()); } } @@ -1379,7 +1381,7 @@ void DwarfUnit::constructSubrangeDIE(DIE &Buffer, const DISubrange *SR, } else if (auto *BI = Bound.dyn_cast<ConstantInt *>()) { if (Attr == dwarf::DW_AT_count) { if (BI->getSExtValue() != -1) - addUInt(DW_Subrange, Attr, None, BI->getSExtValue()); + addUInt(DW_Subrange, Attr, std::nullopt, BI->getSExtValue()); } else if (Attr != dwarf::DW_AT_lower_bound || DefaultLowerBound == -1 || BI->getSExtValue() != DefaultLowerBound) addSInt(DW_Subrange, Attr, dwarf::DW_FORM_sdata, BI->getSExtValue()); @@ -1440,7 +1442,7 @@ DIE *DwarfUnit::getIndexTyDie() { IndexTyDie = &createAndAddDIE(dwarf::DW_TAG_base_type, getUnitDie()); StringRef Name = "__ARRAY_SIZE_TYPE__"; addString(*IndexTyDie, dwarf::DW_AT_name, Name); - addUInt(*IndexTyDie, dwarf::DW_AT_byte_size, None, sizeof(int64_t)); + addUInt(*IndexTyDie, dwarf::DW_AT_byte_size, std::nullopt, sizeof(int64_t)); addUInt(*IndexTyDie, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1, dwarf::getArrayIndexTypeEncoding( (dwarf::SourceLanguage)getLanguage())); @@ -1481,7 +1483,7 @@ void DwarfUnit::constructArrayTypeDIE(DIE &Buffer, const DICompositeType *CTy) { if (CTy->isVector()) { addFlag(Buffer, dwarf::DW_AT_GNU_vector); if (hasVectorBeenPadded(CTy)) - addUInt(Buffer, dwarf::DW_AT_byte_size, None, + addUInt(Buffer, dwarf::DW_AT_byte_size, std::nullopt, CTy->getSizeInBits() / CHAR_BIT); } @@ -1632,8 +1634,8 @@ DIE &DwarfUnit::constructMemberDIE(DIE &Buffer, const DIDerivedType *DT) { if (IsBitfield) { // Handle bitfield, assume bytes are 8 bits. 
if (DD->useDWARF2Bitfields()) - addUInt(MemberDie, dwarf::DW_AT_byte_size, None, FieldSize/8); - addUInt(MemberDie, dwarf::DW_AT_bit_size, None, Size); + addUInt(MemberDie, dwarf::DW_AT_byte_size, std::nullopt, FieldSize / 8); + addUInt(MemberDie, dwarf::DW_AT_bit_size, std::nullopt, Size); uint64_t Offset = DT->getOffsetInBits(); // We can't use DT->getAlignInBits() here: AlignInBits for member type @@ -1655,10 +1657,10 @@ DIE &DwarfUnit::constructMemberDIE(DIE &Buffer, const DIDerivedType *DT) { if (Asm->getDataLayout().isLittleEndian()) Offset = FieldSize - (Offset + Size); - addUInt(MemberDie, dwarf::DW_AT_bit_offset, None, Offset); + addUInt(MemberDie, dwarf::DW_AT_bit_offset, std::nullopt, Offset); OffsetInBytes = FieldOffset >> 3; } else { - addUInt(MemberDie, dwarf::DW_AT_data_bit_offset, None, Offset); + addUInt(MemberDie, dwarf::DW_AT_data_bit_offset, std::nullopt, Offset); } } else { // This is not a bitfield. @@ -1682,7 +1684,7 @@ DIE &DwarfUnit::constructMemberDIE(DIE &Buffer, const DIDerivedType *DT) { addUInt(MemberDie, dwarf::DW_AT_data_member_location, dwarf::DW_FORM_udata, OffsetInBytes); else - addUInt(MemberDie, dwarf::DW_AT_data_member_location, None, + addUInt(MemberDie, dwarf::DW_AT_data_member_location, std::nullopt, OffsetInBytes); } } diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index dca8dfc..3062c63 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -1438,16 +1438,16 @@ static std::optional<std::pair<Instruction *, Constant *>> getIVIncrement(const PHINode *PN, const LoopInfo *LI) { const Loop *L = LI->getLoopFor(PN->getParent()); if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch()) - return None; + return std::nullopt; auto *IVInc = dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch())); if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L) - return None; + return std::nullopt; Instruction *LHS = nullptr; Constant *Step = nullptr; if (matchIncrement(IVInc, LHS, Step) && LHS == PN) return std::make_pair(IVInc, Step); - return None; + return std::nullopt; } static bool isIVIncrement(const Value *V, const LoopInfo *LI) { @@ -4020,10 +4020,10 @@ bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> { auto *PN = dyn_cast<PHINode>(V); if (!PN) - return None; + return std::nullopt; auto IVInc = getIVIncrement(PN, &LI); if (!IVInc) - return None; + return std::nullopt; // TODO: The result of the intrinsics above is two-compliment. However when // IV inc is expressed as add or sub, iv.next is potentially a poison value. // If it has nuw or nsw flags, we need to make sure that these flags are @@ -4032,10 +4032,10 @@ bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, // potentially complex analysis needed to prove this, we reject such cases. 
if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first)) if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap()) - return None; + return std::nullopt; if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second)) return std::make_pair(IVInc->first, ConstantStep->getValue()); - return None; + return std::nullopt; }; // Try to account for the following special case: diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp index 614ab70..932b3bd 100644 --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -114,18 +114,18 @@ isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx, // Need at least two byte positions to decide on endianness. unsigned Width = MemOffset2Idx.size(); if (Width < 2) - return None; + return std::nullopt; bool BigEndian = true, LittleEndian = true; for (unsigned MemOffset = 0; MemOffset < Width; ++ MemOffset) { auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset); if (MemOffsetAndIdx == MemOffset2Idx.end()) - return None; + return std::nullopt; const int64_t Idx = MemOffsetAndIdx->second - LowestIdx; assert(Idx >= 0 && "Expected non-negative byte offset?"); LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset); BigEndian &= Idx == bigEndianByteAt(Width, MemOffset); if (!BigEndian && !LittleEndian) - return None; + return std::nullopt; } assert((BigEndian != LittleEndian) && @@ -1290,7 +1290,7 @@ static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy, const MachineRegisterInfo &MRI) { const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI); if (!MaybeCst) - return None; + return std::nullopt; APFloat V = MaybeCst->getValueAPF(); switch (Opcode) { @@ -3246,7 +3246,7 @@ CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const { // In the combine, we want to elimate the entire tree. if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS)) - return None; + return std::nullopt; // If it's a G_OR, save it and continue to walk. If it's not, then it's // something that may be a load + arithmetic. @@ -3263,7 +3263,7 @@ CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const { // We're going to try and merge each register into a wider power-of-2 type, // so we ought to have an even number of registers. if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0) - return None; + return std::nullopt; return RegsToVisit; } @@ -3289,15 +3289,15 @@ matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits, } if (Shift % MemSizeInBits != 0) - return None; + return std::nullopt; // TODO: Handle other types of loads. auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI); if (!Load) - return None; + return std::nullopt; if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits) - return None; + return std::nullopt; return std::make_pair(Load, Shift / MemSizeInBits); } @@ -3342,7 +3342,7 @@ CombinerHelper::findLoadOffsetsForLoadOrCombine( // shifted) value. auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI); if (!LoadAndPos) - return None; + return std::nullopt; GZExtLoad *Load; int64_t DstPos; std::tie(Load, DstPos) = *LoadAndPos; @@ -3353,14 +3353,14 @@ CombinerHelper::findLoadOffsetsForLoadOrCombine( if (!MBB) MBB = LoadMBB; if (LoadMBB != MBB) - return None; + return std::nullopt; // Make sure that the MachineMemOperands of every seen load are compatible. 
auto &LoadMMO = Load->getMMO(); if (!MMO) MMO = &LoadMMO; if (MMO->getAddrSpace() != LoadMMO.getAddrSpace()) - return None; + return std::nullopt; // Find out what the base pointer and index for the load is. Register LoadPtr; @@ -3373,7 +3373,7 @@ CombinerHelper::findLoadOffsetsForLoadOrCombine( // Don't combine things like a[i], a[i] -> a bigger load. if (!SeenIdx.insert(Idx).second) - return None; + return std::nullopt; // Every load must share the same base pointer; don't combine things like: // @@ -3381,7 +3381,7 @@ CombinerHelper::findLoadOffsetsForLoadOrCombine( if (!BasePtr.isValid()) BasePtr = LoadPtr; if (BasePtr != LoadPtr) - return None; + return std::nullopt; if (Idx < LowestIdx) { LowestIdx = Idx; @@ -3393,7 +3393,7 @@ CombinerHelper::findLoadOffsetsForLoadOrCombine( // // a[i] << 16, a[i + k] << 16 -> a bigger load. if (!MemOffset2Idx.try_emplace(DstPos, Idx).second) - return None; + return std::nullopt; Loads.insert(Load); // Keep track of the position of the earliest/latest loads in the pattern. @@ -3428,9 +3428,9 @@ CombinerHelper::findLoadOffsetsForLoadOrCombine( if (Loads.count(&MI)) continue; if (MI.isLoadFoldBarrier()) - return None; + return std::nullopt; if (Iter++ == MaxIter) - return None; + return std::nullopt; } return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad); @@ -3560,7 +3560,7 @@ getTruncStoreByteOffset(GStore &Store, Register &SrcVal, MachineRegisterInfo &MRI) { Register TruncVal; if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal)))) - return None; + return std::nullopt; // The shift amount must be a constant multiple of the narrow type. // It is translated to the offset address in the wide source value "y". @@ -3578,21 +3578,21 @@ getTruncStoreByteOffset(GStore &Store, Register &SrcVal, SrcVal = TruncVal; return 0; // If it's the lowest index store. 
} - return None; + return std::nullopt; } unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits(); if (ShiftAmt % NarrowBits!= 0) - return None; + return std::nullopt; const unsigned Offset = ShiftAmt / NarrowBits; if (SrcVal.isValid() && FoundSrcVal != SrcVal) - return None; + return std::nullopt; if (!SrcVal.isValid()) SrcVal = FoundSrcVal; else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal)) - return None; + return std::nullopt; return Offset; } diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index 78a8f85..6c44a1c 100644 --- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -205,7 +205,7 @@ MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0, if (Value == 0) { Res = Op0; - return None; + return std::nullopt; } Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0)); diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp index 1809240..6750edd 100644 --- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp +++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp @@ -293,7 +293,7 @@ Optional<APInt> llvm::getIConstantVRegVal(Register VReg, assert((!ValAndVReg || ValAndVReg->VReg == VReg) && "Value found while looking through instrs"); if (!ValAndVReg) - return None; + return std::nullopt; return ValAndVReg->Value; } @@ -302,7 +302,7 @@ llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) { Optional<APInt> Val = getIConstantVRegVal(VReg, MRI); if (Val && Val->getBitWidth() <= 64) return Val->getSExtValue(); - return None; + return std::nullopt; } namespace { @@ -322,7 +322,7 @@ Optional<ValueAndVReg> getConstantVRegValWithLookThrough( switch (MI->getOpcode()) { case TargetOpcode::G_ANYEXT: if (!LookThroughAnyExt) - return None; + return std::nullopt; [[fallthrough]]; case TargetOpcode::G_TRUNC: case TargetOpcode::G_SEXT: @@ -335,21 +335,21 @@ Optional<ValueAndVReg> getConstantVRegValWithLookThrough( case TargetOpcode::COPY: VReg = MI->getOperand(1).getReg(); if (Register::isPhysicalRegister(VReg)) - return None; + return std::nullopt; break; case TargetOpcode::G_INTTOPTR: VReg = MI->getOperand(1).getReg(); break; default: - return None; + return std::nullopt; } } if (!MI || !IsConstantOpcode(MI)) - return None; + return std::nullopt; Optional<APInt> MaybeVal = getAPCstValue(MI); if (!MaybeVal) - return None; + return std::nullopt; APInt &Val = *MaybeVal; while (!SeenOpcodes.empty()) { std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val(); @@ -393,7 +393,7 @@ Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) { const MachineOperand &CstVal = MI->getOperand(1); if (CstVal.isCImm()) return CstVal.getCImm()->getValue(); - return None; + return std::nullopt; } Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) { @@ -402,7 +402,7 @@ Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) { return CstVal.getCImm()->getValue(); if (CstVal.isFPImm()) return CstVal.getFPImm()->getValueAPF().bitcastToAPInt(); - return None; + return std::nullopt; } } // end anonymous namespace @@ -426,7 +426,7 @@ Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough( auto Reg = getConstantVRegValWithLookThrough( VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs); if (!Reg) - return None; + return std::nullopt; return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(), Reg->VReg}; } @@ -445,7 +445,7 @@ 
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) { auto *DefMI = MRI.getVRegDef(Reg); auto DstTy = MRI.getType(DefMI->getOperand(0).getReg()); if (!DstTy.isValid()) - return None; + return std::nullopt; unsigned Opc = DefMI->getOpcode(); while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) { Register SrcReg = DefMI->getOperand(1).getReg(); @@ -497,11 +497,11 @@ Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1, const MachineRegisterInfo &MRI) { auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false); if (!MaybeOp2Cst) - return None; + return std::nullopt; auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false); if (!MaybeOp1Cst) - return None; + return std::nullopt; const APInt &C1 = MaybeOp1Cst->Value; const APInt &C2 = MaybeOp2Cst->Value; @@ -553,7 +553,7 @@ Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1, return APIntOps::umax(C1, C2); } - return None; + return std::nullopt; } Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, @@ -561,11 +561,11 @@ Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const MachineRegisterInfo &MRI) { const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI); if (!Op2Cst) - return None; + return std::nullopt; const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI); if (!Op1Cst) - return None; + return std::nullopt; APFloat C1 = Op1Cst->getValueAPF(); const APFloat &C2 = Op2Cst->getValueAPF(); @@ -607,7 +607,7 @@ Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, break; } - return None; + return std::nullopt; } SmallVector<APInt> @@ -773,7 +773,7 @@ Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1, } } } - return None; + return std::nullopt; } Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, @@ -786,7 +786,7 @@ Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, APFloat::rmNearestTiesToEven); return DstVal; } - return None; + return std::nullopt; } Optional<SmallVector<unsigned>> @@ -796,20 +796,20 @@ llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) { auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> { auto MaybeCst = getIConstantVRegVal(R, MRI); if (!MaybeCst) - return None; + return std::nullopt; return MaybeCst->countLeadingZeros(); }; if (Ty.isVector()) { // Try to constant fold each element. 
auto *BV = getOpcodeDef<GBuildVector>(Src, MRI); if (!BV) - return None; + return std::nullopt; for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) { if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) { FoldedCTLZs.emplace_back(*MaybeFold); continue; } - return None; + return std::nullopt; } return FoldedCTLZs; } @@ -817,7 +817,7 @@ llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) { FoldedCTLZs.emplace_back(*MaybeCst); return FoldedCTLZs; } - return None; + return std::nullopt; } bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI, @@ -1016,7 +1016,7 @@ Optional<int> llvm::getSplatIndex(MachineInstr &MI) { int SplatValue = *FirstDefinedIdx; if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()), [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; })) - return None; + return std::nullopt; return SplatValue; } @@ -1033,10 +1033,10 @@ Optional<ValueAndVReg> getAnyConstantSplat(Register VReg, bool AllowUndef) { MachineInstr *MI = getDefIgnoringCopies(VReg, MRI); if (!MI) - return None; + return std::nullopt; if (!isBuildVectorOp(MI->getOpcode())) - return None; + return std::nullopt; Optional<ValueAndVReg> SplatValAndReg; for (MachineOperand &Op : MI->uses()) { @@ -1048,7 +1048,7 @@ Optional<ValueAndVReg> getAnyConstantSplat(Register VReg, if (!ElementValAndReg) { if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element))) continue; - return None; + return std::nullopt; } // Record splat value @@ -1057,7 +1057,7 @@ Optional<ValueAndVReg> getAnyConstantSplat(Register VReg, // Different constant then the one already recorded, not a constant splat. if (SplatValAndReg->Value != ElementValAndReg->Value) - return None; + return std::nullopt; } return SplatValAndReg; @@ -1089,7 +1089,7 @@ Optional<APInt> llvm::getIConstantSplatVal(const Register Reg, return ValAndVReg->Value; } - return None; + return std::nullopt; } Optional<APInt> llvm::getIConstantSplatVal(const MachineInstr &MI, @@ -1103,7 +1103,7 @@ llvm::getIConstantSplatSExtVal(const Register Reg, if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI); - return None; + return std::nullopt; } Optional<int64_t> @@ -1117,7 +1117,7 @@ Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg, bool AllowUndef) { if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef)) return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI); - return None; + return std::nullopt; } bool llvm::isBuildVectorAllZeros(const MachineInstr &MI, @@ -1136,13 +1136,13 @@ Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) { unsigned Opc = MI.getOpcode(); if (!isBuildVectorOp(Opc)) - return None; + return std::nullopt; if (auto Splat = getIConstantSplatSExtVal(MI, MRI)) return RegOrConstant(*Splat); auto Reg = MI.getOperand(1).getReg(); if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()), [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; })) - return None; + return std::nullopt; return RegOrConstant(Reg); } @@ -1210,7 +1210,7 @@ llvm::isConstantOrConstantSplatVector(MachineInstr &MI, return C->Value; auto MaybeCst = getIConstantSplatSExtVal(MI, MRI); if (!MaybeCst) - return None; + return std::nullopt; const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits(); return APInt(ScalarSize, *MaybeCst, true); } diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp 
b/llvm/lib/CodeGen/ImplicitNullChecks.cpp index da6ec76..ab45269 100644 --- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp +++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp @@ -261,12 +261,12 @@ ImplicitNullChecks::computeDependence(const MachineInstr *MI, if (canReorder(*I, MI)) continue; - if (Dep == None) { + if (Dep == std::nullopt) { // Found one possible dependency, keep track of it. Dep = I; } else { // We found two dependencies, so bail out. - return {false, None}; + return {false, std::nullopt}; } } @@ -805,7 +805,7 @@ void ImplicitNullChecks::rewriteNullChecks( // Insert an *unconditional* branch to not-null successor - we expect // block placement to remove fallthroughs later. TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr, - /*Cond=*/None, DL); + /*Cond=*/std::nullopt, DL); NumImplicitNullChecks++; } diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp index d243cb5..7a436a7 100644 --- a/llvm/lib/CodeGen/InlineSpiller.cpp +++ b/llvm/lib/CodeGen/InlineSpiller.cpp @@ -1613,7 +1613,7 @@ void HoistSpillHelper::hoistAllSpills() { RMEnt->removeOperand(i - 1); } } - Edit.eliminateDeadDefs(SpillsToRm, None); + Edit.eliminateDeadDefs(SpillsToRm, std::nullopt); } } diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp index 11bb3ee..74e5631 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp +++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp @@ -1045,7 +1045,7 @@ Optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) { // If there is no location, and we have reached the limit of how many stack // slots to track, then don't track this one. if (SpillLocs.size() >= StackWorkingSetLimit) - return None; + return std::nullopt; // Spill location is untracked: create record for this one, and all // subregister slots too. @@ -1297,7 +1297,7 @@ Optional<LocIdx> InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) { Optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI); if (!SpillLoc) - return None; + return std::nullopt; // Where in the stack slot is this value defined -- i.e., what size of value // is this? An important question, because it could be loaded into a register @@ -1311,7 +1311,7 @@ InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) { if (IdxIt == MTracker->StackSlotIdxes.end()) // That index is not tracked. This is suprising, and unlikely to ever // occur, but the safe action is to indicate the variable is optimised out. - return None; + return std::nullopt; unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillLoc, IdxIt->second); return MTracker->getSpillMLoc(SpillID); @@ -1529,7 +1529,7 @@ bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI, // If we didn't find anything: there's no way to express our value. if (!NewReg) { - NewID = None; + NewID = std::nullopt; } else { // Re-state the value as being defined within the subregister // that we found. @@ -1539,7 +1539,7 @@ bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI, } } else { // If we can't handle subregisters, unset the new value. - NewID = None; + NewID = std::nullopt; } } @@ -1628,7 +1628,8 @@ bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) { // a DBG_PHI. This can happen if DBG_PHIs are malformed, or refer to a // dead stack slot, for example. // Record a DebugPHIRecord with an empty value + location. 
- DebugPHINumToValue.push_back({InstrNum, MI.getParent(), None, None}); + DebugPHINumToValue.push_back( + {InstrNum, MI.getParent(), std::nullopt, std::nullopt}); return true; }; @@ -1840,17 +1841,17 @@ InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI, MachineFunction *MF) { // TODO: Handle multiple stores folded into one. if (!MI.hasOneMemOperand()) - return None; + return std::nullopt; // Reject any memory operand that's aliased -- we can't guarantee its value. auto MMOI = MI.memoperands_begin(); const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); if (PVal->isAliased(MFI)) - return None; + return std::nullopt; if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII)) - return None; // This is not a spill instruction, since no valid size was - // returned from either function. + return std::nullopt; // This is not a spill instruction, since no valid size + // was returned from either function. return extractSpillBaseRegAndOffset(MI); } @@ -1869,7 +1870,7 @@ Optional<SpillLocationNo> InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI, MachineFunction *MF, unsigned &Reg) { if (!MI.hasOneMemOperand()) - return None; + return std::nullopt; // FIXME: Handle folded restore instructions with more than one memory // operand. @@ -1877,7 +1878,7 @@ InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI, Reg = MI.getOperand(0).getReg(); return extractSpillBaseRegAndOffset(MI); } - return None; + return std::nullopt; } bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) { @@ -2781,7 +2782,7 @@ Optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc( CandidateLocs = NewCandidates; } if (CandidateLocs.empty()) - return None; + return std::nullopt; // We now have a set of LocIdxes that contain the right output value in // each of the predecessors. Pick the lowest; if there's a register loc, @@ -3984,7 +3985,7 @@ Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( // No DBG_PHI means there can be no location. if (LowerIt == UpperIt) - return None; + return std::nullopt; // If any DBG_PHIs referred to a location we didn't understand, don't try to // compute a value. There might be scenarios where we could recover a value @@ -3993,7 +3994,7 @@ Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( auto DBGPHIRange = make_range(LowerIt, UpperIt); for (const DebugPHIRecord &DBG_PHI : DBGPHIRange) if (!DBG_PHI.ValueRead) - return None; + return std::nullopt; // If there's only one DBG_PHI, then that is our value number. if (std::distance(LowerIt, UpperIt) == 1) @@ -4077,7 +4078,7 @@ Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( for (auto &PHIIt : PHI->IncomingValues) { // Any undef input means DBG_PHIs didn't dominate the use point. if (Updater.UndefMap.find(&PHIIt.first->BB) != Updater.UndefMap.end()) - return None; + return std::nullopt; ValueIDNum ValueToCheck; const ValueTable &BlockLiveOuts = MLiveOuts[PHIIt.first->BB.getNumber()]; @@ -4096,7 +4097,7 @@ Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( } if (BlockLiveOuts[Loc.asU64()] != ValueToCheck) - return None; + return std::nullopt; } // Record this value as validated. diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h index 879bdf7..7db61d8 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h +++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h @@ -1007,7 +1007,7 @@ public: // in a DebugVariable is as "None". 
Optional<DIExpression::FragmentInfo> OptFragmentInfo = FragmentInfo; if (DebugVariable::isDefaultFragment(FragmentInfo)) - OptFragmentInfo = None; + OptFragmentInfo = std::nullopt; DebugVariable Overlapped(Var.getVariable(), OptFragmentInfo, Var.getInlinedAt()); diff --git a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp index 7706b3f..3b36e1a 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp +++ b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp @@ -1115,7 +1115,7 @@ VarLocBasedLDV::OpenRangesSet::getEntryValueBackup(DebugVariable Var) { if (It != EntryValuesBackupVars.end()) return It->second; - return llvm::None; + return std::nullopt; } void VarLocBasedLDV::collectIDsForRegs(VarLocsInRange &Collected, @@ -1621,7 +1621,7 @@ Optional<VarLocBasedLDV::VarLoc::SpillLoc> VarLocBasedLDV::isRestoreInstruction(const MachineInstr &MI, MachineFunction *MF, Register &Reg) { if (!MI.hasOneMemOperand()) - return None; + return std::nullopt; // FIXME: Handle folded restore instructions with more than one memory // operand. @@ -1629,7 +1629,7 @@ VarLocBasedLDV::isRestoreInstruction(const MachineInstr &MI, Reg = MI.getOperand(0).getReg(); return extractSpillBaseRegAndOffset(MI); } - return None; + return std::nullopt; } /// A spilled register may indicate that we have to end the current range of diff --git a/llvm/lib/CodeGen/LiveDebugVariables.cpp b/llvm/lib/CodeGen/LiveDebugVariables.cpp index 574c0f9..f514f40 100644 --- a/llvm/lib/CodeGen/LiveDebugVariables.cpp +++ b/llvm/lib/CodeGen/LiveDebugVariables.cpp @@ -985,7 +985,7 @@ void UserValue::extendDef( Start = Start.getNextSlot(); if (I.value() != DbgValue || I.stop() != Start) { // Clear `Kills`, as we have a new def available. - Kills = None; + Kills = std::nullopt; return; } // This is a one-slot placeholder. Just skip it. @@ -996,7 +996,7 @@ void UserValue::extendDef( if (I.valid() && I.start() < Stop) { Stop = I.start(); // Clear `Kills`, as we have a new def available. - Kills = None; + Kills = std::nullopt; } if (Start < Stop) { diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.cpp b/llvm/lib/CodeGen/MIRParser/MILexer.cpp index b70eaa6..1cae191 100644 --- a/llvm/lib/CodeGen/MIRParser/MILexer.cpp +++ b/llvm/lib/CodeGen/MIRParser/MILexer.cpp @@ -159,7 +159,7 @@ static Cursor lexStringConstant(Cursor C, ErrorCallbackType ErrorCallback) { ErrorCallback( C.location(), "end of machine instruction reached before the closing '\"'"); - return None; + return std::nullopt; } } C.advance(); @@ -283,7 +283,7 @@ static MIToken::TokenKind getIdentifierKind(StringRef Identifier) { static Cursor maybeLexIdentifier(Cursor C, MIToken &Token) { if (!isalpha(C.peek()) && C.peek() != '_') - return None; + return std::nullopt; auto Range = C; while (isIdentifierChar(C.peek())) C.advance(); @@ -297,7 +297,7 @@ static Cursor maybeLexMachineBasicBlock(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { bool IsReference = C.remaining().startswith("%bb."); if (!IsReference && !C.remaining().startswith("bb.")) - return None; + return std::nullopt; auto Range = C; unsigned PrefixLength = IsReference ? 4 : 3; C.advance(PrefixLength); // Skip '%bb.' or 'bb.' 
@@ -331,7 +331,7 @@ static Cursor maybeLexMachineBasicBlock(Cursor C, MIToken &Token, static Cursor maybeLexIndex(Cursor C, MIToken &Token, StringRef Rule, MIToken::TokenKind Kind) { if (!C.remaining().startswith(Rule) || !isdigit(C.peek(Rule.size()))) - return None; + return std::nullopt; auto Range = C; C.advance(Rule.size()); auto NumberRange = C; @@ -344,7 +344,7 @@ static Cursor maybeLexIndex(Cursor C, MIToken &Token, StringRef Rule, static Cursor maybeLexIndexAndName(Cursor C, MIToken &Token, StringRef Rule, MIToken::TokenKind Kind) { if (!C.remaining().startswith(Rule) || !isdigit(C.peek(Rule.size()))) - return None; + return std::nullopt; auto Range = C; C.advance(Rule.size()); auto NumberRange = C; @@ -384,7 +384,7 @@ static Cursor maybeLexSubRegisterIndex(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { const StringRef Rule = "%subreg."; if (!C.remaining().startswith(Rule)) - return None; + return std::nullopt; return lexName(C, Token, MIToken::SubRegisterIndex, Rule.size(), ErrorCallback); } @@ -393,7 +393,7 @@ static Cursor maybeLexIRBlock(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { const StringRef Rule = "%ir-block."; if (!C.remaining().startswith(Rule)) - return None; + return std::nullopt; if (isdigit(C.peek(Rule.size()))) return maybeLexIndex(C, Token, Rule, MIToken::IRBlock); return lexName(C, Token, MIToken::NamedIRBlock, Rule.size(), ErrorCallback); @@ -403,7 +403,7 @@ static Cursor maybeLexIRValue(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { const StringRef Rule = "%ir."; if (!C.remaining().startswith(Rule)) - return None; + return std::nullopt; if (isdigit(C.peek(Rule.size()))) return maybeLexIndex(C, Token, Rule, MIToken::IRValue); return lexName(C, Token, MIToken::NamedIRValue, Rule.size(), ErrorCallback); @@ -412,7 +412,7 @@ static Cursor maybeLexIRValue(Cursor C, MIToken &Token, static Cursor maybeLexStringConstant(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { if (C.peek() != '"') - return None; + return std::nullopt; return lexName(C, Token, MIToken::StringConstant, /*PrefixLength=*/0, ErrorCallback); } @@ -446,7 +446,7 @@ static Cursor lexNamedVirtualRegister(Cursor C, MIToken &Token) { static Cursor maybeLexRegister(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { if (C.peek() != '%' && C.peek() != '$') - return None; + return std::nullopt; if (C.peek() == '%') { if (isdigit(C.peek(1))) @@ -455,7 +455,7 @@ static Cursor maybeLexRegister(Cursor C, MIToken &Token, if (isRegisterChar(C.peek(1))) return lexNamedVirtualRegister(C, Token); - return None; + return std::nullopt; } assert(C.peek() == '$'); @@ -471,7 +471,7 @@ static Cursor maybeLexRegister(Cursor C, MIToken &Token, static Cursor maybeLexGlobalValue(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { if (C.peek() != '@') - return None; + return std::nullopt; if (!isdigit(C.peek(1))) return lexName(C, Token, MIToken::NamedGlobalValue, /*PrefixLength=*/1, ErrorCallback); @@ -488,7 +488,7 @@ static Cursor maybeLexGlobalValue(Cursor C, MIToken &Token, static Cursor maybeLexExternalSymbol(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { if (C.peek() != '&') - return None; + return std::nullopt; return lexName(C, Token, MIToken::ExternalSymbol, /*PrefixLength=*/1, ErrorCallback); } @@ -497,7 +497,7 @@ static Cursor maybeLexMCSymbol(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { const StringRef Rule = "<mcsymbol "; if (!C.remaining().startswith(Rule)) - return None; + return std::nullopt; auto Start = C; 
C.advance(Rule.size()); @@ -562,7 +562,7 @@ static Cursor lexFloatingPointLiteral(Cursor Range, Cursor C, MIToken &Token) { static Cursor maybeLexHexadecimalLiteral(Cursor C, MIToken &Token) { if (C.peek() != '0' || (C.peek(1) != 'x' && C.peek(1) != 'X')) - return None; + return std::nullopt; Cursor Range = C; C.advance(2); unsigned PrefLen = 2; @@ -574,7 +574,7 @@ static Cursor maybeLexHexadecimalLiteral(Cursor C, MIToken &Token) { C.advance(); StringRef StrVal = Range.upto(C); if (StrVal.size() <= PrefLen) - return None; + return std::nullopt; if (PrefLen == 2) Token.reset(MIToken::HexLiteral, Range.upto(C)); else // It must be 3, which means that there was a floating-point prefix. @@ -584,7 +584,7 @@ static Cursor maybeLexHexadecimalLiteral(Cursor C, MIToken &Token) { static Cursor maybeLexNumericalLiteral(Cursor C, MIToken &Token) { if (!isdigit(C.peek()) && (C.peek() != '-' || !isdigit(C.peek(1)))) - return None; + return std::nullopt; auto Range = C; C.advance(); while (isdigit(C.peek())) @@ -610,7 +610,7 @@ static MIToken::TokenKind getMetadataKeywordKind(StringRef Identifier) { static Cursor maybeLexExclaim(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { if (C.peek() != '!') - return None; + return std::nullopt; auto Range = C; C.advance(1); if (isdigit(C.peek()) || !isIdentifierChar(C.peek())) { @@ -667,7 +667,7 @@ static Cursor maybeLexSymbol(Cursor C, MIToken &Token) { } else Kind = symbolToken(C.peek()); if (Kind == MIToken::Error) - return None; + return std::nullopt; auto Range = C; C.advance(Length); Token.reset(Kind, Range.upto(C)); @@ -676,7 +676,7 @@ static Cursor maybeLexSymbol(Cursor C, MIToken &Token) { static Cursor maybeLexNewline(Cursor C, MIToken &Token) { if (!isNewlineChar(C.peek())) - return None; + return std::nullopt; auto Range = C; C.advance(); Token.reset(MIToken::Newline, Range.upto(C)); @@ -686,7 +686,7 @@ static Cursor maybeLexNewline(Cursor C, MIToken &Token) { static Cursor maybeLexEscapedIRValue(Cursor C, MIToken &Token, ErrorCallbackType ErrorCallback) { if (C.peek() != '`') - return None; + return std::nullopt; auto Range = C; C.advance(); auto StrRange = C; diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp index faaaa03..7eb701c 100644 --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -595,7 +595,7 @@ bool MIParser::error(StringRef::iterator Loc, const Twine &Msg) { // Create a diagnostic for a YAML string literal. Error = SMDiagnostic(SM, SMLoc(), Buffer.getBufferIdentifier(), 1, Loc - Source.data(), SourceMgr::DK_Error, Msg.str(), - Source, None, None); + Source, std::nullopt, std::nullopt); return true; } @@ -1356,7 +1356,7 @@ bool MIParser::parseMetadata(Metadata *&MD) { // Forward reference. auto &FwdRef = PFS.MachineForwardRefMDNodes[ID]; FwdRef = std::make_pair( - MDTuple::getTemporary(MF.getFunction().getContext(), None), Loc); + MDTuple::getTemporary(MF.getFunction().getContext(), std::nullopt), Loc); PFS.MachineMetadataNodes[ID].reset(FwdRef.first.get()); MD = FwdRef.first.get(); diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp index aa9522b..a19196e1 100644 --- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp @@ -999,7 +999,7 @@ SMDiagnostic MIRParserImpl::diagFromMIStringDiag(const SMDiagnostic &Error, (HasQuote ? 1 : 0)); // TODO: Translate any source ranges as well. 
- return SM.GetMessage(Loc, Error.getKind(), Error.getMessage(), None, + return SM.GetMessage(Loc, Error.getKind(), Error.getMessage(), std::nullopt, Error.getFixIts()); } diff --git a/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp b/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp index c569f03..afe0786 100644 --- a/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp +++ b/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp @@ -234,7 +234,7 @@ MachineBlockFrequencyInfo::getBlockFreq(const MachineBasicBlock *MBB) const { Optional<uint64_t> MachineBlockFrequencyInfo::getBlockProfileCount( const MachineBasicBlock *MBB) const { if (!MBFI) - return None; + return std::nullopt; const Function &F = MBFI->getFunction()->getFunction(); return MBFI->getBlockProfileCount(F, MBB); @@ -243,7 +243,7 @@ Optional<uint64_t> MachineBlockFrequencyInfo::getBlockProfileCount( Optional<uint64_t> MachineBlockFrequencyInfo::getProfileCountFromFreq(uint64_t Freq) const { if (!MBFI) - return None; + return std::nullopt; const Function &F = MBFI->getFunction()->getFunction(); return MBFI->getProfileCountFromFreq(F, Freq); diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp index 66f0eb8..b46fc43 100644 --- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp +++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp @@ -98,7 +98,7 @@ static Optional<DestSourcePair> isCopyInstr(const MachineInstr &MI, return Optional<DestSourcePair>( DestSourcePair{MI.getOperand(0), MI.getOperand(1)}); - return None; + return std::nullopt; } class CopyTracker { diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp index 19a1ede..138eb3c 100644 --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -2338,7 +2338,7 @@ MachineInstr::getSpillSize(const TargetInstrInfo *TII) const { if (MFI.isSpillSlotObjectIndex(FI)) return (*memoperands_begin())->getSize(); } - return None; + return std::nullopt; } Optional<unsigned> @@ -2346,7 +2346,7 @@ MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const { MMOList Accesses; if (TII->hasStoreToStackSlot(*this, Accesses)) return getSpillSlotSize(Accesses, getMF()->getFrameInfo()); - return None; + return std::nullopt; } Optional<unsigned> @@ -2357,7 +2357,7 @@ MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const { if (MFI.isSpillSlotObjectIndex(FI)) return (*memoperands_begin())->getSize(); } - return None; + return std::nullopt; } Optional<unsigned> @@ -2365,7 +2365,7 @@ MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const { MMOList Accesses; if (TII->hasLoadFromStackSlot(*this, Accesses)) return getSpillSlotSize(Accesses, getMF()->getFrameInfo()); - return None; + return std::nullopt; } unsigned MachineInstr::getDebugInstrNum() { diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp index bc3a491..bc2a7a8 100644 --- a/llvm/lib/CodeGen/MachineOperand.cpp +++ b/llvm/lib/CodeGen/MachineOperand.cpp @@ -749,7 +749,7 @@ void MachineOperand::print(raw_ostream &OS, LLT TypeToPrint, const TargetIntrinsicInfo *IntrinsicInfo) const { tryToGetTargetInfo(*this, TRI, IntrinsicInfo); ModuleSlotTracker DummyMST(nullptr); - print(OS, DummyMST, TypeToPrint, None, /*PrintDef=*/false, + print(OS, DummyMST, TypeToPrint, std::nullopt, /*PrintDef=*/false, /*IsStandalone=*/true, /*ShouldPrintRegisterTies=*/true, /*TiedOperandIdx=*/0, TRI, IntrinsicInfo); diff --git a/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp 
b/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp index 631768e..4dda1c4 100644 --- a/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp +++ b/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp @@ -33,7 +33,7 @@ DiagnosticInfoMIROptimization::MachineArgument::MachineArgument( Optional<uint64_t> MachineOptimizationRemarkEmitter::computeHotness(const MachineBasicBlock &MBB) { if (!MBFI) - return None; + return std::nullopt; return MBFI->getBlockProfileCount(&MBB); } diff --git a/llvm/lib/CodeGen/MachineOutliner.cpp b/llvm/lib/CodeGen/MachineOutliner.cpp index 5da68ab..56c3d27 100644 --- a/llvm/lib/CodeGen/MachineOutliner.cpp +++ b/llvm/lib/CodeGen/MachineOutliner.cpp @@ -727,7 +727,8 @@ MachineFunction *MachineOutliner::createOutlinedFunction( Unit /* Context */, F->getName(), StringRef(MangledNameStream.str()), Unit /* File */, 0 /* Line 0 is reserved for compiler-generated code. */, - DB.createSubroutineType(DB.getOrCreateTypeArray(None)), /* void type */ + DB.createSubroutineType( + DB.getOrCreateTypeArray(std::nullopt)), /* void type */ 0, /* Line 0 is reserved for compiler-generated code. */ DINode::DIFlags::FlagArtificial /* Compiler-generated code. */, /* Outlined code is optimized code by definition. */ diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index 3310cdd..ead91f6 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -536,7 +536,7 @@ RegAllocEvictionAdvisor::getOrderLimit(const LiveInterval &VirtReg, if (MinCost >= CostPerUseLimit) { LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = " << MinCost << ", no cheaper registers to be found.\n"); - return None; + return std::nullopt; } // It is normal for register classes to have a long tail of registers with diff --git a/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp b/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp index e0dc9f0..feb31e5 100644 --- a/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp +++ b/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp @@ -89,7 +89,7 @@ static bool reduceDbgValsForwardScan(MachineBasicBlock &MBB) { for (auto &MI : MBB) { if (MI.isDebugValue()) { - DebugVariable Var(MI.getDebugVariable(), None, + DebugVariable Var(MI.getDebugVariable(), std::nullopt, MI.getDebugLoc()->getInlinedAt()); auto VMI = VariableMap.find(Var); // Just stop tracking this variable, until we cover DBG_VALUE_LIST. 
diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp
index a8144e4..e50d0b4 100644
--- a/llvm/lib/CodeGen/SelectOptimize.cpp
+++ b/llvm/lib/CodeGen/SelectOptimize.cpp
@@ -978,7 +978,7 @@ Optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) {
       TTI->getInstructionCost(I, TargetTransformInfo::TCK_Latency);
   if (auto OC = ICost.getValue())
     return Optional<uint64_t>(*OC);
-  return None;
+  return std::nullopt;
 }
 ScaledNumber<uint64_t>
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index f6d5775..3db4dc4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7935,22 +7935,22 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
   // Typical i64 by i8 pattern requires recursion up to 8 calls depth
   if (Depth == 10)
-    return None;
+    return std::nullopt;
   // Only allow multiple uses if the instruction is a vector load (in which
   // case we will use the load for every ExtractVectorElement)
   if (Depth && !Op.hasOneUse() &&
       (Op.getOpcode() != ISD::LOAD || !Op.getValueType().isVector()))
-    return None;
+    return std::nullopt;
   // Fail to combine if we have encountered anything but a LOAD after handling
   // an ExtractVectorElement.
   if (Op.getOpcode() != ISD::LOAD && VectorIndex.has_value())
-    return None;
+    return std::nullopt;
   unsigned BitWidth = Op.getValueSizeInBits();
   if (BitWidth % 8 != 0)
-    return None;
+    return std::nullopt;
   unsigned ByteWidth = BitWidth / 8;
   assert(Index < ByteWidth && "invalid index requested");
   (void) ByteWidth;
@@ -7960,27 +7960,27 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
     auto LHS = calculateByteProvider(Op->getOperand(0), Index, Depth + 1,
                                      VectorIndex);
     if (!LHS)
-      return None;
+      return std::nullopt;
     auto RHS = calculateByteProvider(Op->getOperand(1), Index, Depth + 1,
                                      VectorIndex);
     if (!RHS)
-      return None;
+      return std::nullopt;
     if (LHS->isConstantZero())
       return RHS;
     if (RHS->isConstantZero())
       return LHS;
-    return None;
+    return std::nullopt;
   }
   case ISD::SHL: {
     auto ShiftOp = dyn_cast<ConstantSDNode>(Op->getOperand(1));
     if (!ShiftOp)
-      return None;
+      return std::nullopt;
     uint64_t BitShift = ShiftOp->getZExtValue();
     if (BitShift % 8 != 0)
-      return None;
+      return std::nullopt;
     uint64_t ByteShift = BitShift / 8;
     // If we are shifting by an amount greater than the index we are trying to
@@ -7997,13 +7997,13 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
     SDValue NarrowOp = Op->getOperand(0);
     unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits();
     if (NarrowBitWidth % 8 != 0)
-      return None;
+      return std::nullopt;
     uint64_t NarrowByteWidth = NarrowBitWidth / 8;
     if (Index >= NarrowByteWidth)
       return Op.getOpcode() == ISD::ZERO_EXTEND ?
                  Optional<ByteProvider>(ByteProvider::getConstantZero())
-                 : None;
+                 : std::nullopt;
     return calculateByteProvider(NarrowOp, Index, Depth + 1, VectorIndex,
                                  StartingIndex);
   }
@@ -8013,14 +8013,14 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
   case ISD::EXTRACT_VECTOR_ELT: {
     auto OffsetOp = dyn_cast<ConstantSDNode>(Op->getOperand(1));
     if (!OffsetOp)
-      return None;
+      return std::nullopt;
     VectorIndex = OffsetOp->getZExtValue();
     SDValue NarrowOp = Op->getOperand(0);
     unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits();
     if (NarrowBitWidth % 8 != 0)
-      return None;
+      return std::nullopt;
     uint64_t NarrowByteWidth = NarrowBitWidth / 8;
     // Check to see if the position of the element in the vector corresponds
@@ -8030,9 +8030,9 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
     // vector of i16s, each element provides two bytes (V[1] provides byte 2 and
     // 3).
     if (VectorIndex.value() * NarrowByteWidth > StartingIndex)
-      return None;
+      return std::nullopt;
     if ((VectorIndex.value() + 1) * NarrowByteWidth <= StartingIndex)
-      return None;
+      return std::nullopt;
     return calculateByteProvider(Op->getOperand(0), Index, Depth + 1,
                                  VectorIndex, StartingIndex);
@@ -8040,11 +8040,11 @@
   case ISD::LOAD: {
     auto L = cast<LoadSDNode>(Op.getNode());
     if (!L->isSimple() || L->isIndexed())
-      return None;
+      return std::nullopt;
     unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
     if (NarrowBitWidth % 8 != 0)
-      return None;
+      return std::nullopt;
     uint64_t NarrowByteWidth = NarrowBitWidth / 8;
     // If the width of the load does not reach byte we are trying to provide for
@@ -8053,14 +8053,14 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
     if (Index >= NarrowByteWidth)
       return L->getExtensionType() == ISD::ZEXTLOAD ?
                  Optional<ByteProvider>(ByteProvider::getConstantZero())
-                 : None;
+                 : std::nullopt;
     unsigned BPVectorIndex = VectorIndex.value_or(0U);
     return ByteProvider::getMemory(L, Index, BPVectorIndex);
   }
   }
-  return None;
+  return std::nullopt;
 }
 static unsigned littleEndianByteAt(unsigned BW, unsigned i) {
@@ -8079,7 +8079,7 @@ static Optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
   // The endian can be decided only when it is 2 bytes at least.
   unsigned Width = ByteOffsets.size();
   if (Width < 2)
-    return None;
+    return std::nullopt;
   bool BigEndian = true, LittleEndian = true;
   for (unsigned i = 0; i < Width; i++) {
@@ -8087,7 +8087,7 @@ static Optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
     LittleEndian &= CurrentByteOffset == littleEndianByteAt(Width, i);
     BigEndian &= CurrentByteOffset == bigEndianByteAt(Width, i);
     if (!BigEndian && !LittleEndian)
-      return None;
+      return std::nullopt;
   }
   assert((BigEndian != LittleEndian) && "It should be either big endian or"
@@ -8374,8 +8374,9 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
   SmallVector<int64_t, 8> ByteOffsets(ByteWidth);
   unsigned ZeroExtendedBytes = 0;
   for (int i = ByteWidth - 1; i >= 0; --i) {
-    auto P = calculateByteProvider(SDValue(N, 0), i, 0, /*VectorIndex*/ None,
-                                   /*StartingIndex*/ i);
+    auto P =
+        calculateByteProvider(SDValue(N, 0), i, 0, /*VectorIndex*/ std::nullopt,
+                              /*StartingIndex*/ i);
     if (!P)
       return SDValue();
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 8ac4dbc..7f898cb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -6647,7 +6647,7 @@ static Optional<EVT> findMemType(SelectionDAG &DAG, const TargetLowering &TLI,
   // Using element-wise loads and stores for widening operations is not
   // supported for scalable vectors
   if (Scalable)
-    return None;
+    return std::nullopt;
   return RetVT;
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 4d94536..c29261ea 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -496,7 +496,7 @@ bool ISD::isVPReduction(unsigned Opcode) {
 Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
   switch (Opcode) {
   default:
-    return None;
+    return std::nullopt;
 #define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...) \
   case ISD::VPSD: \
     return MASKPOS;
@@ -508,7 +508,7 @@ Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
 Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
   switch (Opcode) {
   default:
-    return None;
+    return std::nullopt;
 #define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
   case ISD::VPSD: \
     return EVLPOS;
@@ -1609,7 +1609,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
          "APInt size does not match type size!");
   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
+  AddNodeIDNode(ID, Opc, getVTList(EltVT), std::nullopt);
   ID.AddPointer(Elt);
   ID.AddBoolean(isO);
   void *IP = nullptr;
@@ -1664,7 +1664,7 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
   // we don't have issues with SNANs.
   unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
+  AddNodeIDNode(ID, Opc, getVTList(EltVT), std::nullopt);
   ID.AddPointer(&V);
   void *IP = nullptr;
   SDNode *N = nullptr;
@@ -1721,7 +1721,7 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
     Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, Opc, getVTList(VT), None);
+  AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt);
   ID.AddPointer(GV);
   ID.AddInteger(Offset);
   ID.AddInteger(TargetFlags);
@@ -1739,7 +1739,7 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, Opc, getVTList(VT), None);
+  AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt);
   ID.AddInteger(FI);
   void *IP = nullptr;
   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
@@ -1757,7 +1757,7 @@ SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
          "Cannot set target flags on target-independent jump tables");
   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, Opc, getVTList(VT), None);
+  AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt);
   ID.AddInteger(JTI);
   ID.AddInteger(TargetFlags);
   void *IP = nullptr;
@@ -1781,7 +1781,7 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                        : getDataLayout().getPrefTypeAlign(C->getType());
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, Opc, getVTList(VT), None);
+  AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt);
   ID.AddInteger(Alignment->value());
   ID.AddInteger(Offset);
   ID.AddPointer(C);
@@ -1808,7 +1808,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
     Alignment = getDataLayout().getPrefTypeAlign(C->getType());
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, Opc, getVTList(VT), None);
+  AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt);
   ID.AddInteger(Alignment->value());
   ID.AddInteger(Offset);
   C->addSelectionDAGCSEId(ID);
@@ -1827,7 +1827,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                      unsigned TargetFlags) {
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
+  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), std::nullopt);
   ID.AddInteger(Index);
   ID.AddInteger(Offset);
   ID.AddInteger(TargetFlags);
@@ -1843,7 +1843,7 @@ SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
+  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), std::nullopt);
   ID.AddPointer(MBB);
   void *IP = nullptr;
   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
@@ -2120,7 +2120,7 @@ SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
+  AddNodeIDNode(ID, ISD::Register, getVTList(VT), std::nullopt);
   ID.AddInteger(RegNo);
   void *IP = nullptr;
   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
@@ -2135,7 +2135,7 @@ SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
+  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), std::nullopt);
   ID.AddPointer(RegMask);
   void *IP = nullptr;
   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
@@ -2177,7 +2177,7 @@ SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
   unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, Opc, getVTList(VT), None);
+  AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt);
   ID.AddPointer(BA);
   ID.AddInteger(Offset);
   ID.AddInteger(TargetFlags);
@@ -2193,7 +2193,7 @@ SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
 SDValue SelectionDAG::getSrcValue(const Value *V) {
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
+  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), std::nullopt);
   ID.AddPointer(V);
   void *IP = nullptr;
@@ -2208,7 +2208,7 @@ SDValue SelectionDAG::getSrcValue(const Value *V) {
 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
+  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), std::nullopt);
   ID.AddPointer(MD);
   void *IP = nullptr;
@@ -5065,7 +5065,7 @@ static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
 /// Gets or creates the specified node.
 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, Opcode, getVTList(VT), None);
+  AddNodeIDNode(ID, Opcode, getVTList(VT), std::nullopt);
   void *IP = nullptr;
   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
     return SDValue(E, 0);
@@ -5633,7 +5633,7 @@ static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
     return (C1Ext + C2Ext + 1).extractBits(C1.getBitWidth(), 1);
   }
   }
-  return llvm::None;
+  return std::nullopt;
 }
 // Handle constant folding with UNDEF.
@@ -5650,7 +5650,7 @@ static llvm::Optional<APInt> FoldValueWithUndef(unsigned Opcode,
   if (Opcode == ISD::AND || Opcode == ISD::MUL)
     return APInt::getZero(C1.getBitWidth());
-  return llvm::None;
+  return std::nullopt;
 }
 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
@@ -9341,7 +9341,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                               SDVTList VTList) {
-  return getNode(Opcode, DL, VTList, None);
+  return getNode(Opcode, DL, VTList, std::nullopt);
 }
 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
@@ -9608,7 +9608,7 @@ void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT) {
   SDVTList VTs = getVTList(VT);
-  return SelectNodeTo(N, MachineOpc, VTs, None);
+  return SelectNodeTo(N, MachineOpc, VTs, std::nullopt);
 }
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
@@ -9649,7 +9649,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
                                    EVT VT2) {
   SDVTList VTs = getVTList(VT1, VT2);
-  return SelectNodeTo(N, MachineOpc, VTs, None);
+  return SelectNodeTo(N, MachineOpc, VTs, std::nullopt);
 }
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
@@ -9816,7 +9816,7 @@ SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                             EVT VT) {
   SDVTList VTs = getVTList(VT);
-  return getMachineNode(Opcode, dl, VTs, None);
+  return getMachineNode(Opcode, dl, VTs, std::nullopt);
 }
 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
@@ -11501,7 +11501,7 @@ MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
     return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
   }
-  return None;
+  return std::nullopt;
 }
 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
@@ -11928,26 +11928,26 @@ Optional<std::pair<APInt, APInt>>
 BuildVectorSDNode::isConstantSequence() const {
   unsigned NumOps = getNumOperands();
   if (NumOps < 2)
-    return None;
+    return std::nullopt;
   if (!isa<ConstantSDNode>(getOperand(0)) ||
       !isa<ConstantSDNode>(getOperand(1)))
-    return None;
+    return std::nullopt;
   unsigned EltSize = getValueType(0).getScalarSizeInBits();
   APInt Start = getConstantOperandAPInt(0).trunc(EltSize);
   APInt Stride = getConstantOperandAPInt(1).trunc(EltSize) - Start;
   if (Stride.isZero())
-    return None;
+    return std::nullopt;
   for (unsigned i = 2; i < NumOps; ++i) {
     if (!isa<ConstantSDNode>(getOperand(i)))
-      return None;
+      return std::nullopt;
     APInt Val = getConstantOperandAPInt(i).trunc(EltSize);
     if (Val != (Start + (Stride * i)))
-      return None;
+      return std::nullopt;
   }
   return std::make_pair(Start, Stride);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index a5071ab..4274c45 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2781,7 +2781,8 @@ SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
   CallOptions.setDiscardResult(true);
   SDValue Chain =
       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
-                      None, CallOptions, getCurSDLoc()).second;
+                      std::nullopt, CallOptions, getCurSDLoc())
+          .second;
   // On PS4/PS5, the "return address" must still be within the calling
   // function, even if it's at the very end, so emit an explicit TRAP here.
   // Passing 'true' for doesNotReturn above won't generate the trap for us.
@@ -4048,7 +4049,7 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
   // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
   if (*Alignment <= StackAlign)
-    Alignment = None;
+    Alignment = std::nullopt;
   const uint64_t StackAlignMask = StackAlign.value() - 1U;
   // Round the size of the allocation up to the stack alignment size
@@ -4343,7 +4344,7 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
     Src0 = I.getArgOperand(0);
     Ptr = I.getArgOperand(1);
     Mask = I.getArgOperand(2);
-    Alignment = None;
+    Alignment = std::nullopt;
   };
   Value *PtrOperand, *MaskOperand, *Src0Operand;
@@ -4505,7 +4506,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
                                  MaybeAlign &Alignment) {
     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
     Ptr = I.getArgOperand(0);
-    Alignment = None;
+    Alignment = std::nullopt;
     Mask = I.getArgOperand(1);
     Src0 = I.getArgOperand(2);
   };
@@ -5719,7 +5720,7 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
   if (VMI != FuncInfo.ValueMap.end()) {
     const auto &TLI = DAG.getTargetLoweringInfo();
     RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
-                     V->getType(), None);
+                     V->getType(), std::nullopt);
     if (RFV.occupiesMultipleRegs()) {
       splitMultiRegDbgValue(RFV.getRegsAndSizes());
       return true;
@@ -6885,7 +6886,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     SDValue Result = DAG.getMemIntrinsicNode(
         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
-        /* align */ None, Flags);
+        /* align */ std::nullopt, Flags);
     // Chain the prefetch in parallell with any pending loads, to stay out of
     // the way of later optimizations.
@@ -10249,7 +10250,7 @@ void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
   // notional registers required by the type.
   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
-                   None); // This is not an ABI copy.
+                   std::nullopt); // This is not an ABI copy.
   SDValue Chain = DAG.getEntryNode();
   if (ExtendType == ISD::ANY_EXTEND) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 0b760ac..8e0c4c6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -165,7 +165,7 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
                                            int LookUpDepth) {
   // Can not look any further - give up now
   if (LookUpDepth <= 0)
-    return None;
+    return std::nullopt;
   // Spill location is known for gc relocates
   if (const auto *Relocate = dyn_cast<GCRelocateInst>(Val)) {
@@ -173,18 +173,18 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
     assert((isa<GCStatepointInst>(Statepoint) || isa<UndefValue>(Statepoint)) &&
            "GetStatepoint must return one of two types");
     if (isa<UndefValue>(Statepoint))
-      return None;
+      return std::nullopt;
     const auto &RelocationMap = Builder.FuncInfo.StatepointRelocationMaps
                                     [cast<GCStatepointInst>(Statepoint)];
     auto It = RelocationMap.find(Relocate);
     if (It == RelocationMap.end())
-      return None;
+      return std::nullopt;
     auto &Record = It->second;
     if (Record.type != RecordType::Spill)
-      return None;
+      return std::nullopt;
     return Record.payload.FI;
   }
@@ -203,10 +203,10 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
       Optional<int> SpillSlot =
          findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
       if (!SpillSlot)
-        return None;
+        return std::nullopt;
       if (MergedResult && *MergedResult != *SpillSlot)
-        return None;
+        return std::nullopt;
       MergedResult = SpillSlot;
     }
@@ -241,7 +241,7 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
   // which we visit values is unspecified.
   // Don't know any information about this instruction
-  return None;
+  return std::nullopt;
 }
 /// Return true if-and-only-if the given SDValue can be lowered as either a
@@ -920,7 +920,7 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
       auto *RetTy = Relocate->getType();
       Register Reg = FuncInfo.CreateRegs(RetTy);
       RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
-                       DAG.getDataLayout(), Reg, RetTy, None);
+                       DAG.getDataLayout(), Reg, RetTy, std::nullopt);
       SDValue Chain = DAG.getRoot();
       RFV.getCopyToRegs(Relocated, DAG, getCurSDLoc(), Chain, nullptr);
       PendingExports.push_back(Chain);
@@ -1265,7 +1265,7 @@ void SelectionDAGBuilder::visitGCRelocate(const GCRelocateInst &Relocate) {
     Register InReg = Record.payload.Reg;
     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                      DAG.getDataLayout(), InReg, Relocate.getType(),
-                     None); // This is not an ABI copy.
+                     std::nullopt); // This is not an ABI copy.
     // We generate copy to/from regs even for local uses, hence we must
     // chain with current root to ensure proper ordering of copies w.r.t.
     // statepoint.
diff --git a/llvm/lib/CodeGen/SplitKit.cpp b/llvm/lib/CodeGen/SplitKit.cpp
index 94149f56..2ae85c8 100644
--- a/llvm/lib/CodeGen/SplitKit.cpp
+++ b/llvm/lib/CodeGen/SplitKit.cpp
@@ -1450,7 +1450,7 @@ void SplitEditor::deleteRematVictims() {
   if (Dead.empty())
     return;
-  Edit->eliminateDeadDefs(Dead, None);
+  Edit->eliminateDeadDefs(Dead, std::nullopt);
 }
 void SplitEditor::forceRecomputeVNI(const VNInfo &ParentVNI) {
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index f46a944..ff624da 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1201,7 +1201,7 @@ TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
     assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
            "TargetInstrInfo::describeLoadedValue can't describe super- or "
            "sub-regs for copy instructions");
-    return None;
+    return std::nullopt;
   } else if (auto RegImm = isAddImmediate(MI, Reg)) {
     Register SrcReg = RegImm->Reg;
     Offset = RegImm->Imm;
@@ -1219,16 +1219,16 @@ TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
     // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
     if (!PSV || PSV->mayAlias(&MFI))
-      return None;
+      return std::nullopt;
     const MachineOperand *BaseOp;
     if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                       TRI))
-      return None;
+      return std::nullopt;
     // FIXME: Scalable offsets are not yet handled in the offset code below.
     if (OffsetIsScalable)
-      return None;
+      return std::nullopt;
     // TODO: Can currently only handle mem instructions with a single define.
     // An example from the x86 target:
@@ -1237,7 +1237,7 @@ TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
     // ...
     //
     if (MI.getNumExplicitDefs() != 1)
-      return None;
+      return std::nullopt;
     // TODO: In what way do we need to take Reg into consideration here?
@@ -1249,7 +1249,7 @@ TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
     return ParamLoadedValue(*BaseOp, Expr);
   }
-  return None;
+  return std::nullopt;
 }
 /// Both DefMI and UseMI must be valid. By default, call directly to the
diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp
index e95abb6..26ec8ee 100644
--- a/llvm/lib/CodeGen/TargetPassConfig.cpp
+++ b/llvm/lib/CodeGen/TargetPassConfig.cpp
@@ -341,7 +341,7 @@ static std::string getFSProfileFile(const TargetMachine *TM) {
   if (!FSProfileFile.empty())
     return FSProfileFile.getValue();
   const Optional<PGOOptions> &PGOOpt = TM->getPGOOption();
-  if (PGOOpt == None || PGOOpt->Action != PGOOptions::SampleUse)
+  if (PGOOpt == std::nullopt || PGOOpt->Action != PGOOptions::SampleUse)
     return std::string();
   return PGOOpt->ProfileFile;
 }
@@ -352,7 +352,7 @@ static std::string getFSRemappingFile(const TargetMachine *TM) {
   if (!FSRemappingFile.empty())
     return FSRemappingFile.getValue();
   const Optional<PGOOptions> &PGOOpt = TM->getPGOOption();
-  if (PGOOpt == None || PGOOpt->Action != PGOOptions::SampleUse)
+  if (PGOOpt == std::nullopt || PGOOpt->Action != PGOOptions::SampleUse)
     return std::string();
   return PGOOpt->ProfileRemappingFile;
 }
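Every hunk in this range makes the same substitution: `None` becomes `std::nullopt` wherever an empty `Optional`, `MaybeAlign`, or `ArrayRef` is returned or passed. As a reference for the target idiom only, the following is a minimal standalone sketch using just the C++ standard library; the helper name and values are invented for illustration and are not part of the patch.

#include <cstdio>
#include <optional>

// Hypothetical stand-in for the small query helpers touched above:
// return a value when one is known, and std::nullopt otherwise
// (previously spelled `return None;` with llvm::Optional).
static std::optional<unsigned> getMaskOperandIndex(unsigned Opcode) {
  if (Opcode == 42) // pretend opcode 42 carries a mask operand at index 2
    return 2;
  return std::nullopt;
}

int main() {
  if (auto Idx = getMaskOperandIndex(42))
    std::printf("mask operand index: %u\n", *Idx);
  if (!getMaskOperandIndex(7))
    std::printf("no mask operand\n");
  return 0;
}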