diff options
author | Fangrui Song <i@maskray.me> | 2022-12-13 09:06:36 +0000 |
---|---|---|
committer | Fangrui Song <i@maskray.me> | 2022-12-13 09:06:36 +0000 |
commit | 67819a72c6ba39267effe8edfc1befddc3f3f2f9 (patch) | |
tree | 9a95db915f8eded88767ac3e9c31c8db045ab505 /llvm/lib/CodeGen | |
parent | 48e6ff9ad3eb1971de6d7ba12e31754781aff675 (diff) | |
download | llvm-67819a72c6ba39267effe8edfc1befddc3f3f2f9.zip llvm-67819a72c6ba39267effe8edfc1befddc3f3f2f9.tar.gz llvm-67819a72c6ba39267effe8edfc1befddc3f3f2f9.tar.bz2 |
[CodeGen] llvm::Optional => std::optional
Diffstat (limited to 'llvm/lib/CodeGen')
35 files changed, 262 insertions, 248 deletions
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index fbbe19f..14f0b78 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -1072,7 +1072,7 @@ static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) { // We assume a single instruction only has a spill or reload, not // both. - Optional<unsigned> Size; + std::optional<unsigned> Size; if ((Size = MI.getRestoreSize(TII))) { CommentOS << *Size << "-byte Reload\n"; } else if ((Size = MI.getFoldedRestoreSize(TII))) { diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp index dc09b52..1b2e7ad 100644 --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -1339,7 +1339,7 @@ void CodeViewDebug::calculateRanges( assert(DVInst->isDebugValue() && "Invalid History entry"); // FIXME: Find a way to represent constant variables, since they are // relatively common. - Optional<DbgVariableLocation> Location = + std::optional<DbgVariableLocation> Location = DbgVariableLocation::extractFromMachineInstruction(*DVInst); if (!Location) { diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp b/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp index 2038952..4cf4f02 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp @@ -30,7 +30,7 @@ using namespace llvm; /// variable's lexical scope instruction ranges. 
static cl::opt<bool> TrimVarLocs("trim-var-locs", cl::Hidden, cl::init(true)); -Optional<DbgVariableLocation> +std::optional<DbgVariableLocation> DbgVariableLocation::extractFromMachineInstruction( const MachineInstr &Instruction) { DbgVariableLocation Location; diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h index dda12f7..a458825 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h +++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h @@ -159,7 +159,7 @@ class DebugLocStream::ListBuilder { DbgVariable &V; const MachineInstr &MI; size_t ListIndex; - Optional<uint8_t> TagOffset; + std::optional<uint8_t> TagOffset; public: ListBuilder(DebugLocStream &Locs, DwarfCompileUnit &CU, AsmPrinter &Asm, diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index 1727671..6dde503 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -1129,7 +1129,7 @@ void DwarfCompileUnit::constructAbstractSubprogramScopeDIE( AbsDef = &ContextCU->createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr); ContextCU->applySubprogramAttributesToDefinition(SP, *AbsDef); ContextCU->addSInt(*AbsDef, dwarf::DW_AT_inline, - DD->getDwarfVersion() <= 4 ? Optional<dwarf::Form>() + DD->getDwarfVersion() <= 4 ? std::optional<dwarf::Form>() : dwarf::DW_FORM_implicit_const, dwarf::DW_INL_inlined); if (DIE *ObjectPointer = ContextCU->createAndAddScopeChildren(Scope, *AbsDef)) diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h index 9245ac9..5d2ef8e 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h @@ -116,7 +116,7 @@ class DbgVariable : public DbgEntity { /// Index of the entry list in DebugLocs. unsigned DebugLocListIndex = ~0u; /// DW_OP_LLVM_tag_offset value from DebugLocs. 
- Optional<uint8_t> DebugLocListTagOffset; + std::optional<uint8_t> DebugLocListTagOffset; /// Single value location description. std::unique_ptr<DbgValueLoc> ValueLoc = nullptr; @@ -175,7 +175,9 @@ public: void setDebugLocListIndex(unsigned O) { DebugLocListIndex = O; } unsigned getDebugLocListIndex() const { return DebugLocListIndex; } void setDebugLocListTagOffset(uint8_t O) { DebugLocListTagOffset = O; } - Optional<uint8_t> getDebugLocListTagOffset() const { return DebugLocListTagOffset; } + std::optional<uint8_t> getDebugLocListTagOffset() const { + return DebugLocListTagOffset; + } StringRef getName() const { return getVariable()->getName(); } const DbgValueLoc *getValueLoc() const { return ValueLoc.get(); } /// Get the FI entries, sorted by fragment offset. diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp index ebe351e..d89caac 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp @@ -494,7 +494,7 @@ bool DwarfExpression::addExpression( // and not any other parts of the following DWARF expression. assert(!IsEmittingEntryValue && "Can't emit entry value around expression"); - Optional<DIExpression::ExprOperand> PrevConvertOp; + std::optional<DIExpression::ExprOperand> PrevConvertOp; while (ExprCursor) { auto Op = ExprCursor.take(); diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h index b869e28..a5a19cdf 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h @@ -53,7 +53,7 @@ public: DIExpressionCursor(const DIExpressionCursor &) = default; /// Consume one operation. 
- Optional<DIExpression::ExprOperand> take() { + std::optional<DIExpression::ExprOperand> take() { if (Start == End) return std::nullopt; return *(Start++); @@ -63,14 +63,14 @@ public: void consume(unsigned N) { std::advance(Start, N); } /// Return the current operation. - Optional<DIExpression::ExprOperand> peek() const { + std::optional<DIExpression::ExprOperand> peek() const { if (Start == End) return std::nullopt; return *(Start); } /// Return the next operation. - Optional<DIExpression::ExprOperand> peekNext() const { + std::optional<DIExpression::ExprOperand> peekNext() const { if (Start == End) return std::nullopt; @@ -170,7 +170,7 @@ public: bool isParameterValue() { return LocationFlags & CallSiteParamValue; } - Optional<uint8_t> TagOffset; + std::optional<uint8_t> TagOffset; protected: /// Push a DW_OP_piece / DW_OP_bit_piece for emitting later, if one is needed diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp index aad00c8..9cd6532 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp @@ -218,7 +218,7 @@ void DwarfUnit::addFlag(DIE &Die, dwarf::Attribute Attribute) { } void DwarfUnit::addUInt(DIEValueList &Die, dwarf::Attribute Attribute, - Optional<dwarf::Form> Form, uint64_t Integer) { + std::optional<dwarf::Form> Form, uint64_t Integer) { if (!Form) Form = DIEInteger::BestForm(false, Integer); assert(Form != dwarf::DW_FORM_implicit_const && @@ -232,13 +232,13 @@ void DwarfUnit::addUInt(DIEValueList &Block, dwarf::Form Form, } void DwarfUnit::addSInt(DIEValueList &Die, dwarf::Attribute Attribute, - Optional<dwarf::Form> Form, int64_t Integer) { + std::optional<dwarf::Form> Form, int64_t Integer) { if (!Form) Form = DIEInteger::BestForm(true, Integer); addAttribute(Die, Attribute, *Form, DIEInteger(Integer)); } -void DwarfUnit::addSInt(DIELoc &Die, Optional<dwarf::Form> Form, +void DwarfUnit::addSInt(DIELoc &Die, std::optional<dwarf::Form> Form, int64_t 
Integer) { addSInt(Die, (dwarf::Attribute)0, Form, Integer); } diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h index 48d63d1..395539f 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h @@ -15,10 +15,10 @@ #include "DwarfDebug.h" #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/Optional.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/DIE.h" #include "llvm/Target/TargetMachine.h" +#include <optional> #include <string> namespace llvm { @@ -143,15 +143,15 @@ public: /// Add an unsigned integer attribute data and value. void addUInt(DIEValueList &Die, dwarf::Attribute Attribute, - Optional<dwarf::Form> Form, uint64_t Integer); + std::optional<dwarf::Form> Form, uint64_t Integer); void addUInt(DIEValueList &Block, dwarf::Form Form, uint64_t Integer); /// Add an signed integer attribute data and value. void addSInt(DIEValueList &Die, dwarf::Attribute Attribute, - Optional<dwarf::Form> Form, int64_t Integer); + std::optional<dwarf::Form> Form, int64_t Integer); - void addSInt(DIELoc &Die, Optional<dwarf::Form> Form, int64_t Integer); + void addSInt(DIELoc &Die, std::optional<dwarf::Form> Form, int64_t Integer); /// Add a string attribute data and value. /// diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp index 9af2630..8068681 100644 --- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp +++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp @@ -229,7 +229,8 @@ walkToAllocaAndPrependOffsetDeref(const DataLayout &DL, Value *Start, /// Extract the offset used in \p DIExpr. Returns std::nullopt if the expression /// doesn't explicitly describe a memory location with DW_OP_deref or if the /// expression is too complex to interpret. 
-static Optional<int64_t> getDerefOffsetInBytes(const DIExpression *DIExpr) { +static std::optional<int64_t> +getDerefOffsetInBytes(const DIExpression *DIExpr) { int64_t Offset = 0; const unsigned NumElements = DIExpr->getNumElements(); const auto Elements = DIExpr->getElements(); diff --git a/llvm/lib/CodeGen/CFIInstrInserter.cpp b/llvm/lib/CodeGen/CFIInstrInserter.cpp index 842339a..2574168 100644 --- a/llvm/lib/CodeGen/CFIInstrInserter.cpp +++ b/llvm/lib/CodeGen/CFIInstrInserter.cpp @@ -18,7 +18,6 @@ //===----------------------------------------------------------------------===// #include "llvm/ADT/DepthFirstIterator.h" -#include "llvm/ADT/Optional.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/Passes.h" @@ -89,10 +88,10 @@ class CFIInstrInserter : public MachineFunctionPass { #define INVALID_OFFSET INT_MAX /// contains the location where CSR register is saved. struct CSRSavedLocation { - CSRSavedLocation(Optional<unsigned> R, Optional<int> O) + CSRSavedLocation(std::optional<unsigned> R, std::optional<int> O) : Reg(R), Offset(O) {} - Optional<unsigned> Reg; - Optional<int> Offset; + std::optional<unsigned> Reg; + std::optional<int> Offset; }; /// Contains cfa offset and register values valid at entry and exit of basic @@ -187,8 +186,8 @@ void CFIInstrInserter::calculateOutgoingCFAInfo(MBBCFAInfo &MBBInfo) { // Determine cfa offset and register set by the block. 
for (MachineInstr &MI : *MBBInfo.MBB) { if (MI.isCFIInstruction()) { - Optional<unsigned> CSRReg; - Optional<int> CSROffset; + std::optional<unsigned> CSRReg; + std::optional<int> CSROffset; unsigned CFIIndex = MI.getOperand(0).getCFIIndex(); const MCCFIInstruction &CFI = Instrs[CFIIndex]; switch (CFI.getOperation()) { diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp index a432e4e..64e2d51 100644 --- a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp @@ -107,7 +107,7 @@ void CSEMIRBuilder::profileMBBOpcode(GISelInstProfileBuilder &B, void CSEMIRBuilder::profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps, - Optional<unsigned> Flags, + std::optional<unsigned> Flags, GISelInstProfileBuilder &B) const { profileMBBOpcode(B, Opc); @@ -170,7 +170,7 @@ CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps, MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps, - Optional<unsigned> Flag) { + std::optional<unsigned> Flag) { switch (Opc) { default: break; @@ -210,8 +210,8 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, break; } - if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(), - SrcOps[1].getReg(), *getMRI())) + if (std::optional<APInt> Cst = ConstantFoldBinOp( + Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI())) return buildConstant(DstOps[0], *Cst); break; } @@ -230,7 +230,7 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, // Try to constant fold these. 
assert(SrcOps.size() == 2 && "Invalid sources"); assert(DstOps.size() == 1 && "Invalid dsts"); - if (Optional<APFloat> Cst = ConstantFoldFPBinOp( + if (std::optional<APFloat> Cst = ConstantFoldFPBinOp( Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI())) return buildFConstant(DstOps[0], *Cst); break; @@ -251,7 +251,7 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, // Try to constant fold these. assert(SrcOps.size() == 1 && "Invalid sources"); assert(DstOps.size() == 1 && "Invalid dsts"); - if (Optional<APFloat> Cst = ConstantFoldIntToFloat( + if (std::optional<APFloat> Cst = ConstantFoldIntToFloat( Opc, DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getReg(), *getMRI())) return buildFConstant(DstOps[0], *Cst); break; diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp index 27f4f4f..c5d5d68 100644 --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -108,7 +108,7 @@ static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) { /// 1 1 2 /// 2 2 1 /// 3 3 0 -static Optional<bool> +static std::optional<bool> isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx, int64_t LowestIdx) { // Need at least two byte positions to decide on endianness. 
@@ -1285,9 +1285,9 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) { LegalizerHelper::LegalizeResult::Legalized; } -static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy, - const Register Op, - const MachineRegisterInfo &MRI) { +static std::optional<APFloat> +constantFoldFpUnary(unsigned Opcode, LLT DstTy, const Register Op, + const MachineRegisterInfo &MRI) { const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI); if (!MaybeCst) return std::nullopt; @@ -1327,8 +1327,8 @@ static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy, return V; } -bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI, - Optional<APFloat> &Cst) { +bool CombinerHelper::matchCombineConstantFoldFpUnary( + MachineInstr &MI, std::optional<APFloat> &Cst) { Register DstReg = MI.getOperand(0).getReg(); Register SrcReg = MI.getOperand(1).getReg(); LLT DstTy = MRI.getType(DstReg); @@ -1336,8 +1336,8 @@ bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI, return Cst.has_value(); } -void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI, - Optional<APFloat> &Cst) { +void CombinerHelper::applyCombineConstantFoldFpUnary( + MachineInstr &MI, std::optional<APFloat> &Cst) { assert(Cst && "Optional is unexpectedly empty!"); Builder.setInstrAndDebugLoc(MI); MachineFunction &MF = Builder.getMF(); @@ -3269,7 +3269,7 @@ bool CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI, return true; } -Optional<SmallVector<Register, 8>> +std::optional<SmallVector<Register, 8>> CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const { assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!"); // We want to detect if Root is part of a tree which represents a bunch @@ -3367,7 +3367,7 @@ matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits, return std::make_pair(Load, Shift / MemSizeInBits); } -Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>> 
+std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>> CombinerHelper::findLoadOffsetsForLoadOrCombine( SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx, const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) { @@ -3559,7 +3559,7 @@ bool CombinerHelper::matchLoadOrCombine( // pattern. If it does, then we can represent it using a load + possibly a // BSWAP. bool IsBigEndianTarget = MF.getDataLayout().isBigEndian(); - Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx); + std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx); if (!IsBigEndian) return false; bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian; @@ -4612,7 +4612,7 @@ bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI, // G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD(X, Y), C) // if and only if (G_PTR_ADD X, C) has one use. Register LHSBase; - Optional<ValueAndVReg> LHSCstOff; + std::optional<ValueAndVReg> LHSCstOff; if (!mi_match(MI.getBaseReg(), MRI, m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff))))) return false; @@ -5983,7 +5983,7 @@ bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI, return MRI.getType(MatchInfo) == DstVecTy; } - Optional<ValueAndVReg> ShiftAmount; + std::optional<ValueAndVReg> ShiftAmount; const auto LoPattern = m_GBitcast(m_Reg(Lo)); const auto HiPattern = m_GLShr(m_GBitcast(m_Reg(Hi)), m_GCst(ShiftAmount)); if (mi_match( @@ -6014,7 +6014,7 @@ bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo) { // Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with // y if K == size of vector element type - Optional<ValueAndVReg> ShiftAmt; + std::optional<ValueAndVReg> ShiftAmt; if (!mi_match(MI.getOperand(1).getReg(), MRI, m_GLShr(m_GBitcast(m_GBuildVector(m_Reg(), m_Reg(MatchInfo))), m_GCst(ShiftAmt)))) diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index 
6c44a1c..44fb5db 100644 --- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -197,7 +197,7 @@ MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res, return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}); } -Optional<MachineInstrBuilder> +std::optional<MachineInstrBuilder> MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) { assert(Res == 0 && "Res is a result argument"); @@ -762,9 +762,9 @@ MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res, return buildInstr(TargetOpcode::G_TRUNC, Res, Op); } -MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res, - const SrcOp &Op, - Optional<unsigned> Flags) { +MachineInstrBuilder +MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op, + std::optional<unsigned> Flags) { return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags); } @@ -779,16 +779,15 @@ MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, - Optional<unsigned> Flags) { + std::optional<unsigned> Flags) { return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags); } -MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res, - const SrcOp &Tst, - const SrcOp &Op0, - const SrcOp &Op1, - Optional<unsigned> Flags) { +MachineInstrBuilder +MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst, + const SrcOp &Op0, const SrcOp &Op1, + std::optional<unsigned> Flags) { return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags); } @@ -1029,10 +1028,10 @@ void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy, #endif } -MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc, - ArrayRef<DstOp> DstOps, - ArrayRef<SrcOp> SrcOps, - Optional<unsigned> Flags) { +MachineInstrBuilder +MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps, + ArrayRef<SrcOp> SrcOps, 
+ std::optional<unsigned> Flags) { switch (Opc) { default: break; diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp index 6750edd..a164601 100644 --- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp +++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp @@ -286,9 +286,9 @@ void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, reportGISelFailure(MF, TPC, MORE, R); } -Optional<APInt> llvm::getIConstantVRegVal(Register VReg, - const MachineRegisterInfo &MRI) { - Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough( +std::optional<APInt> llvm::getIConstantVRegVal(Register VReg, + const MachineRegisterInfo &MRI) { + std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough( VReg, MRI, /*LookThroughInstrs*/ false); assert((!ValAndVReg || ValAndVReg->VReg == VReg) && "Value found while looking through instrs"); @@ -297,9 +297,9 @@ Optional<APInt> llvm::getIConstantVRegVal(Register VReg, return ValAndVReg->Value; } -Optional<int64_t> +std::optional<int64_t> llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) { - Optional<APInt> Val = getIConstantVRegVal(VReg, MRI); + std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI); if (Val && Val->getBitWidth() <= 64) return Val->getSExtValue(); return std::nullopt; @@ -308,9 +308,9 @@ llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) { namespace { typedef std::function<bool(const MachineInstr *)> IsOpcodeFn; -typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn; +typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn; -Optional<ValueAndVReg> getConstantVRegValWithLookThrough( +std::optional<ValueAndVReg> getConstantVRegValWithLookThrough( Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode, GetAPCstFn getAPCstValue, bool LookThroughInstrs = true, bool LookThroughAnyExt = false) { @@ -347,7 +347,7 @@ Optional<ValueAndVReg> 
getConstantVRegValWithLookThrough( if (!MI || !IsConstantOpcode(MI)) return std::nullopt; - Optional<APInt> MaybeVal = getAPCstValue(MI); + std::optional<APInt> MaybeVal = getAPCstValue(MI); if (!MaybeVal) return std::nullopt; APInt &Val = *MaybeVal; @@ -389,14 +389,14 @@ bool isAnyConstant(const MachineInstr *MI) { return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT; } -Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) { +std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) { const MachineOperand &CstVal = MI->getOperand(1); if (CstVal.isCImm()) return CstVal.getCImm()->getValue(); return std::nullopt; } -Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) { +std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) { const MachineOperand &CstVal = MI->getOperand(1); if (CstVal.isCImm()) return CstVal.getCImm()->getValue(); @@ -407,13 +407,13 @@ Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) { } // end anonymous namespace -Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough( +std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough( Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) { return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant, getCImmAsAPInt, LookThroughInstrs); } -Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough( +std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough( Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs, bool LookThroughAnyExt) { return getConstantVRegValWithLookThrough( @@ -421,7 +421,7 @@ Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough( LookThroughAnyExt); } -Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough( +std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough( Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) { auto Reg = getConstantVRegValWithLookThrough( VReg, MRI, isFConstant, 
getCImmOrFPImmAsAPInt, LookThroughInstrs); @@ -439,7 +439,7 @@ llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) { return MI->getOperand(1).getFPImm(); } -Optional<DefinitionAndSourceRegister> +std::optional<DefinitionAndSourceRegister> llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) { Register DefSrcReg = Reg; auto *DefMI = MRI.getVRegDef(Reg); @@ -461,14 +461,14 @@ llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) { MachineInstr *llvm::getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) { - Optional<DefinitionAndSourceRegister> DefSrcReg = + std::optional<DefinitionAndSourceRegister> DefSrcReg = getDefSrcRegIgnoringCopies(Reg, MRI); return DefSrcReg ? DefSrcReg->MI : nullptr; } Register llvm::getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) { - Optional<DefinitionAndSourceRegister> DefSrcReg = + std::optional<DefinitionAndSourceRegister> DefSrcReg = getDefSrcRegIgnoringCopies(Reg, MRI); return DefSrcReg ? 
DefSrcReg->Reg : Register(); } @@ -492,9 +492,10 @@ APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) { return APF; } -Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1, - const Register Op2, - const MachineRegisterInfo &MRI) { +std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, + const Register Op1, + const Register Op2, + const MachineRegisterInfo &MRI) { auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false); if (!MaybeOp2Cst) return std::nullopt; @@ -556,9 +557,9 @@ Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1, return std::nullopt; } -Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, - const Register Op2, - const MachineRegisterInfo &MRI) { +std::optional<APFloat> +llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, + const Register Op2, const MachineRegisterInfo &MRI) { const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI); if (!Op2Cst) return std::nullopt; @@ -759,9 +760,9 @@ Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF, return LiveIn; } -Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1, - uint64_t Imm, - const MachineRegisterInfo &MRI) { +std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, + const Register Op1, uint64_t Imm, + const MachineRegisterInfo &MRI) { auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI); if (MaybeOp1Cst) { switch (Opcode) { @@ -776,9 +777,9 @@ Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1, return std::nullopt; } -Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, - Register Src, - const MachineRegisterInfo &MRI) { +std::optional<APFloat> +llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, + const MachineRegisterInfo &MRI) { assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP); if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) { APFloat 
DstVal(getFltSemanticForLLT(DstTy)); @@ -789,7 +790,7 @@ Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, return std::nullopt; } -Optional<SmallVector<unsigned>> +std::optional<SmallVector<unsigned>> llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) { LLT Ty = MRI.getType(Src); SmallVector<unsigned> FoldedCTLZs; @@ -822,7 +823,7 @@ llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) { bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI, GISelKnownBits *KB) { - Optional<DefinitionAndSourceRegister> DefSrcReg = + std::optional<DefinitionAndSourceRegister> DefSrcReg = getDefSrcRegIgnoringCopies(Reg, MRI); if (!DefSrcReg) return false; @@ -1000,7 +1001,7 @@ LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) { return LLT::scalar(GCD); } -Optional<int> llvm::getSplatIndex(MachineInstr &MI) { +std::optional<int> llvm::getSplatIndex(MachineInstr &MI) { assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR && "Only G_SHUFFLE_VECTOR can have a splat index!"); ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); @@ -1028,9 +1029,9 @@ static bool isBuildVectorOp(unsigned Opcode) { namespace { -Optional<ValueAndVReg> getAnyConstantSplat(Register VReg, - const MachineRegisterInfo &MRI, - bool AllowUndef) { +std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg, + const MachineRegisterInfo &MRI, + bool AllowUndef) { MachineInstr *MI = getDefIgnoringCopies(VReg, MRI); if (!MI) return std::nullopt; @@ -1038,7 +1039,7 @@ Optional<ValueAndVReg> getAnyConstantSplat(Register VReg, if (!isBuildVectorOp(MI->getOpcode())) return std::nullopt; - Optional<ValueAndVReg> SplatValAndReg; + std::optional<ValueAndVReg> SplatValAndReg; for (MachineOperand &Op : MI->uses()) { Register Element = Op.getReg(); auto ElementValAndReg = @@ -1080,11 +1081,11 @@ bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI, AllowUndef); } -Optional<APInt> llvm::getIConstantSplatVal(const Register Reg, - const 
MachineRegisterInfo &MRI) { +std::optional<APInt> +llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) { if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) { - Optional<ValueAndVReg> ValAndVReg = + std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI); return ValAndVReg->Value; } @@ -1092,12 +1093,13 @@ Optional<APInt> llvm::getIConstantSplatVal(const Register Reg, return std::nullopt; } -Optional<APInt> llvm::getIConstantSplatVal(const MachineInstr &MI, - const MachineRegisterInfo &MRI) { +std::optional<APInt> +llvm::getIConstantSplatVal(const MachineInstr &MI, + const MachineRegisterInfo &MRI) { return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI); } -Optional<int64_t> +std::optional<int64_t> llvm::getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI) { if (auto SplatValAndReg = @@ -1106,15 +1108,15 @@ llvm::getIConstantSplatSExtVal(const Register Reg, return std::nullopt; } -Optional<int64_t> +std::optional<int64_t> llvm::getIConstantSplatSExtVal(const MachineInstr &MI, const MachineRegisterInfo &MRI) { return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI); } -Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg, - const MachineRegisterInfo &MRI, - bool AllowUndef) { +std::optional<FPValueAndVReg> +llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, + bool AllowUndef) { if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef)) return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI); return std::nullopt; @@ -1132,8 +1134,8 @@ bool llvm::isBuildVectorAllOnes(const MachineInstr &MI, return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef); } -Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI, - const MachineRegisterInfo &MRI) { +std::optional<RegOrConstant> +llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) { unsigned Opc 
= MI.getOpcode(); if (!isBuildVectorOp(Opc)) return std::nullopt; @@ -1202,7 +1204,7 @@ bool llvm::isConstantOrConstantVector(const MachineInstr &MI, return true; } -Optional<APInt> +std::optional<APInt> llvm::isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI) { Register Def = MI.getOperand(0).getReg(); diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp index 2bdae0e..0bdb32c 100644 --- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp +++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp @@ -97,11 +97,11 @@ class ImplicitNullChecks : public MachineFunctionPass { /// If non-None, then an instruction in \p Insts that also must be /// hoisted. - Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence; + std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence; /*implicit*/ DependenceResult( bool CanReorder, - Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence) + std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence) : CanReorder(CanReorder), PotentialDependence(PotentialDependence) { assert((!PotentialDependence || CanReorder) && "!CanReorder && PotentialDependence.hasValue() not allowed!"); @@ -254,7 +254,7 @@ ImplicitNullChecks::computeDependence(const MachineInstr *MI, assert(llvm::all_of(Block, canHandle) && "Check this first!"); assert(!is_contained(Block, MI) && "Block must be exclusive of MI!"); - Optional<ArrayRef<MachineInstr *>::iterator> Dep; + std::optional<ArrayRef<MachineInstr *>::iterator> Dep; for (auto I = Block.begin(), E = Block.end(); I != E; ++I) { if (canReorder(*I, MI)) diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp index b65eceb..19e523a 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp +++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp @@ -1038,7 +1038,7 @@ void MLocTracker::writeRegMask(const MachineOperand *MO, unsigned CurBB, 
Masks.push_back(std::make_pair(MO, InstID)); } -Optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) { +std::optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) { SpillLocationNo SpillID(SpillLocs.idFor(L)); if (SpillID.id() == 0) { @@ -1278,7 +1278,7 @@ bool InstrRefBasedLDV::isCalleeSavedReg(Register R) const { // void InstrRefBasedLDV::printVarLocInMBB(..) #endif -Optional<SpillLocationNo> +std::optional<SpillLocationNo> InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { assert(MI.hasOneMemOperand() && "Spill instruction does not have exactly one memory operand?"); @@ -1293,9 +1293,9 @@ InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { return MTracker->getOrTrackSpillLoc({Reg, Offset}); } -Optional<LocIdx> +std::optional<LocIdx> InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) { - Optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI); + std::optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI); if (!SpillLoc) return std::nullopt; @@ -1426,7 +1426,7 @@ bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI, // Default machine value number is <None> -- if no instruction defines // the corresponding value, it must have been optimized out. - Optional<ValueIDNum> NewID; + std::optional<ValueIDNum> NewID; // Try to lookup the instruction number, and find the machine value number // that it defines. It could be an instruction, or a PHI. @@ -1440,7 +1440,7 @@ bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI, // a register def was folded into a stack store. 
if (OpNo == MachineFunction::DebugOperandMemNumber && TargetInstr.hasOneMemOperand()) { - Optional<LocIdx> L = findLocationForMemOperand(TargetInstr); + std::optional<LocIdx> L = findLocationForMemOperand(TargetInstr); if (L) NewID = ValueIDNum(BlockNo, InstrIt->second.second, *L); } else if (OpNo != MachineFunction::DebugOperandMemNumber) { @@ -1658,7 +1658,7 @@ bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) { Register Base; StackOffset Offs = TFI->getFrameIndexReference(*MI.getMF(), FI, Base); SpillLoc SL = {Base, Offs}; - Optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(SL); + std::optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(SL); // We might be able to find a value, but have chosen not to, to avoid // tracking too much stack information. @@ -1753,7 +1753,8 @@ void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) { // If this instruction writes to a spill slot, def that slot. if (hasFoldedStackStore(MI)) { - if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) { + if (std::optional<SpillLocationNo> SpillNo = + extractSpillBaseRegAndOffset(MI)) { for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) { unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I); LocIdx L = MTracker->getSpillMLoc(SpillID); @@ -1795,7 +1796,8 @@ void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) { // Tell TTracker about any folded stack store. 
if (hasFoldedStackStore(MI)) { - if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) { + if (std::optional<SpillLocationNo> SpillNo = + extractSpillBaseRegAndOffset(MI)) { for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) { unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I); LocIdx L = MTracker->getSpillMLoc(SpillID); @@ -1836,7 +1838,7 @@ void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) { } } -Optional<SpillLocationNo> +std::optional<SpillLocationNo> InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI, MachineFunction *MF) { // TODO: Handle multiple stores folded into one. @@ -1866,7 +1868,7 @@ bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI, return Reg != 0; } -Optional<SpillLocationNo> +std::optional<SpillLocationNo> InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI, MachineFunction *MF, unsigned &Reg) { if (!MI.hasOneMemOperand()) @@ -1910,7 +1912,7 @@ bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) { // First, if there are any DBG_VALUEs pointing at a spill slot that is // written to, terminate that variable location. The value in memory // will have changed. DbgEntityHistoryCalculator doesn't try to detect this. - if (Optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) { + if (std::optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) { // Un-set this location and clobber, so that earlier locations don't // continue past this store. 
for (unsigned SlotIdx = 0; SlotIdx < MTracker->NumSlotIdxes; ++SlotIdx) { @@ -1961,7 +1963,7 @@ bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) { unsigned SpillID = MTracker->getLocID(Loc, {Size, 0}); DoTransfer(Reg, SpillID); } else { - Optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg); + std::optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg); if (!Loc) return false; @@ -2707,7 +2709,7 @@ bool InstrRefBasedLDV::pickVPHILoc( continue; } - Optional<ValueIDNum> JoinedOpLoc = + std::optional<ValueIDNum> JoinedOpLoc = pickOperandPHILoc(Idx, MBB, LiveOuts, MOutLocs, BlockOrders); if (!JoinedOpLoc) @@ -2720,7 +2722,7 @@ bool InstrRefBasedLDV::pickVPHILoc( return true; } -Optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc( +std::optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc( unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs, const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) { @@ -3954,7 +3956,7 @@ public: } // end namespace llvm -Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs( +std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs( MachineFunction &MF, const ValueTable *MLiveOuts, const ValueTable *MLiveIns, MachineInstr &Here, uint64_t InstrNum) { assert(MLiveOuts && MLiveIns && @@ -3967,13 +3969,13 @@ Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs( if (SeenDbgPHIIt != SeenDbgPHIs.end()) return SeenDbgPHIIt->second; - Optional<ValueIDNum> Result = + std::optional<ValueIDNum> Result = resolveDbgPHIsImpl(MF, MLiveOuts, MLiveIns, Here, InstrNum); SeenDbgPHIs.insert({&Here, Result}); return Result; } -Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( +std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( MachineFunction &MF, const ValueTable *MLiveOuts, const ValueTable *MLiveIns, MachineInstr &Here, uint64_t InstrNum) { // Pick out records of DBG_PHI instructions that have been observed. 
If there diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h index 9d0f4d0..5b8b3e0 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h +++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h @@ -892,7 +892,7 @@ public: /// Find LocIdx for SpillLoc \p L, creating a new one if it's not tracked. /// Returns std::nullopt when in scenarios where a spill slot could be /// tracked, but we would likely run into resource limitations. - Optional<SpillLocationNo> getOrTrackSpillLoc(SpillLoc L); + std::optional<SpillLocationNo> getOrTrackSpillLoc(SpillLoc L); // Get LocIdx of a spill ID. LocIdx getSpillMLoc(unsigned SpillID) { @@ -1129,10 +1129,10 @@ private: MachineBasicBlock *MBB; /// The value number read by the DBG_PHI -- or std::nullopt if it didn't /// refer to a value. - Optional<ValueIDNum> ValueRead; + std::optional<ValueIDNum> ValueRead; /// Register/Stack location the DBG_PHI reads -- or std::nullopt if it /// referred to something unexpected. - Optional<LocIdx> ReadLoc; + std::optional<LocIdx> ReadLoc; operator unsigned() const { return InstrNum; } }; @@ -1151,7 +1151,7 @@ private: /// DBG_INSTR_REFs that call resolveDbgPHIs. These variable references solve /// a mini SSA problem caused by DBG_PHIs being cloned, this collection caches /// the result. - DenseMap<MachineInstr *, Optional<ValueIDNum>> SeenDbgPHIs; + DenseMap<MachineInstr *, std::optional<ValueIDNum>> SeenDbgPHIs; DbgOpIDMap DbgOpStore; @@ -1166,8 +1166,8 @@ private: StringRef StackProbeSymbolName; /// Tests whether this instruction is a spill to a stack slot. - Optional<SpillLocationNo> isSpillInstruction(const MachineInstr &MI, - MachineFunction *MF); + std::optional<SpillLocationNo> isSpillInstruction(const MachineInstr &MI, + MachineFunction *MF); /// Decide if @MI is a spill instruction and return true if it is. 
We use 2 /// criteria to make this decision: @@ -1180,12 +1180,13 @@ private: /// If a given instruction is identified as a spill, return the spill slot /// and set \p Reg to the spilled register. - Optional<SpillLocationNo> isRestoreInstruction(const MachineInstr &MI, - MachineFunction *MF, unsigned &Reg); + std::optional<SpillLocationNo> isRestoreInstruction(const MachineInstr &MI, + MachineFunction *MF, + unsigned &Reg); /// Given a spill instruction, extract the spill slot information, ensure it's /// tracked, and return the spill number. - Optional<SpillLocationNo> + std::optional<SpillLocationNo> extractSpillBaseRegAndOffset(const MachineInstr &MI); /// Observe a single instruction while stepping through a block. @@ -1230,16 +1231,17 @@ private: /// \p Here the position of a DBG_INSTR_REF seeking a machine value number /// \p InstrNum Debug instruction number defined by DBG_PHI instructions. /// \returns The machine value number at position Here, or std::nullopt. - Optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF, - const ValueTable *MLiveOuts, - const ValueTable *MLiveIns, - MachineInstr &Here, uint64_t InstrNum); - - Optional<ValueIDNum> resolveDbgPHIsImpl(MachineFunction &MF, - const ValueTable *MLiveOuts, - const ValueTable *MLiveIns, - MachineInstr &Here, - uint64_t InstrNum); + std::optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF, + const ValueTable *MLiveOuts, + const ValueTable *MLiveIns, + MachineInstr &Here, + uint64_t InstrNum); + + std::optional<ValueIDNum> resolveDbgPHIsImpl(MachineFunction &MF, + const ValueTable *MLiveOuts, + const ValueTable *MLiveIns, + MachineInstr &Here, + uint64_t InstrNum); /// Step through the function, recording register definitions and movements /// in an MLocTracker. 
Convert the observations into a per-block transfer @@ -1353,7 +1355,7 @@ private: const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs, const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders); - Optional<ValueIDNum> pickOperandPHILoc( + std::optional<ValueIDNum> pickOperandPHILoc( unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs, const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders); @@ -1417,7 +1419,7 @@ public: && !MemOperand->getPseudoValue()->isAliased(MFI); } - Optional<LocIdx> findLocationForMemOperand(const MachineInstr &MI); + std::optional<LocIdx> findLocationForMemOperand(const MachineInstr &MI); }; } // namespace LiveDebugValues diff --git a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp index 222bbb0..5ad6760 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp +++ b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp @@ -859,7 +859,7 @@ private: /// Insert a set of ranges. void insertFromLocSet(const VarLocSet &ToLoad, const VarLocMap &Map); - llvm::Optional<LocIndices> getEntryValueBackup(DebugVariable Var); + std::optional<LocIndices> getEntryValueBackup(DebugVariable Var); /// Empty the set. void clear() { @@ -946,9 +946,9 @@ private: /// If a given instruction is identified as a spill, return the spill location /// and set \p Reg to the spilled register. - Optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI, - MachineFunction *MF, - Register &Reg); + std::optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI, + MachineFunction *MF, + Register &Reg); /// Given a spill instruction, extract the register and offset used to /// address the spill location in a target independent way. 
VarLoc::SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI); @@ -1110,7 +1110,7 @@ void VarLocBasedLDV::OpenRangesSet::insert(LocIndices VarLocIDs, /// Return the Loc ID of an entry value backup location, if it exists for the /// variable. -llvm::Optional<LocIndices> +std::optional<LocIndices> VarLocBasedLDV::OpenRangesSet::getEntryValueBackup(DebugVariable Var) { auto It = EntryValuesBackupVars.find(Var); if (It != EntryValuesBackupVars.end()) @@ -1398,7 +1398,7 @@ void VarLocBasedLDV::emitEntryValues(MachineInstr &MI, continue; auto DebugVar = VL.Var; - Optional<LocIndices> EntryValBackupIDs = + std::optional<LocIndices> EntryValBackupIDs = OpenRanges.getEntryValueBackup(DebugVar); // If the parameter has the entry value backup, it means we should @@ -1618,9 +1618,9 @@ bool VarLocBasedLDV::isLocationSpill(const MachineInstr &MI, return false; } -Optional<VarLocBasedLDV::VarLoc::SpillLoc> +std::optional<VarLocBasedLDV::VarLoc::SpillLoc> VarLocBasedLDV::isRestoreInstruction(const MachineInstr &MI, - MachineFunction *MF, Register &Reg) { + MachineFunction *MF, Register &Reg) { if (!MI.hasOneMemOperand()) return std::nullopt; @@ -1647,7 +1647,7 @@ void VarLocBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI, MachineFunction *MF = MI.getMF(); TransferKind TKind; Register Reg; - Optional<VarLoc::SpillLoc> Loc; + std::optional<VarLoc::SpillLoc> Loc; LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump();); diff --git a/llvm/lib/CodeGen/LiveDebugVariables.cpp b/llvm/lib/CodeGen/LiveDebugVariables.cpp index 4db941e..d211536 100644 --- a/llvm/lib/CodeGen/LiveDebugVariables.cpp +++ b/llvm/lib/CodeGen/LiveDebugVariables.cpp @@ -441,11 +441,12 @@ public: /// VNInfo. /// \param [out] Kills Append end points of VNI's live range to Kills. /// \param LIS Live intervals analysis. 
- void extendDef(SlotIndex Idx, DbgVariableValue DbgValue, - SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>> - &LiveIntervalInfo, - Optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills, - LiveIntervals &LIS); + void + extendDef(SlotIndex Idx, DbgVariableValue DbgValue, + SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>> + &LiveIntervalInfo, + std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills, + LiveIntervals &LIS); /// The value in LI may be copies to other registers. Determine if /// any of the copies are available at the kill points, and add defs if @@ -957,7 +958,7 @@ void UserValue::extendDef( SlotIndex Idx, DbgVariableValue DbgValue, SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>> &LiveIntervalInfo, - Optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills, + std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills, LiveIntervals &LIS) { SlotIndex Start = Idx; MachineBasicBlock *MBB = LIS.getMBBFromIndex(Start); @@ -1131,7 +1132,7 @@ void UserValue::computeIntervals(MachineRegisterInfo &MRI, LIs[LocNo] = {LI, VNI}; } if (ShouldExtendDef) { - Optional<std::pair<SlotIndex, SmallVector<unsigned>>> Kills; + std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> Kills; extendDef(Idx, DbgValue, LIs, Kills, LIS); if (Kills) { diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp index 8bcf5e2..21c95e1 100644 --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -376,10 +376,11 @@ struct ParsedMachineOperand { MachineOperand Operand; StringRef::iterator Begin; StringRef::iterator End; - Optional<unsigned> TiedDefIdx; + std::optional<unsigned> TiedDefIdx; ParsedMachineOperand(const MachineOperand &Operand, StringRef::iterator Begin, - StringRef::iterator End, Optional<unsigned> &TiedDefIdx) + StringRef::iterator End, + std::optional<unsigned> &TiedDefIdx) : Operand(Operand), Begin(Begin), End(End), 
TiedDefIdx(TiedDefIdx) { if (TiedDefIdx) assert(Operand.isReg() && Operand.isUse() && @@ -448,7 +449,8 @@ public: bool parseSubRegisterIndex(unsigned &SubReg); bool parseRegisterTiedDefIndex(unsigned &TiedDefIdx); bool parseRegisterOperand(MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx, bool IsDef = false); + std::optional<unsigned> &TiedDefIdx, + bool IsDef = false); bool parseImmediateOperand(MachineOperand &Dest); bool parseIRConstant(StringRef::iterator Loc, StringRef StringValue, const Constant *&C); @@ -488,17 +490,17 @@ public: bool parseLiveoutRegisterMaskOperand(MachineOperand &Dest); bool parseMachineOperand(const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx); + std::optional<unsigned> &TiedDefIdx); bool parseMachineOperandAndTargetFlags(const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx); + std::optional<unsigned> &TiedDefIdx); bool parseOffset(int64_t &Offset); bool parseIRBlockAddressTaken(BasicBlock *&BB); bool parseAlignment(uint64_t &Alignment); bool parseAddrspace(unsigned &Addrspace); - bool parseSectionID(Optional<MBBSectionID> &SID); - bool parseBBID(Optional<unsigned> &BBID); + bool parseSectionID(std::optional<MBBSectionID> &SID); + bool parseBBID(std::optional<unsigned> &BBID); bool parseOperandsOffset(MachineOperand &Op); bool parseIRValue(const Value *&V); bool parseMemoryOperandFlag(MachineMemOperand::Flags &Flags); @@ -641,7 +643,7 @@ bool MIParser::consumeIfPresent(MIToken::TokenKind TokenKind) { } // Parse Machine Basic Block Section ID. -bool MIParser::parseSectionID(Optional<MBBSectionID> &SID) { +bool MIParser::parseSectionID(std::optional<MBBSectionID> &SID) { assert(Token.is(MIToken::kw_bbsections)); lex(); if (Token.is(MIToken::IntegerLiteral)) { @@ -663,7 +665,7 @@ bool MIParser::parseSectionID(Optional<MBBSectionID> &SID) { } // Parse Machine Basic Block ID. 
-bool MIParser::parseBBID(Optional<unsigned> &BBID) { +bool MIParser::parseBBID(std::optional<unsigned> &BBID) { assert(Token.is(MIToken::kw_bb_id)); lex(); unsigned Value = 0; @@ -688,9 +690,9 @@ bool MIParser::parseBasicBlockDefinition( bool IsLandingPad = false; bool IsInlineAsmBrIndirectTarget = false; bool IsEHFuncletEntry = false; - Optional<MBBSectionID> SectionID; + std::optional<MBBSectionID> SectionID; uint64_t Alignment = 0; - Optional<unsigned> BBID; + std::optional<unsigned> BBID; BasicBlock *BB = nullptr; if (consumeIfPresent(MIToken::lparen)) { do { @@ -1021,7 +1023,7 @@ bool MIParser::parse(MachineInstr *&MI) { SmallVector<ParsedMachineOperand, 8> Operands; while (Token.isRegister() || Token.isRegisterFlag()) { auto Loc = Token.location(); - Optional<unsigned> TiedDefIdx; + std::optional<unsigned> TiedDefIdx; if (parseRegisterOperand(MO, TiedDefIdx, /*IsDef=*/true)) return true; Operands.push_back( @@ -1047,7 +1049,7 @@ bool MIParser::parse(MachineInstr *&MI) { Token.isNot(MIToken::kw_debug_instr_number) && Token.isNot(MIToken::coloncolon) && Token.isNot(MIToken::lbrace)) { auto Loc = Token.location(); - Optional<unsigned> TiedDefIdx; + std::optional<unsigned> TiedDefIdx; if (parseMachineOperandAndTargetFlags(OpCode, Operands.size(), MO, TiedDefIdx)) return true; Operands.push_back( @@ -1706,7 +1708,7 @@ bool MIParser::assignRegisterTies(MachineInstr &MI, } bool MIParser::parseRegisterOperand(MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx, + std::optional<unsigned> &TiedDefIdx, bool IsDef) { unsigned Flags = IsDef ? 
RegState::Define : 0; while (Token.isRegisterFlag()) { @@ -2812,7 +2814,7 @@ bool MIParser::parseLiveoutRegisterMaskOperand(MachineOperand &Dest) { bool MIParser::parseMachineOperand(const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx) { + std::optional<unsigned> &TiedDefIdx) { switch (Token.kind()) { case MIToken::kw_implicit: case MIToken::kw_implicit_define: @@ -2917,7 +2919,7 @@ bool MIParser::parseMachineOperand(const unsigned OpCode, const unsigned OpIdx, bool MIParser::parseMachineOperandAndTargetFlags( const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx) { + std::optional<unsigned> &TiedDefIdx) { unsigned TF = 0; bool HasTargetFlags = false; if (Token.is(MIToken::kw_target_flags)) { diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp index d384035..8896f05 100644 --- a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp +++ b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp @@ -1113,7 +1113,7 @@ int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition( } bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) { - Optional<float> CachedReward; + std::optional<float> CachedReward; auto GetReward = [&]() { if (!CachedReward) CachedReward = static_cast<float>( diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp index 6c88671..271cd01 100644 --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -2329,7 +2329,7 @@ static unsigned getSpillSlotSize(const MMOList &Accesses, return Size; } -Optional<unsigned> +std::optional<unsigned> MachineInstr::getSpillSize(const TargetInstrInfo *TII) const { int FI; if (TII->isStoreToStackSlotPostFE(*this, FI)) { @@ -2340,7 +2340,7 @@ MachineInstr::getSpillSize(const TargetInstrInfo *TII) const { return std::nullopt; } -Optional<unsigned> +std::optional<unsigned> MachineInstr::getFoldedSpillSize(const TargetInstrInfo 
*TII) const { MMOList Accesses; if (TII->hasStoreToStackSlot(*this, Accesses)) @@ -2348,7 +2348,7 @@ MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const { return std::nullopt; } -Optional<unsigned> +std::optional<unsigned> MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const { int FI; if (TII->isLoadFromStackSlotPostFE(*this, FI)) { @@ -2359,7 +2359,7 @@ MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const { return std::nullopt; } -Optional<unsigned> +std::optional<unsigned> MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const { MMOList Accesses; if (TII->hasLoadFromStackSlot(*this, Accesses)) diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp index bb2d1da..a3ffce9 100644 --- a/llvm/lib/CodeGen/MachineOperand.cpp +++ b/llvm/lib/CodeGen/MachineOperand.cpp @@ -756,8 +756,9 @@ void MachineOperand::print(raw_ostream &OS, LLT TypeToPrint, } void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST, - LLT TypeToPrint, Optional<unsigned> OpIdx, bool PrintDef, - bool IsStandalone, bool ShouldPrintRegisterTies, + LLT TypeToPrint, std::optional<unsigned> OpIdx, + bool PrintDef, bool IsStandalone, + bool ShouldPrintRegisterTies, unsigned TiedOperandIdx, const TargetRegisterInfo *TRI, const TargetIntrinsicInfo *IntrinsicInfo) const { diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp index 195fd45..20d2517 100644 --- a/llvm/lib/CodeGen/ModuloSchedule.cpp +++ b/llvm/lib/CodeGen/ModuloSchedule.cpp @@ -1281,7 +1281,7 @@ class KernelRewriter { // Insert a phi that carries LoopReg from the loop body and InitReg otherwise. // If InitReg is not given it is chosen arbitrarily. It will either be undef // or will be chosen so as to share another phi. 
- Register phi(Register LoopReg, Optional<Register> InitReg = {}, + Register phi(Register LoopReg, std::optional<Register> InitReg = {}, const TargetRegisterClass *RC = nullptr); // Create an undef register of the given register class. Register undef(const TargetRegisterClass *RC); @@ -1389,7 +1389,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) { // First, dive through the phi chain to find the defaults for the generated // phis. - SmallVector<Optional<Register>, 4> Defaults; + SmallVector<std::optional<Register>, 4> Defaults; Register LoopReg = Reg; auto LoopProducer = Producer; while (LoopProducer->isPHI() && LoopProducer->getParent() == BB) { @@ -1400,7 +1400,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) { } int LoopProducerStage = S.getStage(LoopProducer); - Optional<Register> IllegalPhiDefault; + std::optional<Register> IllegalPhiDefault; if (LoopProducerStage == -1) { // Do nothing. @@ -1432,9 +1432,9 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) { // If we need more phis than we have defaults for, pad out with undefs for // the earliest phis, which are at the end of the defaults chain (the // chain is in reverse order). - Defaults.resize(Defaults.size() + StageDiff, Defaults.empty() - ? Optional<Register>() - : Defaults.back()); + Defaults.resize(Defaults.size() + StageDiff, + Defaults.empty() ? std::optional<Register>() + : Defaults.back()); } } @@ -1466,7 +1466,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) { return LoopReg; } -Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg, +Register KernelRewriter::phi(Register LoopReg, std::optional<Register> InitReg, const TargetRegisterClass *RC) { // If the init register is not undef, try and find an existing phi. 
if (InitReg) { diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp index 31e37c4..be1b770 100644 --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -66,7 +66,6 @@ //===----------------------------------------------------------------------===// #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" @@ -273,11 +272,11 @@ namespace { : MI(MI), CommutePair(std::make_pair(Idx1, Idx2)) {} MachineInstr *getMI() const { return MI; } - Optional<IndexPair> getCommutePair() const { return CommutePair; } + std::optional<IndexPair> getCommutePair() const { return CommutePair; } private: MachineInstr *MI; - Optional<IndexPair> CommutePair; + std::optional<IndexPair> CommutePair; }; /// Helper class to hold a reply for ValueTracker queries. diff --git a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h index a3936ea..4683857 100644 --- a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h +++ b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h @@ -10,7 +10,6 @@ #define LLVM_CODEGEN_REGALLOCEVICTIONADVISOR_H #include "llvm/ADT/ArrayRef.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/StringRef.h" #include "llvm/CodeGen/Register.h" @@ -126,9 +125,9 @@ protected: // Get the upper limit of elements in the given Order we need to analyze. // TODO: is this heuristic, we could consider learning it.
- Optional<unsigned> getOrderLimit(const LiveInterval &VirtReg, - const AllocationOrder &Order, - unsigned CostPerUseLimit) const; + std::optional<unsigned> getOrderLimit(const LiveInterval &VirtReg, + const AllocationOrder &Order, + unsigned CostPerUseLimit) const; // Determine if it's worth trying to allocate this reg, given the // CostPerUseLimit diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index ead91f6..7c0f1d5 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -523,7 +523,7 @@ bool RegAllocEvictionAdvisor::isUnusedCalleeSavedReg(MCRegister PhysReg) const { return !Matrix->isPhysRegUsed(PhysReg); } -Optional<unsigned> +std::optional<unsigned> RegAllocEvictionAdvisor::getOrderLimit(const LiveInterval &VirtReg, const AllocationOrder &Order, unsigned CostPerUseLimit) const { diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp index ad73e76..5d4d982 100644 --- a/llvm/lib/CodeGen/SelectOptimize.cpp +++ b/llvm/lib/CodeGen/SelectOptimize.cpp @@ -200,7 +200,7 @@ private: SmallPtrSet<const Instruction *, 2> getSIset(const SelectGroups &SIGroups); // Returns the latency cost of a given instruction. - Optional<uint64_t> computeInstCost(const Instruction *I); + std::optional<uint64_t> computeInstCost(const Instruction *I); // Returns the misprediction cost of a given select when converted to branch. 
Scaled64 getMispredictionCost(const SelectInst *SI, const Scaled64 CondCost); @@ -977,11 +977,11 @@ SelectOptimize::getSIset(const SelectGroups &SIGroups) { return SIset; } -Optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) { +std::optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) { InstructionCost ICost = TTI->getInstructionCost(I, TargetTransformInfo::TCK_Latency); if (auto OC = ICost.getValue()) - return Optional<uint64_t>(*OC); + return std::optional<uint64_t>(*OC); return std::nullopt; } diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index f9a73351..633198f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -7929,7 +7929,7 @@ private: /// LOAD /// /// *ExtractVectorElement -static const Optional<ByteProvider> +static const std::optional<ByteProvider> calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth, std::optional<uint64_t> VectorIndex, unsigned StartingIndex = 0) { @@ -8003,7 +8003,7 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth, if (Index >= NarrowByteWidth) return Op.getOpcode() == ISD::ZERO_EXTEND - ? Optional<ByteProvider>(ByteProvider::getConstantZero()) + ? std::optional<ByteProvider>(ByteProvider::getConstantZero()) : std::nullopt; return calculateByteProvider(NarrowOp, Index, Depth + 1, VectorIndex, StartingIndex); @@ -8053,7 +8053,7 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth, // question if (Index >= NarrowByteWidth) return L->getExtensionType() == ISD::ZEXTLOAD - ? Optional<ByteProvider>(ByteProvider::getConstantZero()) + ? 
std::optional<ByteProvider>(ByteProvider::getConstantZero()) : std::nullopt; unsigned BPVectorIndex = VectorIndex.value_or(0U); @@ -8075,8 +8075,8 @@ static unsigned bigEndianByteAt(unsigned BW, unsigned i) { // Check if the bytes offsets we are looking at match with either big or // little endian value loaded. Return true for big endian, false for little // endian, and std::nullopt if match failed. -static Optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets, - int64_t FirstOffset) { +static std::optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets, + int64_t FirstOffset) { // The endian can be decided only when it is 2 bytes at least. unsigned Width = ByteOffsets.size(); if (Width < 2) @@ -8367,7 +8367,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) { SDValue Chain; SmallPtrSet<LoadSDNode *, 8> Loads; - Optional<ByteProvider> FirstByteProvider; + std::optional<ByteProvider> FirstByteProvider; int64_t FirstOffset = INT64_MAX; // Check if all the bytes of the OR we are looking at are loaded from the same @@ -8460,7 +8460,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) { // Check if the bytes of the OR we are looking at match with either big or // little endian value load - Optional<bool> IsBigEndian = isBigEndian( + std::optional<bool> IsBigEndian = isBigEndian( makeArrayRef(ByteOffsets).drop_back(ZeroExtendedBytes), FirstOffset); if (!IsBigEndian) return SDValue(); @@ -25157,7 +25157,7 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const { bool IsAtomic; SDValue BasePtr; int64_t Offset; - Optional<int64_t> NumBytes; + std::optional<int64_t> NumBytes; MachineMemOperand *MMO; }; @@ -25172,21 +25172,26 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const { : 0; uint64_t Size = MemoryLocation::getSizeOrUnknown(LSN->getMemoryVT().getStoreSize()); - return {LSN->isVolatile(), LSN->isAtomic(), LSN->getBasePtr(), + return {LSN->isVolatile(), + LSN->isAtomic(), + LSN->getBasePtr(), Offset /*base offset*/, - 
Optional<int64_t>(Size), + std::optional<int64_t>(Size), LSN->getMemOperand()}; } if (const auto *LN = cast<LifetimeSDNode>(N)) - return {false /*isVolatile*/, /*isAtomic*/ false, LN->getOperand(1), + return {false /*isVolatile*/, + /*isAtomic*/ false, + LN->getOperand(1), (LN->hasOffset()) ? LN->getOffset() : 0, - (LN->hasOffset()) ? Optional<int64_t>(LN->getSize()) - : Optional<int64_t>(), + (LN->hasOffset()) ? std::optional<int64_t>(LN->getSize()) + : std::optional<int64_t>(), (MachineMemOperand *)nullptr}; // Default. - return {false /*isvolatile*/, /*isAtomic*/ false, SDValue(), - (int64_t)0 /*offset*/, - Optional<int64_t>() /*size*/, (MachineMemOperand *)nullptr}; + return {false /*isvolatile*/, + /*isAtomic*/ false, SDValue(), + (int64_t)0 /*offset*/, std::optional<int64_t>() /*size*/, + (MachineMemOperand *)nullptr}; }; MemUseCharacteristics MUC0 = getCharacteristics(Op0), diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index a4eb634..790cba9 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -6593,9 +6593,10 @@ SDValue DAGTypeLegalizer::WidenVecOp_VSELECT(SDNode *N) { // Align: If 0, don't allow use of a wider type // WidenEx: If Align is not 0, the amount additional we can load/store from. 
-static Optional<EVT> findMemType(SelectionDAG &DAG, const TargetLowering &TLI, - unsigned Width, EVT WidenVT, - unsigned Align = 0, unsigned WidenEx = 0) { +static std::optional<EVT> findMemType(SelectionDAG &DAG, + const TargetLowering &TLI, unsigned Width, + EVT WidenVT, unsigned Align = 0, + unsigned WidenEx = 0) { EVT WidenEltVT = WidenVT.getVectorElementType(); const bool Scalable = WidenVT.isScalableVector(); unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinSize(); @@ -6718,7 +6719,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain, (!LD->isSimple() || LdVT.isScalableVector()) ? 0 : LD->getAlign().value(); // Find the vector type that can load from. - Optional<EVT> FirstVT = + std::optional<EVT> FirstVT = findMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign, WidthDiff.getKnownMinSize()); @@ -6731,7 +6732,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain, // Unless we're able to load in one instruction we must work out how to load // the remainder. if (!TypeSize::isKnownLE(LdWidth, FirstVTWidth)) { - Optional<EVT> NewVT = FirstVT; + std::optional<EVT> NewVT = FirstVT; TypeSize RemainingWidth = LdWidth; TypeSize NewVTWidth = FirstVTWidth; do { @@ -6954,7 +6955,7 @@ bool DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain, while (StWidth.isNonZero()) { // Find the largest vector type we can store with. - Optional<EVT> NewVT = + std::optional<EVT> NewVT = findMemType(DAG, TLI, StWidth.getKnownMinSize(), ValVT); if (!NewVT) return false; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 2cb93de..e9f61e7 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -493,7 +493,7 @@ bool ISD::isVPReduction(unsigned Opcode) { } /// The operand position of the vector mask. 
-Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) { +std::optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) { switch (Opcode) { default: return std::nullopt; @@ -505,7 +505,7 @@ Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) { } /// The operand position of the explicit vector length parameter. -Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) { +std::optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) { switch (Opcode) { default: return std::nullopt; @@ -5617,8 +5617,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, return V; } -static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, - const APInt &C2) { +static std::optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, + const APInt &C2) { switch (Opcode) { case ISD::ADD: return C1 + C2; case ISD::SUB: return C1 - C2; @@ -5699,10 +5699,9 @@ static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, // Handle constant folding with UNDEF. // TODO: Handle more cases. 
-static llvm::Optional<APInt> FoldValueWithUndef(unsigned Opcode, - const APInt &C1, bool IsUndef1, - const APInt &C2, - bool IsUndef2) { +static std::optional<APInt> FoldValueWithUndef(unsigned Opcode, const APInt &C1, + bool IsUndef1, const APInt &C2, + bool IsUndef2) { if (!(IsUndef1 || IsUndef2)) return FoldValue(Opcode, C1, C2); @@ -5787,7 +5786,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, if (C1->isOpaque() || C2->isOpaque()) return SDValue(); - Optional<APInt> FoldAttempt = + std::optional<APInt> FoldAttempt = FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()); if (!FoldAttempt) return SDValue(); @@ -5832,7 +5831,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) { SmallVector<APInt> RawBits; for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) { - Optional<APInt> Fold = FoldValueWithUndef( + std::optional<APInt> Fold = FoldValueWithUndef( Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]); if (!Fold) break; @@ -11967,7 +11966,7 @@ bool BuildVectorSDNode::isConstant() const { return true; } -Optional<std::pair<APInt, APInt>> +std::optional<std::pair<APInt, APInt>> BuildVectorSDNode::isConstantSequence() const { unsigned NumOps = getNumOperands(); if (NumOps < 2) diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp index d236433..a432d8e 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp @@ -85,9 +85,9 @@ bool BaseIndexOffset::equalBaseIndex(const BaseIndexOffset &Other, } bool BaseIndexOffset::computeAliasing(const SDNode *Op0, - const Optional<int64_t> NumBytes0, + const std::optional<int64_t> NumBytes0, const SDNode *Op1, - const Optional<int64_t> NumBytes1, + const std::optional<int64_t> NumBytes1, const SelectionDAG 
&DAG, bool &IsAlias) { BaseIndexOffset BasePtr0 = match(Op0, DAG); diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp index 39e2c39..708596a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp @@ -159,9 +159,9 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType, /// Utility function for reservePreviousStackSlotForValue. Tries to find /// stack slot index to which we have spilled value for previous statepoints. /// LookUpDepth specifies maximum DFS depth this function is allowed to look. -static Optional<int> findPreviousSpillSlot(const Value *Val, - SelectionDAGBuilder &Builder, - int LookUpDepth) { +static std::optional<int> findPreviousSpillSlot(const Value *Val, + SelectionDAGBuilder &Builder, + int LookUpDepth) { // Can not look any further - give up now if (LookUpDepth <= 0) return std::nullopt; @@ -196,10 +196,10 @@ static Optional<int> findPreviousSpillSlot(const Value *Val, // All incoming values should have same known stack slot, otherwise result // is unknown. if (const PHINode *Phi = dyn_cast<PHINode>(Val)) { - Optional<int> MergedResult; + std::optional<int> MergedResult; for (const auto &IncomingValue : Phi->incoming_values()) { - Optional<int> SpillSlot = + std::optional<int> SpillSlot = findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1); if (!SpillSlot) return std::nullopt; @@ -283,7 +283,7 @@ static void reservePreviousStackSlotForValue(const Value *IncomingValue, return; const int LookUpDepth = 6; - Optional<int> Index = + std::optional<int> Index = findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth); if (!Index) return; |