| author    | Fangrui Song <i@maskray.me> | 2022-12-13 09:06:36 +0000 |
| committer | Fangrui Song <i@maskray.me> | 2022-12-13 09:06:36 +0000 |
| commit    | 67819a72c6ba39267effe8edfc1befddc3f3f2f9 (patch) |
| tree      | 9a95db915f8eded88767ac3e9c31c8db045ab505 /llvm/lib |
| parent    | 48e6ff9ad3eb1971de6d7ba12e31754781aff675 (diff) |
[CodeGen] llvm::Optional => std::optional
Diffstat (limited to 'llvm/lib')
66 files changed, 456 insertions, 439 deletions
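The patch is a mechanical rename: every `Optional<T>` (i.e. `llvm::Optional<T>`) in these CodeGen files becomes `std::optional<T>`, `#include "llvm/ADT/Optional.h"` gives way to `#include <optional>`, empty-optional spellings such as `Optional<dwarf::Form>()` become `std::optional<dwarf::Form>()`, and existing `std::nullopt` returns are left as they are. A minimal, self-contained sketch of the pattern — the `getSpillSize` helper below is hypothetical, not code from the patch — looks like this:

```cpp
// Hypothetical illustration of the llvm::Optional -> std::optional migration;
// this getSpillSize is invented for the example and is not the CodeGen
// function of the same name.
#include <cassert>
#include <optional> // replaces #include "llvm/ADT/Optional.h"

// Before: llvm::Optional<unsigned> getSpillSize(bool IsSpill);
// After:  the same signature spelled with the standard-library type.
static std::optional<unsigned> getSpillSize(bool IsSpill) {
  if (!IsSpill)
    return std::nullopt; // empty result; std::nullopt was already in use
  return 16u;            // engaged optional carrying a spill size in bytes
}

int main() {
  std::optional<unsigned> Size = getSpillSize(true);
  assert(Size && *Size == 16u); // check engagement before dereferencing

  // Conditional expressions must still name the optional type explicitly so
  // both arms share a common type (as in the DW_AT_inline change in
  // DwarfCompileUnit.cpp below, where one arm is std::optional<dwarf::Form>()):
  std::optional<unsigned> Maybe =
      getSpillSize(false) ? std::optional<unsigned>() : 16u;
  assert(Maybe && *Maybe == 16u);
  return 0;
}
```

The conditional-operator case is why the explicit `std::optional<...>()` spelling survives the rename: replacing it with a bare `std::nullopt` would leave the two arms with no common type, so the expression would not compile.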
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index fbbe19f..14f0b78 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -1072,7 +1072,7 @@ static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) { // We assume a single instruction only has a spill or reload, not // both. - Optional<unsigned> Size; + std::optional<unsigned> Size; if ((Size = MI.getRestoreSize(TII))) { CommentOS << *Size << "-byte Reload\n"; } else if ((Size = MI.getFoldedRestoreSize(TII))) { diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp index dc09b52..1b2e7ad 100644 --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -1339,7 +1339,7 @@ void CodeViewDebug::calculateRanges( assert(DVInst->isDebugValue() && "Invalid History entry"); // FIXME: Find a way to represent constant variables, since they are // relatively common. - Optional<DbgVariableLocation> Location = + std::optional<DbgVariableLocation> Location = DbgVariableLocation::extractFromMachineInstruction(*DVInst); if (!Location) { diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp b/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp index 2038952..4cf4f02 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp @@ -30,7 +30,7 @@ using namespace llvm; /// variable's lexical scope instruction ranges. static cl::opt<bool> TrimVarLocs("trim-var-locs", cl::Hidden, cl::init(true)); -Optional<DbgVariableLocation> +std::optional<DbgVariableLocation> DbgVariableLocation::extractFromMachineInstruction( const MachineInstr &Instruction) { DbgVariableLocation Location; diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h index dda12f7..a458825 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h +++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h @@ -159,7 +159,7 @@ class DebugLocStream::ListBuilder { DbgVariable &V; const MachineInstr &MI; size_t ListIndex; - Optional<uint8_t> TagOffset; + std::optional<uint8_t> TagOffset; public: ListBuilder(DebugLocStream &Locs, DwarfCompileUnit &CU, AsmPrinter &Asm, diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index 1727671f..6dde503 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -1129,7 +1129,7 @@ void DwarfCompileUnit::constructAbstractSubprogramScopeDIE( AbsDef = &ContextCU->createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr); ContextCU->applySubprogramAttributesToDefinition(SP, *AbsDef); ContextCU->addSInt(*AbsDef, dwarf::DW_AT_inline, - DD->getDwarfVersion() <= 4 ? Optional<dwarf::Form>() + DD->getDwarfVersion() <= 4 ? std::optional<dwarf::Form>() : dwarf::DW_FORM_implicit_const, dwarf::DW_INL_inlined); if (DIE *ObjectPointer = ContextCU->createAndAddScopeChildren(Scope, *AbsDef)) diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h index 9245ac9..5d2ef8e 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h @@ -116,7 +116,7 @@ class DbgVariable : public DbgEntity { /// Index of the entry list in DebugLocs. unsigned DebugLocListIndex = ~0u; /// DW_OP_LLVM_tag_offset value from DebugLocs. 
- Optional<uint8_t> DebugLocListTagOffset; + std::optional<uint8_t> DebugLocListTagOffset; /// Single value location description. std::unique_ptr<DbgValueLoc> ValueLoc = nullptr; @@ -175,7 +175,9 @@ public: void setDebugLocListIndex(unsigned O) { DebugLocListIndex = O; } unsigned getDebugLocListIndex() const { return DebugLocListIndex; } void setDebugLocListTagOffset(uint8_t O) { DebugLocListTagOffset = O; } - Optional<uint8_t> getDebugLocListTagOffset() const { return DebugLocListTagOffset; } + std::optional<uint8_t> getDebugLocListTagOffset() const { + return DebugLocListTagOffset; + } StringRef getName() const { return getVariable()->getName(); } const DbgValueLoc *getValueLoc() const { return ValueLoc.get(); } /// Get the FI entries, sorted by fragment offset. diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp index ebe351e..d89caac 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp @@ -494,7 +494,7 @@ bool DwarfExpression::addExpression( // and not any other parts of the following DWARF expression. assert(!IsEmittingEntryValue && "Can't emit entry value around expression"); - Optional<DIExpression::ExprOperand> PrevConvertOp; + std::optional<DIExpression::ExprOperand> PrevConvertOp; while (ExprCursor) { auto Op = ExprCursor.take(); diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h index b869e28..a5a19cdf 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h @@ -53,7 +53,7 @@ public: DIExpressionCursor(const DIExpressionCursor &) = default; /// Consume one operation. - Optional<DIExpression::ExprOperand> take() { + std::optional<DIExpression::ExprOperand> take() { if (Start == End) return std::nullopt; return *(Start++); @@ -63,14 +63,14 @@ public: void consume(unsigned N) { std::advance(Start, N); } /// Return the current operation. - Optional<DIExpression::ExprOperand> peek() const { + std::optional<DIExpression::ExprOperand> peek() const { if (Start == End) return std::nullopt; return *(Start); } /// Return the next operation. 
- Optional<DIExpression::ExprOperand> peekNext() const { + std::optional<DIExpression::ExprOperand> peekNext() const { if (Start == End) return std::nullopt; @@ -170,7 +170,7 @@ public: bool isParameterValue() { return LocationFlags & CallSiteParamValue; } - Optional<uint8_t> TagOffset; + std::optional<uint8_t> TagOffset; protected: /// Push a DW_OP_piece / DW_OP_bit_piece for emitting later, if one is needed diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp index aad00c8..9cd6532 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp @@ -218,7 +218,7 @@ void DwarfUnit::addFlag(DIE &Die, dwarf::Attribute Attribute) { } void DwarfUnit::addUInt(DIEValueList &Die, dwarf::Attribute Attribute, - Optional<dwarf::Form> Form, uint64_t Integer) { + std::optional<dwarf::Form> Form, uint64_t Integer) { if (!Form) Form = DIEInteger::BestForm(false, Integer); assert(Form != dwarf::DW_FORM_implicit_const && @@ -232,13 +232,13 @@ void DwarfUnit::addUInt(DIEValueList &Block, dwarf::Form Form, } void DwarfUnit::addSInt(DIEValueList &Die, dwarf::Attribute Attribute, - Optional<dwarf::Form> Form, int64_t Integer) { + std::optional<dwarf::Form> Form, int64_t Integer) { if (!Form) Form = DIEInteger::BestForm(true, Integer); addAttribute(Die, Attribute, *Form, DIEInteger(Integer)); } -void DwarfUnit::addSInt(DIELoc &Die, Optional<dwarf::Form> Form, +void DwarfUnit::addSInt(DIELoc &Die, std::optional<dwarf::Form> Form, int64_t Integer) { addSInt(Die, (dwarf::Attribute)0, Form, Integer); } diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h index 48d63d1..395539f 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h @@ -15,10 +15,10 @@ #include "DwarfDebug.h" #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/Optional.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/DIE.h" #include "llvm/Target/TargetMachine.h" +#include <optional> #include <string> namespace llvm { @@ -143,15 +143,15 @@ public: /// Add an unsigned integer attribute data and value. void addUInt(DIEValueList &Die, dwarf::Attribute Attribute, - Optional<dwarf::Form> Form, uint64_t Integer); + std::optional<dwarf::Form> Form, uint64_t Integer); void addUInt(DIEValueList &Block, dwarf::Form Form, uint64_t Integer); /// Add an signed integer attribute data and value. void addSInt(DIEValueList &Die, dwarf::Attribute Attribute, - Optional<dwarf::Form> Form, int64_t Integer); + std::optional<dwarf::Form> Form, int64_t Integer); - void addSInt(DIELoc &Die, Optional<dwarf::Form> Form, int64_t Integer); + void addSInt(DIELoc &Die, std::optional<dwarf::Form> Form, int64_t Integer); /// Add a string attribute data and value. /// diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp index 9af2630..8068681 100644 --- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp +++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp @@ -229,7 +229,8 @@ walkToAllocaAndPrependOffsetDeref(const DataLayout &DL, Value *Start, /// Extract the offset used in \p DIExpr. Returns std::nullopt if the expression /// doesn't explicitly describe a memory location with DW_OP_deref or if the /// expression is too complex to interpret. 
-static Optional<int64_t> getDerefOffsetInBytes(const DIExpression *DIExpr) { +static std::optional<int64_t> +getDerefOffsetInBytes(const DIExpression *DIExpr) { int64_t Offset = 0; const unsigned NumElements = DIExpr->getNumElements(); const auto Elements = DIExpr->getElements(); diff --git a/llvm/lib/CodeGen/CFIInstrInserter.cpp b/llvm/lib/CodeGen/CFIInstrInserter.cpp index 842339a..2574168 100644 --- a/llvm/lib/CodeGen/CFIInstrInserter.cpp +++ b/llvm/lib/CodeGen/CFIInstrInserter.cpp @@ -18,7 +18,6 @@ //===----------------------------------------------------------------------===// #include "llvm/ADT/DepthFirstIterator.h" -#include "llvm/ADT/Optional.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/Passes.h" @@ -89,10 +88,10 @@ class CFIInstrInserter : public MachineFunctionPass { #define INVALID_OFFSET INT_MAX /// contains the location where CSR register is saved. struct CSRSavedLocation { - CSRSavedLocation(Optional<unsigned> R, Optional<int> O) + CSRSavedLocation(std::optional<unsigned> R, std::optional<int> O) : Reg(R), Offset(O) {} - Optional<unsigned> Reg; - Optional<int> Offset; + std::optional<unsigned> Reg; + std::optional<int> Offset; }; /// Contains cfa offset and register values valid at entry and exit of basic @@ -187,8 +186,8 @@ void CFIInstrInserter::calculateOutgoingCFAInfo(MBBCFAInfo &MBBInfo) { // Determine cfa offset and register set by the block. for (MachineInstr &MI : *MBBInfo.MBB) { if (MI.isCFIInstruction()) { - Optional<unsigned> CSRReg; - Optional<int> CSROffset; + std::optional<unsigned> CSRReg; + std::optional<int> CSROffset; unsigned CFIIndex = MI.getOperand(0).getCFIIndex(); const MCCFIInstruction &CFI = Instrs[CFIIndex]; switch (CFI.getOperation()) { diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp index a432e4e..64e2d51 100644 --- a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp @@ -107,7 +107,7 @@ void CSEMIRBuilder::profileMBBOpcode(GISelInstProfileBuilder &B, void CSEMIRBuilder::profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps, - Optional<unsigned> Flags, + std::optional<unsigned> Flags, GISelInstProfileBuilder &B) const { profileMBBOpcode(B, Opc); @@ -170,7 +170,7 @@ CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps, MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps, - Optional<unsigned> Flag) { + std::optional<unsigned> Flag) { switch (Opc) { default: break; @@ -210,8 +210,8 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, break; } - if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(), - SrcOps[1].getReg(), *getMRI())) + if (std::optional<APInt> Cst = ConstantFoldBinOp( + Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI())) return buildConstant(DstOps[0], *Cst); break; } @@ -230,7 +230,7 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, // Try to constant fold these. assert(SrcOps.size() == 2 && "Invalid sources"); assert(DstOps.size() == 1 && "Invalid dsts"); - if (Optional<APFloat> Cst = ConstantFoldFPBinOp( + if (std::optional<APFloat> Cst = ConstantFoldFPBinOp( Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI())) return buildFConstant(DstOps[0], *Cst); break; @@ -251,7 +251,7 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, // Try to constant fold these. 
assert(SrcOps.size() == 1 && "Invalid sources"); assert(DstOps.size() == 1 && "Invalid dsts"); - if (Optional<APFloat> Cst = ConstantFoldIntToFloat( + if (std::optional<APFloat> Cst = ConstantFoldIntToFloat( Opc, DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getReg(), *getMRI())) return buildFConstant(DstOps[0], *Cst); break; diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp index 27f4f4f..c5d5d68 100644 --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -108,7 +108,7 @@ static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) { /// 1 1 2 /// 2 2 1 /// 3 3 0 -static Optional<bool> +static std::optional<bool> isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx, int64_t LowestIdx) { // Need at least two byte positions to decide on endianness. @@ -1285,9 +1285,9 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) { LegalizerHelper::LegalizeResult::Legalized; } -static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy, - const Register Op, - const MachineRegisterInfo &MRI) { +static std::optional<APFloat> +constantFoldFpUnary(unsigned Opcode, LLT DstTy, const Register Op, + const MachineRegisterInfo &MRI) { const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI); if (!MaybeCst) return std::nullopt; @@ -1327,8 +1327,8 @@ static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy, return V; } -bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI, - Optional<APFloat> &Cst) { +bool CombinerHelper::matchCombineConstantFoldFpUnary( + MachineInstr &MI, std::optional<APFloat> &Cst) { Register DstReg = MI.getOperand(0).getReg(); Register SrcReg = MI.getOperand(1).getReg(); LLT DstTy = MRI.getType(DstReg); @@ -1336,8 +1336,8 @@ bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI, return Cst.has_value(); } -void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI, - Optional<APFloat> &Cst) { +void CombinerHelper::applyCombineConstantFoldFpUnary( + MachineInstr &MI, std::optional<APFloat> &Cst) { assert(Cst && "Optional is unexpectedly empty!"); Builder.setInstrAndDebugLoc(MI); MachineFunction &MF = Builder.getMF(); @@ -3269,7 +3269,7 @@ bool CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI, return true; } -Optional<SmallVector<Register, 8>> +std::optional<SmallVector<Register, 8>> CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const { assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!"); // We want to detect if Root is part of a tree which represents a bunch @@ -3367,7 +3367,7 @@ matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits, return std::make_pair(Load, Shift / MemSizeInBits); } -Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>> +std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>> CombinerHelper::findLoadOffsetsForLoadOrCombine( SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx, const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) { @@ -3559,7 +3559,7 @@ bool CombinerHelper::matchLoadOrCombine( // pattern. If it does, then we can represent it using a load + possibly a // BSWAP. 
bool IsBigEndianTarget = MF.getDataLayout().isBigEndian(); - Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx); + std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx); if (!IsBigEndian) return false; bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian; @@ -4612,7 +4612,7 @@ bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI, // G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD(X, Y), C) // if and only if (G_PTR_ADD X, C) has one use. Register LHSBase; - Optional<ValueAndVReg> LHSCstOff; + std::optional<ValueAndVReg> LHSCstOff; if (!mi_match(MI.getBaseReg(), MRI, m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff))))) return false; @@ -5983,7 +5983,7 @@ bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI, return MRI.getType(MatchInfo) == DstVecTy; } - Optional<ValueAndVReg> ShiftAmount; + std::optional<ValueAndVReg> ShiftAmount; const auto LoPattern = m_GBitcast(m_Reg(Lo)); const auto HiPattern = m_GLShr(m_GBitcast(m_Reg(Hi)), m_GCst(ShiftAmount)); if (mi_match( @@ -6014,7 +6014,7 @@ bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo) { // Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with // y if K == size of vector element type - Optional<ValueAndVReg> ShiftAmt; + std::optional<ValueAndVReg> ShiftAmt; if (!mi_match(MI.getOperand(1).getReg(), MRI, m_GLShr(m_GBitcast(m_GBuildVector(m_Reg(), m_Reg(MatchInfo))), m_GCst(ShiftAmt)))) diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index 6c44a1c..44fb5db 100644 --- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -197,7 +197,7 @@ MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res, return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}); } -Optional<MachineInstrBuilder> +std::optional<MachineInstrBuilder> MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) { assert(Res == 0 && "Res is a result argument"); @@ -762,9 +762,9 @@ MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res, return buildInstr(TargetOpcode::G_TRUNC, Res, Op); } -MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res, - const SrcOp &Op, - Optional<unsigned> Flags) { +MachineInstrBuilder +MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op, + std::optional<unsigned> Flags) { return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags); } @@ -779,16 +779,15 @@ MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, - Optional<unsigned> Flags) { + std::optional<unsigned> Flags) { return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags); } -MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res, - const SrcOp &Tst, - const SrcOp &Op0, - const SrcOp &Op1, - Optional<unsigned> Flags) { +MachineInstrBuilder +MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst, + const SrcOp &Op0, const SrcOp &Op1, + std::optional<unsigned> Flags) { return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags); } @@ -1029,10 +1028,10 @@ void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy, #endif } -MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc, - ArrayRef<DstOp> DstOps, - ArrayRef<SrcOp> SrcOps, - Optional<unsigned> Flags) { +MachineInstrBuilder +MachineIRBuilder::buildInstr(unsigned Opc, 
ArrayRef<DstOp> DstOps, + ArrayRef<SrcOp> SrcOps, + std::optional<unsigned> Flags) { switch (Opc) { default: break; diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp index 6750edd..a164601 100644 --- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp +++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp @@ -286,9 +286,9 @@ void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, reportGISelFailure(MF, TPC, MORE, R); } -Optional<APInt> llvm::getIConstantVRegVal(Register VReg, - const MachineRegisterInfo &MRI) { - Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough( +std::optional<APInt> llvm::getIConstantVRegVal(Register VReg, + const MachineRegisterInfo &MRI) { + std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough( VReg, MRI, /*LookThroughInstrs*/ false); assert((!ValAndVReg || ValAndVReg->VReg == VReg) && "Value found while looking through instrs"); @@ -297,9 +297,9 @@ Optional<APInt> llvm::getIConstantVRegVal(Register VReg, return ValAndVReg->Value; } -Optional<int64_t> +std::optional<int64_t> llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) { - Optional<APInt> Val = getIConstantVRegVal(VReg, MRI); + std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI); if (Val && Val->getBitWidth() <= 64) return Val->getSExtValue(); return std::nullopt; @@ -308,9 +308,9 @@ llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) { namespace { typedef std::function<bool(const MachineInstr *)> IsOpcodeFn; -typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn; +typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn; -Optional<ValueAndVReg> getConstantVRegValWithLookThrough( +std::optional<ValueAndVReg> getConstantVRegValWithLookThrough( Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode, GetAPCstFn getAPCstValue, bool LookThroughInstrs = true, bool LookThroughAnyExt = false) { @@ -347,7 +347,7 @@ Optional<ValueAndVReg> getConstantVRegValWithLookThrough( if (!MI || !IsConstantOpcode(MI)) return std::nullopt; - Optional<APInt> MaybeVal = getAPCstValue(MI); + std::optional<APInt> MaybeVal = getAPCstValue(MI); if (!MaybeVal) return std::nullopt; APInt &Val = *MaybeVal; @@ -389,14 +389,14 @@ bool isAnyConstant(const MachineInstr *MI) { return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT; } -Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) { +std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) { const MachineOperand &CstVal = MI->getOperand(1); if (CstVal.isCImm()) return CstVal.getCImm()->getValue(); return std::nullopt; } -Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) { +std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) { const MachineOperand &CstVal = MI->getOperand(1); if (CstVal.isCImm()) return CstVal.getCImm()->getValue(); @@ -407,13 +407,13 @@ Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) { } // end anonymous namespace -Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough( +std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough( Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) { return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant, getCImmAsAPInt, LookThroughInstrs); } -Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough( +std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough( Register VReg, const MachineRegisterInfo &MRI, bool 
LookThroughInstrs, bool LookThroughAnyExt) { return getConstantVRegValWithLookThrough( @@ -421,7 +421,7 @@ Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough( LookThroughAnyExt); } -Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough( +std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough( Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) { auto Reg = getConstantVRegValWithLookThrough( VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs); @@ -439,7 +439,7 @@ llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) { return MI->getOperand(1).getFPImm(); } -Optional<DefinitionAndSourceRegister> +std::optional<DefinitionAndSourceRegister> llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) { Register DefSrcReg = Reg; auto *DefMI = MRI.getVRegDef(Reg); @@ -461,14 +461,14 @@ llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) { MachineInstr *llvm::getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) { - Optional<DefinitionAndSourceRegister> DefSrcReg = + std::optional<DefinitionAndSourceRegister> DefSrcReg = getDefSrcRegIgnoringCopies(Reg, MRI); return DefSrcReg ? DefSrcReg->MI : nullptr; } Register llvm::getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) { - Optional<DefinitionAndSourceRegister> DefSrcReg = + std::optional<DefinitionAndSourceRegister> DefSrcReg = getDefSrcRegIgnoringCopies(Reg, MRI); return DefSrcReg ? DefSrcReg->Reg : Register(); } @@ -492,9 +492,10 @@ APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) { return APF; } -Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1, - const Register Op2, - const MachineRegisterInfo &MRI) { +std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, + const Register Op1, + const Register Op2, + const MachineRegisterInfo &MRI) { auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false); if (!MaybeOp2Cst) return std::nullopt; @@ -556,9 +557,9 @@ Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1, return std::nullopt; } -Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, - const Register Op2, - const MachineRegisterInfo &MRI) { +std::optional<APFloat> +llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, + const Register Op2, const MachineRegisterInfo &MRI) { const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI); if (!Op2Cst) return std::nullopt; @@ -759,9 +760,9 @@ Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF, return LiveIn; } -Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1, - uint64_t Imm, - const MachineRegisterInfo &MRI) { +std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, + const Register Op1, uint64_t Imm, + const MachineRegisterInfo &MRI) { auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI); if (MaybeOp1Cst) { switch (Opcode) { @@ -776,9 +777,9 @@ Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1, return std::nullopt; } -Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, - Register Src, - const MachineRegisterInfo &MRI) { +std::optional<APFloat> +llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, + const MachineRegisterInfo &MRI) { assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP); if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) { APFloat DstVal(getFltSemanticForLLT(DstTy)); @@ 
-789,7 +790,7 @@ Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, return std::nullopt; } -Optional<SmallVector<unsigned>> +std::optional<SmallVector<unsigned>> llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) { LLT Ty = MRI.getType(Src); SmallVector<unsigned> FoldedCTLZs; @@ -822,7 +823,7 @@ llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) { bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI, GISelKnownBits *KB) { - Optional<DefinitionAndSourceRegister> DefSrcReg = + std::optional<DefinitionAndSourceRegister> DefSrcReg = getDefSrcRegIgnoringCopies(Reg, MRI); if (!DefSrcReg) return false; @@ -1000,7 +1001,7 @@ LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) { return LLT::scalar(GCD); } -Optional<int> llvm::getSplatIndex(MachineInstr &MI) { +std::optional<int> llvm::getSplatIndex(MachineInstr &MI) { assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR && "Only G_SHUFFLE_VECTOR can have a splat index!"); ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); @@ -1028,9 +1029,9 @@ static bool isBuildVectorOp(unsigned Opcode) { namespace { -Optional<ValueAndVReg> getAnyConstantSplat(Register VReg, - const MachineRegisterInfo &MRI, - bool AllowUndef) { +std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg, + const MachineRegisterInfo &MRI, + bool AllowUndef) { MachineInstr *MI = getDefIgnoringCopies(VReg, MRI); if (!MI) return std::nullopt; @@ -1038,7 +1039,7 @@ Optional<ValueAndVReg> getAnyConstantSplat(Register VReg, if (!isBuildVectorOp(MI->getOpcode())) return std::nullopt; - Optional<ValueAndVReg> SplatValAndReg; + std::optional<ValueAndVReg> SplatValAndReg; for (MachineOperand &Op : MI->uses()) { Register Element = Op.getReg(); auto ElementValAndReg = @@ -1080,11 +1081,11 @@ bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI, AllowUndef); } -Optional<APInt> llvm::getIConstantSplatVal(const Register Reg, - const MachineRegisterInfo &MRI) { +std::optional<APInt> +llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) { if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) { - Optional<ValueAndVReg> ValAndVReg = + std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI); return ValAndVReg->Value; } @@ -1092,12 +1093,13 @@ Optional<APInt> llvm::getIConstantSplatVal(const Register Reg, return std::nullopt; } -Optional<APInt> llvm::getIConstantSplatVal(const MachineInstr &MI, - const MachineRegisterInfo &MRI) { +std::optional<APInt> +llvm::getIConstantSplatVal(const MachineInstr &MI, + const MachineRegisterInfo &MRI) { return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI); } -Optional<int64_t> +std::optional<int64_t> llvm::getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI) { if (auto SplatValAndReg = @@ -1106,15 +1108,15 @@ llvm::getIConstantSplatSExtVal(const Register Reg, return std::nullopt; } -Optional<int64_t> +std::optional<int64_t> llvm::getIConstantSplatSExtVal(const MachineInstr &MI, const MachineRegisterInfo &MRI) { return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI); } -Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg, - const MachineRegisterInfo &MRI, - bool AllowUndef) { +std::optional<FPValueAndVReg> +llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, + bool AllowUndef) { if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef)) return 
getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI); return std::nullopt; @@ -1132,8 +1134,8 @@ bool llvm::isBuildVectorAllOnes(const MachineInstr &MI, return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef); } -Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI, - const MachineRegisterInfo &MRI) { +std::optional<RegOrConstant> +llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) { unsigned Opc = MI.getOpcode(); if (!isBuildVectorOp(Opc)) return std::nullopt; @@ -1202,7 +1204,7 @@ bool llvm::isConstantOrConstantVector(const MachineInstr &MI, return true; } -Optional<APInt> +std::optional<APInt> llvm::isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI) { Register Def = MI.getOperand(0).getReg(); diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp index 2bdae0e..0bdb32c 100644 --- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp +++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp @@ -97,11 +97,11 @@ class ImplicitNullChecks : public MachineFunctionPass { /// If non-None, then an instruction in \p Insts that also must be /// hoisted. - Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence; + std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence; /*implicit*/ DependenceResult( bool CanReorder, - Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence) + std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence) : CanReorder(CanReorder), PotentialDependence(PotentialDependence) { assert((!PotentialDependence || CanReorder) && "!CanReorder && PotentialDependence.hasValue() not allowed!"); @@ -254,7 +254,7 @@ ImplicitNullChecks::computeDependence(const MachineInstr *MI, assert(llvm::all_of(Block, canHandle) && "Check this first!"); assert(!is_contained(Block, MI) && "Block must be exclusive of MI!"); - Optional<ArrayRef<MachineInstr *>::iterator> Dep; + std::optional<ArrayRef<MachineInstr *>::iterator> Dep; for (auto I = Block.begin(), E = Block.end(); I != E; ++I) { if (canReorder(*I, MI)) diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp index b65eceb..19e523a 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp +++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp @@ -1038,7 +1038,7 @@ void MLocTracker::writeRegMask(const MachineOperand *MO, unsigned CurBB, Masks.push_back(std::make_pair(MO, InstID)); } -Optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) { +std::optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) { SpillLocationNo SpillID(SpillLocs.idFor(L)); if (SpillID.id() == 0) { @@ -1278,7 +1278,7 @@ bool InstrRefBasedLDV::isCalleeSavedReg(Register R) const { // void InstrRefBasedLDV::printVarLocInMBB(..) 
#endif -Optional<SpillLocationNo> +std::optional<SpillLocationNo> InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { assert(MI.hasOneMemOperand() && "Spill instruction does not have exactly one memory operand?"); @@ -1293,9 +1293,9 @@ InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { return MTracker->getOrTrackSpillLoc({Reg, Offset}); } -Optional<LocIdx> +std::optional<LocIdx> InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) { - Optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI); + std::optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI); if (!SpillLoc) return std::nullopt; @@ -1426,7 +1426,7 @@ bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI, // Default machine value number is <None> -- if no instruction defines // the corresponding value, it must have been optimized out. - Optional<ValueIDNum> NewID; + std::optional<ValueIDNum> NewID; // Try to lookup the instruction number, and find the machine value number // that it defines. It could be an instruction, or a PHI. @@ -1440,7 +1440,7 @@ bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI, // a register def was folded into a stack store. if (OpNo == MachineFunction::DebugOperandMemNumber && TargetInstr.hasOneMemOperand()) { - Optional<LocIdx> L = findLocationForMemOperand(TargetInstr); + std::optional<LocIdx> L = findLocationForMemOperand(TargetInstr); if (L) NewID = ValueIDNum(BlockNo, InstrIt->second.second, *L); } else if (OpNo != MachineFunction::DebugOperandMemNumber) { @@ -1658,7 +1658,7 @@ bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) { Register Base; StackOffset Offs = TFI->getFrameIndexReference(*MI.getMF(), FI, Base); SpillLoc SL = {Base, Offs}; - Optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(SL); + std::optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(SL); // We might be able to find a value, but have chosen not to, to avoid // tracking too much stack information. @@ -1753,7 +1753,8 @@ void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) { // If this instruction writes to a spill slot, def that slot. if (hasFoldedStackStore(MI)) { - if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) { + if (std::optional<SpillLocationNo> SpillNo = + extractSpillBaseRegAndOffset(MI)) { for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) { unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I); LocIdx L = MTracker->getSpillMLoc(SpillID); @@ -1795,7 +1796,8 @@ void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) { // Tell TTracker about any folded stack store. if (hasFoldedStackStore(MI)) { - if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) { + if (std::optional<SpillLocationNo> SpillNo = + extractSpillBaseRegAndOffset(MI)) { for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) { unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I); LocIdx L = MTracker->getSpillMLoc(SpillID); @@ -1836,7 +1838,7 @@ void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) { } } -Optional<SpillLocationNo> +std::optional<SpillLocationNo> InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI, MachineFunction *MF) { // TODO: Handle multiple stores folded into one. 
@@ -1866,7 +1868,7 @@ bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI, return Reg != 0; } -Optional<SpillLocationNo> +std::optional<SpillLocationNo> InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI, MachineFunction *MF, unsigned &Reg) { if (!MI.hasOneMemOperand()) @@ -1910,7 +1912,7 @@ bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) { // First, if there are any DBG_VALUEs pointing at a spill slot that is // written to, terminate that variable location. The value in memory // will have changed. DbgEntityHistoryCalculator doesn't try to detect this. - if (Optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) { + if (std::optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) { // Un-set this location and clobber, so that earlier locations don't // continue past this store. for (unsigned SlotIdx = 0; SlotIdx < MTracker->NumSlotIdxes; ++SlotIdx) { @@ -1961,7 +1963,7 @@ bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) { unsigned SpillID = MTracker->getLocID(Loc, {Size, 0}); DoTransfer(Reg, SpillID); } else { - Optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg); + std::optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg); if (!Loc) return false; @@ -2707,7 +2709,7 @@ bool InstrRefBasedLDV::pickVPHILoc( continue; } - Optional<ValueIDNum> JoinedOpLoc = + std::optional<ValueIDNum> JoinedOpLoc = pickOperandPHILoc(Idx, MBB, LiveOuts, MOutLocs, BlockOrders); if (!JoinedOpLoc) @@ -2720,7 +2722,7 @@ bool InstrRefBasedLDV::pickVPHILoc( return true; } -Optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc( +std::optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc( unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs, const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) { @@ -3954,7 +3956,7 @@ public: } // end namespace llvm -Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs( +std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs( MachineFunction &MF, const ValueTable *MLiveOuts, const ValueTable *MLiveIns, MachineInstr &Here, uint64_t InstrNum) { assert(MLiveOuts && MLiveIns && @@ -3967,13 +3969,13 @@ Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs( if (SeenDbgPHIIt != SeenDbgPHIs.end()) return SeenDbgPHIIt->second; - Optional<ValueIDNum> Result = + std::optional<ValueIDNum> Result = resolveDbgPHIsImpl(MF, MLiveOuts, MLiveIns, Here, InstrNum); SeenDbgPHIs.insert({&Here, Result}); return Result; } -Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( +std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( MachineFunction &MF, const ValueTable *MLiveOuts, const ValueTable *MLiveIns, MachineInstr &Here, uint64_t InstrNum) { // Pick out records of DBG_PHI instructions that have been observed. If there diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h index 9d0f4d0..5b8b3e0 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h +++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h @@ -892,7 +892,7 @@ public: /// Find LocIdx for SpillLoc \p L, creating a new one if it's not tracked. /// Returns std::nullopt when in scenarios where a spill slot could be /// tracked, but we would likely run into resource limitations. - Optional<SpillLocationNo> getOrTrackSpillLoc(SpillLoc L); + std::optional<SpillLocationNo> getOrTrackSpillLoc(SpillLoc L); // Get LocIdx of a spill ID. 
LocIdx getSpillMLoc(unsigned SpillID) { @@ -1129,10 +1129,10 @@ private: MachineBasicBlock *MBB; /// The value number read by the DBG_PHI -- or std::nullopt if it didn't /// refer to a value. - Optional<ValueIDNum> ValueRead; + std::optional<ValueIDNum> ValueRead; /// Register/Stack location the DBG_PHI reads -- or std::nullopt if it /// referred to something unexpected. - Optional<LocIdx> ReadLoc; + std::optional<LocIdx> ReadLoc; operator unsigned() const { return InstrNum; } }; @@ -1151,7 +1151,7 @@ private: /// DBG_INSTR_REFs that call resolveDbgPHIs. These variable references solve /// a mini SSA problem caused by DBG_PHIs being cloned, this collection caches /// the result. - DenseMap<MachineInstr *, Optional<ValueIDNum>> SeenDbgPHIs; + DenseMap<MachineInstr *, std::optional<ValueIDNum>> SeenDbgPHIs; DbgOpIDMap DbgOpStore; @@ -1166,8 +1166,8 @@ private: StringRef StackProbeSymbolName; /// Tests whether this instruction is a spill to a stack slot. - Optional<SpillLocationNo> isSpillInstruction(const MachineInstr &MI, - MachineFunction *MF); + std::optional<SpillLocationNo> isSpillInstruction(const MachineInstr &MI, + MachineFunction *MF); /// Decide if @MI is a spill instruction and return true if it is. We use 2 /// criteria to make this decision: @@ -1180,12 +1180,13 @@ private: /// If a given instruction is identified as a spill, return the spill slot /// and set \p Reg to the spilled register. - Optional<SpillLocationNo> isRestoreInstruction(const MachineInstr &MI, - MachineFunction *MF, unsigned &Reg); + std::optional<SpillLocationNo> isRestoreInstruction(const MachineInstr &MI, + MachineFunction *MF, + unsigned &Reg); /// Given a spill instruction, extract the spill slot information, ensure it's /// tracked, and return the spill number. - Optional<SpillLocationNo> + std::optional<SpillLocationNo> extractSpillBaseRegAndOffset(const MachineInstr &MI); /// Observe a single instruction while stepping through a block. @@ -1230,16 +1231,17 @@ private: /// \p Here the position of a DBG_INSTR_REF seeking a machine value number /// \p InstrNum Debug instruction number defined by DBG_PHI instructions. /// \returns The machine value number at position Here, or std::nullopt. - Optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF, - const ValueTable *MLiveOuts, - const ValueTable *MLiveIns, - MachineInstr &Here, uint64_t InstrNum); - - Optional<ValueIDNum> resolveDbgPHIsImpl(MachineFunction &MF, - const ValueTable *MLiveOuts, - const ValueTable *MLiveIns, - MachineInstr &Here, - uint64_t InstrNum); + std::optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF, + const ValueTable *MLiveOuts, + const ValueTable *MLiveIns, + MachineInstr &Here, + uint64_t InstrNum); + + std::optional<ValueIDNum> resolveDbgPHIsImpl(MachineFunction &MF, + const ValueTable *MLiveOuts, + const ValueTable *MLiveIns, + MachineInstr &Here, + uint64_t InstrNum); /// Step through the function, recording register definitions and movements /// in an MLocTracker. 
Convert the observations into a per-block transfer @@ -1353,7 +1355,7 @@ private: const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs, const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders); - Optional<ValueIDNum> pickOperandPHILoc( + std::optional<ValueIDNum> pickOperandPHILoc( unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs, const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders); @@ -1417,7 +1419,7 @@ public: && !MemOperand->getPseudoValue()->isAliased(MFI); } - Optional<LocIdx> findLocationForMemOperand(const MachineInstr &MI); + std::optional<LocIdx> findLocationForMemOperand(const MachineInstr &MI); }; } // namespace LiveDebugValues diff --git a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp index 222bbb0..5ad6760 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp +++ b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp @@ -859,7 +859,7 @@ private: /// Insert a set of ranges. void insertFromLocSet(const VarLocSet &ToLoad, const VarLocMap &Map); - llvm::Optional<LocIndices> getEntryValueBackup(DebugVariable Var); + std::optional<LocIndices> getEntryValueBackup(DebugVariable Var); /// Empty the set. void clear() { @@ -946,9 +946,9 @@ private: /// If a given instruction is identified as a spill, return the spill location /// and set \p Reg to the spilled register. - Optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI, - MachineFunction *MF, - Register &Reg); + std::optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI, + MachineFunction *MF, + Register &Reg); /// Given a spill instruction, extract the register and offset used to /// address the spill location in a target independent way. VarLoc::SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI); @@ -1110,7 +1110,7 @@ void VarLocBasedLDV::OpenRangesSet::insert(LocIndices VarLocIDs, /// Return the Loc ID of an entry value backup location, if it exists for the /// variable. -llvm::Optional<LocIndices> +std::optional<LocIndices> VarLocBasedLDV::OpenRangesSet::getEntryValueBackup(DebugVariable Var) { auto It = EntryValuesBackupVars.find(Var); if (It != EntryValuesBackupVars.end()) @@ -1398,7 +1398,7 @@ void VarLocBasedLDV::emitEntryValues(MachineInstr &MI, continue; auto DebugVar = VL.Var; - Optional<LocIndices> EntryValBackupIDs = + std::optional<LocIndices> EntryValBackupIDs = OpenRanges.getEntryValueBackup(DebugVar); // If the parameter has the entry value backup, it means we should @@ -1618,9 +1618,9 @@ bool VarLocBasedLDV::isLocationSpill(const MachineInstr &MI, return false; } -Optional<VarLocBasedLDV::VarLoc::SpillLoc> +std::optional<VarLocBasedLDV::VarLoc::SpillLoc> VarLocBasedLDV::isRestoreInstruction(const MachineInstr &MI, - MachineFunction *MF, Register &Reg) { + MachineFunction *MF, Register &Reg) { if (!MI.hasOneMemOperand()) return std::nullopt; @@ -1647,7 +1647,7 @@ void VarLocBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI, MachineFunction *MF = MI.getMF(); TransferKind TKind; Register Reg; - Optional<VarLoc::SpillLoc> Loc; + std::optional<VarLoc::SpillLoc> Loc; LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump();); diff --git a/llvm/lib/CodeGen/LiveDebugVariables.cpp b/llvm/lib/CodeGen/LiveDebugVariables.cpp index 4db941e..d211536 100644 --- a/llvm/lib/CodeGen/LiveDebugVariables.cpp +++ b/llvm/lib/CodeGen/LiveDebugVariables.cpp @@ -441,11 +441,12 @@ public: /// VNInfo. 
/// \param [out] Kills Append end points of VNI's live range to Kills. /// \param LIS Live intervals analysis. - void extendDef(SlotIndex Idx, DbgVariableValue DbgValue, - SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>> - &LiveIntervalInfo, - Optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills, - LiveIntervals &LIS); + void + extendDef(SlotIndex Idx, DbgVariableValue DbgValue, + SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>> + &LiveIntervalInfo, + std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills, + LiveIntervals &LIS); /// The value in LI may be copies to other registers. Determine if /// any of the copies are available at the kill points, and add defs if @@ -957,7 +958,7 @@ void UserValue::extendDef( SlotIndex Idx, DbgVariableValue DbgValue, SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>> &LiveIntervalInfo, - Optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills, + std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills, LiveIntervals &LIS) { SlotIndex Start = Idx; MachineBasicBlock *MBB = LIS.getMBBFromIndex(Start); @@ -1131,7 +1132,7 @@ void UserValue::computeIntervals(MachineRegisterInfo &MRI, LIs[LocNo] = {LI, VNI}; } if (ShouldExtendDef) { - Optional<std::pair<SlotIndex, SmallVector<unsigned>>> Kills; + std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> Kills; extendDef(Idx, DbgValue, LIs, Kills, LIS); if (Kills) { diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp index 8bcf5e2..21c95e1 100644 --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -376,10 +376,11 @@ struct ParsedMachineOperand { MachineOperand Operand; StringRef::iterator Begin; StringRef::iterator End; - Optional<unsigned> TiedDefIdx; + std::optional<unsigned> TiedDefIdx; ParsedMachineOperand(const MachineOperand &Operand, StringRef::iterator Begin, - StringRef::iterator End, Optional<unsigned> &TiedDefIdx) + StringRef::iterator End, + std::optional<unsigned> &TiedDefIdx) : Operand(Operand), Begin(Begin), End(End), TiedDefIdx(TiedDefIdx) { if (TiedDefIdx) assert(Operand.isReg() && Operand.isUse() && @@ -448,7 +449,8 @@ public: bool parseSubRegisterIndex(unsigned &SubReg); bool parseRegisterTiedDefIndex(unsigned &TiedDefIdx); bool parseRegisterOperand(MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx, bool IsDef = false); + std::optional<unsigned> &TiedDefIdx, + bool IsDef = false); bool parseImmediateOperand(MachineOperand &Dest); bool parseIRConstant(StringRef::iterator Loc, StringRef StringValue, const Constant *&C); @@ -488,17 +490,17 @@ public: bool parseLiveoutRegisterMaskOperand(MachineOperand &Dest); bool parseMachineOperand(const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx); + std::optional<unsigned> &TiedDefIdx); bool parseMachineOperandAndTargetFlags(const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx); + std::optional<unsigned> &TiedDefIdx); bool parseOffset(int64_t &Offset); bool parseIRBlockAddressTaken(BasicBlock *&BB); bool parseAlignment(uint64_t &Alignment); bool parseAddrspace(unsigned &Addrspace); - bool parseSectionID(Optional<MBBSectionID> &SID); - bool parseBBID(Optional<unsigned> &BBID); + bool parseSectionID(std::optional<MBBSectionID> &SID); + bool parseBBID(std::optional<unsigned> &BBID); bool parseOperandsOffset(MachineOperand &Op); bool parseIRValue(const Value *&V); bool 
parseMemoryOperandFlag(MachineMemOperand::Flags &Flags); @@ -641,7 +643,7 @@ bool MIParser::consumeIfPresent(MIToken::TokenKind TokenKind) { } // Parse Machine Basic Block Section ID. -bool MIParser::parseSectionID(Optional<MBBSectionID> &SID) { +bool MIParser::parseSectionID(std::optional<MBBSectionID> &SID) { assert(Token.is(MIToken::kw_bbsections)); lex(); if (Token.is(MIToken::IntegerLiteral)) { @@ -663,7 +665,7 @@ bool MIParser::parseSectionID(Optional<MBBSectionID> &SID) { } // Parse Machine Basic Block ID. -bool MIParser::parseBBID(Optional<unsigned> &BBID) { +bool MIParser::parseBBID(std::optional<unsigned> &BBID) { assert(Token.is(MIToken::kw_bb_id)); lex(); unsigned Value = 0; @@ -688,9 +690,9 @@ bool MIParser::parseBasicBlockDefinition( bool IsLandingPad = false; bool IsInlineAsmBrIndirectTarget = false; bool IsEHFuncletEntry = false; - Optional<MBBSectionID> SectionID; + std::optional<MBBSectionID> SectionID; uint64_t Alignment = 0; - Optional<unsigned> BBID; + std::optional<unsigned> BBID; BasicBlock *BB = nullptr; if (consumeIfPresent(MIToken::lparen)) { do { @@ -1021,7 +1023,7 @@ bool MIParser::parse(MachineInstr *&MI) { SmallVector<ParsedMachineOperand, 8> Operands; while (Token.isRegister() || Token.isRegisterFlag()) { auto Loc = Token.location(); - Optional<unsigned> TiedDefIdx; + std::optional<unsigned> TiedDefIdx; if (parseRegisterOperand(MO, TiedDefIdx, /*IsDef=*/true)) return true; Operands.push_back( @@ -1047,7 +1049,7 @@ bool MIParser::parse(MachineInstr *&MI) { Token.isNot(MIToken::kw_debug_instr_number) && Token.isNot(MIToken::coloncolon) && Token.isNot(MIToken::lbrace)) { auto Loc = Token.location(); - Optional<unsigned> TiedDefIdx; + std::optional<unsigned> TiedDefIdx; if (parseMachineOperandAndTargetFlags(OpCode, Operands.size(), MO, TiedDefIdx)) return true; Operands.push_back( @@ -1706,7 +1708,7 @@ bool MIParser::assignRegisterTies(MachineInstr &MI, } bool MIParser::parseRegisterOperand(MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx, + std::optional<unsigned> &TiedDefIdx, bool IsDef) { unsigned Flags = IsDef ? 
RegState::Define : 0; while (Token.isRegisterFlag()) { @@ -2812,7 +2814,7 @@ bool MIParser::parseLiveoutRegisterMaskOperand(MachineOperand &Dest) { bool MIParser::parseMachineOperand(const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx) { + std::optional<unsigned> &TiedDefIdx) { switch (Token.kind()) { case MIToken::kw_implicit: case MIToken::kw_implicit_define: @@ -2917,7 +2919,7 @@ bool MIParser::parseMachineOperand(const unsigned OpCode, const unsigned OpIdx, bool MIParser::parseMachineOperandAndTargetFlags( const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest, - Optional<unsigned> &TiedDefIdx) { + std::optional<unsigned> &TiedDefIdx) { unsigned TF = 0; bool HasTargetFlags = false; if (Token.is(MIToken::kw_target_flags)) { diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp index d384035..8896f05 100644 --- a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp +++ b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp @@ -1113,7 +1113,7 @@ int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition( } bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) { - Optional<float> CachedReward; + std::optional<float> CachedReward; auto GetReward = [&]() { if (!CachedReward) CachedReward = static_cast<float>( diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp index 6c88671..271cd01 100644 --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -2329,7 +2329,7 @@ static unsigned getSpillSlotSize(const MMOList &Accesses, return Size; } -Optional<unsigned> +std::optional<unsigned> MachineInstr::getSpillSize(const TargetInstrInfo *TII) const { int FI; if (TII->isStoreToStackSlotPostFE(*this, FI)) { @@ -2340,7 +2340,7 @@ MachineInstr::getSpillSize(const TargetInstrInfo *TII) const { return std::nullopt; } -Optional<unsigned> +std::optional<unsigned> MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const { MMOList Accesses; if (TII->hasStoreToStackSlot(*this, Accesses)) @@ -2348,7 +2348,7 @@ MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const { return std::nullopt; } -Optional<unsigned> +std::optional<unsigned> MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const { int FI; if (TII->isLoadFromStackSlotPostFE(*this, FI)) { @@ -2359,7 +2359,7 @@ MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const { return std::nullopt; } -Optional<unsigned> +std::optional<unsigned> MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const { MMOList Accesses; if (TII->hasLoadFromStackSlot(*this, Accesses)) diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp index bb2d1da..a3ffce90 100644 --- a/llvm/lib/CodeGen/MachineOperand.cpp +++ b/llvm/lib/CodeGen/MachineOperand.cpp @@ -756,8 +756,9 @@ void MachineOperand::print(raw_ostream &OS, LLT TypeToPrint, } void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST, - LLT TypeToPrint, Optional<unsigned> OpIdx, bool PrintDef, - bool IsStandalone, bool ShouldPrintRegisterTies, + LLT TypeToPrint, std::optional<unsigned> OpIdx, + bool PrintDef, bool IsStandalone, + bool ShouldPrintRegisterTies, unsigned TiedOperandIdx, const TargetRegisterInfo *TRI, const TargetIntrinsicInfo *IntrinsicInfo) const { diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp index 195fd45..20d2517 100644 --- a/llvm/lib/CodeGen/ModuloSchedule.cpp +++ b/llvm/lib/CodeGen/ModuloSchedule.cpp @@ -1281,7 
+1281,7 @@ class KernelRewriter { // Insert a phi that carries LoopReg from the loop body and InitReg otherwise. // If InitReg is not given it is chosen arbitrarily. It will either be undef // or will be chosen so as to share another phi. - Register phi(Register LoopReg, Optional<Register> InitReg = {}, + Register phi(Register LoopReg, std::optional<Register> InitReg = {}, const TargetRegisterClass *RC = nullptr); // Create an undef register of the given register class. Register undef(const TargetRegisterClass *RC); @@ -1389,7 +1389,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) { // First, dive through the phi chain to find the defaults for the generated // phis. - SmallVector<Optional<Register>, 4> Defaults; + SmallVector<std::optional<Register>, 4> Defaults; Register LoopReg = Reg; auto LoopProducer = Producer; while (LoopProducer->isPHI() && LoopProducer->getParent() == BB) { @@ -1400,7 +1400,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) { } int LoopProducerStage = S.getStage(LoopProducer); - Optional<Register> IllegalPhiDefault; + std::optional<Register> IllegalPhiDefault; if (LoopProducerStage == -1) { // Do nothing. @@ -1432,9 +1432,9 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) { // If we need more phis than we have defaults for, pad out with undefs for // the earliest phis, which are at the end of the defaults chain (the // chain is in reverse order). - Defaults.resize(Defaults.size() + StageDiff, Defaults.empty() - ? Optional<Register>() - : Defaults.back()); + Defaults.resize(Defaults.size() + StageDiff, + Defaults.empty() ? std::optional<Register>() + : Defaults.back()); } } @@ -1466,7 +1466,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) { return LoopReg; } -Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg, +Register KernelRewriter::phi(Register LoopReg, std::optional<Register> InitReg, const TargetRegisterClass *RC) { // If the init register is not undef, try and find an existing phi. if (InitReg) { diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp index 31e37c4..be1b770 100644 --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -66,7 +66,6 @@ //===----------------------------------------------------------------------===// #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" @@ -273,11 +272,11 @@ namespace { : MI(MI), CommutePair(std::make_pair(Idx1, Idx2)) {} MachineInstr *getMI() const { return MI; } - Optional<IndexPair> getCommutePair() const { return CommutePair; } + std::optional<IndexPair> getCommutePair() const { return CommutePair; } private: MachineInstr *MI; - Optional<IndexPair> CommutePair; + std::optional<IndexPair> CommutePair; }; /// Helper class to hold a reply for ValueTracker queries. diff --git a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h index a3936ea..4683857 100644 --- a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h +++ b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h @@ -10,7 +10,6 @@ #define LLVM_CODEGEN_REGALLOCEVICTIONADVISOR_H #include "llvm/ADT/ArrayRef.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/StringRef.h" #include "llvm/CodeGen/Register.h" @@ -126,9 +125,9 @@ protected: // Get the upper limit of elements in the given Order we need to analize. 
// TODO: is this heuristic, we could consider learning it. - Optional<unsigned> getOrderLimit(const LiveInterval &VirtReg, - const AllocationOrder &Order, - unsigned CostPerUseLimit) const; + std::optional<unsigned> getOrderLimit(const LiveInterval &VirtReg, + const AllocationOrder &Order, + unsigned CostPerUseLimit) const; // Determine if it's worth trying to allocate this reg, given the // CostPerUseLimit diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index ead91f6..7c0f1d5 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -523,7 +523,7 @@ bool RegAllocEvictionAdvisor::isUnusedCalleeSavedReg(MCRegister PhysReg) const { return !Matrix->isPhysRegUsed(PhysReg); } -Optional<unsigned> +std::optional<unsigned> RegAllocEvictionAdvisor::getOrderLimit(const LiveInterval &VirtReg, const AllocationOrder &Order, unsigned CostPerUseLimit) const { diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp index ad73e76..5d4d982 100644 --- a/llvm/lib/CodeGen/SelectOptimize.cpp +++ b/llvm/lib/CodeGen/SelectOptimize.cpp @@ -200,7 +200,7 @@ private: SmallPtrSet<const Instruction *, 2> getSIset(const SelectGroups &SIGroups); // Returns the latency cost of a given instruction. - Optional<uint64_t> computeInstCost(const Instruction *I); + std::optional<uint64_t> computeInstCost(const Instruction *I); // Returns the misprediction cost of a given select when converted to branch. Scaled64 getMispredictionCost(const SelectInst *SI, const Scaled64 CondCost); @@ -977,11 +977,11 @@ SelectOptimize::getSIset(const SelectGroups &SIGroups) { return SIset; } -Optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) { +std::optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) { InstructionCost ICost = TTI->getInstructionCost(I, TargetTransformInfo::TCK_Latency); if (auto OC = ICost.getValue()) - return Optional<uint64_t>(*OC); + return std::optional<uint64_t>(*OC); return std::nullopt; } diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index f9a73351..633198f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -7929,7 +7929,7 @@ private: /// LOAD /// /// *ExtractVectorElement -static const Optional<ByteProvider> +static const std::optional<ByteProvider> calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth, std::optional<uint64_t> VectorIndex, unsigned StartingIndex = 0) { @@ -8003,7 +8003,7 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth, if (Index >= NarrowByteWidth) return Op.getOpcode() == ISD::ZERO_EXTEND - ? Optional<ByteProvider>(ByteProvider::getConstantZero()) + ? std::optional<ByteProvider>(ByteProvider::getConstantZero()) : std::nullopt; return calculateByteProvider(NarrowOp, Index, Depth + 1, VectorIndex, StartingIndex); @@ -8053,7 +8053,7 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth, // question if (Index >= NarrowByteWidth) return L->getExtensionType() == ISD::ZEXTLOAD - ? Optional<ByteProvider>(ByteProvider::getConstantZero()) + ? std::optional<ByteProvider>(ByteProvider::getConstantZero()) : std::nullopt; unsigned BPVectorIndex = VectorIndex.value_or(0U); @@ -8075,8 +8075,8 @@ static unsigned bigEndianByteAt(unsigned BW, unsigned i) { // Check if the bytes offsets we are looking at match with either big or // little endian value loaded. 
Return true for big endian, false for little // endian, and std::nullopt if match failed. -static Optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets, - int64_t FirstOffset) { +static std::optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets, + int64_t FirstOffset) { // The endian can be decided only when it is 2 bytes at least. unsigned Width = ByteOffsets.size(); if (Width < 2) @@ -8367,7 +8367,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) { SDValue Chain; SmallPtrSet<LoadSDNode *, 8> Loads; - Optional<ByteProvider> FirstByteProvider; + std::optional<ByteProvider> FirstByteProvider; int64_t FirstOffset = INT64_MAX; // Check if all the bytes of the OR we are looking at are loaded from the same @@ -8460,7 +8460,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) { // Check if the bytes of the OR we are looking at match with either big or // little endian value load - Optional<bool> IsBigEndian = isBigEndian( + std::optional<bool> IsBigEndian = isBigEndian( makeArrayRef(ByteOffsets).drop_back(ZeroExtendedBytes), FirstOffset); if (!IsBigEndian) return SDValue(); @@ -25157,7 +25157,7 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const { bool IsAtomic; SDValue BasePtr; int64_t Offset; - Optional<int64_t> NumBytes; + std::optional<int64_t> NumBytes; MachineMemOperand *MMO; }; @@ -25172,21 +25172,26 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const { : 0; uint64_t Size = MemoryLocation::getSizeOrUnknown(LSN->getMemoryVT().getStoreSize()); - return {LSN->isVolatile(), LSN->isAtomic(), LSN->getBasePtr(), + return {LSN->isVolatile(), + LSN->isAtomic(), + LSN->getBasePtr(), Offset /*base offset*/, - Optional<int64_t>(Size), + std::optional<int64_t>(Size), LSN->getMemOperand()}; } if (const auto *LN = cast<LifetimeSDNode>(N)) - return {false /*isVolatile*/, /*isAtomic*/ false, LN->getOperand(1), + return {false /*isVolatile*/, + /*isAtomic*/ false, + LN->getOperand(1), (LN->hasOffset()) ? LN->getOffset() : 0, - (LN->hasOffset()) ? Optional<int64_t>(LN->getSize()) - : Optional<int64_t>(), + (LN->hasOffset()) ? std::optional<int64_t>(LN->getSize()) + : std::optional<int64_t>(), (MachineMemOperand *)nullptr}; // Default. - return {false /*isvolatile*/, /*isAtomic*/ false, SDValue(), - (int64_t)0 /*offset*/, - Optional<int64_t>() /*size*/, (MachineMemOperand *)nullptr}; + return {false /*isvolatile*/, + /*isAtomic*/ false, SDValue(), + (int64_t)0 /*offset*/, std::optional<int64_t>() /*size*/, + (MachineMemOperand *)nullptr}; }; MemUseCharacteristics MUC0 = getCharacteristics(Op0), diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index a4eb634..790cba9 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -6593,9 +6593,10 @@ SDValue DAGTypeLegalizer::WidenVecOp_VSELECT(SDNode *N) { // Align: If 0, don't allow use of a wider type // WidenEx: If Align is not 0, the amount additional we can load/store from. 
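The isBigEndian helper above shows why this commit can swap types mechanically: std::optional<bool> carries the same three-state answer (big endian / little endian / no match) that llvm::Optional did, and callers keep testing it with `if (!IsBigEndian)`. A minimal, self-contained sketch of that tri-state idiom, using only standard-library types and a simplified offset check (this is an illustration, not the DAGCombiner code):

// --- illustrative sketch, not part of this commit ---
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Returns true for big endian, false for little endian, and std::nullopt
// when the offsets match neither byte order.
std::optional<bool> isBigEndianOffsets(const std::vector<int64_t> &ByteOffsets,
                                       int64_t FirstOffset) {
  std::size_t Width = ByteOffsets.size();
  if (Width < 2) // Cannot decide endianness from a single byte.
    return std::nullopt;

  bool BigEndian = true, LittleEndian = true;
  for (std::size_t i = 0; i != Width; ++i) {
    LittleEndian &= ByteOffsets[i] == FirstOffset + int64_t(i);
    BigEndian &= ByteOffsets[i] == FirstOffset + int64_t(Width - 1 - i);
  }
  if (BigEndian == LittleEndian) // Both or neither matched: ambiguous.
    return std::nullopt;
  return BigEndian;
}
// --- end sketch ---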
-static Optional<EVT> findMemType(SelectionDAG &DAG, const TargetLowering &TLI, - unsigned Width, EVT WidenVT, - unsigned Align = 0, unsigned WidenEx = 0) { +static std::optional<EVT> findMemType(SelectionDAG &DAG, + const TargetLowering &TLI, unsigned Width, + EVT WidenVT, unsigned Align = 0, + unsigned WidenEx = 0) { EVT WidenEltVT = WidenVT.getVectorElementType(); const bool Scalable = WidenVT.isScalableVector(); unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinSize(); @@ -6718,7 +6719,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain, (!LD->isSimple() || LdVT.isScalableVector()) ? 0 : LD->getAlign().value(); // Find the vector type that can load from. - Optional<EVT> FirstVT = + std::optional<EVT> FirstVT = findMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign, WidthDiff.getKnownMinSize()); @@ -6731,7 +6732,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain, // Unless we're able to load in one instruction we must work out how to load // the remainder. if (!TypeSize::isKnownLE(LdWidth, FirstVTWidth)) { - Optional<EVT> NewVT = FirstVT; + std::optional<EVT> NewVT = FirstVT; TypeSize RemainingWidth = LdWidth; TypeSize NewVTWidth = FirstVTWidth; do { @@ -6954,7 +6955,7 @@ bool DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain, while (StWidth.isNonZero()) { // Find the largest vector type we can store with. - Optional<EVT> NewVT = + std::optional<EVT> NewVT = findMemType(DAG, TLI, StWidth.getKnownMinSize(), ValVT); if (!NewVT) return false; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 2cb93de..e9f61e7 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -493,7 +493,7 @@ bool ISD::isVPReduction(unsigned Opcode) { } /// The operand position of the vector mask. -Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) { +std::optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) { switch (Opcode) { default: return std::nullopt; @@ -505,7 +505,7 @@ Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) { } /// The operand position of the explicit vector length parameter. -Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) { +std::optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) { switch (Opcode) { default: return std::nullopt; @@ -5617,8 +5617,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, return V; } -static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, - const APInt &C2) { +static std::optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, + const APInt &C2) { switch (Opcode) { case ISD::ADD: return C1 + C2; case ISD::SUB: return C1 - C2; @@ -5699,10 +5699,9 @@ static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, // Handle constant folding with UNDEF. // TODO: Handle more cases. 
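FoldValue above is the most common shape touched by this commit: a function that returned llvm::Optional<APInt> now returns std::optional<APInt>, values convert into the optional implicitly, and std::nullopt is the "did not fold" result. A self-contained sketch of that shape, with int64_t standing in for APInt (an assumption made only to keep the example runnable):

// --- illustrative sketch, not part of this commit ---
#include <cstdint>
#include <optional>

enum class Opcode { Add, Sub, Mul, Unknown };

// Returns the folded constant, or std::nullopt for opcodes we do not fold.
std::optional<int64_t> foldValue(Opcode Opc, int64_t C1, int64_t C2) {
  switch (Opc) {
  case Opcode::Add: return C1 + C2; // value converts to the optional
  case Opcode::Sub: return C1 - C2;
  case Opcode::Mul: return C1 * C2;
  default:          return std::nullopt;
  }
}

bool tryFold(Opcode Opc, int64_t C1, int64_t C2, int64_t &Out) {
  std::optional<int64_t> FoldAttempt = foldValue(Opc, C1, C2);
  if (!FoldAttempt) // same test the callers use after the migration
    return false;
  Out = *FoldAttempt;
  return true;
}
// --- end sketch ---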
-static llvm::Optional<APInt> FoldValueWithUndef(unsigned Opcode, - const APInt &C1, bool IsUndef1, - const APInt &C2, - bool IsUndef2) { +static std::optional<APInt> FoldValueWithUndef(unsigned Opcode, const APInt &C1, + bool IsUndef1, const APInt &C2, + bool IsUndef2) { if (!(IsUndef1 || IsUndef2)) return FoldValue(Opcode, C1, C2); @@ -5787,7 +5786,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, if (C1->isOpaque() || C2->isOpaque()) return SDValue(); - Optional<APInt> FoldAttempt = + std::optional<APInt> FoldAttempt = FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()); if (!FoldAttempt) return SDValue(); @@ -5832,7 +5831,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) { SmallVector<APInt> RawBits; for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) { - Optional<APInt> Fold = FoldValueWithUndef( + std::optional<APInt> Fold = FoldValueWithUndef( Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]); if (!Fold) break; @@ -11967,7 +11966,7 @@ bool BuildVectorSDNode::isConstant() const { return true; } -Optional<std::pair<APInt, APInt>> +std::optional<std::pair<APInt, APInt>> BuildVectorSDNode::isConstantSequence() const { unsigned NumOps = getNumOperands(); if (NumOps < 2) diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp index d236433..a432d8e 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp @@ -85,9 +85,9 @@ bool BaseIndexOffset::equalBaseIndex(const BaseIndexOffset &Other, } bool BaseIndexOffset::computeAliasing(const SDNode *Op0, - const Optional<int64_t> NumBytes0, + const std::optional<int64_t> NumBytes0, const SDNode *Op1, - const Optional<int64_t> NumBytes1, + const std::optional<int64_t> NumBytes1, const SelectionDAG &DAG, bool &IsAlias) { BaseIndexOffset BasePtr0 = match(Op0, DAG); diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp index 39e2c39..708596a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp @@ -159,9 +159,9 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType, /// Utility function for reservePreviousStackSlotForValue. Tries to find /// stack slot index to which we have spilled value for previous statepoints. /// LookUpDepth specifies maximum DFS depth this function is allowed to look. -static Optional<int> findPreviousSpillSlot(const Value *Val, - SelectionDAGBuilder &Builder, - int LookUpDepth) { +static std::optional<int> findPreviousSpillSlot(const Value *Val, + SelectionDAGBuilder &Builder, + int LookUpDepth) { // Can not look any further - give up now if (LookUpDepth <= 0) return std::nullopt; @@ -196,10 +196,10 @@ static Optional<int> findPreviousSpillSlot(const Value *Val, // All incoming values should have same known stack slot, otherwise result // is unknown. 
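The PHI handling that follows relies on merging per-operand results: every incoming value must report the same known slot, and any unknown or conflicting answer makes the merged result unknown. A standalone sketch of that merge over std::optional<int>, with a plain vector standing in for the PHI's incoming values:

// --- illustrative sketch, not part of this commit ---
#include <optional>
#include <vector>

// Merge per-incoming-value spill slots: all known values must agree;
// any disagreement or unknown input makes the whole result unknown.
std::optional<int>
mergeSpillSlots(const std::vector<std::optional<int>> &Incoming) {
  std::optional<int> Merged;
  for (const std::optional<int> &Slot : Incoming) {
    if (!Slot)
      return std::nullopt; // one unknown input poisons the merge
    if (Merged && *Merged != *Slot)
      return std::nullopt; // two different known slots: give up
    Merged = Slot;
  }
  return Merged; // still nullopt if Incoming was empty
}
// --- end sketch ---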
if (const PHINode *Phi = dyn_cast<PHINode>(Val)) { - Optional<int> MergedResult; + std::optional<int> MergedResult; for (const auto &IncomingValue : Phi->incoming_values()) { - Optional<int> SpillSlot = + std::optional<int> SpillSlot = findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1); if (!SpillSlot) return std::nullopt; @@ -283,7 +283,7 @@ static void reservePreviousStackSlotForValue(const Value *IncomingValue, return; const int LookUpDepth = 6; - Optional<int> Index = + std::optional<int> Index = findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth); if (!Index) return; diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp index 7986903..92db89c 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp @@ -17,7 +17,7 @@ using namespace llvm; -Optional<RegOrConstant> +std::optional<RegOrConstant> AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) { if (auto Splat = getVectorSplat(MI, MRI)) @@ -31,7 +31,7 @@ AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI, return RegOrConstant(Src); } -Optional<int64_t> +std::optional<int64_t> AArch64GISelUtils::getAArch64VectorSplatScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI) { auto Splat = getAArch64VectorSplat(MI, MRI); diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h index 334147a..791db7e 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h +++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h @@ -31,13 +31,14 @@ constexpr bool isLegalArithImmed(const uint64_t C) { /// \returns A value when \p MI is a vector splat of a Register or constant. /// Checks for generic opcodes and AArch64-specific generic opcodes. -Optional<RegOrConstant> getAArch64VectorSplat(const MachineInstr &MI, - const MachineRegisterInfo &MRI); +std::optional<RegOrConstant> +getAArch64VectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI); /// \returns A value when \p MI is a constant vector splat. /// Checks for generic opcodes and AArch64-specific generic opcodes. -Optional<int64_t> getAArch64VectorSplatScalar(const MachineInstr &MI, - const MachineRegisterInfo &MRI); +std::optional<int64_t> +getAArch64VectorSplatScalar(const MachineInstr &MI, + const MachineRegisterInfo &MRI); /// \returns true if \p MaybeSub and \p Pred are part of a CMN tree for an /// integer compare. diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp index a53efa6..98acd4c 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp @@ -150,7 +150,7 @@ private: /// The lane inserted into is defined by \p LaneIdx. The vector source /// register is given by \p SrcReg. The register containing the element is /// given by \p EltReg. - MachineInstr *emitLaneInsert(Optional<Register> DstReg, Register SrcReg, + MachineInstr *emitLaneInsert(std::optional<Register> DstReg, Register SrcReg, Register EltReg, unsigned LaneIdx, const RegisterBank &RB, MachineIRBuilder &MIRBuilder) const; @@ -205,7 +205,7 @@ private: MachineIRBuilder &MIRBuilder) const; // Emit a vector concat operation. 
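emitLaneInsert above (and emitVectorConcat and emitFPCompare just below) take their destination as std::optional<Register>, allocating a fresh register when the caller does not care, and emitFPCompare defaults its predicate argument to std::nullopt. A hedged sketch of that calling convention, with int standing in for Register and invented helper names:

// --- illustrative sketch, not part of this commit ---
#include <optional>

int createVirtualReg() { return 42; } // stand-in for MRI.createVirtualRegister

// If no destination is supplied, the helper allocates one itself.
int emitCopy(int SrcReg, std::optional<int> DstReg = std::nullopt) {
  int Dst = DstReg ? *DstReg : createVirtualReg();
  // ... build the instruction writing Dst from SrcReg ...
  return Dst;
}

void example() {
  int A = emitCopy(7);     // helper picks the destination
  int B = emitCopy(7, 13); // caller pins the destination
  (void)A; (void)B;
}
// --- end sketch ---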
- MachineInstr *emitVectorConcat(Optional<Register> Dst, Register Op1, + MachineInstr *emitVectorConcat(std::optional<Register> Dst, Register Op1, Register Op2, MachineIRBuilder &MIRBuilder) const; @@ -218,7 +218,7 @@ private: /// \p Pred if given is the intended predicate to use. MachineInstr * emitFPCompare(Register LHS, Register RHS, MachineIRBuilder &MIRBuilder, - Optional<CmpInst::Predicate> = std::nullopt) const; + std::optional<CmpInst::Predicate> = std::nullopt) const; MachineInstr * emitInstr(unsigned Opcode, std::initializer_list<llvm::DstOp> DstOps, @@ -276,7 +276,7 @@ private: MachineInstr *emitSelect(Register Dst, Register LHS, Register RHS, AArch64CC::CondCode CC, MachineIRBuilder &MIRBuilder) const; - MachineInstr *emitExtractVectorElt(Optional<Register> DstReg, + MachineInstr *emitExtractVectorElt(std::optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy, Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const; @@ -674,7 +674,7 @@ static Register createQTuple(ArrayRef<Register> Regs, MachineIRBuilder &MIB) { return createTuple(Regs, RegClassIDs, SubRegs, MIB); } -static Optional<uint64_t> getImmedFromMO(const MachineOperand &Root) { +static std::optional<uint64_t> getImmedFromMO(const MachineOperand &Root) { auto &MI = *Root.getParent(); auto &MBB = *MI.getParent(); auto &MF = *MBB.getParent(); @@ -1782,8 +1782,8 @@ bool AArch64InstructionSelector::selectCompareBranch( /// Returns the element immediate value of a vector shift operand if found. /// This needs to detect a splat-like operation, e.g. a G_BUILD_VECTOR. -static Optional<int64_t> getVectorShiftImm(Register Reg, - MachineRegisterInfo &MRI) { +static std::optional<int64_t> getVectorShiftImm(Register Reg, + MachineRegisterInfo &MRI) { assert(MRI.getType(Reg).isVector() && "Expected a *vector* shift operand"); MachineInstr *OpMI = MRI.getVRegDef(Reg); return getAArch64VectorSplatScalar(*OpMI, MRI); @@ -1791,8 +1791,9 @@ static Optional<int64_t> getVectorShiftImm(Register Reg, /// Matches and returns the shift immediate value for a SHL instruction given /// a shift operand. -static Optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg, MachineRegisterInfo &MRI) { - Optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI); +static std::optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg, + MachineRegisterInfo &MRI) { + std::optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI); if (!ShiftImm) return std::nullopt; // Check the immediate is in range for a SHL. @@ -1836,7 +1837,7 @@ bool AArch64InstructionSelector::selectVectorSHL(MachineInstr &I, // Check if we have a vector of constants on RHS that we can select as the // immediate form. 
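getVectorSHLImm above layers a range check on top of another optional-returning query: fetch the splat immediate, then bail out with std::nullopt if it is absent or not encodable. A self-contained sketch of that chain (the lookup is a stub, not the real MachineRegisterInfo query):

// --- illustrative sketch, not part of this commit ---
#include <cstdint>
#include <optional>

// Stub standing in for getVectorShiftImm / getAArch64VectorSplatScalar.
std::optional<int64_t> getShiftImm(int Reg) {
  return Reg == 1 ? std::optional<int64_t>(3) : std::nullopt;
}

// Propagate "no value" and additionally reject out-of-range immediates.
std::optional<int64_t> getLegalSHLImm(int Reg, unsigned EltSizeInBits) {
  std::optional<int64_t> ShiftImm = getShiftImm(Reg);
  if (!ShiftImm)
    return std::nullopt;
  if (*ShiftImm < 0 || *ShiftImm >= int64_t(EltSizeInBits))
    return std::nullopt; // not encodable as an immediate SHL
  return ShiftImm;
}
// --- end sketch ---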
- Optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI); + std::optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI); unsigned Opc = 0; if (Ty == LLT::fixed_vector(2, 64)) { @@ -3102,7 +3103,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) { case TargetOpcode::G_PTRMASK: { Register MaskReg = I.getOperand(2).getReg(); - Optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI); + std::optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI); // TODO: Implement arbitrary cases if (!MaskVal || !isShiftedMask_64(*MaskVal)) return false; @@ -4112,7 +4113,7 @@ static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg, } MachineInstr *AArch64InstructionSelector::emitExtractVectorElt( - Optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy, + std::optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy, Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const { MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); unsigned CopyOpc = 0; @@ -4645,10 +4646,9 @@ MachineInstr *AArch64InstructionSelector::emitCSetForFCmp( return &*OrMI; } -MachineInstr * -AArch64InstructionSelector::emitFPCompare(Register LHS, Register RHS, - MachineIRBuilder &MIRBuilder, - Optional<CmpInst::Predicate> Pred) const { +MachineInstr *AArch64InstructionSelector::emitFPCompare( + Register LHS, Register RHS, MachineIRBuilder &MIRBuilder, + std::optional<CmpInst::Predicate> Pred) const { MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); LLT Ty = MRI.getType(LHS); if (Ty.isVector()) @@ -4689,7 +4689,7 @@ AArch64InstructionSelector::emitFPCompare(Register LHS, Register RHS, } MachineInstr *AArch64InstructionSelector::emitVectorConcat( - Optional<Register> Dst, Register Op1, Register Op2, + std::optional<Register> Dst, Register Op1, Register Op2, MachineIRBuilder &MIRBuilder) const { // We implement a vector concat by: // 1. Use scalar_to_vector to insert the lower vector into the larger dest @@ -4865,7 +4865,7 @@ MachineInstr *AArch64InstructionSelector::emitConditionalComparison( LLT OpTy = MRI.getType(LHS); assert(OpTy.getSizeInBits() == 32 || OpTy.getSizeInBits() == 64); unsigned CCmpOpc; - Optional<ValueAndVReg> C; + std::optional<ValueAndVReg> C; if (CmpInst::isIntPredicate(CC)) { C = getIConstantVRegValWithLookThrough(RHS, MRI); if (C && C->Value.ult(32)) @@ -5259,7 +5259,7 @@ bool AArch64InstructionSelector::selectShuffleVector( } MachineInstr *AArch64InstructionSelector::emitLaneInsert( - Optional<Register> DstReg, Register SrcReg, Register EltReg, + std::optional<Register> DstReg, Register SrcReg, Register EltReg, unsigned LaneIdx, const RegisterBank &RB, MachineIRBuilder &MIRBuilder) const { MachineInstr *InsElt = nullptr; @@ -6669,7 +6669,7 @@ AArch64_AM::ShiftExtendType AArch64InstructionSelector::getExtendTypeForInst( if (Opc != TargetOpcode::G_AND) return AArch64_AM::InvalidShiftExtend; - Optional<uint64_t> MaybeAndMask = getImmedFromMO(MI.getOperand(2)); + std::optional<uint64_t> MaybeAndMask = getImmedFromMO(MI.getOperand(2)); if (!MaybeAndMask) return AArch64_AM::InvalidShiftExtend; uint64_t AndMask = *MaybeAndMask; @@ -6724,7 +6724,7 @@ AArch64InstructionSelector::selectArithExtendedRegister( if (RootDef->getOpcode() == TargetOpcode::G_SHL) { // Look for a constant on the RHS of the shift. 
MachineOperand &RHS = RootDef->getOperand(2); - Optional<uint64_t> MaybeShiftVal = getImmedFromMO(RHS); + std::optional<uint64_t> MaybeShiftVal = getImmedFromMO(RHS); if (!MaybeShiftVal) return std::nullopt; ShiftVal = *MaybeShiftVal; @@ -6774,7 +6774,7 @@ void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB, const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && "Expected G_CONSTANT"); - Optional<int64_t> CstVal = + std::optional<int64_t> CstVal = getIConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI); assert(CstVal && "Expected constant value"); MIB.addImm(*CstVal); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp index 73bd446..63b7645 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -360,13 +360,9 @@ bool AMDGPUAsmPrinter::doFinalization(Module &M) { // Print comments that apply to both callable functions and entry points. void AMDGPUAsmPrinter::emitCommonFunctionComments( - uint32_t NumVGPR, - Optional<uint32_t> NumAGPR, - uint32_t TotalNumVGPR, - uint32_t NumSGPR, - uint64_t ScratchSize, - uint64_t CodeSize, - const AMDGPUMachineFunction *MFI) { + uint32_t NumVGPR, std::optional<uint32_t> NumAGPR, uint32_t TotalNumVGPR, + uint32_t NumSGPR, uint64_t ScratchSize, uint64_t CodeSize, + const AMDGPUMachineFunction *MFI) { OutStreamer->emitRawComment(" codeLenInByte = " + Twine(CodeSize), false); OutStreamer->emitRawComment(" NumSgprs: " + Twine(NumSGPR), false); OutStreamer->emitRawComment(" NumVgprs: " + Twine(NumVGPR), false); @@ -523,24 +519,21 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { const AMDGPUResourceUsageAnalysis::SIFunctionResourceInfo &Info = ResourceUsage->getResourceInfo(&MF.getFunction()); emitCommonFunctionComments( - Info.NumVGPR, - STM.hasMAIInsts() ? Info.NumAGPR : Optional<uint32_t>(), - Info.getTotalNumVGPRs(STM), - Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()), - Info.PrivateSegmentSize, - getFunctionCodeSize(MF), MFI); + Info.NumVGPR, + STM.hasMAIInsts() ? Info.NumAGPR : std::optional<uint32_t>(), + Info.getTotalNumVGPRs(STM), + Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()), + Info.PrivateSegmentSize, getFunctionCodeSize(MF), MFI); return false; } OutStreamer->emitRawComment(" Kernel info:", false); - emitCommonFunctionComments(CurrentProgramInfo.NumArchVGPR, - STM.hasMAIInsts() - ? CurrentProgramInfo.NumAccVGPR - : Optional<uint32_t>(), - CurrentProgramInfo.NumVGPR, - CurrentProgramInfo.NumSGPR, - CurrentProgramInfo.ScratchSize, - getFunctionCodeSize(MF), MFI); + emitCommonFunctionComments( + CurrentProgramInfo.NumArchVGPR, + STM.hasMAIInsts() ? 
CurrentProgramInfo.NumAccVGPR + : std::optional<uint32_t>(), + CurrentProgramInfo.NumVGPR, CurrentProgramInfo.NumSGPR, + CurrentProgramInfo.ScratchSize, getFunctionCodeSize(MF), MFI); OutStreamer->emitRawComment( " FloatMode: " + Twine(CurrentProgramInfo.FloatMode), false); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h index 2881b8d..ea12086 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h @@ -63,12 +63,10 @@ private: const SIProgramInfo &KernelInfo); void emitPALFunctionMetadata(const MachineFunction &MF); void emitCommonFunctionComments(uint32_t NumVGPR, - Optional<uint32_t> NumAGPR, - uint32_t TotalNumVGPR, - uint32_t NumSGPR, - uint64_t ScratchSize, - uint64_t CodeSize, - const AMDGPUMachineFunction* MFI); + std::optional<uint32_t> NumAGPR, + uint32_t TotalNumVGPR, uint32_t NumSGPR, + uint64_t ScratchSize, uint64_t CodeSize, + const AMDGPUMachineFunction *MFI); void emitResourceUsageRemarks(const MachineFunction &MF, const SIProgramInfo &CurrentProgramInfo, bool isModuleEntryFunction, bool hasMAIInsts); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp index a32fd52..7a626fa 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp @@ -813,7 +813,7 @@ bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder, } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) { LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder); } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) { - Optional<uint32_t> Id = + std::optional<uint32_t> Id = AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction()); if (Id.has_value()) { MIRBuilder.buildConstant(InputReg, Id.value()); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp index c16d8ee..069baf7 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp @@ -150,7 +150,7 @@ static bool isInv2Pi(const APFloat &APF) { // additional cost to negate them. 
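The emitCommonFunctionComments call sites a few hunks up build one argument as `STM.hasMAIInsts() ? Info.NumAGPR : std::optional<uint32_t>()`: both arms of the conditional must already agree on the optional type, because a raw uint32_t and std::nullopt have no common type. A small sketch of that pattern (the printing is only hinted at in a comment):

// --- illustrative sketch, not part of this commit ---
#include <cstdint>
#include <optional>

void emitAGPRComment(bool HasAGPRs, uint32_t NumAGPR) {
  // The empty optional on the false arm lets the uint32_t arm convert;
  // writing `HasAGPRs ? NumAGPR : std::nullopt` would not compile.
  std::optional<uint32_t> MaybeAGPR =
      HasAGPRs ? NumAGPR : std::optional<uint32_t>();
  if (MaybeAGPR) {
    // e.g. OutStreamer->emitRawComment(" NumAgprs: " + Twine(*MaybeAGPR));
  }
}
// --- end sketch ---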
static bool isConstantCostlierToNegate(MachineInstr &MI, Register Reg, MachineRegisterInfo &MRI) { - Optional<FPValueAndVReg> FPValReg; + std::optional<FPValueAndVReg> FPValReg; if (mi_match(Reg, MRI, m_GFCstOrSplat(FPValReg))) { if (FPValReg->Value.isZero() && !FPValReg->Value.isNegative()) return true; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp index 1596248..7a3446a 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp @@ -500,16 +500,16 @@ void MetadataStreamerMsgPackV3::verify(StringRef HSAMetadataString) const { } } -Optional<StringRef> +std::optional<StringRef> MetadataStreamerMsgPackV3::getAccessQualifier(StringRef AccQual) const { - return StringSwitch<Optional<StringRef>>(AccQual) + return StringSwitch<std::optional<StringRef>>(AccQual) .Case("read_only", StringRef("read_only")) .Case("write_only", StringRef("write_only")) .Case("read_write", StringRef("read_write")) .Default(std::nullopt); } -Optional<StringRef> MetadataStreamerMsgPackV3::getAddressSpaceQualifier( +std::optional<StringRef> MetadataStreamerMsgPackV3::getAddressSpaceQualifier( unsigned AddressSpace) const { switch (AddressSpace) { case AMDGPUAS::PRIVATE_ADDRESS: diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h index f9344a5..597bcad 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h @@ -69,9 +69,10 @@ protected: void verify(StringRef HSAMetadataString) const; - Optional<StringRef> getAccessQualifier(StringRef AccQual) const; + std::optional<StringRef> getAccessQualifier(StringRef AccQual) const; - Optional<StringRef> getAddressSpaceQualifier(unsigned AddressSpace) const; + std::optional<StringRef> + getAddressSpaceQualifier(unsigned AddressSpace) const; StringRef getValueKind(Type *Ty, StringRef TypeQual, StringRef BaseTypeName) const; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp index 50b94af..d29936c 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp @@ -93,7 +93,7 @@ private: SchedGroupMask SGMask; // Maximum number of SUnits that can be added to this group. - Optional<unsigned> MaxSize; + std::optional<unsigned> MaxSize; // SchedGroups will only synchronize with other SchedGroups that have the same // SyncID. @@ -175,13 +175,13 @@ public: SchedGroupMask getMask() { return SGMask; } - SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize, + SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize, ScheduleDAGInstrs *DAG, const SIInstrInfo *TII) : SGMask(SGMask), MaxSize(MaxSize), DAG(DAG), TII(TII) { SGID = NumSchedGroups++; } - SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize, int SyncID, + SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize, int SyncID, ScheduleDAGInstrs *DAG, const SIInstrInfo *TII) : SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), DAG(DAG), TII(TII) { SGID = NumSchedGroups++; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp index 7662d3a..d8a6bd8 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -1904,7 +1904,7 @@ bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode, // GFX9 and GFX10 have signed byte immediate offsets. 
The immediate // offset for S_BUFFER instructions is unsigned. int64_t ByteOffset = IsBuffer ? C->getZExtValue() : C->getSExtValue(); - Optional<int64_t> EncodedOffset = + std::optional<int64_t> EncodedOffset = AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, IsBuffer); if (EncodedOffset && Offset && !Imm32Only) { *Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp index 9d90cd3..32f4b79 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -144,7 +144,7 @@ bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const { const TargetRegisterClass *SrcRC = TRI.getConstrainedRegClassForOperand(Src, *MRI); - Optional<ValueAndVReg> ConstVal = + std::optional<ValueAndVReg> ConstVal = getIConstantVRegValWithLookThrough(SrcReg, *MRI, true); if (ConstVal) { unsigned MovOpc = @@ -975,7 +975,7 @@ bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const { auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst); - Optional<ValueAndVReg> ConstSelect = + std::optional<ValueAndVReg> ConstSelect = getIConstantVRegValWithLookThrough(LaneSelect, *MRI); if (ConstSelect) { // The selector has to be an inline immediate, so we can use whatever for @@ -984,7 +984,7 @@ bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const { MIB.addImm(ConstSelect->Value.getSExtValue() & maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2())); } else { - Optional<ValueAndVReg> ConstVal = + std::optional<ValueAndVReg> ConstVal = getIConstantVRegValWithLookThrough(Val, *MRI); // If the value written is an inline immediate, we can get away without a @@ -1383,7 +1383,7 @@ bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const { if (Size != STI.getWavefrontSize()) return false; - Optional<ValueAndVReg> Arg = + std::optional<ValueAndVReg> Arg = getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI); if (Arg) { @@ -3082,7 +3082,7 @@ bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const { } Register VOffset = MI.getOperand(4 + OpOffset).getReg(); - Optional<ValueAndVReg> MaybeVOffset = + std::optional<ValueAndVReg> MaybeVOffset = getIConstantVRegValWithLookThrough(VOffset, *MRI); const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue(); @@ -3784,7 +3784,7 @@ bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root, return false; const GEPInfo &GEPI = AddrInfo[0]; - Optional<int64_t> EncodedImm = + std::optional<int64_t> EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false); if (SOffset && Offset) { @@ -3856,7 +3856,7 @@ AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const { const GEPInfo &GEPInfo = AddrInfo[0]; Register PtrReg = GEPInfo.SgprParts[0]; - Optional<int64_t> EncodedImm = + std::optional<int64_t> EncodedImm = AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm); if (!EncodedImm) return std::nullopt; @@ -4293,7 +4293,8 @@ bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI, unsigned ShAmtBits) const { assert(MI.getOpcode() == TargetOpcode::G_AND); - Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI); + std::optional<APInt> RHS = + getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI); if (!RHS) return false; @@ -4477,7 +4478,7 @@ AMDGPUInstructionSelector::getPtrBaseWithConstantOffset( return {Root, 0}; 
MachineOperand &RHS = RootI->getOperand(2); - Optional<ValueAndVReg> MaybeOffset = + std::optional<ValueAndVReg> MaybeOffset = getIConstantVRegValWithLookThrough(RHS.getReg(), MRI); if (!MaybeOffset) return {Root, 0}; @@ -4804,10 +4805,10 @@ AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const { } /// Get an immediate that must be 32-bits, and treated as zero extended. -static Optional<uint64_t> getConstantZext32Val(Register Reg, - const MachineRegisterInfo &MRI) { +static std::optional<uint64_t> +getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) { // getIConstantVRegVal sexts any values, so see if that matters. - Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI); + std::optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI); if (!OffsetVal || !isInt<32>(*OffsetVal)) return std::nullopt; return Lo_32(*OffsetVal); @@ -4815,11 +4816,11 @@ static Optional<uint64_t> getConstantZext32Val(Register Reg, InstructionSelector::ComplexRendererFns AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const { - Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); + std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); if (!OffsetVal) return {}; - Optional<int64_t> EncodedImm = + std::optional<int64_t> EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true); if (!EncodedImm) return {}; @@ -4831,12 +4832,12 @@ InstructionSelector::ComplexRendererFns AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const { assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS); - Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); + std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); if (!OffsetVal) return {}; - Optional<int64_t> EncodedImm - = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal); + std::optional<int64_t> EncodedImm = + AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal); if (!EncodedImm) return {}; @@ -4854,7 +4855,7 @@ AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const { if (!SOffset) return std::nullopt; - Optional<int64_t> EncodedOffset = + std::optional<int64_t> EncodedOffset = AMDGPU::getSMRDEncodedOffset(STI, Offset, /* IsBuffer */ true); if (!EncodedOffset) return std::nullopt; diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp index 52b601d..bac3336 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -2340,7 +2340,7 @@ bool AMDGPULegalizerInfo::legalizeExtractVectorElt( // FIXME: Artifact combiner probably should have replaced the truncated // constant before this, so we shouldn't need // getIConstantVRegValWithLookThrough. - Optional<ValueAndVReg> MaybeIdxVal = + std::optional<ValueAndVReg> MaybeIdxVal = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); if (!MaybeIdxVal) // Dynamic case will be selected to register indexing. return true; @@ -2374,7 +2374,7 @@ bool AMDGPULegalizerInfo::legalizeInsertVectorElt( // FIXME: Artifact combiner probably should have replaced the truncated // constant before this, so we shouldn't need // getIConstantVRegValWithLookThrough. - Optional<ValueAndVReg> MaybeIdxVal = + std::optional<ValueAndVReg> MaybeIdxVal = getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI); if (!MaybeIdxVal) // Dynamic case will be selected to register indexing. 
return true; @@ -4188,7 +4188,7 @@ bool AMDGPULegalizerInfo::getLDSKernelId(Register DstReg, MachineRegisterInfo &MRI, MachineIRBuilder &B) const { Function &F = B.getMF().getFunction(); - Optional<uint32_t> KnownSize = + std::optional<uint32_t> KnownSize = AMDGPUMachineFunction::getLDSKernelIdMetadata(F); if (KnownSize.has_value()) B.buildConstant(DstReg, KnownSize.value()); @@ -4282,11 +4282,11 @@ void AMDGPULegalizerInfo::updateBufferMMO(MachineMemOperand *MMO, Register VOffset, Register SOffset, unsigned ImmOffset, Register VIndex, MachineRegisterInfo &MRI) const { - Optional<ValueAndVReg> MaybeVOffsetVal = + std::optional<ValueAndVReg> MaybeVOffsetVal = getIConstantVRegValWithLookThrough(VOffset, MRI); - Optional<ValueAndVReg> MaybeSOffsetVal = + std::optional<ValueAndVReg> MaybeSOffsetVal = getIConstantVRegValWithLookThrough(SOffset, MRI); - Optional<ValueAndVReg> MaybeVIndexVal = + std::optional<ValueAndVReg> MaybeVIndexVal = getIConstantVRegValWithLookThrough(VIndex, MRI); // If the combined VOffset + SOffset + ImmOffset + strided VIndex is constant, // update the MMO with that offset. The stride is unknown so we can only do @@ -5288,7 +5288,7 @@ bool AMDGPULegalizerInfo::legalizeTrapIntrinsic(MachineInstr &MI, ST.getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) return legalizeTrapEndpgm(MI, MRI, B); - if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(&ST)) { + if (std::optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(&ST)) { switch (*HsaAbiVer) { case ELF::ELFABIVERSION_AMDGPU_HSA_V2: case ELF::ELFABIVERSION_AMDGPU_HSA_V3: diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp index d8133a9..3414225 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp @@ -196,7 +196,7 @@ void AMDGPUMachineFunction::allocateKnownAddressLDSGlobal(const Function &F) { } } -Optional<uint32_t> +std::optional<uint32_t> AMDGPUMachineFunction::getLDSKernelIdMetadata(const Function &F) { auto MD = F.getMetadata("llvm.amdgcn.lds.kernel.id"); if (MD && MD->getNumOperands() == 1) { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h index 4d97e5a..8c5d7b0 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h @@ -116,7 +116,7 @@ public: static bool isKnownAddressLDSGlobal(const GlobalVariable &GV); static unsigned calculateKnownAddressOfLDSGlobal(const GlobalVariable &GV); - static Optional<uint32_t> getLDSKernelIdMetadata(const Function &F); + static std::optional<uint32_t> getLDSKernelIdMetadata(const Function &F); Align getDynLDSAlign() const { return DynLDSAlign; } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h index 91ca7cd..9fa053f7 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h @@ -48,7 +48,8 @@ private: /// /// \returns \p SSID's inclusion ordering, or "std::nullopt" if \p SSID is not /// supported by the AMDGPU target. 
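getSyncScopeInclusionOrdering and isSyncScopeInclusion just below show another recurring shape: each sub-query may fail, and the combined query only yields an answer when both succeed. A self-contained sketch with plain integers as scope IDs (the IDs and orderings here are made up, not the real SyncScope values):

// --- illustrative sketch, not part of this commit ---
#include <cstdint>
#include <optional>

// Map a scope ID to its inclusion ordering; unknown scopes yield nullopt.
std::optional<uint8_t> getInclusionOrdering(int SSID) {
  switch (SSID) {
  case 0: return 0; // single thread
  case 1: return 1; // wavefront
  case 2: return 2; // workgroup
  case 3: return 3; // agent
  case 4: return 4; // system
  default: return std::nullopt;
  }
}

// True if A includes B, false if it is smaller, nullopt if either scope
// is unsupported.
std::optional<bool> isInclusion(int A, int B) {
  const auto &AIO = getInclusionOrdering(A);
  const auto &BIO = getInclusionOrdering(B);
  if (!AIO || !BIO)
    return std::nullopt;
  return *BIO <= *AIO;
}
// --- end sketch ---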
- Optional<uint8_t> getSyncScopeInclusionOrdering(SyncScope::ID SSID) const { + std::optional<uint8_t> + getSyncScopeInclusionOrdering(SyncScope::ID SSID) const { if (SSID == SyncScope::SingleThread || SSID == getSingleThreadOneAddressSpaceSSID()) return 0; @@ -122,7 +123,8 @@ public: /// synchronization scope \p B, false if synchronization scope \p A is smaller /// than synchronization scope \p B, or "std::nullopt" if either /// synchronization scope \p A or \p B is not supported by the AMDGPU target. - Optional<bool> isSyncScopeInclusion(SyncScope::ID A, SyncScope::ID B) const { + std::optional<bool> isSyncScopeInclusion(SyncScope::ID A, + SyncScope::ID B) const { const auto &AIO = getSyncScopeInclusionOrdering(A); const auto &BIO = getSyncScopeInclusionOrdering(B); if (!AIO || !BIO) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp index b0099ff..6e823fb8 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp @@ -91,7 +91,7 @@ class AMDGPUPropagateAttributes { } FeatureBitset Features; - Optional<Attribute> Attributes[NumAttr]; + std::optional<Attribute> Attributes[NumAttr]; }; class Clone { @@ -127,7 +127,8 @@ class AMDGPUPropagateAttributes { void setFeatures(Function &F, const FeatureBitset &NewFeatures); // Set new function's attributes in place. - void setAttributes(Function &F, const ArrayRef<Optional<Attribute>> NewAttrs); + void setAttributes(Function &F, + const ArrayRef<std::optional<Attribute>> NewAttrs); std::string getFeatureString(const FeatureBitset &Features) const; @@ -343,8 +344,8 @@ void AMDGPUPropagateAttributes::setFeatures(Function &F, F.addFnAttr("target-features", NewFeatureStr); } -void AMDGPUPropagateAttributes::setAttributes(Function &F, - const ArrayRef<Optional<Attribute>> NewAttrs) { +void AMDGPUPropagateAttributes::setAttributes( + Function &F, const ArrayRef<std::optional<Attribute>> NewAttrs) { LLVM_DEBUG(dbgs() << "Set attributes on " << F.getName() << ":\n"); for (unsigned I = 0; I < NumAttr; ++I) { F.removeFnAttr(AttributeNames[I]); diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp index b3671ee..b431595 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp @@ -161,7 +161,7 @@ bool AMDGPURegBankCombinerHelper::matchIntMinMaxToMed3( MinMaxMedOpc OpcodeTriple = getMinMaxPair(MI.getOpcode()); Register Val; - Optional<ValueAndVReg> K0, K1; + std::optional<ValueAndVReg> K0, K1; // Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1. if (!matchMed<GCstAndRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1)) return false; @@ -206,7 +206,7 @@ bool AMDGPURegBankCombinerHelper::matchFPMinMaxToMed3( auto OpcodeTriple = getMinMaxPair(MI.getOpcode()); Register Val; - Optional<FPValueAndVReg> K0, K1; + std::optional<FPValueAndVReg> K0, K1; // Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1. if (!matchMed<GFCstAndRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1)) return false; @@ -238,7 +238,7 @@ bool AMDGPURegBankCombinerHelper::matchFPMinMaxToClamp(MachineInstr &MI, // Clamp is available on all types after regbankselect (f16, f32, f64, v2f16). auto OpcodeTriple = getMinMaxPair(MI.getOpcode()); Register Val; - Optional<FPValueAndVReg> K0, K1; + std::optional<FPValueAndVReg> K0, K1; // Match min(max(Val, K0), K1) or max(min(Val, K1), K0). 
if (!matchMed<GFCstOrSplatGFCstMatch>(MI, MRI, OpcodeTriple, Val, K0, K1)) return false; diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp index fdeff4e..4d62244 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -1252,7 +1252,8 @@ static unsigned setBufferOffsets(MachineIRBuilder &B, const LLT S32 = LLT::scalar(32); MachineRegisterInfo *MRI = B.getMRI(); - if (Optional<int64_t> Imm = getIConstantVRegSExtVal(CombinedOffset, *MRI)) { + if (std::optional<int64_t> Imm = + getIConstantVRegSExtVal(CombinedOffset, *MRI)) { uint32_t SOffset, ImmOffset; if (AMDGPU::splitMUBUFOffset(*Imm, SOffset, ImmOffset, &RBI.Subtarget, Alignment)) { diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp index 9aa709f..e6d9d5d 100644 --- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -1283,10 +1283,10 @@ private: /// \param SGPRBlocks [out] Result SGPR block count. bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed, - Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR, - SMRange VGPRRange, unsigned NextFreeSGPR, - SMRange SGPRRange, unsigned &VGPRBlocks, - unsigned &SGPRBlocks); + std::optional<bool> EnableWavefrontSize32, + unsigned NextFreeVGPR, SMRange VGPRRange, + unsigned NextFreeSGPR, SMRange SGPRRange, + unsigned &VGPRBlocks, unsigned &SGPRBlocks); bool ParseDirectiveAMDGCNTarget(); bool ParseDirectiveAMDHSAKernel(); bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor); @@ -1334,7 +1334,7 @@ private: bool isRegister(); bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const; - Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind); + std::optional<StringRef> getGprCountSymbolName(RegisterKind RegKind); void initializeGprCountSymbol(RegisterKind RegKind); bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth); @@ -1667,7 +1667,7 @@ private: const SMLoc &IDLoc); bool validateExeczVcczOperands(const OperandVector &Operands); bool validateTFE(const MCInst &Inst, const OperandVector &Operands); - Optional<StringRef> validateLdsDirect(const MCInst &Inst); + std::optional<StringRef> validateLdsDirect(const MCInst &Inst); unsigned getConstantBusLimit(unsigned Opcode) const; bool usesConstantBus(const MCInst &Inst, unsigned OpIdx); bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const; @@ -2853,7 +2853,7 @@ bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg, return false; } -Optional<StringRef> +std::optional<StringRef> AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) { switch (RegKind) { case IS_VGPR: @@ -4056,7 +4056,8 @@ static bool IsRevOpcode(const unsigned Opcode) } } -Optional<StringRef> AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) { +std::optional<StringRef> +AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) { using namespace SIInstrFlags; const unsigned Opcode = Inst.getOpcode(); @@ -4914,9 +4915,9 @@ bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) { bool AMDGPUAsmParser::calculateGPRBlocks( const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed, - bool XNACKUsed, Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR, - SMRange VGPRRange, unsigned NextFreeSGPR, SMRange SGPRRange, - unsigned &VGPRBlocks, unsigned &SGPRBlocks) 
{ + bool XNACKUsed, std::optional<bool> EnableWavefrontSize32, + unsigned NextFreeVGPR, SMRange VGPRRange, unsigned NextFreeSGPR, + SMRange SGPRRange, unsigned &VGPRBlocks, unsigned &SGPRBlocks) { // TODO(scott.linder): These calculations are duplicated from // AMDGPUAsmPrinter::getSIProgramInfo and could be unified. IsaVersion Version = getIsaVersion(getSTI().getCPU()); @@ -4984,7 +4985,7 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() { std::optional<unsigned> ExplicitUserSGPRCount; bool ReserveVCC = true; bool ReserveFlatScr = true; - Optional<bool> EnableWavefrontSize32; + std::optional<bool> EnableWavefrontSize32; while (true) { while (trySkipToken(AsmToken::EndOfStatement)); diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp index 64c3ea6..7a4af1a 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp @@ -407,7 +407,7 @@ void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor( if (IVersion.Major >= 7 && !ReserveFlatScr && !hasArchitectedFlatScratch(STI)) OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n'; - if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) { + if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) { switch (*HsaAbiVer) { default: break; @@ -594,7 +594,7 @@ unsigned AMDGPUTargetELFStreamer::getEFlagsUnknownOS() { unsigned AMDGPUTargetELFStreamer::getEFlagsAMDHSA() { assert(STI.getTargetTriple().getOS() == Triple::AMDHSA); - if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) { + if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) { switch (*HsaAbiVer) { case ELF::ELFABIVERSION_AMDGPU_HSA_V2: case ELF::ELFABIVERSION_AMDGPU_HSA_V3: diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h index 030af8a..5051179 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h @@ -36,7 +36,7 @@ class AMDGPUTargetStreamer : public MCTargetStreamer { protected: // TODO: Move HSAMetadataStream to AMDGPUTargetStreamer. 
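calculateGPRBlocks, whose signature is updated a few hunks above, takes EnableWavefrontSize32 as std::optional<bool>: the assembler directive may be present and true, present and false, or simply never given, in which case a subtarget default applies. A sketch of that tri-state parameter (the fallback rule and granule values below are invented for illustration):

// --- illustrative sketch, not part of this commit ---
#include <optional>

// Unspecified (nullopt) falls back to the subtarget default.
unsigned vgprAllocGranule(bool SubtargetDefaultWave32,
                          std::optional<bool> EnableWavefrontSize32) {
  bool Wave32 = EnableWavefrontSize32.value_or(SubtargetDefaultWave32);
  return Wave32 ? 8 : 4; // granule values are illustrative only
}

void parseDirectives() {
  std::optional<bool> EnableWavefrontSize32; // nothing parsed yet
  // ... if a wavefront-size directive appears, the parser sets it:
  EnableWavefrontSize32 = true;
  (void)vgprAllocGranule(/*SubtargetDefaultWave32=*/false,
                         EnableWavefrontSize32);
}
// --- end sketch ---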
- Optional<AMDGPU::IsaInfo::AMDGPUTargetID> TargetID; + std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> TargetID; MCContext &getContext() const { return Streamer.getContext(); } @@ -98,10 +98,10 @@ public: static StringRef getArchNameFromElfMach(unsigned ElfMach); static unsigned getElfMach(StringRef GPU); - const Optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() const { + const std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() const { return TargetID; } - Optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() { + std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() { return TargetID; } void initializeTargetID(const MCSubtargetInfo &STI) { diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp index c785cfdaa..4bac270 100644 --- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp @@ -59,7 +59,7 @@ static MCRegister findScratchNonCalleeSaveRegister(MachineRegisterInfo &MRI, static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF, LivePhysRegs &LiveRegs, Register &TempSGPR, - Optional<int> &FrameIndex, + std::optional<int> &FrameIndex, bool IsFP) { SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); MachineFrameInfo &FrameInfo = MF.getFrameInfo(); @@ -773,8 +773,8 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF, // turn on all lanes before doing the spill to memory. Register ScratchExecCopy; - Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex; - Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex; + std::optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex; + std::optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex; // VGPRs used for SGPR->VGPR spills for (const SIMachineFunctionInfo::SGPRSpillVGPR &Reg : @@ -990,8 +990,8 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF, const Register BasePtrReg = TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register(); - Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex; - Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex; + std::optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex; + std::optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex; if (RoundedSize != 0 && hasFP(MF)) { auto Add = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), StackPtrReg) diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index b3fb27d..ffbac7f 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -1702,7 +1702,7 @@ SDValue SITargetLowering::getLDSKernelId(SelectionDAG &DAG, const SDLoc &SL) const { Function &F = DAG.getMachineFunction().getFunction(); - Optional<uint32_t> KnownSize = + std::optional<uint32_t> KnownSize = AMDGPUMachineFunction::getLDSKernelIdMetadata(F); if (KnownSize.has_value()) return DAG.getConstant(KnownSize.value(), SL, MVT::i32); @@ -2856,7 +2856,8 @@ void SITargetLowering::passSpecialInputs( // input for kernels, and is computed from the kernarg segment pointer. 
InputReg = getImplicitArgPtr(DAG, DL); } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) { - Optional<uint32_t> Id = AMDGPUMachineFunction::getLDSKernelIdMetadata(F); + std::optional<uint32_t> Id = + AMDGPUMachineFunction::getLDSKernelIdMetadata(F); if (Id.has_value()) { InputReg = DAG.getConstant(Id.value(), DL, ArgVT); } else { @@ -5421,7 +5422,7 @@ SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) return lowerTrapEndpgm(Op, DAG); - if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) { + if (std::optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) { switch (*HsaAbiVer) { case ELF::ELFABIVERSION_AMDGPU_HSA_V2: case ELF::ELFABIVERSION_AMDGPU_HSA_V3: @@ -10240,7 +10241,7 @@ bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF, if (Opcode == AMDGPU::G_FCANONICALIZE) return true; - Optional<FPValueAndVReg> FCR; + std::optional<FPValueAndVReg> FCR; // Constant splat (can be padded with undef) or scalar constant. if (mi_match(Reg, MRI, MIPatternMatch::m_GFCstOrSplat(FCR))) { if (FCR->Value.isSignaling()) diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp index 214a16f..18c1875 100644 --- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -263,7 +263,7 @@ private: int32_t NewOffset) const; Register computeBase(MachineInstr &MI, const MemAddress &Addr) const; MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const; - Optional<int32_t> extractConstOffset(const MachineOperand &Op) const; + std::optional<int32_t> extractConstOffset(const MachineOperand &Op) const; void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr) const; /// Promotes constant offset to the immediate by adjusting the base. It /// tries to use a base from the nearby instructions that allows it to have @@ -1983,7 +1983,7 @@ void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI, TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset); } -Optional<int32_t> +std::optional<int32_t> SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const { if (Op.isImm()) return Op.getImm(); diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp index 26a6231..a7f253b 100644 --- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp @@ -340,7 +340,7 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF, return false; } - Optional<int> SpillFI; + std::optional<int> SpillFI; // We need to preserve inactive lanes, so always save, even caller-save // registers. if (!isEntryFunction()) { diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h index 70a6009..cbe78f4 100644 --- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h +++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h @@ -424,7 +424,7 @@ private: // Current recorded maximum possible occupancy. unsigned Occupancy; - mutable Optional<bool> UsesAGPRs; + mutable std::optional<bool> UsesAGPRs; MCPhysReg getNextUserSGPR() const; @@ -437,9 +437,9 @@ public: // If the VGPR is used for SGPR spills in a non-entrypoint function, the // stack slot used to save/restore it in the prolog/epilog. 
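The mutable UsesAGPRs member above (like CachedReward earlier in the commit) uses the optional as a lazily computed cache: an empty optional means "not computed yet", not "no answer". A minimal sketch of that caching idiom with the expensive computation stubbed out:

// --- illustrative sketch, not part of this commit ---
#include <optional>

class FunctionInfo {
  mutable std::optional<bool> UsesAGPRs; // unset until first query

  bool computeUsesAGPRs() const {
    // ... expensive scan over the function, stubbed out here ...
    return false;
  }

public:
  bool usesAGPRs() const {
    if (!UsesAGPRs) // compute once, on first use
      UsesAGPRs = computeUsesAGPRs();
    return *UsesAGPRs;
  }
};
// --- end sketch ---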
- Optional<int> FI; + std::optional<int> FI; - SGPRSpillVGPR(Register V, Optional<int> F) : VGPR(V), FI(F) {} + SGPRSpillVGPR(Register V, std::optional<int> F) : VGPR(V), FI(F) {} }; struct VGPRSpillToAGPR { @@ -483,7 +483,7 @@ private: // Emergency stack slot. Sometimes, we create this before finalizing the stack // frame, so save it here and add it to the RegScavenger later. - Optional<int> ScavengeFI; + std::optional<int> ScavengeFI; private: Register VGPRForAGPRCopy; @@ -501,12 +501,12 @@ public: // FIXME /// If this is set, an SGPR used for save/restore of the register used for the /// frame pointer. Register SGPRForFPSaveRestoreCopy; - Optional<int> FramePointerSaveIndex; + std::optional<int> FramePointerSaveIndex; /// If this is set, an SGPR used for save/restore of the register used for the /// base pointer. Register SGPRForBPSaveRestoreCopy; - Optional<int> BasePointerSaveIndex; + std::optional<int> BasePointerSaveIndex; bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg); @@ -573,7 +573,7 @@ public: bool ResetSGPRSpillStackIDs); int getScavengeFI(MachineFrameInfo &MFI, const SIRegisterInfo &TRI); - Optional<int> getOptionalScavengeFI() const { return ScavengeFI; } + std::optional<int> getOptionalScavengeFI() const { return ScavengeFI; } unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp index 2cbfcca..eb6a9f9 100644 --- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp +++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp @@ -221,7 +221,7 @@ private: /// the SI atomic scope it corresponds to, the address spaces it /// covers, and whether the memory ordering applies between address /// spaces. - Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>> + std::optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>> toSIAtomicScope(SyncScope::ID SSID, SIAtomicAddrSpace InstrAddrSpace) const; /// \return Return a bit set of the address spaces accessed by \p AS. @@ -229,8 +229,8 @@ private: /// \returns Info constructed from \p MI, which has at least machine memory /// operand. - Optional<SIMemOpInfo> constructFromMIWithMMO( - const MachineBasicBlock::iterator &MI) const; + std::optional<SIMemOpInfo> + constructFromMIWithMMO(const MachineBasicBlock::iterator &MI) const; public: /// Construct class to support accessing the machine memory operands @@ -238,23 +238,23 @@ public: SIMemOpAccess(MachineFunction &MF); /// \returns Load info if \p MI is a load operation, "std::nullopt" otherwise. - Optional<SIMemOpInfo> getLoadInfo( - const MachineBasicBlock::iterator &MI) const; + std::optional<SIMemOpInfo> + getLoadInfo(const MachineBasicBlock::iterator &MI) const; /// \returns Store info if \p MI is a store operation, "std::nullopt" /// otherwise. - Optional<SIMemOpInfo> getStoreInfo( - const MachineBasicBlock::iterator &MI) const; + std::optional<SIMemOpInfo> + getStoreInfo(const MachineBasicBlock::iterator &MI) const; /// \returns Atomic fence info if \p MI is an atomic fence operation, /// "std::nullopt" otherwise. - Optional<SIMemOpInfo> getAtomicFenceInfo( - const MachineBasicBlock::iterator &MI) const; + std::optional<SIMemOpInfo> + getAtomicFenceInfo(const MachineBasicBlock::iterator &MI) const; /// \returns Atomic cmpxchg/rmw info if \p MI is an atomic cmpxchg or /// rmw operation, "std::nullopt" otherwise. 
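The SIMemOpAccess getters all follow the contract spelled out in the doc comments above: return the info when the instruction qualifies, std::nullopt otherwise. A self-contained sketch of such a query, with a toy instruction record standing in for MachineInstr:

// --- illustrative sketch, not part of this commit ---
#include <optional>

struct Instr {
  bool MayLoad = false;
  bool MayStore = false;
  bool Atomic = false;
};

struct MemOpInfo {
  bool IsAtomic = false;
};

// Load info if I is a load operation, std::nullopt otherwise.
std::optional<MemOpInfo> getLoadInfo(const Instr &I) {
  if (!(I.MayLoad && !I.MayStore))
    return std::nullopt; // not a (pure) load
  MemOpInfo Info;
  Info.IsAtomic = I.Atomic;
  return Info;
}
// --- end sketch ---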
-  Optional<SIMemOpInfo> getAtomicCmpxchgOrRmwInfo(
-      const MachineBasicBlock::iterator &MI) const;
+  std::optional<SIMemOpInfo>
+  getAtomicCmpxchgOrRmwInfo(const MachineBasicBlock::iterator &MI) const;
 };

 class SICacheControl {
@@ -622,7 +622,7 @@ void SIMemOpAccess::reportUnsupported(const MachineBasicBlock::iterator &MI,
   Func.getContext().diagnose(Diag);
 }

-Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
+std::optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
 SIMemOpAccess::toSIAtomicScope(SyncScope::ID SSID,
                                SIAtomicAddrSpace InstrAddrSpace) const {
   if (SSID == SyncScope::System)
@@ -687,7 +687,7 @@ SIMemOpAccess::SIMemOpAccess(MachineFunction &MF) {
   MMI = &MF.getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();
 }

-Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
+std::optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
     const MachineBasicBlock::iterator &MI) const {
   assert(MI->getNumMemOperands() > 0);

@@ -747,8 +747,8 @@ Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
                      IsNonTemporal);
 }

-Optional<SIMemOpInfo> SIMemOpAccess::getLoadInfo(
-    const MachineBasicBlock::iterator &MI) const {
+std::optional<SIMemOpInfo>
+SIMemOpAccess::getLoadInfo(const MachineBasicBlock::iterator &MI) const {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

   if (!(MI->mayLoad() && !MI->mayStore()))
@@ -761,8 +761,8 @@ Optional<SIMemOpInfo> SIMemOpAccess::getLoadInfo(
   return constructFromMIWithMMO(MI);
 }

-Optional<SIMemOpInfo> SIMemOpAccess::getStoreInfo(
-    const MachineBasicBlock::iterator &MI) const {
+std::optional<SIMemOpInfo>
+SIMemOpAccess::getStoreInfo(const MachineBasicBlock::iterator &MI) const {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

   if (!(!MI->mayLoad() && MI->mayStore()))
@@ -775,8 +775,8 @@ Optional<SIMemOpInfo> SIMemOpAccess::getStoreInfo(
   return constructFromMIWithMMO(MI);
 }

-Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
-    const MachineBasicBlock::iterator &MI) const {
+std::optional<SIMemOpInfo>
+SIMemOpAccess::getAtomicFenceInfo(const MachineBasicBlock::iterator &MI) const {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

   if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
@@ -808,7 +808,7 @@ Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
                      IsCrossAddressSpaceOrdering, AtomicOrdering::NotAtomic);
 }

-Optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
+std::optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
     const MachineBasicBlock::iterator &MI) const {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 037e0ec..6151eb0 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -53,7 +53,7 @@ private:
   MapVector<MachineInstr *, SDWAOperandsVector> PotentialMatches;
   SmallVector<MachineInstr *, 8> ConvertedInstructions;

-  Optional<int64_t> foldToImm(const MachineOperand &Op) const;
+  std::optional<int64_t> foldToImm(const MachineOperand &Op) const;

 public:
   static char ID;
@@ -490,7 +490,8 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
   return SDWADstOperand::convertToSDWA(MI, TII);
 }

-Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
+std::optional<int64_t>
+SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
   if (Op.isImm()) {
     return Op.getImm();
   }
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 0d218c8..c77b2c1 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -98,7 +98,7 @@ namespace llvm {

 namespace AMDGPU {

-Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
+std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
   if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
     return std::nullopt;

@@ -118,25 +118,25 @@ Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
 }

 bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
   return false;
 }

 bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
   return false;
 }

 bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
   return false;
 }

 bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
   return false;
 }
@@ -536,7 +536,7 @@ unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
   return 0;
 }

-Optional<unsigned> InstInfo::getInvalidCompOperandIndex(
+std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
     std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {

   auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx);
@@ -711,7 +711,7 @@ std::string AMDGPUTargetID::toString() const {
           .str();

   std::string Features;
-  if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
+  if (std::optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
     switch (*HsaAbiVersion) {
     case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
       // Code object V2 only supported specific processors and had fixed
@@ -972,7 +972,7 @@ unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
 }

 unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
-                             Optional<bool> EnableWavefrontSize32) {
+                             std::optional<bool> EnableWavefrontSize32) {
   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
     return 8;

@@ -990,7 +990,7 @@ unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
 }

 unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
-                                Optional<bool> EnableWavefrontSize32) {
+                                std::optional<bool> EnableWavefrontSize32) {
   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
     return 8;

@@ -1062,7 +1062,7 @@ unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
 }

 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
-                          Optional<bool> EnableWavefrontSize32) {
+                          std::optional<bool> EnableWavefrontSize32) {
   NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                      getVGPREncodingGranule(STI, EnableWavefrontSize32));
   // VGPRBlocks is actual number of VGPR blocks minus 1.
@@ -2489,12 +2489,13 @@ uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
   return ByteOffset >> 2;
 }

-Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
-                                       int64_t ByteOffset, bool IsBuffer) {
+std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
+                                            int64_t ByteOffset, bool IsBuffer) {
   // The signed version is always a byte offset.
   if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
     assert(hasSMEMByteOffset(ST));
-    return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : std::nullopt;
+    return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
+                                 : std::nullopt;
   }

   if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
@@ -2502,17 +2503,17 @@ Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,

   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
   return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
-             ? Optional<int64_t>(EncodedOffset)
+             ? std::optional<int64_t>(EncodedOffset)
              : std::nullopt;
 }

-Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
-                                                int64_t ByteOffset) {
+std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
+                                                     int64_t ByteOffset) {
   if (!isCI(ST) || !isDwordAligned(ByteOffset))
     return std::nullopt;

   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
-  return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset)
+  return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
                                    : std::nullopt;
 }
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index f9136f1..e34617b 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -42,7 +42,7 @@ namespace AMDGPU {
 struct IsaVersion;

 /// \returns HSA OS ABI Version identification.
-Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI);
+std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI);
 /// \returns True if HSA OS ABI Version identification is 2,
 /// false otherwise.
 bool isHsaAbiVersion2(const MCSubtargetInfo *STI);
@@ -264,15 +264,15 @@ unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs);
 /// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
 unsigned
 getVGPRAllocGranule(const MCSubtargetInfo *STI,
-                    Optional<bool> EnableWavefrontSize32 = std::nullopt);
+                    std::optional<bool> EnableWavefrontSize32 = std::nullopt);

 /// \returns VGPR encoding granularity for given subtarget \p STI.
 ///
 /// For subtargets which support it, \p EnableWavefrontSize32 should match
 /// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
-unsigned
-getVGPREncodingGranule(const MCSubtargetInfo *STI,
-                       Optional<bool> EnableWavefrontSize32 = std::nullopt);
+unsigned getVGPREncodingGranule(
+    const MCSubtargetInfo *STI,
+    std::optional<bool> EnableWavefrontSize32 = std::nullopt);

 /// \returns Total number of VGPRs for given subtarget \p STI.
 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI);
@@ -298,8 +298,9 @@ unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
 ///
 /// For subtargets which support it, \p EnableWavefrontSize32 should match the
 /// ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
-unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs,
-                          Optional<bool> EnableWavefrontSize32 = std::nullopt);
+unsigned
+getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs,
+                 std::optional<bool> EnableWavefrontSize32 = std::nullopt);

 } // end namespace IsaInfo

@@ -546,7 +547,7 @@ constexpr unsigned COMPONENTS_NUM = 2;
 class ComponentProps {
 private:
   unsigned SrcOperandsNum = 0;
-  Optional<unsigned> MandatoryLiteralIdx;
+  std::optional<unsigned> MandatoryLiteralIdx;
   bool HasSrc2Acc = false;

 public:
@@ -738,7 +739,7 @@ public:

   // Check VOPD operands constraints.
   // Return the index of an invalid component operand, if any.
-  Optional<unsigned> getInvalidCompOperandIndex(
+  std::optional<unsigned> getInvalidCompOperandIndex(
       std::function<unsigned(unsigned, unsigned)> GetRegIdx) const;

 private:
@@ -1252,13 +1253,13 @@ uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset);
 /// SMRD offset field, or std::nullopt if it won't fit. On GFX9 and GFX10
 /// S_LOAD instructions have a signed offset, on other subtargets it is
 /// unsigned. S_BUFFER has an unsigned offset for all subtargets.
-Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
-                                       int64_t ByteOffset, bool IsBuffer);
+std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
+                                            int64_t ByteOffset, bool IsBuffer);

 /// \return The encoding that can be used for a 32-bit literal offset in an SMRD
 /// instruction. This is only useful on CI.s
-Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
-                                                int64_t ByteOffset);
+std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
+                                                     int64_t ByteOffset);

 /// For FLAT segment the offset must be positive;
 /// MSB is ignored and forced to zero.
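Note on the replacement type: the hunks above all lean on the same few operations that the code already used through llvm::Optional's compatible interface, namely construction from a value or from std::nullopt, the conditional-expression-with-std::nullopt pattern, has_value()/value(), operator*, and declaration inside an if condition. The standalone sketch below is illustrative only and is not part of the commit; encodeSigned20 is a hypothetical stand-in loosely modeled on the getSMRDEncodedOffset change, not the actual LLVM helper.

#include <cstdint>
#include <iostream>
#include <optional>

// Hypothetical helper: return the offset when it fits a signed 20-bit field,
// std::nullopt otherwise (same shape as the getSMRDEncodedOffset hunks above).
static std::optional<int64_t> encodeSigned20(int64_t ByteOffset) {
  constexpr int64_t Limit = int64_t(1) << 19; // signed 20-bit range
  return (ByteOffset >= -Limit && ByteOffset < Limit)
             ? std::optional<int64_t>(ByteOffset)
             : std::nullopt;
}

int main() {
  // Declaration inside the condition, as in
  // "if (std::optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(...))".
  if (std::optional<int64_t> Enc = encodeSigned20(1024))
    std::cout << "encoded: " << *Enc << '\n';

  // Explicit has_value()/value() checks, as in the LDS_KERNEL_ID hunk.
  std::optional<int64_t> Big = encodeSigned20(int64_t(1) << 40);
  std::cout << "fits: " << Big.has_value() << '\n';
  return 0;
}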