Diffstat (limited to 'llvm/lib')
73 files changed, 1145 insertions, 1211 deletions
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp index 82530e7..5907e21 100644 --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -5366,7 +5366,7 @@ static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, Type *MidTy = CI->getType(); Type *DstTy = Ty; if (Src->getType() == Ty) { - auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode()); + auto FirstOp = CI->getOpcode(); auto SecondOp = static_cast<Instruction::CastOps>(CastOpc); Type *SrcIntPtrTy = SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr; diff --git a/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp b/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp index 5d7c97a..6356d71 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp @@ -37,8 +37,8 @@ void AIXException::emitExceptionInfoTable(const MCSymbol *LSDA, // unsigned long personality; /* Pointer to the personality routine */ // } - auto *EHInfo = - cast<MCSectionXCOFF>(Asm->getObjFileLowering().getCompactUnwindSection()); + auto *EHInfo = static_cast<MCSectionXCOFF *>( + Asm->getObjFileLowering().getCompactUnwindSection()); if (Asm->TM.getFunctionSections()) { // If option -ffunction-sections is on, append the function name to the // name of EH Info Table csect so that each function has its own EH Info diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index 11b8576..7188833 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -972,10 +972,9 @@ void DwarfDebug::constructCallSiteEntryDIEs(const DISubprogram &SP, // the call graph which could lead to some target function. For tail // calls, no return PC information is needed, unless tuning for GDB in // DWARF4 mode in which case we fake a return PC for compatibility. - const MCSymbol *PCAddr = - (!IsTail || CU.useGNUAnalogForDwarf5Feature()) - ? const_cast<MCSymbol *>(getLabelAfterInsn(TopLevelCallMI)) - : nullptr; + const MCSymbol *PCAddr = (!IsTail || CU.useGNUAnalogForDwarf5Feature()) + ? getLabelAfterInsn(TopLevelCallMI) + : nullptr; // For tail calls, it's necessary to record the address of the branch // instruction so that the debugger can show where the tail call occurred. diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp index c2839d4..5e50898 100644 --- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp +++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp @@ -634,6 +634,9 @@ bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic( << " and factor = " << Factor << "\n"); } else { assert(II); + if (II->getIntrinsicID() != Intrinsic::masked_load && + II->getIntrinsicID() != Intrinsic::vp_load) + return false; // Check mask operand. Handle both all-true/false and interleaved mask. Mask = getMask(getMaskOperand(II), Factor, getDeinterleavedVectorType(DI)); @@ -673,6 +676,9 @@ bool InterleavedAccessImpl::lowerInterleaveIntrinsic( Value *Mask = nullptr; if (II) { + if (II->getIntrinsicID() != Intrinsic::masked_store && + II->getIntrinsicID() != Intrinsic::vp_store) + return false; // Check mask operand. Handle both all-true/false and interleaved mask. 
Mask = getMask(getMaskOperand(II), Factor, cast<VectorType>(InterleaveValues[0]->getType())); diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp index c93f890..408d07b 100644 --- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp +++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp @@ -2388,23 +2388,25 @@ TargetLoweringObjectFileXCOFF::getTargetSymbol(const GlobalValue *GV, // here. if (const GlobalObject *GO = dyn_cast<GlobalObject>(GV)) { if (GO->isDeclarationForLinker()) - return cast<MCSectionXCOFF>(getSectionForExternalReference(GO, TM)) + return static_cast<const MCSectionXCOFF *>( + getSectionForExternalReference(GO, TM)) ->getQualNameSymbol(); if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) if (GVar->hasAttribute("toc-data")) - return cast<MCSectionXCOFF>( + return static_cast<const MCSectionXCOFF *>( SectionForGlobal(GVar, SectionKind::getData(), TM)) ->getQualNameSymbol(); SectionKind GOKind = getKindForGlobal(GO, TM); if (GOKind.isText()) - return cast<MCSectionXCOFF>( + return static_cast<const MCSectionXCOFF *>( getSectionForFunctionDescriptor(cast<Function>(GO), TM)) ->getQualNameSymbol(); if ((TM.getDataSections() && !GO->hasSection()) || GO->hasCommonLinkage() || GOKind.isBSSLocal() || GOKind.isThreadBSSLocal()) - return cast<MCSectionXCOFF>(SectionForGlobal(GO, GOKind, TM)) + return static_cast<const MCSectionXCOFF *>( + SectionForGlobal(GO, GOKind, TM)) ->getQualNameSymbol(); } @@ -2740,7 +2742,7 @@ MCSection *TargetLoweringObjectFileXCOFF::getSectionForTOCEntry( MCSection *TargetLoweringObjectFileXCOFF::getSectionForLSDA( const Function &F, const MCSymbol &FnSym, const TargetMachine &TM) const { - auto *LSDA = cast<MCSectionXCOFF>(LSDASection); + auto *LSDA = static_cast<MCSectionXCOFF *>(LSDASection); if (TM.getFunctionSections()) { // If option -ffunction-sections is on, append the function name to the // name of the LSDA csect so that each function has its own LSDA csect. 
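Note: a recurring change in the hunks above (AIXException.cpp, TargetLoweringObjectFileImpl.cpp) and in the MC files below is swapping llvm::cast<MCSectionXCOFF> for a plain static_cast. This appears to follow from the MCSection.cpp hunk further down, which drops the SectionVariant field from the MCSection constructor; without that stored discriminator, the classof() check behind cast<> has nothing to test, so callers downcast unchecked because the object-file format in use already fixes the concrete section type. A minimal sketch of that idea, using hypothetical Section/XCOFFSection types rather than the real MC classes:

    // Hypothetical types, not LLVM's: once the kind/variant field is gone,
    // a checked cast<> adds nothing over static_cast, because the caller
    // already knows the concrete section type by construction.
    #include <cassert>
    #include <string>

    struct Section {
      std::string Name;
      virtual ~Section() = default;
    };

    struct XCOFFSection final : Section {
      std::string qualNameSymbol() const { return Name + "[RO]"; }
    };

    // An XCOFF lowering only ever hands out XCOFFSection objects, so the
    // downcast is safe by construction rather than by a runtime kind check.
    std::string qualNameFor(const Section &S) {
      return static_cast<const XCOFFSection &>(S).qualNameSymbol();
    }

    int main() {
      XCOFFSection S;
      S.Name = ".text";
      assert(qualNameFor(S) == ".text[RO]");
      return 0;
    }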
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp index b3798f1..a8559e7 100644 --- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp +++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp @@ -183,7 +183,7 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) { std::lock_guard<sys::Mutex> locked(lock); // Save information about our target - Arch = (Triple::ArchType)Obj.getArch(); + Arch = Obj.getArch(); IsTargetLittleEndian = Obj.isLittleEndian(); setMipsABI(Obj); @@ -1361,18 +1361,17 @@ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> RuntimeDyld::loadObject(const ObjectFile &Obj) { if (!Dyld) { if (Obj.isELF()) - Dyld = - createRuntimeDyldELF(static_cast<Triple::ArchType>(Obj.getArch()), - MemMgr, Resolver, ProcessAllSections, - std::move(NotifyStubEmitted)); + Dyld = createRuntimeDyldELF(Obj.getArch(), MemMgr, Resolver, + ProcessAllSections, + std::move(NotifyStubEmitted)); else if (Obj.isMachO()) - Dyld = createRuntimeDyldMachO( - static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver, - ProcessAllSections, std::move(NotifyStubEmitted)); + Dyld = createRuntimeDyldMachO(Obj.getArch(), MemMgr, Resolver, + ProcessAllSections, + std::move(NotifyStubEmitted)); else if (Obj.isCOFF()) - Dyld = createRuntimeDyldCOFF( - static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver, - ProcessAllSections, std::move(NotifyStubEmitted)); + Dyld = createRuntimeDyldCOFF(Obj.getArch(), MemMgr, Resolver, + ProcessAllSections, + std::move(NotifyStubEmitted)); else report_fatal_error("Incompatible object format!"); } diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp index 840ca83..7928772 100644 --- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp +++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp @@ -2617,7 +2617,7 @@ void OpenMPIRBuilder::emitReductionListCopy( Expected<Function *> OpenMPIRBuilder::emitInterWarpCopyFunction( const LocationDescription &Loc, ArrayRef<ReductionInfo> ReductionInfos, AttributeList FuncAttrs) { - IRBuilder<>::InsertPointGuard IPG(Builder); + InsertPointTy SavedIP = Builder.saveIP(); LLVMContext &Ctx = M.getContext(); FunctionType *FuncTy = FunctionType::get( Builder.getVoidTy(), {Builder.getPtrTy(), Builder.getInt32Ty()}, @@ -2630,7 +2630,6 @@ Expected<Function *> OpenMPIRBuilder::emitInterWarpCopyFunction( WcFunc->addParamAttr(1, Attribute::NoUndef); BasicBlock *EntryBB = BasicBlock::Create(M.getContext(), "entry", WcFunc); Builder.SetInsertPoint(EntryBB); - Builder.SetCurrentDebugLocation(llvm::DebugLoc()); // ReduceList: thread local Reduce list. 
// At the stage of the computation when this function is called, partially @@ -2845,6 +2844,7 @@ Expected<Function *> OpenMPIRBuilder::emitInterWarpCopyFunction( } Builder.CreateRetVoid(); + Builder.restoreIP(SavedIP); return WcFunc; } @@ -2853,7 +2853,6 @@ Function *OpenMPIRBuilder::emitShuffleAndReduceFunction( ArrayRef<ReductionInfo> ReductionInfos, Function *ReduceFn, AttributeList FuncAttrs) { LLVMContext &Ctx = M.getContext(); - IRBuilder<>::InsertPointGuard IPG(Builder); FunctionType *FuncTy = FunctionType::get(Builder.getVoidTy(), {Builder.getPtrTy(), Builder.getInt16Ty(), @@ -2872,7 +2871,6 @@ Function *OpenMPIRBuilder::emitShuffleAndReduceFunction( SarFunc->addParamAttr(3, Attribute::SExt); BasicBlock *EntryBB = BasicBlock::Create(M.getContext(), "entry", SarFunc); Builder.SetInsertPoint(EntryBB); - Builder.SetCurrentDebugLocation(llvm::DebugLoc()); // Thread local Reduce list used to host the values of data to be reduced. Argument *ReduceListArg = SarFunc->getArg(0); @@ -3019,7 +3017,7 @@ Function *OpenMPIRBuilder::emitShuffleAndReduceFunction( Function *OpenMPIRBuilder::emitListToGlobalCopyFunction( ArrayRef<ReductionInfo> ReductionInfos, Type *ReductionsBufferTy, AttributeList FuncAttrs) { - IRBuilder<>::InsertPointGuard IPG(Builder); + OpenMPIRBuilder::InsertPointTy OldIP = Builder.saveIP(); LLVMContext &Ctx = M.getContext(); FunctionType *FuncTy = FunctionType::get( Builder.getVoidTy(), @@ -3035,7 +3033,6 @@ Function *OpenMPIRBuilder::emitListToGlobalCopyFunction( BasicBlock *EntryBlock = BasicBlock::Create(Ctx, "entry", LtGCFunc); Builder.SetInsertPoint(EntryBlock); - Builder.SetCurrentDebugLocation(llvm::DebugLoc()); // Buffer: global reduction buffer. Argument *BufferArg = LtGCFunc->getArg(0); @@ -3123,13 +3120,14 @@ Function *OpenMPIRBuilder::emitListToGlobalCopyFunction( } Builder.CreateRetVoid(); + Builder.restoreIP(OldIP); return LtGCFunc; } Function *OpenMPIRBuilder::emitListToGlobalReduceFunction( ArrayRef<ReductionInfo> ReductionInfos, Function *ReduceFn, Type *ReductionsBufferTy, AttributeList FuncAttrs) { - IRBuilder<>::InsertPointGuard IPG(Builder); + OpenMPIRBuilder::InsertPointTy OldIP = Builder.saveIP(); LLVMContext &Ctx = M.getContext(); FunctionType *FuncTy = FunctionType::get( Builder.getVoidTy(), @@ -3145,7 +3143,6 @@ Function *OpenMPIRBuilder::emitListToGlobalReduceFunction( BasicBlock *EntryBlock = BasicBlock::Create(Ctx, "entry", LtGRFunc); Builder.SetInsertPoint(EntryBlock); - Builder.SetCurrentDebugLocation(llvm::DebugLoc()); // Buffer: global reduction buffer. Argument *BufferArg = LtGRFunc->getArg(0); @@ -3206,13 +3203,14 @@ Function *OpenMPIRBuilder::emitListToGlobalReduceFunction( Builder.CreateCall(ReduceFn, {LocalReduceListAddrCast, ReduceList}) ->addFnAttr(Attribute::NoUnwind); Builder.CreateRetVoid(); + Builder.restoreIP(OldIP); return LtGRFunc; } Function *OpenMPIRBuilder::emitGlobalToListCopyFunction( ArrayRef<ReductionInfo> ReductionInfos, Type *ReductionsBufferTy, AttributeList FuncAttrs) { - IRBuilder<>::InsertPointGuard IPG(Builder); + OpenMPIRBuilder::InsertPointTy OldIP = Builder.saveIP(); LLVMContext &Ctx = M.getContext(); FunctionType *FuncTy = FunctionType::get( Builder.getVoidTy(), @@ -3228,7 +3226,6 @@ Function *OpenMPIRBuilder::emitGlobalToListCopyFunction( BasicBlock *EntryBlock = BasicBlock::Create(Ctx, "entry", LtGCFunc); Builder.SetInsertPoint(EntryBlock); - Builder.SetCurrentDebugLocation(llvm::DebugLoc()); // Buffer: global reduction buffer. 
Argument *BufferArg = LtGCFunc->getArg(0); @@ -3314,13 +3311,14 @@ Function *OpenMPIRBuilder::emitGlobalToListCopyFunction( } Builder.CreateRetVoid(); + Builder.restoreIP(OldIP); return LtGCFunc; } Function *OpenMPIRBuilder::emitGlobalToListReduceFunction( ArrayRef<ReductionInfo> ReductionInfos, Function *ReduceFn, Type *ReductionsBufferTy, AttributeList FuncAttrs) { - IRBuilder<>::InsertPointGuard IPG(Builder); + OpenMPIRBuilder::InsertPointTy OldIP = Builder.saveIP(); LLVMContext &Ctx = M.getContext(); auto *FuncTy = FunctionType::get( Builder.getVoidTy(), @@ -3336,7 +3334,6 @@ Function *OpenMPIRBuilder::emitGlobalToListReduceFunction( BasicBlock *EntryBlock = BasicBlock::Create(Ctx, "entry", LtGRFunc); Builder.SetInsertPoint(EntryBlock); - Builder.SetCurrentDebugLocation(llvm::DebugLoc()); // Buffer: global reduction buffer. Argument *BufferArg = LtGRFunc->getArg(0); @@ -3397,6 +3394,7 @@ Function *OpenMPIRBuilder::emitGlobalToListReduceFunction( Builder.CreateCall(ReduceFn, {ReduceList, ReductionList}) ->addFnAttr(Attribute::NoUnwind); Builder.CreateRetVoid(); + Builder.restoreIP(OldIP); return LtGRFunc; } @@ -3409,7 +3407,6 @@ std::string OpenMPIRBuilder::getReductionFuncName(StringRef Name) const { Expected<Function *> OpenMPIRBuilder::createReductionFunction( StringRef ReducerName, ArrayRef<ReductionInfo> ReductionInfos, ReductionGenCBKind ReductionGenCBKind, AttributeList FuncAttrs) { - IRBuilder<>::InsertPointGuard IPG(Builder); auto *FuncTy = FunctionType::get(Builder.getVoidTy(), {Builder.getPtrTy(), Builder.getPtrTy()}, /* IsVarArg */ false); @@ -3422,7 +3419,6 @@ Expected<Function *> OpenMPIRBuilder::createReductionFunction( BasicBlock *EntryBB = BasicBlock::Create(M.getContext(), "entry", ReductionFunc); Builder.SetInsertPoint(EntryBB); - Builder.SetCurrentDebugLocation(llvm::DebugLoc()); // Need to alloca memory here and deal with the pointers before getting // LHS/RHS pointers out @@ -3750,12 +3746,10 @@ static Error populateReductionFunction( Function *ReductionFunc, ArrayRef<OpenMPIRBuilder::ReductionInfo> ReductionInfos, IRBuilder<> &Builder, ArrayRef<bool> IsByRef, bool IsGPU) { - IRBuilder<>::InsertPointGuard IPG(Builder); Module *Module = ReductionFunc->getParent(); BasicBlock *ReductionFuncBlock = BasicBlock::Create(Module->getContext(), "", ReductionFunc); Builder.SetInsertPoint(ReductionFuncBlock); - Builder.SetCurrentDebugLocation(llvm::DebugLoc()); Value *LHSArrayPtr = nullptr; Value *RHSArrayPtr = nullptr; if (IsGPU) { diff --git a/llvm/lib/MC/CMakeLists.txt b/llvm/lib/MC/CMakeLists.txt index d662c42..18a85b3 100644 --- a/llvm/lib/MC/CMakeLists.txt +++ b/llvm/lib/MC/CMakeLists.txt @@ -43,13 +43,7 @@ add_llvm_component_library(LLVMMC MCRegisterInfo.cpp MCSchedule.cpp MCSection.cpp - MCSectionCOFF.cpp - MCSectionDXContainer.cpp - MCSectionELF.cpp - MCSectionGOFF.cpp MCSectionMachO.cpp - MCSectionWasm.cpp - MCSectionXCOFF.cpp MCStreamer.cpp MCSPIRVStreamer.cpp MCSubtargetInfo.cpp diff --git a/llvm/lib/MC/GOFFObjectWriter.cpp b/llvm/lib/MC/GOFFObjectWriter.cpp index 1871f5f..88188f3 100644 --- a/llvm/lib/MC/GOFFObjectWriter.cpp +++ b/llvm/lib/MC/GOFFObjectWriter.cpp @@ -336,7 +336,7 @@ void GOFFWriter::defineSymbols() { unsigned Ordinal = 0; // Process all sections. 
for (MCSection &S : Asm) { - auto &Section = cast<MCSectionGOFF>(S); + auto &Section = static_cast<MCSectionGOFF &>(S); Section.setOrdinal(++Ordinal); defineSectionSymbols(Section); } diff --git a/llvm/lib/MC/MCAsmInfoCOFF.cpp b/llvm/lib/MC/MCAsmInfoCOFF.cpp index 0b8781c..54717df 100644 --- a/llvm/lib/MC/MCAsmInfoCOFF.cpp +++ b/llvm/lib/MC/MCAsmInfoCOFF.cpp @@ -12,7 +12,13 @@ //===----------------------------------------------------------------------===// #include "llvm/MC/MCAsmInfoCOFF.h" +#include "llvm/BinaryFormat/COFF.h" #include "llvm/MC/MCDirectives.h" +#include "llvm/MC/MCSection.h" +#include "llvm/MC/MCSectionCOFF.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/raw_ostream.h" +#include <cassert> using namespace llvm; @@ -49,6 +55,10 @@ MCAsmInfoCOFF::MCAsmInfoCOFF() { HasCOFFComdatConstants = true; } +bool MCAsmInfoCOFF::useCodeAlign(const MCSection &Sec) const { + return Sec.isText(); +} + void MCAsmInfoMicrosoft::anchor() {} MCAsmInfoMicrosoft::MCAsmInfoMicrosoft() = default; @@ -64,3 +74,101 @@ MCAsmInfoGNUCOFF::MCAsmInfoGNUCOFF() { // We don't create constants in comdat sections for MinGW. HasCOFFComdatConstants = false; } + +bool MCSectionCOFF::shouldOmitSectionDirective(StringRef Name) const { + if (COMDATSymbol || isUnique()) + return false; + + // FIXME: Does .section .bss/.data/.text work everywhere?? + if (Name == ".text" || Name == ".data" || Name == ".bss") + return true; + + return false; +} + +void MCSectionCOFF::setSelection(int Selection) const { + assert(Selection != 0 && "invalid COMDAT selection type"); + this->Selection = Selection; + Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT; +} + +void MCAsmInfoCOFF::printSwitchToSection(const MCSection &Section, uint32_t, + const Triple &T, + raw_ostream &OS) const { + auto &Sec = static_cast<const MCSectionCOFF &>(Section); + // standard sections don't require the '.section' + if (Sec.shouldOmitSectionDirective(Sec.getName())) { + OS << '\t' << Sec.getName() << '\n'; + return; + } + + OS << "\t.section\t" << Sec.getName() << ",\""; + if (Sec.getCharacteristics() & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA) + OS << 'd'; + if (Sec.getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA) + OS << 'b'; + if (Sec.getCharacteristics() & COFF::IMAGE_SCN_MEM_EXECUTE) + OS << 'x'; + if (Sec.getCharacteristics() & COFF::IMAGE_SCN_MEM_WRITE) + OS << 'w'; + else if (Sec.getCharacteristics() & COFF::IMAGE_SCN_MEM_READ) + OS << 'r'; + else + OS << 'y'; + if (Sec.getCharacteristics() & COFF::IMAGE_SCN_LNK_REMOVE) + OS << 'n'; + if (Sec.getCharacteristics() & COFF::IMAGE_SCN_MEM_SHARED) + OS << 's'; + if ((Sec.getCharacteristics() & COFF::IMAGE_SCN_MEM_DISCARDABLE) && + !Sec.isImplicitlyDiscardable(Sec.getName())) + OS << 'D'; + if (Sec.getCharacteristics() & COFF::IMAGE_SCN_LNK_INFO) + OS << 'i'; + OS << '"'; + + // unique should be tail of .section directive. 
+ if (Sec.isUnique() && !Sec.COMDATSymbol) + OS << ",unique," << Sec.UniqueID; + + if (Sec.getCharacteristics() & COFF::IMAGE_SCN_LNK_COMDAT) { + if (Sec.COMDATSymbol) + OS << ","; + else + OS << "\n\t.linkonce\t"; + switch (Sec.Selection) { + case COFF::IMAGE_COMDAT_SELECT_NODUPLICATES: + OS << "one_only"; + break; + case COFF::IMAGE_COMDAT_SELECT_ANY: + OS << "discard"; + break; + case COFF::IMAGE_COMDAT_SELECT_SAME_SIZE: + OS << "same_size"; + break; + case COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH: + OS << "same_contents"; + break; + case COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE: + OS << "associative"; + break; + case COFF::IMAGE_COMDAT_SELECT_LARGEST: + OS << "largest"; + break; + case COFF::IMAGE_COMDAT_SELECT_NEWEST: + OS << "newest"; + break; + default: + assert(false && "unsupported COFF selection type"); + break; + } + if (Sec.COMDATSymbol) { + OS << ","; + Sec.COMDATSymbol->print(OS, this); + } + } + + if (Sec.isUnique() && Sec.COMDATSymbol) + OS << ",unique," << Sec.UniqueID; + + OS << '\n'; +} diff --git a/llvm/lib/MC/MCAsmInfoDarwin.cpp b/llvm/lib/MC/MCAsmInfoDarwin.cpp index 9cba775..e156fa0 100644 --- a/llvm/lib/MC/MCAsmInfoDarwin.cpp +++ b/llvm/lib/MC/MCAsmInfoDarwin.cpp @@ -85,3 +85,8 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() { DwarfUsesRelocationsAcrossSections = false; SetDirectiveSuppressesReloc = true; } + +bool MCAsmInfoDarwin::useCodeAlign(const MCSection &Sec) const { + return static_cast<const MCSectionMachO &>(Sec).hasAttribute( + MachO::S_ATTR_PURE_INSTRUCTIONS); +} diff --git a/llvm/lib/MC/MCAsmInfoELF.cpp b/llvm/lib/MC/MCAsmInfoELF.cpp index 7eb89ef..cdae9d7 100644 --- a/llvm/lib/MC/MCAsmInfoELF.cpp +++ b/llvm/lib/MC/MCAsmInfoELF.cpp @@ -12,9 +12,16 @@ //===----------------------------------------------------------------------===// #include "llvm/MC/MCAsmInfoELF.h" +#include "llvm/ADT/Twine.h" #include "llvm/BinaryFormat/ELF.h" +#include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" #include "llvm/MC/MCSectionELF.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/TargetParser/Triple.h" +#include <cassert> using namespace llvm; @@ -28,9 +35,198 @@ MCSection *MCAsmInfoELF::getNonexecutableStackSection(MCContext &Ctx) const { return Ctx.getELFSection(".note.GNU-stack", ELF::SHT_PROGBITS, 0); } +bool MCAsmInfoELF::useCodeAlign(const MCSection &Sec) const { + return static_cast<const MCSectionELF &>(Sec).getFlags() & ELF::SHF_EXECINSTR; +} + MCAsmInfoELF::MCAsmInfoELF() { HasIdentDirective = true; WeakRefDirective = "\t.weak\t"; PrivateGlobalPrefix = ".L"; PrivateLabelPrefix = ".L"; } + +static void printName(raw_ostream &OS, StringRef Name) { + if (Name.find_first_not_of("0123456789_." 
+ "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ") == Name.npos) { + OS << Name; + return; + } + OS << '"'; + for (const char *B = Name.begin(), *E = Name.end(); B < E; ++B) { + if (*B == '"') // Unquoted " + OS << "\\\""; + else if (*B != '\\') // Neither " or backslash + OS << *B; + else if (B + 1 == E) // Trailing backslash + OS << "\\\\"; + else { + OS << B[0] << B[1]; // Quoted character + ++B; + } + } + OS << '"'; +} + +void MCAsmInfoELF::printSwitchToSection(const MCSection &Section, + uint32_t Subsection, const Triple &T, + raw_ostream &OS) const { + auto &Sec = static_cast<const MCSectionELF &>(Section); + if (!Sec.isUnique() && shouldOmitSectionDirective(Sec.getName())) { + OS << '\t' << Sec.getName(); + if (Subsection) + OS << '\t' << Subsection; + OS << '\n'; + return; + } + + OS << "\t.section\t"; + printName(OS, Sec.getName()); + + // Handle the weird solaris syntax if desired. + if (usesSunStyleELFSectionSwitchSyntax() && !(Sec.Flags & ELF::SHF_MERGE)) { + if (Sec.Flags & ELF::SHF_ALLOC) + OS << ",#alloc"; + if (Sec.Flags & ELF::SHF_EXECINSTR) + OS << ",#execinstr"; + if (Sec.Flags & ELF::SHF_WRITE) + OS << ",#write"; + if (Sec.Flags & ELF::SHF_EXCLUDE) + OS << ",#exclude"; + if (Sec.Flags & ELF::SHF_TLS) + OS << ",#tls"; + OS << '\n'; + return; + } + + OS << ",\""; + if (Sec.Flags & ELF::SHF_ALLOC) + OS << 'a'; + if (Sec.Flags & ELF::SHF_EXCLUDE) + OS << 'e'; + if (Sec.Flags & ELF::SHF_EXECINSTR) + OS << 'x'; + if (Sec.Flags & ELF::SHF_WRITE) + OS << 'w'; + if (Sec.Flags & ELF::SHF_MERGE) + OS << 'M'; + if (Sec.Flags & ELF::SHF_STRINGS) + OS << 'S'; + if (Sec.Flags & ELF::SHF_TLS) + OS << 'T'; + if (Sec.Flags & ELF::SHF_LINK_ORDER) + OS << 'o'; + if (Sec.Flags & ELF::SHF_GROUP) + OS << 'G'; + if (Sec.Flags & ELF::SHF_GNU_RETAIN) + OS << 'R'; + + // If there are os-specific flags, print them. + if (T.isOSSolaris()) + if (Sec.Flags & ELF::SHF_SUNW_NODISCARD) + OS << 'R'; + + // If there are tarSec.get-specific flags, print them. + Triple::ArchType Arch = T.getArch(); + if (Arch == Triple::xcore) { + if (Sec.Flags & ELF::XCORE_SHF_CP_SECTION) + OS << 'c'; + if (Sec.Flags & ELF::XCORE_SHF_DP_SECTION) + OS << 'd'; + } else if (T.isARM() || T.isThumb()) { + if (Sec.Flags & ELF::SHF_ARM_PURECODE) + OS << 'y'; + } else if (T.isAArch64()) { + if (Sec.Flags & ELF::SHF_AARCH64_PURECODE) + OS << 'y'; + } else if (Arch == Triple::hexagon) { + if (Sec.Flags & ELF::SHF_HEX_GPREL) + OS << 's'; + } else if (Arch == Triple::x86_64) { + if (Sec.Flags & ELF::SHF_X86_64_LARGE) + OS << 'l'; + } + + OS << '"'; + + OS << ','; + + // If comment string is '@', e.g. as on ARM - use '%' instead + if (getCommentString()[0] == '@') + OS << '%'; + else + OS << '@'; + + if (Sec.Type == ELF::SHT_INIT_ARRAY) + OS << "init_array"; + else if (Sec.Type == ELF::SHT_FINI_ARRAY) + OS << "fini_array"; + else if (Sec.Type == ELF::SHT_PREINIT_ARRAY) + OS << "preinit_array"; + else if (Sec.Type == ELF::SHT_NOBITS) + OS << "nobits"; + else if (Sec.Type == ELF::SHT_NOTE) + OS << "note"; + else if (Sec.Type == ELF::SHT_PROGBITS) + OS << "progbits"; + else if (Sec.Type == ELF::SHT_X86_64_UNWIND) + OS << "unwind"; + else if (Sec.Type == ELF::SHT_MIPS_DWARF) + // Print hex value of the flag while we do not have + // any standard symbolic representation of the flag. 
+ OS << "0x7000001e"; + else if (Sec.Type == ELF::SHT_LLVM_ODRTAB) + OS << "llvm_odrtab"; + else if (Sec.Type == ELF::SHT_LLVM_LINKER_OPTIONS) + OS << "llvm_linker_options"; + else if (Sec.Type == ELF::SHT_LLVM_CALL_GRAPH_PROFILE) + OS << "llvm_call_graph_profile"; + else if (Sec.Type == ELF::SHT_LLVM_DEPENDENT_LIBRARIES) + OS << "llvm_dependent_libraries"; + else if (Sec.Type == ELF::SHT_LLVM_SYMPART) + OS << "llvm_sympart"; + else if (Sec.Type == ELF::SHT_LLVM_BB_ADDR_MAP) + OS << "llvm_bb_addr_map"; + else if (Sec.Type == ELF::SHT_LLVM_OFFLOADING) + OS << "llvm_offloading"; + else if (Sec.Type == ELF::SHT_LLVM_LTO) + OS << "llvm_lto"; + else if (Sec.Type == ELF::SHT_LLVM_JT_SIZES) + OS << "llvm_jt_sizes"; + else if (Sec.Type == ELF::SHT_LLVM_CFI_JUMP_TABLE) + OS << "llvm_cfi_jump_table"; + else + OS << "0x" << Twine::utohexstr(Sec.Type); + + if (Sec.EntrySize) { + assert((Sec.Flags & ELF::SHF_MERGE) || + Sec.Type == ELF::SHT_LLVM_CFI_JUMP_TABLE); + OS << "," << Sec.EntrySize; + } + + if (Sec.Flags & ELF::SHF_LINK_ORDER) { + OS << ","; + if (Sec.LinkedToSym) + printName(OS, Sec.LinkedToSym->getName()); + else + OS << '0'; + } + + if (Sec.Flags & ELF::SHF_GROUP) { + OS << ","; + printName(OS, Sec.Group.getPointer()->getName()); + if (Sec.isComdat()) + OS << ",comdat"; + } + + if (Sec.isUnique()) + OS << ",unique," << Sec.UniqueID; + + OS << '\n'; + + if (Subsection) { + OS << "\t.subsection\t" << Subsection; + OS << '\n'; + } +} diff --git a/llvm/lib/MC/MCAsmInfoGOFF.cpp b/llvm/lib/MC/MCAsmInfoGOFF.cpp index 3c81a46..0a5d1927 100644 --- a/llvm/lib/MC/MCAsmInfoGOFF.cpp +++ b/llvm/lib/MC/MCAsmInfoGOFF.cpp @@ -13,11 +13,12 @@ //===----------------------------------------------------------------------===// #include "llvm/MC/MCAsmInfoGOFF.h" +#include "llvm/BinaryFormat/GOFF.h" +#include "llvm/MC/MCSectionGOFF.h" +#include "llvm/Support/raw_ostream.h" using namespace llvm; -void MCAsmInfoGOFF::anchor() {} - MCAsmInfoGOFF::MCAsmInfoGOFF() { Data64bitsDirective = "\t.quad\t"; HasDotTypeDotSizeDirective = false; @@ -25,3 +26,136 @@ MCAsmInfoGOFF::MCAsmInfoGOFF() { PrivateLabelPrefix = "L#"; ZeroDirective = "\t.space\t"; } + +static void emitCATTR(raw_ostream &OS, StringRef Name, GOFF::ESDRmode Rmode, + GOFF::ESDAlignment Alignment, + GOFF::ESDLoadingBehavior LoadBehavior, + GOFF::ESDExecutable Executable, bool IsReadOnly, + uint32_t SortKey, uint8_t FillByteValue, + StringRef PartName) { + OS << Name << " CATTR "; + OS << "ALIGN(" << static_cast<unsigned>(Alignment) << ")," + << "FILL(" << static_cast<unsigned>(FillByteValue) << ")"; + switch (LoadBehavior) { + case GOFF::ESD_LB_Deferred: + OS << ",DEFLOAD"; + break; + case GOFF::ESD_LB_NoLoad: + OS << ",NOLOAD"; + break; + default: + break; + } + switch (Executable) { + case GOFF::ESD_EXE_CODE: + OS << ",EXECUTABLE"; + break; + case GOFF::ESD_EXE_DATA: + OS << ",NOTEXECUTABLE"; + break; + default: + break; + } + if (IsReadOnly) + OS << ",READONLY"; + if (Rmode != GOFF::ESD_RMODE_None) { + OS << ','; + OS << "RMODE("; + switch (Rmode) { + case GOFF::ESD_RMODE_24: + OS << "24"; + break; + case GOFF::ESD_RMODE_31: + OS << "31"; + break; + case GOFF::ESD_RMODE_64: + OS << "64"; + break; + case GOFF::ESD_RMODE_None: + break; + } + OS << ')'; + } + if (SortKey) + OS << ",PRIORITY(" << SortKey << ")"; + if (!PartName.empty()) + OS << ",PART(" << PartName << ")"; + OS << '\n'; +} + +static void emitXATTR(raw_ostream &OS, StringRef Name, + GOFF::ESDLinkageType Linkage, + GOFF::ESDExecutable Executable, + GOFF::ESDBindingScope BindingScope) { + OS << Name 
<< " XATTR "; + OS << "LINKAGE(" << (Linkage == GOFF::ESD_LT_OS ? "OS" : "XPLINK") << "),"; + if (Executable != GOFF::ESD_EXE_Unspecified) + OS << "REFERENCE(" << (Executable == GOFF::ESD_EXE_CODE ? "CODE" : "DATA") + << "),"; + if (BindingScope != GOFF::ESD_BSC_Unspecified) { + OS << "SCOPE("; + switch (BindingScope) { + case GOFF::ESD_BSC_Section: + OS << "SECTION"; + break; + case GOFF::ESD_BSC_Module: + OS << "MODULE"; + break; + case GOFF::ESD_BSC_Library: + OS << "LIBRARY"; + break; + case GOFF::ESD_BSC_ImportExport: + OS << "EXPORT"; + break; + default: + break; + } + OS << ')'; + } + OS << '\n'; +} + +void MCAsmInfoGOFF::printSwitchToSection(const MCSection &Section, + uint32_t Subsection, const Triple &T, + raw_ostream &OS) const { + auto &Sec = + const_cast<MCSectionGOFF &>(static_cast<const MCSectionGOFF &>(Section)); + switch (Sec.SymbolType) { + case GOFF::ESD_ST_SectionDefinition: { + OS << Sec.getName() << " CSECT\n"; + Sec.Emitted = true; + break; + } + case GOFF::ESD_ST_ElementDefinition: { + printSwitchToSection(*Sec.getParent(), Subsection, T, OS); + if (!Sec.Emitted) { + emitCATTR(OS, Sec.getName(), Sec.EDAttributes.Rmode, + Sec.EDAttributes.Alignment, Sec.EDAttributes.LoadBehavior, + GOFF::ESD_EXE_Unspecified, Sec.EDAttributes.IsReadOnly, 0, + Sec.EDAttributes.FillByteValue, StringRef()); + Sec.Emitted = true; + } else + OS << Sec.getName() << " CATTR\n"; + break; + } + case GOFF::ESD_ST_PartReference: { + MCSectionGOFF *ED = Sec.getParent(); + printSwitchToSection(*ED->getParent(), Subsection, T, OS); + if (!Sec.Emitted) { + emitCATTR(OS, ED->getName(), ED->getEDAttributes().Rmode, + ED->EDAttributes.Alignment, ED->EDAttributes.LoadBehavior, + Sec.PRAttributes.Executable, ED->EDAttributes.IsReadOnly, + Sec.PRAttributes.SortKey, ED->EDAttributes.FillByteValue, + Sec.getName()); + emitXATTR(OS, Sec.getName(), Sec.PRAttributes.Linkage, + Sec.PRAttributes.Executable, Sec.PRAttributes.BindingScope); + ED->Emitted = true; + Sec.Emitted = true; + } else + OS << ED->getName() << " CATTR PART(" << Sec.getName() << ")\n"; + break; + } + default: + llvm_unreachable("Wrong section type"); + } +} diff --git a/llvm/lib/MC/MCAsmInfoWasm.cpp b/llvm/lib/MC/MCAsmInfoWasm.cpp index ce6ec7e..5e44f48 100644 --- a/llvm/lib/MC/MCAsmInfoWasm.cpp +++ b/llvm/lib/MC/MCAsmInfoWasm.cpp @@ -12,9 +12,11 @@ //===----------------------------------------------------------------------===// #include "llvm/MC/MCAsmInfoWasm.h" -using namespace llvm; +#include "llvm/MC/MCSectionWasm.h" +#include "llvm/MC/MCSymbolWasm.h" +#include "llvm/Support/raw_ostream.h" -void MCAsmInfoWasm::anchor() {} +using namespace llvm; MCAsmInfoWasm::MCAsmInfoWasm() { HasIdentDirective = true; @@ -23,3 +25,80 @@ MCAsmInfoWasm::MCAsmInfoWasm() { PrivateGlobalPrefix = ".L"; PrivateLabelPrefix = ".L"; } + +static void printName(raw_ostream &OS, StringRef Name) { + if (Name.find_first_not_of("0123456789_." 
+ "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ") == Name.npos) { + OS << Name; + return; + } + OS << '"'; + for (const char *B = Name.begin(), *E = Name.end(); B < E; ++B) { + if (*B == '"') // Unquoted " + OS << "\\\""; + else if (*B != '\\') // Neither " or backslash + OS << *B; + else if (B + 1 == E) // Trailing backslash + OS << "\\\\"; + else { + OS << B[0] << B[1]; // Quoted character + ++B; + } + } + OS << '"'; +} + +void MCAsmInfoWasm::printSwitchToSection(const MCSection &Section, + uint32_t Subsection, const Triple &T, + raw_ostream &OS) const { + auto &Sec = static_cast<const MCSectionWasm &>(Section); + if (shouldOmitSectionDirective(Sec.getName())) { + OS << '\t' << Sec.getName(); + if (Subsection) + OS << '\t' << Subsection; + OS << '\n'; + return; + } + + OS << "\t.section\t"; + printName(OS, Sec.getName()); + OS << ",\""; + + if (Sec.IsPassive) + OS << 'p'; + if (Sec.Group) + OS << 'G'; + if (Sec.SegmentFlags & wasm::WASM_SEG_FLAG_STRINGS) + OS << 'S'; + if (Sec.SegmentFlags & wasm::WASM_SEG_FLAG_TLS) + OS << 'T'; + if (Sec.SegmentFlags & wasm::WASM_SEG_FLAG_RETAIN) + OS << 'R'; + + OS << '"'; + + OS << ','; + + // If comment string is '@', e.g. as on ARM - use '%' instead + if (getCommentString()[0] == '@') + OS << '%'; + else + OS << '@'; + + // TODO: Print section type. + + if (Sec.Group) { + OS << ","; + printName(OS, Sec.Group->getName()); + OS << ",comdat"; + } + + if (Sec.isUnique()) + OS << ",unique," << Sec.UniqueID; + + OS << '\n'; + + if (Subsection) + OS << "\t.subsection\t" << Subsection << '\n'; +} diff --git a/llvm/lib/MC/MCAsmInfoXCOFF.cpp b/llvm/lib/MC/MCAsmInfoXCOFF.cpp index 6ef11ba..0403b44 100644 --- a/llvm/lib/MC/MCAsmInfoXCOFF.cpp +++ b/llvm/lib/MC/MCAsmInfoXCOFF.cpp @@ -8,7 +8,11 @@ #include "llvm/MC/MCAsmInfoXCOFF.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCSectionXCOFF.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/Format.h" +#include "llvm/Support/raw_ostream.h" using namespace llvm; @@ -16,8 +20,6 @@ namespace llvm { extern cl::opt<cl::boolOrDefault> UseLEB128Directives; } -void MCAsmInfoXCOFF::anchor() {} - MCAsmInfoXCOFF::MCAsmInfoXCOFF() { IsAIX = true; IsLittleEndian = false; @@ -56,3 +58,121 @@ bool MCAsmInfoXCOFF::isAcceptableChar(char C) const { // any combination of these. 
return isAlnum(C) || C == '_' || C == '.'; } + +bool MCAsmInfoXCOFF::useCodeAlign(const MCSection &Sec) const { + return static_cast<const MCSectionXCOFF &>(Sec).getKind().isText(); +} + +MCSectionXCOFF::~MCSectionXCOFF() = default; + +void MCSectionXCOFF::printCsectDirective(raw_ostream &OS) const { + OS << "\t.csect " << QualName->getName() << "," << Log2(getAlign()) << '\n'; +} + +void MCAsmInfoXCOFF::printSwitchToSection(const MCSection &Section, uint32_t, + const Triple &T, + raw_ostream &OS) const { + auto &Sec = static_cast<const MCSectionXCOFF &>(Section); + if (Sec.getKind().isText()) { + if (Sec.getMappingClass() != XCOFF::XMC_PR) + report_fatal_error("Unhandled storage-mapping class for .text csect"); + + Sec.printCsectDirective(OS); + return; + } + + if (Sec.getKind().isReadOnly()) { + if (Sec.getMappingClass() != XCOFF::XMC_RO && + Sec.getMappingClass() != XCOFF::XMC_TD) + report_fatal_error("Unhandled storage-mapping class for .rodata csect."); + Sec.printCsectDirective(OS); + return; + } + + if (Sec.getKind().isReadOnlyWithRel()) { + if (Sec.getMappingClass() != XCOFF::XMC_RW && + Sec.getMappingClass() != XCOFF::XMC_RO && + Sec.getMappingClass() != XCOFF::XMC_TD) + report_fatal_error( + "Unexepected storage-mapping class for ReadOnlyWithRel kind"); + Sec.printCsectDirective(OS); + return; + } + + // Initialized TLS data. + if (Sec.getKind().isThreadData()) { + // We only expect XMC_TL here for initialized TLS data. + if (Sec.getMappingClass() != XCOFF::XMC_TL) + report_fatal_error("Unhandled storage-mapping class for .tdata csect."); + Sec.printCsectDirective(OS); + return; + } + + if (Sec.getKind().isData()) { + switch (Sec.getMappingClass()) { + case XCOFF::XMC_RW: + case XCOFF::XMC_DS: + case XCOFF::XMC_TD: + Sec.printCsectDirective(OS); + break; + case XCOFF::XMC_TC: + case XCOFF::XMC_TE: + break; + case XCOFF::XMC_TC0: + OS << "\t.toc\n"; + break; + default: + report_fatal_error("Unhandled storage-mapping class for .data csect."); + } + return; + } + + if (Sec.isCsect() && Sec.getMappingClass() == XCOFF::XMC_TD) { + // Common csect type (uninitialized storage) does not have to print + // csect directive for section switching unless it is local. + if (Sec.getKind().isCommon() && !Sec.getKind().isBSSLocal()) + return; + + assert(Sec.getKind().isBSS() && "Unexpected section kind for toc-data"); + Sec.printCsectDirective(OS); + return; + } + // Common csect type (uninitialized storage) does not have to print csect + // directive for section switching. + if (Sec.isCsect() && Sec.getCSectType() == XCOFF::XTY_CM) { + assert((Sec.getMappingClass() == XCOFF::XMC_RW || + Sec.getMappingClass() == XCOFF::XMC_BS || + Sec.getMappingClass() == XCOFF::XMC_UL) && + "Generated a storage-mapping class for a common/bss/tbss csect we " + "don't " + "understand how to switch to."); + // Common symbols and local zero-initialized symbols for TLS and Non-TLS are + // eligible for .bss/.tbss csect, getKind().isThreadBSS() is used to + // cover TLS common and zero-initialized local symbols since linkage type + // (in the GlobalVariable) is not accessible in this class. + assert((Sec.getKind().isBSSLocal() || Sec.getKind().isCommon() || + Sec.getKind().isThreadBSS()) && + "wrong symbol type for .bss/.tbss csect"); + // Don't have to print a directive for switching to section for commons + // and zero-initialized TLS data. The '.comm' and '.lcomm' directives of the + // variable will create the needed csect. 
+ return; + } + + // Zero-initialized TLS data with weak or external linkage are not eligible to + // be put into common csect. + if (Sec.getKind().isThreadBSS()) { + Sec.printCsectDirective(OS); + return; + } + + // XCOFF debug sections. + if (Sec.getKind().isMetadata() && Sec.isDwarfSect()) { + OS << "\n\t.dwsect " << format("0x%" PRIx32, *Sec.getDwarfSubtypeFlags()) + << '\n'; + OS << Sec.getName() << ':' << '\n'; + return; + } + + report_fatal_error("Printing for this SectionKind is unimplemented."); +} diff --git a/llvm/lib/MC/MCAsmStreamer.cpp b/llvm/lib/MC/MCAsmStreamer.cpp index 7119ef4..da51da4 100644 --- a/llvm/lib/MC/MCAsmStreamer.cpp +++ b/llvm/lib/MC/MCAsmStreamer.cpp @@ -532,8 +532,8 @@ void MCAsmStreamer::switchSection(MCSection *Section, uint32_t Subsection) { if (MCTargetStreamer *TS = getTargetStreamer()) { TS->changeSection(Cur.first, Section, Subsection, OS); } else { - Section->printSwitchToSection(*MAI, getContext().getTargetTriple(), OS, - Subsection); + MAI->printSwitchToSection(*Section, Subsection, + getContext().getTargetTriple(), OS); } } MCStreamer::switchSection(Section, Subsection); @@ -543,7 +543,7 @@ bool MCAsmStreamer::popSection() { if (!MCStreamer::popSection()) return false; auto [Sec, Subsec] = getCurrentSection(); - Sec->printSwitchToSection(*MAI, getContext().getTargetTriple(), OS, Subsec); + MAI->printSwitchToSection(*Sec, Subsec, getContext().getTargetTriple(), OS); return true; } @@ -1105,7 +1105,7 @@ void MCAsmStreamer::emitZerofill(MCSection *Section, MCSymbol *Symbol, // Note: a .zerofill directive does not switch sections. OS << ".zerofill "; - assert(Section->getVariant() == MCSection::SV_MachO && + assert(getContext().getObjectFileType() == MCContext::IsMachO && ".zerofill is a Mach-O specific directive"); // This is a mach-o specific directive. @@ -1130,7 +1130,7 @@ void MCAsmStreamer::emitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, // Instead of using the Section we'll just use the shortcut. - assert(Section->getVariant() == MCSection::SV_MachO && + assert(getContext().getObjectFileType() == MCContext::IsMachO && ".zerofill is a Mach-O specific directive"); // This is a mach-o specific directive and section. diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp index dbb2fd1..c24c82d 100644 --- a/llvm/lib/MC/MCExpr.cpp +++ b/llvm/lib/MC/MCExpr.cpp @@ -346,17 +346,16 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm, Displacement *= -1; } - // Track whether B is before a relaxable instruction and whether A is after - // a relaxable instruction. If SA and SB are separated by a linker-relaxable - // instruction, the difference cannot be resolved as it may be changed by - // the linker. + // Track whether B is before a relaxable instruction/alignment and whether A + // is after a relaxable instruction/alignment. If SA and SB are separated by + // a linker-relaxable instruction/alignment, the difference cannot be + // resolved as it may be changed by the linker. bool BBeforeRelax = false, AAfterRelax = false; for (auto F = FB; F; F = F->getNext()) { - auto DF = F->getKind() == MCFragment::FT_Data ? 
F : nullptr; - if (DF && DF->isLinkerRelaxable()) { - if (&*F != FB || SBOffset != DF->getContents().size()) + if (F && F->isLinkerRelaxable()) { + if (&*F != FB || SBOffset != F->getSize()) BBeforeRelax = true; - if (&*F != FA || SAOffset == DF->getContents().size()) + if (&*F != FA || SAOffset == F->getSize()) AAfterRelax = true; if (BBeforeRelax && AAfterRelax) return; @@ -370,17 +369,15 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm, } int64_t Num; - if (DF) { - Displacement += DF->getContents().size(); - } else if (F->getKind() == MCFragment::FT_Relaxable && + if (F->getKind() == MCFragment::FT_Data) { + Displacement += F->getFixedSize(); + } else if ((F->getKind() == MCFragment::FT_Relaxable || + F->getKind() == MCFragment::FT_Align) && Asm->hasFinalLayout()) { // Before finishLayout, a relaxable fragment's size is indeterminate. // After layout, during relocation generation, it can be treated as a // data fragment. Displacement += F->getSize(); - } else if (F->getKind() == MCFragment::FT_Align && Layout && - F->isLinkerRelaxable()) { - Displacement += Asm->computeFragmentSize(*F); } else if (auto *FF = dyn_cast<MCFillFragment>(F); FF && FF->getNumValues().evaluateAsAbsolute(Num)) { Displacement += Num * FF->getValueSize(); diff --git a/llvm/lib/MC/MCFragment.cpp b/llvm/lib/MC/MCFragment.cpp index 3c395e5..6cbdf74 100644 --- a/llvm/lib/MC/MCFragment.cpp +++ b/llvm/lib/MC/MCFragment.cpp @@ -35,7 +35,7 @@ MCFragment::MCFragment(FragmentType Kind, bool HasInstructions) } const MCSymbol *MCFragment::getAtom() const { - return cast<MCSectionMachO>(Parent)->getAtom(LayoutOrder); + return static_cast<const MCSectionMachO *>(Parent)->getAtom(LayoutOrder); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) diff --git a/llvm/lib/MC/MCMachOStreamer.cpp b/llvm/lib/MC/MCMachOStreamer.cpp index 4934815..1074669 100644 --- a/llvm/lib/MC/MCMachOStreamer.cpp +++ b/llvm/lib/MC/MCMachOStreamer.cpp @@ -443,13 +443,13 @@ void MCMachOStreamer::finishImpl() { // Set the fragment atom associations by tracking the last seen atom defining // symbol. for (MCSection &Sec : getAssembler()) { - cast<MCSectionMachO>(Sec).allocAtoms(); + static_cast<MCSectionMachO &>(Sec).allocAtoms(); const MCSymbol *CurrentAtom = nullptr; size_t I = 0; for (MCFragment &Frag : Sec) { if (const MCSymbol *Symbol = DefiningSymbolMap.lookup(&Frag)) CurrentAtom = Symbol; - cast<MCSectionMachO>(Sec).setAtom(I++, CurrentAtom); + static_cast<MCSectionMachO &>(Sec).setAtom(I++, CurrentAtom); } } diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp index f046552..9c7b05b 100644 --- a/llvm/lib/MC/MCObjectStreamer.cpp +++ b/llvm/lib/MC/MCObjectStreamer.cpp @@ -340,31 +340,33 @@ void MCObjectStreamer::emitInstToData(const MCInst &Inst, MCFragment *F = getCurrentFragment(); // Append the instruction to the data fragment. 
- size_t FixupStartIndex = F->getFixups().size(); size_t CodeOffset = F->getContents().size(); SmallVector<MCFixup, 1> Fixups; getAssembler().getEmitter().encodeInstruction( Inst, F->getContentsForAppending(), Fixups, STI); F->doneAppending(); - if (!Fixups.empty()) - F->appendFixups(Fixups); F->setHasInstructions(STI); + if (Fixups.empty()) + return; bool MarkedLinkerRelaxable = false; - for (auto &Fixup : MutableArrayRef(F->getFixups()).slice(FixupStartIndex)) { + for (auto &Fixup : Fixups) { Fixup.setOffset(Fixup.getOffset() + CodeOffset); - if (!Fixup.isLinkerRelaxable()) + if (!Fixup.isLinkerRelaxable() || MarkedLinkerRelaxable) continue; - F->setLinkerRelaxable(); + MarkedLinkerRelaxable = true; + // Set the fragment's order within the subsection for use by + // MCAssembler::relaxAlign. + auto *Sec = F->getParent(); + if (!Sec->isLinkerRelaxable()) + Sec->setLinkerRelaxable(); // Do not add data after a linker-relaxable instruction. The difference // between a new label and a label at or before the linker-relaxable // instruction cannot be resolved at assemble-time. - if (!MarkedLinkerRelaxable) { - MarkedLinkerRelaxable = true; - getCurrentSectionOnly()->setLinkerRelaxable(); - newFragment(); - } + F->setLinkerRelaxable(); + newFragment(); } + F->appendFixups(Fixups); } void MCObjectStreamer::emitInstToFragment(const MCInst &Inst, diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp index eda5e8c..9f64a98 100644 --- a/llvm/lib/MC/MCParser/AsmParser.cpp +++ b/llvm/lib/MC/MCParser/AsmParser.cpp @@ -3413,7 +3413,7 @@ bool AsmParser::parseDirectiveAlign(bool IsPow2, uint8_t ValueSize) { // Check whether we should use optimal code alignment for this .align // directive. - if (Section->useCodeAlign() && !HasFillExpr) { + if (MAI.useCodeAlign(*Section) && !HasFillExpr) { getStreamer().emitCodeAlignment( Align(Alignment), &getTargetParser().getSTI(), MaxBytesToFill); } else { diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp index f4684e6..780289e 100644 --- a/llvm/lib/MC/MCParser/MasmParser.cpp +++ b/llvm/lib/MC/MCParser/MasmParser.cpp @@ -4228,8 +4228,7 @@ bool MasmParser::emitAlignTo(int64_t Alignment) { // Check whether we should use optimal code alignment for this align // directive. 
const MCSection *Section = getStreamer().getCurrentSectionOnly(); - assert(Section && "must have section to emit alignment"); - if (Section->useCodeAlign()) { + if (MAI.useCodeAlign(*Section)) { getStreamer().emitCodeAlignment(Align(Alignment), &getTargetParser().getSTI(), /*MaxBytesToEmit=*/0); diff --git a/llvm/lib/MC/MCParser/WasmAsmParser.cpp b/llvm/lib/MC/MCParser/WasmAsmParser.cpp index 1f824b8..d97f4f5 100644 --- a/llvm/lib/MC/MCParser/WasmAsmParser.cpp +++ b/llvm/lib/MC/MCParser/WasmAsmParser.cpp @@ -252,7 +252,7 @@ public: if (TypeName == "function") { WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION); auto *Current = - cast<MCSectionWasm>(getStreamer().getCurrentSectionOnly()); + static_cast<MCSectionWasm *>(getStreamer().getCurrentSectionOnly()); if (Current->getGroup()) WasmSym->setComdat(true); } else if (TypeName == "global") diff --git a/llvm/lib/MC/MCSection.cpp b/llvm/lib/MC/MCSection.cpp index e738a22..4f28267 100644 --- a/llvm/lib/MC/MCSection.cpp +++ b/llvm/lib/MC/MCSection.cpp @@ -18,10 +18,9 @@ using namespace llvm; -MCSection::MCSection(SectionVariant V, StringRef Name, bool IsText, bool IsBss, - MCSymbol *Begin) +MCSection::MCSection(StringRef Name, bool IsText, bool IsBss, MCSymbol *Begin) : Begin(Begin), HasInstructions(false), IsRegistered(false), IsText(IsText), - IsBss(IsBss), LinkerRelaxable(false), Name(Name), Variant(V) { + IsBss(IsBss), LinkerRelaxable(false), Name(Name) { DummyFragment.setParent(this); } diff --git a/llvm/lib/MC/MCSectionCOFF.cpp b/llvm/lib/MC/MCSectionCOFF.cpp deleted file mode 100644 index 5bf1473..0000000 --- a/llvm/lib/MC/MCSectionCOFF.cpp +++ /dev/null @@ -1,117 +0,0 @@ -//===- lib/MC/MCSectionCOFF.cpp - COFF Code Section Representation --------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "llvm/MC/MCSectionCOFF.h" -#include "llvm/BinaryFormat/COFF.h" -#include "llvm/MC/MCSymbol.h" -#include "llvm/Support/raw_ostream.h" -#include <cassert> - -using namespace llvm; - -// shouldOmitSectionDirective - Decides whether a '.section' directive -// should be printed before the section name -bool MCSectionCOFF::shouldOmitSectionDirective(StringRef Name, - const MCAsmInfo &MAI) const { - if (COMDATSymbol || isUnique()) - return false; - - // FIXME: Does .section .bss/.data/.text work everywhere?? 
- if (Name == ".text" || Name == ".data" || Name == ".bss") - return true; - - return false; -} - -void MCSectionCOFF::setSelection(int Selection) const { - assert(Selection != 0 && "invalid COMDAT selection type"); - this->Selection = Selection; - Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT; -} - -void MCSectionCOFF::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T, - raw_ostream &OS, - uint32_t Subsection) const { - // standard sections don't require the '.section' - if (shouldOmitSectionDirective(getName(), MAI)) { - OS << '\t' << getName() << '\n'; - return; - } - - OS << "\t.section\t" << getName() << ",\""; - if (getCharacteristics() & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA) - OS << 'd'; - if (getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA) - OS << 'b'; - if (getCharacteristics() & COFF::IMAGE_SCN_MEM_EXECUTE) - OS << 'x'; - if (getCharacteristics() & COFF::IMAGE_SCN_MEM_WRITE) - OS << 'w'; - else if (getCharacteristics() & COFF::IMAGE_SCN_MEM_READ) - OS << 'r'; - else - OS << 'y'; - if (getCharacteristics() & COFF::IMAGE_SCN_LNK_REMOVE) - OS << 'n'; - if (getCharacteristics() & COFF::IMAGE_SCN_MEM_SHARED) - OS << 's'; - if ((getCharacteristics() & COFF::IMAGE_SCN_MEM_DISCARDABLE) && - !isImplicitlyDiscardable(getName())) - OS << 'D'; - if (getCharacteristics() & COFF::IMAGE_SCN_LNK_INFO) - OS << 'i'; - OS << '"'; - - // unique should be tail of .section directive. - if (isUnique() && !COMDATSymbol) - OS << ",unique," << UniqueID; - - if (getCharacteristics() & COFF::IMAGE_SCN_LNK_COMDAT) { - if (COMDATSymbol) - OS << ","; - else - OS << "\n\t.linkonce\t"; - switch (Selection) { - case COFF::IMAGE_COMDAT_SELECT_NODUPLICATES: - OS << "one_only"; - break; - case COFF::IMAGE_COMDAT_SELECT_ANY: - OS << "discard"; - break; - case COFF::IMAGE_COMDAT_SELECT_SAME_SIZE: - OS << "same_size"; - break; - case COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH: - OS << "same_contents"; - break; - case COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE: - OS << "associative"; - break; - case COFF::IMAGE_COMDAT_SELECT_LARGEST: - OS << "largest"; - break; - case COFF::IMAGE_COMDAT_SELECT_NEWEST: - OS << "newest"; - break; - default: - assert(false && "unsupported COFF selection type"); - break; - } - if (COMDATSymbol) { - OS << ","; - COMDATSymbol->print(OS, &MAI); - } - } - - if (isUnique() && COMDATSymbol) - OS << ",unique," << UniqueID; - - OS << '\n'; -} - -bool MCSectionCOFF::useCodeAlign() const { return isText(); } diff --git a/llvm/lib/MC/MCSectionDXContainer.cpp b/llvm/lib/MC/MCSectionDXContainer.cpp deleted file mode 100644 index 7eee59d..0000000 --- a/llvm/lib/MC/MCSectionDXContainer.cpp +++ /dev/null @@ -1,15 +0,0 @@ -//===- lib/MC/MCSectionDXContainer.cpp - DXContainer Section --------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "llvm/MC/MCSectionDXContainer.h" - -using namespace llvm; - -void MCSectionDXContainer::printSwitchToSection(const MCAsmInfo &, - const Triple &, raw_ostream &, - uint32_t) const {} diff --git a/llvm/lib/MC/MCSectionELF.cpp b/llvm/lib/MC/MCSectionELF.cpp deleted file mode 100644 index ef33f9c..0000000 --- a/llvm/lib/MC/MCSectionELF.cpp +++ /dev/null @@ -1,217 +0,0 @@ -//===- lib/MC/MCSectionELF.cpp - ELF Code Section Representation ----------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "llvm/MC/MCSectionELF.h" -#include "llvm/ADT/Twine.h" -#include "llvm/BinaryFormat/ELF.h" -#include "llvm/MC/MCAsmInfo.h" -#include "llvm/MC/MCExpr.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/TargetParser/Triple.h" -#include <cassert> - -using namespace llvm; - -// Decides whether a '.section' directive -// should be printed before the section name. -bool MCSectionELF::shouldOmitSectionDirective(StringRef Name, - const MCAsmInfo &MAI) const { - if (isUnique()) - return false; - - return MAI.shouldOmitSectionDirective(Name); -} - -static void printName(raw_ostream &OS, StringRef Name) { - if (Name.find_first_not_of("0123456789_." - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ") == Name.npos) { - OS << Name; - return; - } - OS << '"'; - for (const char *B = Name.begin(), *E = Name.end(); B < E; ++B) { - if (*B == '"') // Unquoted " - OS << "\\\""; - else if (*B != '\\') // Neither " or backslash - OS << *B; - else if (B + 1 == E) // Trailing backslash - OS << "\\\\"; - else { - OS << B[0] << B[1]; // Quoted character - ++B; - } - } - OS << '"'; -} - -void MCSectionELF::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T, - raw_ostream &OS, - uint32_t Subsection) const { - if (shouldOmitSectionDirective(getName(), MAI)) { - OS << '\t' << getName(); - if (Subsection) - OS << '\t' << Subsection; - OS << '\n'; - return; - } - - OS << "\t.section\t"; - printName(OS, getName()); - - // Handle the weird solaris syntax if desired. - if (MAI.usesSunStyleELFSectionSwitchSyntax() && - !(Flags & ELF::SHF_MERGE)) { - if (Flags & ELF::SHF_ALLOC) - OS << ",#alloc"; - if (Flags & ELF::SHF_EXECINSTR) - OS << ",#execinstr"; - if (Flags & ELF::SHF_WRITE) - OS << ",#write"; - if (Flags & ELF::SHF_EXCLUDE) - OS << ",#exclude"; - if (Flags & ELF::SHF_TLS) - OS << ",#tls"; - OS << '\n'; - return; - } - - OS << ",\""; - if (Flags & ELF::SHF_ALLOC) - OS << 'a'; - if (Flags & ELF::SHF_EXCLUDE) - OS << 'e'; - if (Flags & ELF::SHF_EXECINSTR) - OS << 'x'; - if (Flags & ELF::SHF_WRITE) - OS << 'w'; - if (Flags & ELF::SHF_MERGE) - OS << 'M'; - if (Flags & ELF::SHF_STRINGS) - OS << 'S'; - if (Flags & ELF::SHF_TLS) - OS << 'T'; - if (Flags & ELF::SHF_LINK_ORDER) - OS << 'o'; - if (Flags & ELF::SHF_GROUP) - OS << 'G'; - if (Flags & ELF::SHF_GNU_RETAIN) - OS << 'R'; - - // If there are os-specific flags, print them. - if (T.isOSSolaris()) - if (Flags & ELF::SHF_SUNW_NODISCARD) - OS << 'R'; - - // If there are target-specific flags, print them. 
- Triple::ArchType Arch = T.getArch(); - if (Arch == Triple::xcore) { - if (Flags & ELF::XCORE_SHF_CP_SECTION) - OS << 'c'; - if (Flags & ELF::XCORE_SHF_DP_SECTION) - OS << 'd'; - } else if (T.isARM() || T.isThumb()) { - if (Flags & ELF::SHF_ARM_PURECODE) - OS << 'y'; - } else if (T.isAArch64()) { - if (Flags & ELF::SHF_AARCH64_PURECODE) - OS << 'y'; - } else if (Arch == Triple::hexagon) { - if (Flags & ELF::SHF_HEX_GPREL) - OS << 's'; - } else if (Arch == Triple::x86_64) { - if (Flags & ELF::SHF_X86_64_LARGE) - OS << 'l'; - } - - OS << '"'; - - OS << ','; - - // If comment string is '@', e.g. as on ARM - use '%' instead - if (MAI.getCommentString()[0] == '@') - OS << '%'; - else - OS << '@'; - - if (Type == ELF::SHT_INIT_ARRAY) - OS << "init_array"; - else if (Type == ELF::SHT_FINI_ARRAY) - OS << "fini_array"; - else if (Type == ELF::SHT_PREINIT_ARRAY) - OS << "preinit_array"; - else if (Type == ELF::SHT_NOBITS) - OS << "nobits"; - else if (Type == ELF::SHT_NOTE) - OS << "note"; - else if (Type == ELF::SHT_PROGBITS) - OS << "progbits"; - else if (Type == ELF::SHT_X86_64_UNWIND) - OS << "unwind"; - else if (Type == ELF::SHT_MIPS_DWARF) - // Print hex value of the flag while we do not have - // any standard symbolic representation of the flag. - OS << "0x7000001e"; - else if (Type == ELF::SHT_LLVM_ODRTAB) - OS << "llvm_odrtab"; - else if (Type == ELF::SHT_LLVM_LINKER_OPTIONS) - OS << "llvm_linker_options"; - else if (Type == ELF::SHT_LLVM_CALL_GRAPH_PROFILE) - OS << "llvm_call_graph_profile"; - else if (Type == ELF::SHT_LLVM_DEPENDENT_LIBRARIES) - OS << "llvm_dependent_libraries"; - else if (Type == ELF::SHT_LLVM_SYMPART) - OS << "llvm_sympart"; - else if (Type == ELF::SHT_LLVM_BB_ADDR_MAP) - OS << "llvm_bb_addr_map"; - else if (Type == ELF::SHT_LLVM_OFFLOADING) - OS << "llvm_offloading"; - else if (Type == ELF::SHT_LLVM_LTO) - OS << "llvm_lto"; - else if (Type == ELF::SHT_LLVM_JT_SIZES) - OS << "llvm_jt_sizes"; - else if (Type == ELF::SHT_LLVM_CFI_JUMP_TABLE) - OS << "llvm_cfi_jump_table"; - else - OS << "0x" << Twine::utohexstr(Type); - - if (EntrySize) { - assert((Flags & ELF::SHF_MERGE) || Type == ELF::SHT_LLVM_CFI_JUMP_TABLE); - OS << "," << EntrySize; - } - - if (Flags & ELF::SHF_LINK_ORDER) { - OS << ","; - if (LinkedToSym) - printName(OS, LinkedToSym->getName()); - else - OS << '0'; - } - - if (Flags & ELF::SHF_GROUP) { - OS << ","; - printName(OS, Group.getPointer()->getName()); - if (isComdat()) - OS << ",comdat"; - } - - if (isUnique()) - OS << ",unique," << UniqueID; - - OS << '\n'; - - if (Subsection) { - OS << "\t.subsection\t" << Subsection; - OS << '\n'; - } -} - -bool MCSectionELF::useCodeAlign() const { - return getFlags() & ELF::SHF_EXECINSTR; -} diff --git a/llvm/lib/MC/MCSectionGOFF.cpp b/llvm/lib/MC/MCSectionGOFF.cpp deleted file mode 100644 index 8163e5b..0000000 --- a/llvm/lib/MC/MCSectionGOFF.cpp +++ /dev/null @@ -1,143 +0,0 @@ -//===- MCSectionGOFF.cpp - GOFF Code Section Representation ---------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "llvm/MC/MCSectionGOFF.h" -#include "llvm/BinaryFormat/GOFF.h" -#include "llvm/Support/raw_ostream.h" - -using namespace llvm; - -static void emitCATTR(raw_ostream &OS, StringRef Name, GOFF::ESDRmode Rmode, - GOFF::ESDAlignment Alignment, - GOFF::ESDLoadingBehavior LoadBehavior, - GOFF::ESDExecutable Executable, bool IsReadOnly, - uint32_t SortKey, uint8_t FillByteValue, - StringRef PartName) { - OS << Name << " CATTR "; - OS << "ALIGN(" << static_cast<unsigned>(Alignment) << ")," - << "FILL(" << static_cast<unsigned>(FillByteValue) << ")"; - switch (LoadBehavior) { - case GOFF::ESD_LB_Deferred: - OS << ",DEFLOAD"; - break; - case GOFF::ESD_LB_NoLoad: - OS << ",NOLOAD"; - break; - default: - break; - } - switch (Executable) { - case GOFF::ESD_EXE_CODE: - OS << ",EXECUTABLE"; - break; - case GOFF::ESD_EXE_DATA: - OS << ",NOTEXECUTABLE"; - break; - default: - break; - } - if (IsReadOnly) - OS << ",READONLY"; - if (Rmode != GOFF::ESD_RMODE_None) { - OS << ','; - OS << "RMODE("; - switch (Rmode) { - case GOFF::ESD_RMODE_24: - OS << "24"; - break; - case GOFF::ESD_RMODE_31: - OS << "31"; - break; - case GOFF::ESD_RMODE_64: - OS << "64"; - break; - case GOFF::ESD_RMODE_None: - break; - } - OS << ')'; - } - if (SortKey) - OS << ",PRIORITY(" << SortKey << ")"; - if (!PartName.empty()) - OS << ",PART(" << PartName << ")"; - OS << '\n'; -} - -static void emitXATTR(raw_ostream &OS, StringRef Name, - GOFF::ESDLinkageType Linkage, - GOFF::ESDExecutable Executable, - GOFF::ESDBindingScope BindingScope) { - OS << Name << " XATTR "; - OS << "LINKAGE(" << (Linkage == GOFF::ESD_LT_OS ? "OS" : "XPLINK") << "),"; - if (Executable != GOFF::ESD_EXE_Unspecified) - OS << "REFERENCE(" << (Executable == GOFF::ESD_EXE_CODE ? 
"CODE" : "DATA") - << "),"; - if (BindingScope != GOFF::ESD_BSC_Unspecified) { - OS << "SCOPE("; - switch (BindingScope) { - case GOFF::ESD_BSC_Section: - OS << "SECTION"; - break; - case GOFF::ESD_BSC_Module: - OS << "MODULE"; - break; - case GOFF::ESD_BSC_Library: - OS << "LIBRARY"; - break; - case GOFF::ESD_BSC_ImportExport: - OS << "EXPORT"; - break; - default: - break; - } - OS << ')'; - } - OS << '\n'; -} - -void MCSectionGOFF::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T, - raw_ostream &OS, - uint32_t Subsection) const { - switch (SymbolType) { - case GOFF::ESD_ST_SectionDefinition: { - OS << Name << " CSECT\n"; - Emitted = true; - break; - } - case GOFF::ESD_ST_ElementDefinition: { - getParent()->printSwitchToSection(MAI, T, OS, Subsection); - if (!Emitted) { - emitCATTR(OS, Name, EDAttributes.Rmode, EDAttributes.Alignment, - EDAttributes.LoadBehavior, GOFF::ESD_EXE_Unspecified, - EDAttributes.IsReadOnly, 0, EDAttributes.FillByteValue, - StringRef()); - Emitted = true; - } else - OS << Name << " CATTR\n"; - break; - } - case GOFF::ESD_ST_PartReference: { - MCSectionGOFF *ED = getParent(); - ED->getParent()->printSwitchToSection(MAI, T, OS, Subsection); - if (!Emitted) { - emitCATTR(OS, ED->getName(), ED->getEDAttributes().Rmode, - ED->EDAttributes.Alignment, ED->EDAttributes.LoadBehavior, - PRAttributes.Executable, ED->EDAttributes.IsReadOnly, - PRAttributes.SortKey, ED->EDAttributes.FillByteValue, Name); - emitXATTR(OS, Name, PRAttributes.Linkage, PRAttributes.Executable, - PRAttributes.BindingScope); - ED->Emitted = true; - Emitted = true; - } else - OS << ED->getName() << " CATTR PART(" << Name << ")\n"; - break; - } - default: - llvm_unreachable("Wrong section type"); - } -}
\ No newline at end of file diff --git a/llvm/lib/MC/MCSectionMachO.cpp b/llvm/lib/MC/MCSectionMachO.cpp index 67453ce..67c8235 100644 --- a/llvm/lib/MC/MCSectionMachO.cpp +++ b/llvm/lib/MC/MCSectionMachO.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "llvm/MC/MCSectionMachO.h" +#include "llvm/MC/MCAsmInfoDarwin.h" #include "llvm/MC/SectionKind.h" #include "llvm/Support/raw_ostream.h" @@ -92,7 +93,7 @@ ENTRY("" /*FIXME*/, S_ATTR_LOC_RELOC) MCSectionMachO::MCSectionMachO(StringRef Segment, StringRef Section, unsigned TAA, unsigned reserved2, SectionKind K, MCSymbol *Begin) - : MCSection(SV_MachO, Section, K.isText(), + : MCSection(Section, K.isText(), MachO::isVirtualSection(TAA & MachO::SECTION_TYPE), Begin), TypeAndAttributes(TAA), Reserved2(reserved2) { assert(Segment.size() <= 16 && Section.size() <= 16 && @@ -105,19 +106,20 @@ MCSectionMachO::MCSectionMachO(StringRef Segment, StringRef Section, } } -void MCSectionMachO::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T, - raw_ostream &OS, - uint32_t Subsection) const { - OS << "\t.section\t" << getSegmentName() << ',' << getName(); +void MCAsmInfoDarwin::printSwitchToSection(const MCSection &Section, uint32_t, + const Triple &T, + raw_ostream &OS) const { + auto &Sec = static_cast<const MCSectionMachO &>(Section); + OS << "\t.section\t" << Sec.getSegmentName() << ',' << Sec.getName(); // Get the section type and attributes. - unsigned TAA = getTypeAndAttributes(); + unsigned TAA = Sec.getTypeAndAttributes(); if (TAA == 0) { OS << '\n'; return; } - MachO::SectionType SectionType = getType(); + MachO::SectionType SectionType = Sec.getType(); assert(SectionType <= MachO::LAST_KNOWN_SECTION_TYPE && "Invalid SectionType specified!"); @@ -135,8 +137,8 @@ void MCSectionMachO::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T, if (SectionAttrs == 0) { // If we have a S_SYMBOL_STUBS size specified, print it along with 'none' as // the attribute specifier. - if (Reserved2 != 0) - OS << ",none," << Reserved2; + if (Sec.Reserved2 != 0) + OS << ",none," << Sec.Reserved2; OS << '\n'; return; } @@ -164,15 +166,11 @@ void MCSectionMachO::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T, assert(SectionAttrs == 0 && "Unknown section attributes!"); // If we have a S_SYMBOL_STUBS size specified, print it. - if (Reserved2 != 0) - OS << ',' << Reserved2; + if (Sec.Reserved2 != 0) + OS << ',' << Sec.Reserved2; OS << '\n'; } -bool MCSectionMachO::useCodeAlign() const { - return hasAttribute(MachO::S_ATTR_PURE_INSTRUCTIONS); -} - /// ParseSectionSpecifier - Parse the section specifier indicated by "Spec". /// This is a string that can appear after a .section directive in a mach-o /// flavored .s file. If successful, this fills in the specified Out diff --git a/llvm/lib/MC/MCSectionWasm.cpp b/llvm/lib/MC/MCSectionWasm.cpp deleted file mode 100644 index e25af1c..0000000 --- a/llvm/lib/MC/MCSectionWasm.cpp +++ /dev/null @@ -1,101 +0,0 @@ -//===- lib/MC/MCSectionWasm.cpp - Wasm Code Section Representation --------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "llvm/MC/MCSectionWasm.h" -#include "llvm/MC/MCAsmInfo.h" -#include "llvm/MC/MCExpr.h" -#include "llvm/MC/MCSymbolWasm.h" -#include "llvm/Support/raw_ostream.h" - -using namespace llvm; - -// Decides whether a '.section' directive -// should be printed before the section name. -bool MCSectionWasm::shouldOmitSectionDirective(StringRef Name, - const MCAsmInfo &MAI) const { - return MAI.shouldOmitSectionDirective(Name); -} - -static void printName(raw_ostream &OS, StringRef Name) { - if (Name.find_first_not_of("0123456789_." - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ") == Name.npos) { - OS << Name; - return; - } - OS << '"'; - for (const char *B = Name.begin(), *E = Name.end(); B < E; ++B) { - if (*B == '"') // Unquoted " - OS << "\\\""; - else if (*B != '\\') // Neither " or backslash - OS << *B; - else if (B + 1 == E) // Trailing backslash - OS << "\\\\"; - else { - OS << B[0] << B[1]; // Quoted character - ++B; - } - } - OS << '"'; -} - -void MCSectionWasm::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T, - raw_ostream &OS, - uint32_t Subsection) const { - - if (shouldOmitSectionDirective(getName(), MAI)) { - OS << '\t' << getName(); - if (Subsection) - OS << '\t' << Subsection; - OS << '\n'; - return; - } - - OS << "\t.section\t"; - printName(OS, getName()); - OS << ",\""; - - if (IsPassive) - OS << 'p'; - if (Group) - OS << 'G'; - if (SegmentFlags & wasm::WASM_SEG_FLAG_STRINGS) - OS << 'S'; - if (SegmentFlags & wasm::WASM_SEG_FLAG_TLS) - OS << 'T'; - if (SegmentFlags & wasm::WASM_SEG_FLAG_RETAIN) - OS << 'R'; - - OS << '"'; - - OS << ','; - - // If comment string is '@', e.g. as on ARM - use '%' instead - if (MAI.getCommentString()[0] == '@') - OS << '%'; - else - OS << '@'; - - // TODO: Print section type. - - if (Group) { - OS << ","; - printName(OS, Group->getName()); - OS << ",comdat"; - } - - if (isUnique()) - OS << ",unique," << UniqueID; - - OS << '\n'; - - if (Subsection) - OS << "\t.subsection\t" << Subsection << '\n'; -} - -bool MCSectionWasm::useCodeAlign() const { return false; } diff --git a/llvm/lib/MC/MCSectionXCOFF.cpp b/llvm/lib/MC/MCSectionXCOFF.cpp deleted file mode 100644 index 41043b2..0000000 --- a/llvm/lib/MC/MCSectionXCOFF.cpp +++ /dev/null @@ -1,134 +0,0 @@ -//===- lib/MC/MCSectionXCOFF.cpp - XCOFF Code Section Representation ------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "llvm/MC/MCSectionXCOFF.h" -#include "llvm/MC/MCAsmInfo.h" -#include "llvm/Support/Format.h" -#include "llvm/Support/raw_ostream.h" -namespace llvm { -class MCExpr; -class Triple; -} // namespace llvm - -using namespace llvm; - -MCSectionXCOFF::~MCSectionXCOFF() = default; - -void MCSectionXCOFF::printCsectDirective(raw_ostream &OS) const { - OS << "\t.csect " << QualName->getName() << "," << Log2(getAlign()) << '\n'; -} - -void MCSectionXCOFF::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T, - raw_ostream &OS, - uint32_t Subsection) const { - if (getKind().isText()) { - if (getMappingClass() != XCOFF::XMC_PR) - report_fatal_error("Unhandled storage-mapping class for .text csect"); - - printCsectDirective(OS); - return; - } - - if (getKind().isReadOnly()) { - if (getMappingClass() != XCOFF::XMC_RO && - getMappingClass() != XCOFF::XMC_TD) - report_fatal_error("Unhandled storage-mapping class for .rodata csect."); - printCsectDirective(OS); - return; - } - - if (getKind().isReadOnlyWithRel()) { - if (getMappingClass() != XCOFF::XMC_RW && - getMappingClass() != XCOFF::XMC_RO && - getMappingClass() != XCOFF::XMC_TD) - report_fatal_error( - "Unexepected storage-mapping class for ReadOnlyWithRel kind"); - printCsectDirective(OS); - return; - } - - // Initialized TLS data. - if (getKind().isThreadData()) { - // We only expect XMC_TL here for initialized TLS data. - if (getMappingClass() != XCOFF::XMC_TL) - report_fatal_error("Unhandled storage-mapping class for .tdata csect."); - printCsectDirective(OS); - return; - } - - if (getKind().isData()) { - switch (getMappingClass()) { - case XCOFF::XMC_RW: - case XCOFF::XMC_DS: - case XCOFF::XMC_TD: - printCsectDirective(OS); - break; - case XCOFF::XMC_TC: - case XCOFF::XMC_TE: - break; - case XCOFF::XMC_TC0: - OS << "\t.toc\n"; - break; - default: - report_fatal_error( - "Unhandled storage-mapping class for .data csect."); - } - return; - } - - if (isCsect() && getMappingClass() == XCOFF::XMC_TD) { - // Common csect type (uninitialized storage) does not have to print csect - // directive for section switching unless it is local. - if (getKind().isCommon() && !getKind().isBSSLocal()) - return; - - assert(getKind().isBSS() && "Unexpected section kind for toc-data"); - printCsectDirective(OS); - return; - } - // Common csect type (uninitialized storage) does not have to print csect - // directive for section switching. - if (isCsect() && getCSectType() == XCOFF::XTY_CM) { - assert((getMappingClass() == XCOFF::XMC_RW || - getMappingClass() == XCOFF::XMC_BS || - getMappingClass() == XCOFF::XMC_UL) && - "Generated a storage-mapping class for a common/bss/tbss csect we " - "don't " - "understand how to switch to."); - // Common symbols and local zero-initialized symbols for TLS and Non-TLS are - // eligible for .bss/.tbss csect, getKind().isThreadBSS() is used to cover - // TLS common and zero-initialized local symbols since linkage type (in the - // GlobalVariable) is not accessible in this class. - assert((getKind().isBSSLocal() || getKind().isCommon() || - getKind().isThreadBSS()) && - "wrong symbol type for .bss/.tbss csect"); - // Don't have to print a directive for switching to section for commons and - // zero-initialized TLS data. The '.comm' and '.lcomm' directives of the - // variable will create the needed csect. 
- return; - } - - // Zero-initialized TLS data with weak or external linkage are not eligible to - // be put into common csect. - if (getKind().isThreadBSS()) { - printCsectDirective(OS); - return; - } - - // XCOFF debug sections. - if (getKind().isMetadata() && isDwarfSect()) { - OS << "\n\t.dwsect " << format("0x%" PRIx32, *getDwarfSubtypeFlags()) - << '\n'; - OS << getName() << ':' << '\n'; - return; - } - - report_fatal_error("Printing for this SectionKind is unimplemented."); -} - -bool MCSectionXCOFF::useCodeAlign() const { return getKind().isText(); } diff --git a/llvm/lib/MC/MCStreamer.cpp b/llvm/lib/MC/MCStreamer.cpp index add021b..bc73981 100644 --- a/llvm/lib/MC/MCStreamer.cpp +++ b/llvm/lib/MC/MCStreamer.cpp @@ -56,12 +56,11 @@ void MCTargetStreamer::finish() {} void MCTargetStreamer::emitConstantPools() {} -void MCTargetStreamer::changeSection(const MCSection *CurSection, - MCSection *Section, uint32_t Subsection, - raw_ostream &OS) { - Section->printSwitchToSection(*Streamer.getContext().getAsmInfo(), - Streamer.getContext().getTargetTriple(), OS, - Subsection); +void MCTargetStreamer::changeSection(const MCSection *, MCSection *Sec, + uint32_t Subsection, raw_ostream &OS) { + auto &MAI = *Streamer.getContext().getAsmInfo(); + MAI.printSwitchToSection(*Sec, Subsection, + Streamer.getContext().getTargetTriple(), OS); } void MCTargetStreamer::emitDwarfFileDirective(StringRef Directive) { diff --git a/llvm/lib/MC/MCWasmStreamer.cpp b/llvm/lib/MC/MCWasmStreamer.cpp index 5891420c..e3ef111 100644 --- a/llvm/lib/MC/MCWasmStreamer.cpp +++ b/llvm/lib/MC/MCWasmStreamer.cpp @@ -58,7 +58,7 @@ void MCWasmStreamer::emitLabelAtPos(MCSymbol *S, SMLoc Loc, MCFragment &F, void MCWasmStreamer::changeSection(MCSection *Section, uint32_t Subsection) { MCAssembler &Asm = getAssembler(); - auto *SectionWasm = cast<MCSectionWasm>(Section); + auto *SectionWasm = static_cast<const MCSectionWasm *>(Section); const MCSymbol *Grp = SectionWasm->getGroup(); if (Grp) Asm.registerSymbol(*Grp); diff --git a/llvm/lib/MC/MCXCOFFStreamer.cpp b/llvm/lib/MC/MCXCOFFStreamer.cpp index 78e4b95..898ac5d 100644 --- a/llvm/lib/MC/MCXCOFFStreamer.cpp +++ b/llvm/lib/MC/MCXCOFFStreamer.cpp @@ -38,7 +38,7 @@ XCOFFObjectWriter &MCXCOFFStreamer::getWriter() { void MCXCOFFStreamer::changeSection(MCSection *Section, uint32_t Subsection) { MCObjectStreamer::changeSection(Section, Subsection); - auto *Sec = cast<MCSectionXCOFF>(Section); + auto *Sec = static_cast<const MCSectionXCOFF *>(Section); // We might miss calculating the symbols difference as absolute value before // adding fixups when symbol_A without the fragment set is the csect itself // and symbol_B is in it. 
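The MC hunks on either side of this point all implement one refactor: printSwitchToSection and useCodeAlign stop being virtual methods on the MCSection subclasses (the ELF, GOFF, Wasm, XCOFF and Mach-O bodies are deleted or folded into the corresponding MCAsmInfo classes), and the streamer now asks the MCAsmInfo to print the switch, passing the section as an argument. That is also why the checked cast<MCSectionFoo> calls throughout become plain static_cast: once the section hierarchy no longer carries its own virtual printing, the owning MCAsmInfo or object writer already knows the concrete section flavor it created, so an unchecked downcast suffices. A minimal sketch of the resulting dispatch shape, using stand-in types (Section, MachOSection, AsmInfo, DarwinAsmInfo and changeSection are illustrative names, not the real MC interfaces, and std::ostream stands in for raw_ostream):

#include <cstdint>
#include <iostream>
#include <ostream>

struct Section {                 // stand-in for MCSection: no print virtual anymore
  virtual ~Section() = default;
};

struct MachOSection : Section {  // stand-in for MCSectionMachO
  const char *Segment = "__TEXT";
  const char *Name = "__text";
};

struct AsmInfo {                 // stand-in for MCAsmInfo: owns the printing policy
  virtual ~AsmInfo() = default;
  virtual void printSwitchToSection(const Section &Sec, uint32_t Subsection,
                                    std::ostream &OS) const = 0;
  virtual bool useCodeAlign(const Section &) const { return false; }
};

struct DarwinAsmInfo : AsmInfo { // stand-in for MCAsmInfoDarwin
  void printSwitchToSection(const Section &Sec, uint32_t,
                            std::ostream &OS) const override {
    // Unchecked downcast: a Darwin AsmInfo only ever receives Mach-O sections.
    auto &MO = static_cast<const MachOSection &>(Sec);
    OS << "\t.section\t" << MO.Segment << ',' << MO.Name << '\n';
  }
};

// Stand-in for MCTargetStreamer::changeSection after the patch: the streamer
// no longer asks the section to print itself, it asks the AsmInfo.
void changeSection(const AsmInfo &MAI, const Section &Sec, uint32_t Subsection,
                   std::ostream &OS) {
  MAI.printSwitchToSection(Sec, Subsection, OS);
}

int main() {
  DarwinAsmInfo MAI;
  MachOSection Text;
  changeSection(MAI, Text, /*Subsection=*/0, std::cout); // "\t.section\t__TEXT,__text"
  return 0;
}

Under those assumptions the sketch prints the same directive the Mach-O override emits for the simplest section; the real methods additionally handle type and attribute suffixes, subsections, and the alignment query that the assembly parsers further down now route through getAsmInfo()->useCodeAlign(*Section).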
diff --git a/llvm/lib/MC/MachObjectWriter.cpp b/llvm/lib/MC/MachObjectWriter.cpp index 48d2fc6..7b5c3c0 100644 --- a/llvm/lib/MC/MachObjectWriter.cpp +++ b/llvm/lib/MC/MachObjectWriter.cpp @@ -126,7 +126,8 @@ uint64_t MachObjectWriter::getSymbolAddress(const MCSymbol &S) const { uint64_t MachObjectWriter::getPaddingSize(const MCAssembler &Asm, const MCSection *Sec) const { uint64_t EndAddr = getSectionAddress(Sec) + Asm.getSectionAddressSize(*Sec); - unsigned Next = cast<MCSectionMachO>(Sec)->getLayoutOrder() + 1; + unsigned Next = + static_cast<const MCSectionMachO *>(Sec)->getLayoutOrder() + 1; if (Next >= SectionOrder.size()) return 0; @@ -259,15 +260,12 @@ void MachObjectWriter::writeSegmentLoadCommand( } void MachObjectWriter::writeSection(const MCAssembler &Asm, - const MCSection &Sec, uint64_t VMAddr, + const MCSectionMachO &Sec, uint64_t VMAddr, uint64_t FileOffset, unsigned Flags, uint64_t RelocationsStart, unsigned NumRelocations) { - uint64_t SectionSize = Asm.getSectionAddressSize(Sec); - const MCSectionMachO &Section = cast<MCSectionMachO>(Sec); - // The offset is unused for virtual sections. - if (Section.isBssSection()) { + if (Sec.isBssSection()) { assert(Asm.getSectionFileSize(Sec) == 0 && "Invalid file size!"); FileOffset = 0; } @@ -275,11 +273,11 @@ void MachObjectWriter::writeSection(const MCAssembler &Asm, // struct section (68 bytes) or // struct section_64 (80 bytes) + uint64_t SectionSize = Asm.getSectionAddressSize(Sec); uint64_t Start = W.OS.tell(); (void) Start; - - writeWithPadding(Section.getName(), 16); - writeWithPadding(Section.getSegmentName(), 16); + writeWithPadding(Sec.getName(), 16); + writeWithPadding(Sec.getSegmentName(), 16); if (is64Bit()) { W.write<uint64_t>(VMAddr); // address W.write<uint64_t>(SectionSize); // size @@ -290,14 +288,14 @@ void MachObjectWriter::writeSection(const MCAssembler &Asm, assert(isUInt<32>(FileOffset) && "Cannot encode offset of section"); W.write<uint32_t>(FileOffset); - W.write<uint32_t>(Log2(Section.getAlign())); + W.write<uint32_t>(Log2(Sec.getAlign())); assert((!NumRelocations || isUInt<32>(RelocationsStart)) && "Cannot encode offset of relocations"); W.write<uint32_t>(NumRelocations ? RelocationsStart : 0); W.write<uint32_t>(NumRelocations); W.write<uint32_t>(Flags); W.write<uint32_t>(IndirectSymBase.lookup(&Sec)); // reserved1 - W.write<uint32_t>(Section.getStubSize()); // reserved2 + W.write<uint32_t>(Sec.getStubSize()); // reserved2 if (is64Bit()) W.write<uint32_t>(0); // reserved3 @@ -531,7 +529,7 @@ void MachObjectWriter::bindIndirectSymbols(MCAssembler &Asm) { // Report errors for use of .indirect_symbol not in a symbol pointer section // or stub section. for (IndirectSymbolData &ISD : IndirectSymbols) { - const MCSectionMachO &Section = cast<MCSectionMachO>(*ISD.Section); + const MCSectionMachO &Section = static_cast<MCSectionMachO &>(*ISD.Section); if (Section.getType() != MachO::S_NON_LAZY_SYMBOL_POINTERS && Section.getType() != MachO::S_LAZY_SYMBOL_POINTERS && @@ -545,7 +543,7 @@ void MachObjectWriter::bindIndirectSymbols(MCAssembler &Asm) { // Bind non-lazy symbol pointers first. 
for (auto [IndirectIndex, ISD] : enumerate(IndirectSymbols)) { - const auto &Section = cast<MCSectionMachO>(*ISD.Section); + const auto &Section = static_cast<MCSectionMachO &>(*ISD.Section); if (Section.getType() != MachO::S_NON_LAZY_SYMBOL_POINTERS && Section.getType() != MachO::S_THREAD_LOCAL_VARIABLE_POINTERS) @@ -559,7 +557,7 @@ void MachObjectWriter::bindIndirectSymbols(MCAssembler &Asm) { // Then lazy symbol pointers and symbol stubs. for (auto [IndirectIndex, ISD] : enumerate(IndirectSymbols)) { - const auto &Section = cast<MCSectionMachO>(*ISD.Section); + const auto &Section = static_cast<MCSectionMachO &>(*ISD.Section); if (Section.getType() != MachO::S_LAZY_SYMBOL_POINTERS && Section.getType() != MachO::S_SYMBOL_STUBS) @@ -684,13 +682,13 @@ void MachObjectWriter::computeSectionAddresses(const MCAssembler &Asm) { for (MCSection &Sec : Asm) { if (!Sec.isBssSection()) { SectionOrder.push_back(&Sec); - cast<MCSectionMachO>(Sec).setLayoutOrder(i++); + static_cast<MCSectionMachO &>(Sec).setLayoutOrder(i++); } } for (MCSection &Sec : Asm) { if (Sec.isBssSection()) { SectionOrder.push_back(&Sec); - cast<MCSectionMachO>(Sec).setLayoutOrder(i++); + static_cast<MCSectionMachO &>(Sec).setLayoutOrder(i++); } } @@ -907,7 +905,7 @@ uint64_t MachObjectWriter::writeObject() { // ... and then the section headers. uint64_t RelocTableEnd = SectionDataStart + SectionDataFileSize; for (const MCSection &Section : Asm) { - const auto &Sec = cast<MCSectionMachO>(Section); + const auto &Sec = static_cast<const MCSectionMachO &>(Section); std::vector<RelAndSymbol> &Relocs = Relocations[&Sec]; unsigned NumRelocs = Relocs.size(); uint64_t SectionStart = SectionDataStart + getSectionAddress(&Sec); diff --git a/llvm/lib/MC/WasmObjectWriter.cpp b/llvm/lib/MC/WasmObjectWriter.cpp index 3b99af4..bfd6334 100644 --- a/llvm/lib/MC/WasmObjectWriter.cpp +++ b/llvm/lib/MC/WasmObjectWriter.cpp @@ -480,7 +480,7 @@ void WasmObjectWriter::recordRelocation(const MCFragment &F, // The WebAssembly backend should never generate FKF_IsPCRel fixups assert(!Fixup.isPCRel()); - const auto &FixupSection = cast<MCSectionWasm>(*F.getParent()); + const auto &FixupSection = static_cast<MCSectionWasm &>(*F.getParent()); uint64_t C = Target.getConstant(); uint64_t FixupOffset = Asm->getFragmentOffset(F) + Fixup.getOffset(); MCContext &Ctx = getContext(); diff --git a/llvm/lib/MC/XCOFFObjectWriter.cpp b/llvm/lib/MC/XCOFFObjectWriter.cpp index 2f6785f..65f543b 100644 --- a/llvm/lib/MC/XCOFFObjectWriter.cpp +++ b/llvm/lib/MC/XCOFFObjectWriter.cpp @@ -550,13 +550,13 @@ CsectGroup &XCOFFWriter::getCsectGroup(const MCSectionXCOFF *MCSec) { static MCSectionXCOFF *getContainingCsect(const MCSymbolXCOFF *XSym) { if (XSym->isDefined()) - return cast<MCSectionXCOFF>(XSym->getFragment()->getParent()); + return static_cast<MCSectionXCOFF *>(XSym->getFragment()->getParent()); return XSym->getRepresentedCsect(); } void XCOFFWriter::executePostLayoutBinding() { for (const auto &S : *Asm) { - const auto *MCSec = cast<const MCSectionXCOFF>(&S); + auto *MCSec = static_cast<const MCSectionXCOFF *>(&S); assert(!SectionMap.contains(MCSec) && "Cannot add a section twice."); // If the name does not fit in the storage provided in the symbol table @@ -747,7 +747,7 @@ void XCOFFWriter::recordRelocation(const MCFragment &F, const MCFixup &Fixup, FixedValue = TOCEntryOffset; } } else if (Type == XCOFF::RelocationType::R_RBR) { - MCSectionXCOFF *ParentSec = cast<MCSectionXCOFF>(F.getParent()); + auto *ParentSec = static_cast<MCSectionXCOFF *>(F.getParent()); 
assert((SymASec->getMappingClass() == XCOFF::XMC_PR && ParentSec->getMappingClass() == XCOFF::XMC_PR) && "Only XMC_PR csect may have the R_RBR relocation."); @@ -768,7 +768,7 @@ void XCOFFWriter::recordRelocation(const MCFragment &F, const MCFixup &Fixup, } XCOFFRelocation Reloc = {Index, FixupOffsetInCsect, SignAndSize, Type}; - MCSectionXCOFF *RelocationSec = cast<MCSectionXCOFF>(F.getParent()); + auto *RelocationSec = static_cast<MCSectionXCOFF *>(F.getParent()); assert(SectionMap.contains(RelocationSec) && "Expected containing csect to exist in map."); SectionMap[RelocationSec]->Relocations.push_back(Reloc); diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def index bb7ccdb..fd89583 100644 --- a/llvm/lib/Passes/PassRegistry.def +++ b/llvm/lib/Passes/PassRegistry.def @@ -119,7 +119,6 @@ MODULE_PASS("module-inline", ModuleInlinerPass()) MODULE_PASS("name-anon-globals", NameAnonGlobalPass()) MODULE_PASS("no-op-module", NoOpModulePass()) MODULE_PASS("nsan", NumericalStabilitySanitizerPass()) -MODULE_PASS("objc-arc-apelim", ObjCARCAPElimPass()) MODULE_PASS("openmp-opt", OpenMPOptPass()) MODULE_PASS("openmp-opt-postlink", OpenMPOptPass(ThinOrFullLTOPhase::FullLTOPostLink)) diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp index 1ac340a..a22a17a 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp @@ -132,7 +132,8 @@ static bool canUseLocalRelocation(const MCSectionMachO &Section, // But only if they don't point to a few forbidden sections. if (!Symbol.isInSection()) return true; - const MCSectionMachO &RefSec = cast<MCSectionMachO>(Symbol.getSection()); + const MCSectionMachO &RefSec = + static_cast<MCSectionMachO &>(Symbol.getSection()); if (RefSec.getType() == MachO::S_CSTRING_LITERALS) return false; diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index ec6f4e2..ece6c10 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -12327,7 +12327,7 @@ bool ARMAsmParser::parseDirectiveEven(SMLoc L) { } assert(Section && "must have section to emit alignment"); - if (Section->useCodeAlign()) + if (getContext().getAsmInfo()->useCodeAlign(*Section)) getStreamer().emitCodeAlignment(Align(2), &getSTI()); else getStreamer().emitValueToAlignment(Align(2)); @@ -12525,7 +12525,7 @@ bool ARMAsmParser::parseDirectiveAlign(SMLoc L) { // '.align' is target specifically handled to mean 2**2 byte alignment. 
const MCSection *Section = getStreamer().getCurrentSectionOnly(); assert(Section && "must have section to emit alignment"); - if (Section->useCodeAlign()) + if (getContext().getAsmInfo()->useCodeAlign(*Section)) getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0); else getStreamer().emitValueToAlignment(Align(4), 0, 1, 0); diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp index c2c1bb4..0615ec9 100644 --- a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp +++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp @@ -24,8 +24,6 @@ AVRMCAsmInfo::AVRMCAsmInfo(const Triple &TT, const MCTargetOptions &Options) { CalleeSaveStackSlotSize = 2; CommentString = ";"; SeparatorString = "$"; - PrivateGlobalPrefix = ".L"; - PrivateLabelPrefix = ".L"; UsesELFSectionDirectiveForBSS = true; SupportsDebugInformation = true; } diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h index fab2713..1915fa8 100644 --- a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h +++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h @@ -14,7 +14,7 @@ #define LLVM_AVR_ASM_INFO_H #include "MCTargetDesc/AVRMCExpr.h" -#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCAsmInfoELF.h" #include "llvm/MC/MCExpr.h" namespace llvm { @@ -22,7 +22,7 @@ namespace llvm { class Triple; /// Specifies the format of AVR assembly files. -class AVRMCAsmInfo : public MCAsmInfo { +class AVRMCAsmInfo : public MCAsmInfoELF { public: explicit AVRMCAsmInfo(const Triple &TT, const MCTargetOptions &Options); void printSpecifierExpr(raw_ostream &OS, diff --git a/llvm/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h b/llvm/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h index 7b21684..63d6e6f 100644 --- a/llvm/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h +++ b/llvm/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h @@ -13,18 +13,19 @@ #ifndef LLVM_LIB_TARGET_BPF_MCTARGETDESC_BPFMCASMINFO_H #define LLVM_LIB_TARGET_BPF_MCTARGETDESC_BPFMCASMINFO_H -#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCAsmInfoELF.h" #include "llvm/TargetParser/Triple.h" namespace llvm { -class BPFMCAsmInfo : public MCAsmInfo { +class BPFMCAsmInfo : public MCAsmInfoELF { public: explicit BPFMCAsmInfo(const Triple &TT, const MCTargetOptions &Options) { if (TT.getArch() == Triple::bpfeb) IsLittleEndian = false; PrivateGlobalPrefix = ".L"; + PrivateLabelPrefix = "L"; WeakRefDirective = "\t.weak\t"; UsesELFSectionDirectiveForBSS = true; diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp index 8fa72bc..d9ea88c 100644 --- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp +++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp @@ -254,6 +254,7 @@ bool LoongArchAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) { MCFixup Fixup = MCFixup::create(0, Expr, FirstLiteralRelocationKind + ELF::R_LARCH_ALIGN); F.setVarFixups({Fixup}); + F.setLinkerRelaxable(); F.getParent()->setLinkerRelaxable(); return true; } diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp index feb4eb3..d9680c7 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp @@ -969,7 +969,7 @@ void MipsTargetELFStreamer::finish() { Align Alignment = Section.getAlign(); S.switchSection(&Section); - if (Section.useCodeAlign()) + if 
(getContext().getAsmInfo()->useCodeAlign(Section)) S.emitCodeAlignment(Alignment, &STI, Alignment.value()); else S.emitValueToAlignment(Alignment, 0, 1, Alignment.value()); diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp index 614b321..ce9cd12 100644 --- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp +++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp @@ -15,8 +15,6 @@ using namespace llvm; -void NVPTXMCAsmInfo::anchor() {} - NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Triple &TheTriple, const MCTargetOptions &Options) { if (TheTriple.getArch() == Triple::nvptx64) { diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h index 77c4dae..f071406 100644 --- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h +++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h @@ -19,8 +19,6 @@ namespace llvm { class Triple; class NVPTXMCAsmInfo : public MCAsmInfo { - virtual void anchor(); - public: explicit NVPTXMCAsmInfo(const Triple &TheTriple, const MCTargetOptions &Options); diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.cpp index 9f91143..329e3b5 100644 --- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.cpp +++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.cpp @@ -97,10 +97,7 @@ void NVPTXTargetStreamer::changeSection(const MCSection *CurSection, if (isDwarfSection(FI, Section)) { // Emit DWARF .file directives in the outermost scope. outputDwarfFileDirectives(); - OS << "\t.section"; - Section->printSwitchToSection(*getStreamer().getContext().getAsmInfo(), - getStreamer().getContext().getTargetTriple(), - OS, SubSection); + OS << "\t.section\t" << Section->getName() << '\n'; // DWARF sections are enclosed into braces - emit the open one. 
OS << "\t{\n"; HasSections = true; diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp index 8baf866..1af2f9c 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp @@ -220,8 +220,6 @@ bool PPCELFMCAsmInfo::evaluateAsRelocatableImpl(const MCSpecifierExpr &Expr, return evaluateAsRelocatable(Expr, Res, Asm); } -void PPCXCOFFMCAsmInfo::anchor() {} - PPCXCOFFMCAsmInfo::PPCXCOFFMCAsmInfo(bool Is64Bit, const Triple &T) { if (T.getArch() == Triple::ppc64le || T.getArch() == Triple::ppcle) report_fatal_error("XCOFF is not supported for little-endian targets"); diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h index 0f945b3..6af1bd7 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h @@ -33,8 +33,6 @@ public: }; class PPCXCOFFMCAsmInfo : public MCAsmInfoXCOFF { - void anchor() override; - public: explicit PPCXCOFFMCAsmInfo(bool is64Bit, const Triple &); void printSpecifierExpr(raw_ostream &OS, diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp index 54497d9..3dad0e8 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp @@ -213,7 +213,7 @@ public: void emitTCEntry(const MCSymbol &S, PPCMCExpr::Specifier Kind) override { if (const MCSymbolXCOFF *XSym = dyn_cast<MCSymbolXCOFF>(&S)) { MCSymbolXCOFF *TCSym = - cast<MCSectionXCOFF>(Streamer.getCurrentSectionOnly()) + static_cast<const MCSectionXCOFF *>(Streamer.getCurrentSectionOnly()) ->getQualNameSymbol(); // On AIX, we have TLS variable offsets (symbol@({gd|ie|le|ld}) depending // on the TLS access method (or model). For the general-dynamic access diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp index a091b21..ce1d51a 100644 --- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -2274,9 +2274,9 @@ void PPCAIXAsmPrinter::emitLinkage(const GlobalValue *GV, void PPCAIXAsmPrinter::SetupMachineFunction(MachineFunction &MF) { // Setup CurrentFnDescSym and its containing csect. - MCSectionXCOFF *FnDescSec = - cast<MCSectionXCOFF>(getObjFileLowering().getSectionForFunctionDescriptor( - &MF.getFunction(), TM)); + auto *FnDescSec = static_cast<MCSectionXCOFF *>( + getObjFileLowering().getSectionForFunctionDescriptor(&MF.getFunction(), + TM)); FnDescSec->setAlignment(Align(Subtarget->isPPC64() ? 
8 : 4)); CurrentFnDescSym = FnDescSec->getQualNameSymbol(); @@ -2669,9 +2669,9 @@ void PPCAIXAsmPrinter::emitTracebackTable() { MCSymbol *EHInfoSym = TargetLoweringObjectFileXCOFF::getEHInfoTableSymbol(MF); MCSymbol *TOCEntry = lookUpOrCreateTOCEntry(EHInfoSym, TOCType_EHBlock); - const MCSymbol *TOCBaseSym = - cast<MCSectionXCOFF>(getObjFileLowering().getTOCBaseSection()) - ->getQualNameSymbol(); + const MCSymbol *TOCBaseSym = static_cast<const MCSectionXCOFF *>( + getObjFileLowering().getTOCBaseSection()) + ->getQualNameSymbol(); const MCExpr *Exp = MCBinaryExpr::createSub(MCSymbolRefExpr::create(TOCEntry, Ctx), MCSymbolRefExpr::create(TOCBaseSym, Ctx), Ctx); @@ -2788,7 +2788,7 @@ void PPCAIXAsmPrinter::emitGlobalVariableHelper(const GlobalVariable *GV) { } } - MCSectionXCOFF *Csect = cast<MCSectionXCOFF>( + auto *Csect = static_cast<MCSectionXCOFF *>( getObjFileLowering().SectionForGlobal(GV, GVKind, TM)); // Switch to the containing csect. @@ -2869,9 +2869,9 @@ void PPCAIXAsmPrinter::emitFunctionDescriptor() { OutStreamer->emitValue(MCSymbolRefExpr::create(CurrentFnSym, OutContext), PointerSize); // Emit TOC base address. - const MCSymbol *TOCBaseSym = - cast<MCSectionXCOFF>(getObjFileLowering().getTOCBaseSection()) - ->getQualNameSymbol(); + const MCSymbol *TOCBaseSym = static_cast<const MCSectionXCOFF *>( + getObjFileLowering().getTOCBaseSection()) + ->getQualNameSymbol(); OutStreamer->emitValue(MCSymbolRefExpr::create(TOCBaseSym, OutContext), PointerSize); // Emit a null environment pointer. @@ -2996,10 +2996,10 @@ void PPCAIXAsmPrinter::emitEndOfAsmFile(Module &M) { Name += Prefix; Name += cast<MCSymbolXCOFF>(I.first.first)->getSymbolTableName(); MCSymbol *S = OutContext.getOrCreateSymbol(Name); - TCEntry = cast<MCSectionXCOFF>( + TCEntry = static_cast<MCSectionXCOFF *>( getObjFileLowering().getSectionForTOCEntry(S, TM)); } else { - TCEntry = cast<MCSectionXCOFF>( + TCEntry = static_cast<MCSectionXCOFF *>( getObjFileLowering().getSectionForTOCEntry(I.first.first, TM)); } OutStreamer->switchSection(TCEntry); @@ -3054,7 +3054,7 @@ bool PPCAIXAsmPrinter::doInitialization(Module &M) { return; SectionKind GOKind = getObjFileLowering().getKindForGlobal(GO, TM); - MCSectionXCOFF *Csect = cast<MCSectionXCOFF>( + auto *Csect = static_cast<MCSectionXCOFF *>( getObjFileLowering().SectionForGlobal(GO, GOKind, TM)); Align GOAlign = getGVAlignment(GO, GO->getDataLayout()); @@ -3316,9 +3316,9 @@ void PPCAIXAsmPrinter::emitTTypeReference(const GlobalValue *GV, GlobalType = TOCType_GlobalExternal; MCSymbol *TypeInfoSym = TM.getSymbol(GV); MCSymbol *TOCEntry = lookUpOrCreateTOCEntry(TypeInfoSym, GlobalType); - const MCSymbol *TOCBaseSym = - cast<MCSectionXCOFF>(getObjFileLowering().getTOCBaseSection()) - ->getQualNameSymbol(); + const MCSymbol *TOCBaseSym = static_cast<const MCSectionXCOFF *>( + getObjFileLowering().getTOCBaseSection()) + ->getQualNameSymbol(); auto &Ctx = OutStreamer->getContext(); const MCExpr *Exp = MCBinaryExpr::createSub(MCSymbolRefExpr::create(TOCEntry, Ctx), diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp index 2c37c3b..82e3b5c 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp @@ -320,6 +320,7 @@ bool RISCVAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) { MCFixup Fixup = MCFixup::create(0, Expr, FirstLiteralRelocationKind + ELF::R_RISCV_ALIGN); F.setVarFixups({Fixup}); + F.setLinkerRelaxable(); 
F.getParent()->setLinkerRelaxable(); return true; } diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp index b1ab76a..9fc0d81 100644 --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp @@ -1581,7 +1581,8 @@ void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF, // Set the register and all its subregisters. if (!MRI.def_empty(CSReg) || MRI.getUsedPhysRegsMask().test(CSReg)) { SavedRegs.set(CSReg); - llvm::for_each(SubRegs, [&](unsigned Reg) { return SavedRegs.set(Reg); }); + for (unsigned Reg : SubRegs) + SavedRegs.set(Reg); } // Combine to super register if all of its subregisters are marked. diff --git a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZHLASMAsmStreamer.cpp b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZHLASMAsmStreamer.cpp index 3ef6030..72bb372 100644 --- a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZHLASMAsmStreamer.cpp +++ b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZHLASMAsmStreamer.cpp @@ -69,8 +69,8 @@ void SystemZHLASMAsmStreamer::EmitEOL() { void SystemZHLASMAsmStreamer::changeSection(MCSection *Section, uint32_t Subsection) { - Section->printSwitchToSection(*MAI, getContext().getTargetTriple(), OS, - Subsection); + MAI->printSwitchToSection(*Section, Subsection, + getContext().getTargetTriple(), OS); MCStreamer::changeSection(Section, Subsection); } diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp index 19c9e9c..6ae69a4 100644 --- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp +++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp @@ -900,7 +900,8 @@ public: bool checkDataSection() { if (CurrentState != DataSection) { - auto *WS = cast<MCSectionWasm>(getStreamer().getCurrentSectionOnly()); + auto *WS = static_cast<const MCSectionWasm *>( + getStreamer().getCurrentSectionOnly()); if (WS && WS->isText()) return error("data directive must occur in a data segment: ", Lexer.getTok()); @@ -1218,7 +1219,8 @@ public: void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override { // Code below only applies to labels in text sections. 
- auto *CWS = cast<MCSectionWasm>(getStreamer().getCurrentSectionOnly()); + auto *CWS = static_cast<const MCSectionWasm *>( + getStreamer().getCurrentSectionOnly()); if (!CWS->isText()) return; diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp index 8213e51..d7671ed 100644 --- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -4803,7 +4803,7 @@ bool X86AsmParser::parseDirectiveEven(SMLoc L) { getStreamer().initSections(false, getSTI()); Section = getStreamer().getCurrentSectionOnly(); } - if (Section->useCodeAlign()) + if (getContext().getAsmInfo()->useCodeAlign(*Section)) getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0); else getStreamer().emitValueToAlignment(Align(2), 0, 1, 0); diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp index 37a7b37..90791fc 100644 --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -1838,14 +1838,15 @@ InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, return LT.first * *KindCost; static const CostKindTblEntry AVX512BWShuffleTbl[] = { - { TTI::SK_Broadcast, MVT::v32i16, { 1, 1, 1, 1 } }, // vpbroadcastw - { TTI::SK_Broadcast, MVT::v32f16, { 1, 1, 1, 1 } }, // vpbroadcastw - { TTI::SK_Broadcast, MVT::v64i8, { 1, 1, 1, 1 } }, // vpbroadcastb + { TTI::SK_Broadcast, MVT::v32i16, { 1, 3, 1, 1 } }, // vpbroadcastw + { TTI::SK_Broadcast, MVT::v32f16, { 1, 3, 1, 1 } }, // vpbroadcastw + { TTI::SK_Broadcast, MVT::v64i8, { 1, 3, 1, 1 } }, // vpbroadcastb - { TTI::SK_Reverse, MVT::v32i16, { 2, 2, 2, 2 } }, // vpermw - { TTI::SK_Reverse, MVT::v32f16, { 2, 2, 2, 2 } }, // vpermw + { TTI::SK_Reverse, MVT::v32i16, { 2, 6, 2, 4 } }, // vpermw + { TTI::SK_Reverse, MVT::v32f16, { 2, 6, 2, 4 } }, // vpermw { TTI::SK_Reverse, MVT::v16i16, { 2, 2, 2, 2 } }, // vpermw - { TTI::SK_Reverse, MVT::v64i8, { 2, 2, 2, 2 } }, // pshufb + vshufi64x2 + { TTI::SK_Reverse, MVT::v16f16, { 2, 2, 2, 2 } }, // vpermw + { TTI::SK_Reverse, MVT::v64i8, { 2, 9, 2, 3 } }, // pshufb + vshufi64x2 { TTI::SK_PermuteSingleSrc, MVT::v32i16, { 2, 2, 2, 2 } }, // vpermw { TTI::SK_PermuteSingleSrc, MVT::v32f16, { 2, 2, 2, 2 } }, // vpermw @@ -1874,18 +1875,25 @@ InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, return LT.first * *KindCost; static const CostKindTblEntry AVX512ShuffleTbl[] = { - {TTI::SK_Broadcast, MVT::v8f64, { 1, 1, 1, 1 } }, // vbroadcastsd - {TTI::SK_Broadcast, MVT::v16f32, { 1, 1, 1, 1 } }, // vbroadcastss - {TTI::SK_Broadcast, MVT::v8i64, { 1, 1, 1, 1 } }, // vpbroadcastq - {TTI::SK_Broadcast, MVT::v16i32, { 1, 1, 1, 1 } }, // vpbroadcastd - {TTI::SK_Broadcast, MVT::v32i16, { 1, 1, 1, 1 } }, // vpbroadcastw - {TTI::SK_Broadcast, MVT::v32f16, { 1, 1, 1, 1 } }, // vpbroadcastw - {TTI::SK_Broadcast, MVT::v64i8, { 1, 1, 1, 1 } }, // vpbroadcastb - - {TTI::SK_Reverse, MVT::v8f64, { 1, 3, 1, 1 } }, // vpermpd - {TTI::SK_Reverse, MVT::v16f32, { 1, 3, 1, 1 } }, // vpermps - {TTI::SK_Reverse, MVT::v8i64, { 1, 3, 1, 1 } }, // vpermq - {TTI::SK_Reverse, MVT::v16i32, { 1, 3, 1, 1 } }, // vpermd + {TTI::SK_Broadcast, MVT::v8f64, { 1, 3, 1, 1 } }, // vbroadcastsd + {TTI::SK_Broadcast, MVT::v4f64, { 1, 3, 1, 1 } }, // vbroadcastsd + {TTI::SK_Broadcast, MVT::v16f32, { 1, 3, 1, 1 } }, // vbroadcastss + {TTI::SK_Broadcast, MVT::v8f32, { 1, 3, 1, 1 } }, // vbroadcastss + {TTI::SK_Broadcast, MVT::v8i64, { 1, 3, 1, 1 } }, // vpbroadcastq + 
{TTI::SK_Broadcast, MVT::v4i64, { 1, 3, 1, 1 } }, // vpbroadcastq + {TTI::SK_Broadcast, MVT::v16i32, { 1, 3, 1, 1 } }, // vpbroadcastd + {TTI::SK_Broadcast, MVT::v8i32, { 1, 3, 1, 1 } }, // vpbroadcastd + {TTI::SK_Broadcast, MVT::v32i16, { 1, 3, 1, 1 } }, // vpbroadcastw + {TTI::SK_Broadcast, MVT::v16i16, { 1, 3, 1, 1 } }, // vpbroadcastw + {TTI::SK_Broadcast, MVT::v32f16, { 1, 3, 1, 1 } }, // vpbroadcastw + {TTI::SK_Broadcast, MVT::v16f16, { 1, 3, 1, 1 } }, // vpbroadcastw + {TTI::SK_Broadcast, MVT::v64i8, { 1, 3, 1, 1 } }, // vpbroadcastb + {TTI::SK_Broadcast, MVT::v32i8, { 1, 3, 1, 1 }}, // vpbroadcastb + + {TTI::SK_Reverse, MVT::v8f64, { 1, 5, 2, 3 } }, // vpermpd + {TTI::SK_Reverse, MVT::v16f32, { 1, 3, 2, 3 } }, // vpermps + {TTI::SK_Reverse, MVT::v8i64, { 1, 5, 2, 3 } }, // vpermq + {TTI::SK_Reverse, MVT::v16i32, { 1, 3, 2, 3 } }, // vpermd {TTI::SK_Reverse, MVT::v32i16, { 7, 7, 7, 7 } }, // per mca {TTI::SK_Reverse, MVT::v32f16, { 7, 7, 7, 7 } }, // per mca {TTI::SK_Reverse, MVT::v64i8, { 7, 7, 7, 7 } }, // per mca @@ -1973,21 +1981,24 @@ InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, return LT.first * *KindCost; static const CostKindTblEntry AVX2ShuffleTbl[] = { - { TTI::SK_Broadcast, MVT::v4f64, { 1, 1, 1, 1 } }, // vbroadcastpd - { TTI::SK_Broadcast, MVT::v8f32, { 1, 1, 1, 1 } }, // vbroadcastps - { TTI::SK_Broadcast, MVT::v4i64, { 1, 1, 1, 1 } }, // vpbroadcastq - { TTI::SK_Broadcast, MVT::v8i32, { 1, 1, 1, 1 } }, // vpbroadcastd - { TTI::SK_Broadcast, MVT::v16i16, { 1, 1, 1, 1 } }, // vpbroadcastw - { TTI::SK_Broadcast, MVT::v16f16, { 1, 1, 1, 1 } }, // vpbroadcastw - { TTI::SK_Broadcast, MVT::v32i8, { 1, 1, 1, 1 } }, // vpbroadcastb - - { TTI::SK_Reverse, MVT::v4f64, { 1, 1, 1, 1 } }, // vpermpd - { TTI::SK_Reverse, MVT::v8f32, { 1, 1, 1, 1 } }, // vpermps - { TTI::SK_Reverse, MVT::v4i64, { 1, 1, 1, 1 } }, // vpermq - { TTI::SK_Reverse, MVT::v8i32, { 1, 1, 1, 1 } }, // vpermd - { TTI::SK_Reverse, MVT::v16i16, { 2, 2, 2, 2 } }, // vperm2i128 + pshufb - { TTI::SK_Reverse, MVT::v16f16, { 2, 2, 2, 2 } }, // vperm2i128 + pshufb - { TTI::SK_Reverse, MVT::v32i8, { 2, 2, 2, 2 } }, // vperm2i128 + pshufb + { TTI::SK_Broadcast, MVT::v4f64, { 1, 3, 1, 2 } }, // vbroadcastpd + { TTI::SK_Broadcast, MVT::v8f32, { 1, 3, 1, 2 } }, // vbroadcastps + { TTI::SK_Broadcast, MVT::v4i64, { 1, 3, 1, 2 } }, // vpbroadcastq + { TTI::SK_Broadcast, MVT::v8i32, { 1, 3, 1, 2 } }, // vpbroadcastd + { TTI::SK_Broadcast, MVT::v16i16, { 1, 3, 1, 2 } }, // vpbroadcastw + { TTI::SK_Broadcast, MVT::v8i16, { 1, 3, 1, 1 } }, // vpbroadcastw + { TTI::SK_Broadcast, MVT::v16f16, { 1, 3, 1, 2 } }, // vpbroadcastw + { TTI::SK_Broadcast, MVT::v8f16, { 1, 3, 1, 1 } }, // vpbroadcastw + { TTI::SK_Broadcast, MVT::v32i8, { 1, 3, 1, 2 } }, // vpbroadcastb + { TTI::SK_Broadcast, MVT::v16i8, { 1, 3, 1, 1 } }, // vpbroadcastb + + { TTI::SK_Reverse, MVT::v4f64, { 1, 6, 1, 2 } }, // vpermpd + { TTI::SK_Reverse, MVT::v8f32, { 2, 7, 2, 4 } }, // vpermps + { TTI::SK_Reverse, MVT::v4i64, { 1, 6, 1, 2 } }, // vpermq + { TTI::SK_Reverse, MVT::v8i32, { 2, 7, 2, 4 } }, // vpermd + { TTI::SK_Reverse, MVT::v16i16, { 2, 9, 2, 4 } }, // vperm2i128 + pshufb + { TTI::SK_Reverse, MVT::v16f16, { 2, 9, 2, 4 } }, // vperm2i128 + pshufb + { TTI::SK_Reverse, MVT::v32i8, { 2, 9, 2, 4 } }, // vperm2i128 + pshufb { TTI::SK_Select, MVT::v16i16, { 1, 1, 1, 1 } }, // vpblendvb { TTI::SK_Select, MVT::v16f16, { 1, 1, 1, 1 } }, // vpblendvb @@ -2077,23 +2088,23 @@ InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, return 
LT.first * *KindCost; static const CostKindTblEntry AVX1ShuffleTbl[] = { - {TTI::SK_Broadcast, MVT::v4f64, {2,2,2,2}}, // vperm2f128 + vpermilpd - {TTI::SK_Broadcast, MVT::v8f32, {2,2,2,2}}, // vperm2f128 + vpermilps - {TTI::SK_Broadcast, MVT::v4i64, {2,2,2,2}}, // vperm2f128 + vpermilpd - {TTI::SK_Broadcast, MVT::v8i32, {2,2,2,2}}, // vperm2f128 + vpermilps - {TTI::SK_Broadcast, MVT::v16i16, {3,3,3,3}}, // vpshuflw + vpshufd + vinsertf128 - {TTI::SK_Broadcast, MVT::v16f16, {3,3,3,3}}, // vpshuflw + vpshufd + vinsertf128 - {TTI::SK_Broadcast, MVT::v32i8, {2,2,2,2}}, // vpshufb + vinsertf128 - - {TTI::SK_Reverse, MVT::v4f64, {2,2,2,2}}, // vperm2f128 + vpermilpd - {TTI::SK_Reverse, MVT::v8f32, {2,2,2,2}}, // vperm2f128 + vpermilps - {TTI::SK_Reverse, MVT::v4i64, {2,2,2,2}}, // vperm2f128 + vpermilpd - {TTI::SK_Reverse, MVT::v8i32, {2,2,2,2}}, // vperm2f128 + vpermilps - {TTI::SK_Reverse, MVT::v16i16, {4,4,4,4}}, // vextractf128 + 2*pshufb + {TTI::SK_Broadcast, MVT::v4f64, {2,3,2,3}}, // vperm2f128 + vpermilpd + {TTI::SK_Broadcast, MVT::v8f32, {2,3,2,3}}, // vperm2f128 + vpermilps + {TTI::SK_Broadcast, MVT::v4i64, {2,3,2,3}}, // vperm2f128 + vpermilpd + {TTI::SK_Broadcast, MVT::v8i32, {2,3,2,3}}, // vperm2f128 + vpermilps + {TTI::SK_Broadcast, MVT::v16i16, {2,3,3,4}}, // vpshuflw + vpshufd + vinsertf128 + {TTI::SK_Broadcast, MVT::v16f16, {2,3,3,4}}, // vpshuflw + vpshufd + vinsertf128 + {TTI::SK_Broadcast, MVT::v32i8, {3,4,3,6}}, // vpshufb + vinsertf128 + + {TTI::SK_Reverse, MVT::v4f64, {2,6,2,2}}, // vperm2f128 + vpermilpd + {TTI::SK_Reverse, MVT::v8f32, {2,7,2,4}}, // vperm2f128 + vpermilps + {TTI::SK_Reverse, MVT::v4i64, {2,6,2,2}}, // vperm2f128 + vpermilpd + {TTI::SK_Reverse, MVT::v8i32, {2,7,2,4}}, // vperm2f128 + vpermilps + {TTI::SK_Reverse, MVT::v16i16, {2,9,5,5}}, // vextractf128 + 2*pshufb // + vinsertf128 - {TTI::SK_Reverse, MVT::v16f16, {4,4,4,4}}, // vextractf128 + 2*pshufb + {TTI::SK_Reverse, MVT::v16f16, {2,9,5,5}}, // vextractf128 + 2*pshufb // + vinsertf128 - {TTI::SK_Reverse, MVT::v32i8, {4,4,4,4}}, // vextractf128 + 2*pshufb + {TTI::SK_Reverse, MVT::v32i8, {2,9,5,5}}, // vextractf128 + 2*pshufb // + vinsertf128 {TTI::SK_Select, MVT::v4i64, {1,1,1,1}}, // vblendpd @@ -2156,13 +2167,13 @@ InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, return LT.first * *KindCost; static const CostKindTblEntry SSSE3ShuffleTbl[] = { - {TTI::SK_Broadcast, MVT::v8i16, {1, 1, 1, 1}}, // pshufb - {TTI::SK_Broadcast, MVT::v8f16, {1, 1, 1, 1}}, // pshufb - {TTI::SK_Broadcast, MVT::v16i8, {1, 1, 1, 1}}, // pshufb + {TTI::SK_Broadcast, MVT::v8i16, {1, 3, 2, 2}}, // pshufb + {TTI::SK_Broadcast, MVT::v8f16, {1, 3, 2, 2}}, // pshufb + {TTI::SK_Broadcast, MVT::v16i8, {1, 3, 2, 2}}, // pshufb - {TTI::SK_Reverse, MVT::v8i16, {1, 1, 1, 1}}, // pshufb - {TTI::SK_Reverse, MVT::v8f16, {1, 1, 1, 1}}, // pshufb - {TTI::SK_Reverse, MVT::v16i8, {1, 1, 1, 1}}, // pshufb + {TTI::SK_Reverse, MVT::v8i16, {1, 2, 1, 2}}, // pshufb + {TTI::SK_Reverse, MVT::v8f16, {1, 2, 1, 2}}, // pshufb + {TTI::SK_Reverse, MVT::v16i8, {1, 2, 1, 2}}, // pshufb {TTI::SK_Select, MVT::v8i16, {3, 3, 3, 3}}, // 2*pshufb + por {TTI::SK_Select, MVT::v8f16, {3, 3, 3, 3}}, // 2*pshufb + por @@ -2192,16 +2203,16 @@ InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, {TTI::SK_Broadcast, MVT::v2f64, {1, 1, 1, 1}}, // shufpd {TTI::SK_Broadcast, MVT::v2i64, {1, 1, 1, 1}}, // pshufd {TTI::SK_Broadcast, MVT::v4i32, {1, 1, 1, 1}}, // pshufd - {TTI::SK_Broadcast, MVT::v8i16, {2, 2, 2, 2}}, // pshuflw + pshufd - 
{TTI::SK_Broadcast, MVT::v8f16, {2, 2, 2, 2}}, // pshuflw + pshufd - {TTI::SK_Broadcast, MVT::v16i8, {3, 3, 3, 3}}, // unpck + pshuflw + pshufd + {TTI::SK_Broadcast, MVT::v8i16, {1, 2, 2, 2}}, // pshuflw + pshufd + {TTI::SK_Broadcast, MVT::v8f16, {1, 2, 2, 2}}, // pshuflw + pshufd + {TTI::SK_Broadcast, MVT::v16i8, {2, 3, 3, 4}}, // unpck + pshuflw + pshufd {TTI::SK_Reverse, MVT::v2f64, {1, 1, 1, 1}}, // shufpd {TTI::SK_Reverse, MVT::v2i64, {1, 1, 1, 1}}, // pshufd {TTI::SK_Reverse, MVT::v4i32, {1, 1, 1, 1}}, // pshufd - {TTI::SK_Reverse, MVT::v8i16, {3, 3, 3, 3}}, // pshuflw + pshufhw + pshufd - {TTI::SK_Reverse, MVT::v8f16, {3, 3, 3, 3}}, // pshuflw + pshufhw + pshufd - {TTI::SK_Reverse, MVT::v16i8, {9, 9, 9, 9}}, // 2*pshuflw + 2*pshufhw + {TTI::SK_Reverse, MVT::v8i16, {2, 3, 3, 3}}, // pshuflw + pshufhw + pshufd + {TTI::SK_Reverse, MVT::v8f16, {2, 3, 3, 3}}, // pshuflw + pshufhw + pshufd + {TTI::SK_Reverse, MVT::v16i8, {5, 6,11,11}}, // 2*pshuflw + 2*pshufhw // + 2*pshufd + 2*unpck + packus {TTI::SK_Select, MVT::v2i64, {1, 1, 1, 1}}, // movsd diff --git a/llvm/lib/TargetParser/RISCVISAInfo.cpp b/llvm/lib/TargetParser/RISCVISAInfo.cpp index 17c9833..d6afb8a 100644 --- a/llvm/lib/TargetParser/RISCVISAInfo.cpp +++ b/llvm/lib/TargetParser/RISCVISAInfo.cpp @@ -858,16 +858,15 @@ void RISCVISAInfo::updateImplication() { StringRef ExtName = WorkList.pop_back_val(); auto Range = std::equal_range(std::begin(ImpliedExts), std::end(ImpliedExts), ExtName); - std::for_each(Range.first, Range.second, - [&](const ImpliedExtsEntry &Implied) { - const char *ImpliedExt = Implied.ImpliedExt; - auto [It, Inserted] = Exts.try_emplace(ImpliedExt); - if (!Inserted) - return; - auto Version = findDefaultVersion(ImpliedExt); - It->second = *Version; - WorkList.push_back(ImpliedExt); - }); + for (const ImpliedExtsEntry &Implied : llvm::make_range(Range)) { + const char *ImpliedExt = Implied.ImpliedExt; + auto [It, Inserted] = Exts.try_emplace(ImpliedExt); + if (!Inserted) + continue; + auto Version = findDefaultVersion(ImpliedExt); + It->second = *Version; + WorkList.push_back(ImpliedExt); + } } // Add Zcd if C and D are enabled. diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp index 0164fcd..81de0a5 100644 --- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp +++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp @@ -97,6 +97,8 @@ STATISTIC(MissingAllocForContextId, "Number of missing alloc nodes for context ids"); STATISTIC(SkippedCallsCloning, "Number of calls skipped during cloning due to unexpected operand"); +STATISTIC(MismatchedCloneAssignments, + "Number of callsites assigned to call multiple non-matching clones"); static cl::opt<std::string> DotFilePathPrefix( "memprof-dot-file-path-prefix", cl::init(""), cl::Hidden, @@ -2060,6 +2062,20 @@ static bool isMemProfClone(const Function &F) { return F.getName().contains(MemProfCloneSuffix); } +// Return the clone number of the given function by extracting it from the +// memprof suffix. Assumes the caller has already confirmed it is a memprof +// clone. 
+static unsigned getMemProfCloneNum(const Function &F) { + assert(isMemProfClone(F)); + auto Pos = F.getName().find_last_of('.'); + assert(Pos > 0); + unsigned CloneNo; + bool Err = F.getName().drop_front(Pos + 1).getAsInteger(10, CloneNo); + assert(!Err); + (void)Err; + return CloneNo; +} + std::string ModuleCallsiteContextGraph::getLabel(const Function *Func, const Instruction *Call, unsigned CloneNo) const { @@ -3979,7 +3995,22 @@ IndexCallsiteContextGraph::getAllocationCallType(const CallInfo &Call) const { void ModuleCallsiteContextGraph::updateCall(CallInfo &CallerCall, FuncInfo CalleeFunc) { - if (CalleeFunc.cloneNo() > 0) + auto *CurF = cast<CallBase>(CallerCall.call())->getCalledFunction(); + auto NewCalleeCloneNo = CalleeFunc.cloneNo(); + if (isMemProfClone(*CurF)) { + // If we already assigned this callsite to call a specific non-default + // clone (i.e. not the original function which is clone 0), ensure that we + // aren't trying to now update it to call a different clone, which is + // indicative of a bug in the graph or function assignment. + auto CurCalleeCloneNo = getMemProfCloneNum(*CurF); + if (CurCalleeCloneNo != NewCalleeCloneNo) { + LLVM_DEBUG(dbgs() << "Mismatch in call clone assignment: was " + << CurCalleeCloneNo << " now " << NewCalleeCloneNo + << "\n"); + MismatchedCloneAssignments++; + } + } + if (NewCalleeCloneNo > 0) cast<CallBase>(CallerCall.call())->setCalledFunction(CalleeFunc.func()); OREGetter(CallerCall.call()->getFunction()) .emit(OptimizationRemark(DEBUG_TYPE, "MemprofCall", CallerCall.call()) @@ -3995,7 +4026,19 @@ void IndexCallsiteContextGraph::updateCall(CallInfo &CallerCall, assert(CI && "Caller cannot be an allocation which should not have profiled calls"); assert(CI->Clones.size() > CallerCall.cloneNo()); - CI->Clones[CallerCall.cloneNo()] = CalleeFunc.cloneNo(); + auto NewCalleeCloneNo = CalleeFunc.cloneNo(); + auto &CurCalleeCloneNo = CI->Clones[CallerCall.cloneNo()]; + // If we already assigned this callsite to call a specific non-default + // clone (i.e. not the original function which is clone 0), ensure that we + // aren't trying to now update it to call a different clone, which is + // indicative of a bug in the graph or function assignment. + if (CurCalleeCloneNo != 0 && CurCalleeCloneNo != NewCalleeCloneNo) { + LLVM_DEBUG(dbgs() << "Mismatch in call clone assignment: was " + << CurCalleeCloneNo << " now " << NewCalleeCloneNo + << "\n"); + MismatchedCloneAssignments++; + } + CurCalleeCloneNo = NewCalleeCloneNo; } // Update the debug information attached to NewFunc to use the clone Name. Note @@ -4703,6 +4746,19 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() { // where the callers were assigned to different clones of a function. } + auto FindFirstAvailFuncClone = [&]() { + // Find first function in FuncClonesToCallMap without an assigned + // clone of this callsite Node. We should always have one + // available at this point due to the earlier cloning when the + // FuncClonesToCallMap size was smaller than the clone number. + for (auto &CF : FuncClonesToCallMap) { + if (!FuncCloneToCurNodeCloneMap.count(CF.first)) + return CF.first; + } + assert(false && + "Expected an available func clone for this callsite clone"); + }; + // See if we can use existing function clone. Walk through // all caller edges to see if any have already been assigned to // a clone of this callsite's function. If we can use it, do so. 
If not, @@ -4819,16 +4875,7 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() { // clone of OrigFunc for another caller during this iteration over // its caller edges. if (!FuncCloneAssignedToCurCallsiteClone) { - // Find first function in FuncClonesToCallMap without an assigned - // clone of this callsite Node. We should always have one - // available at this point due to the earlier cloning when the - // FuncClonesToCallMap size was smaller than the clone number. - for (auto &CF : FuncClonesToCallMap) { - if (!FuncCloneToCurNodeCloneMap.count(CF.first)) { - FuncCloneAssignedToCurCallsiteClone = CF.first; - break; - } - } + FuncCloneAssignedToCurCallsiteClone = FindFirstAvailFuncClone(); assert(FuncCloneAssignedToCurCallsiteClone); // Assign Clone to FuncCloneAssignedToCurCallsiteClone AssignCallsiteCloneToFuncClone( @@ -4842,6 +4889,31 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() { FuncCloneAssignedToCurCallsiteClone); } } + // If we didn't assign a function clone to this callsite clone yet, e.g. + // none of its callers has a non-null call, do the assignment here. + // We want to ensure that every callsite clone is assigned to some + // function clone, so that the call updates below work as expected. + // In particular if this is the original callsite, we want to ensure it + // is assigned to the original function, otherwise the original function + // will appear available for assignment to other callsite clones, + // leading to unintended effects. For one, the unknown and not updated + // callers will call into cloned paths leading to the wrong hints, + // because they still call the original function (clone 0). Also, + // because all callsites start out as being clone 0 by default, we can't + // easily distinguish between callsites explicitly assigned to clone 0 + // vs those never assigned, which can lead to multiple updates of the + // calls when invoking updateCall below, with mismatched clone values. + // TODO: Add a flag to the callsite nodes or some other mechanism to + // better distinguish and identify callsite clones that are not getting + // assigned to function clones as expected. + if (!FuncCloneAssignedToCurCallsiteClone) { + FuncCloneAssignedToCurCallsiteClone = FindFirstAvailFuncClone(); + assert(FuncCloneAssignedToCurCallsiteClone && + "No available func clone for this callsite clone"); + AssignCallsiteCloneToFuncClone( + FuncCloneAssignedToCurCallsiteClone, Call, Clone, + /*IsAlloc=*/AllocationCallToContextNodeMap.contains(Call)); + } } if (VerifyCCG) { checkNode<DerivedCCG, FuncTy, CallTy>(Node); diff --git a/llvm/lib/Transforms/ObjCARC/CMakeLists.txt b/llvm/lib/Transforms/ObjCARC/CMakeLists.txt index 80867db..4274667 100644 --- a/llvm/lib/Transforms/ObjCARC/CMakeLists.txt +++ b/llvm/lib/Transforms/ObjCARC/CMakeLists.txt @@ -2,7 +2,6 @@ add_llvm_component_library(LLVMObjCARCOpts ObjCARC.cpp ObjCARCOpts.cpp ObjCARCExpand.cpp - ObjCARCAPElim.cpp ObjCARCContract.cpp DependencyAnalysis.cpp ProvenanceAnalysis.cpp diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp deleted file mode 100644 index dceb2eb..0000000 --- a/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp +++ /dev/null @@ -1,156 +0,0 @@ -//===- ObjCARCAPElim.cpp - ObjC ARC Optimization --------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// \file -/// -/// This file defines ObjC ARC optimizations. ARC stands for Automatic -/// Reference Counting and is a system for managing reference counts for objects -/// in Objective C. -/// -/// This specific file implements optimizations which remove extraneous -/// autorelease pools. -/// -/// WARNING: This file knows about certain library functions. It recognizes them -/// by name, and hardwires knowledge of their semantics. -/// -/// WARNING: This file knows about how certain Objective-C library functions are -/// used. Naive LLVM IR transformations which would otherwise be -/// behavior-preserving may break these assumptions. -/// -//===----------------------------------------------------------------------===// - -#include "llvm/ADT/STLExtras.h" -#include "llvm/Analysis/ObjCARCAnalysisUtils.h" -#include "llvm/Analysis/ObjCARCInstKind.h" -#include "llvm/IR/Constants.h" -#include "llvm/IR/InstrTypes.h" -#include "llvm/IR/PassManager.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/ObjCARC.h" - -using namespace llvm; -using namespace llvm::objcarc; - -#define DEBUG_TYPE "objc-arc-ap-elim" - -namespace { - -/// Interprocedurally determine if calls made by the given call site can -/// possibly produce autoreleases. -bool MayAutorelease(const CallBase &CB, unsigned Depth = 0) { - if (const Function *Callee = CB.getCalledFunction()) { - if (!Callee->hasExactDefinition()) - return true; - for (const BasicBlock &BB : *Callee) { - for (const Instruction &I : BB) - if (const CallBase *JCB = dyn_cast<CallBase>(&I)) - // This recursion depth limit is arbitrary. It's just great - // enough to cover known interesting testcases. - if (Depth < 3 && !JCB->onlyReadsMemory() && - MayAutorelease(*JCB, Depth + 1)) - return true; - } - return false; - } - - return true; -} - -bool OptimizeBB(BasicBlock *BB) { - bool Changed = false; - - Instruction *Push = nullptr; - for (Instruction &Inst : llvm::make_early_inc_range(*BB)) { - switch (GetBasicARCInstKind(&Inst)) { - case ARCInstKind::AutoreleasepoolPush: - Push = &Inst; - break; - case ARCInstKind::AutoreleasepoolPop: - // If this pop matches a push and nothing in between can autorelease, - // zap the pair. - if (Push && cast<CallInst>(&Inst)->getArgOperand(0) == Push) { - Changed = true; - LLVM_DEBUG(dbgs() << "ObjCARCAPElim::OptimizeBB: Zapping push pop " - "autorelease pair:\n" - " Pop: " - << Inst << "\n" - << " Push: " << *Push - << "\n"); - Inst.eraseFromParent(); - Push->eraseFromParent(); - } - Push = nullptr; - break; - case ARCInstKind::CallOrUser: - if (MayAutorelease(cast<CallBase>(Inst))) - Push = nullptr; - break; - default: - break; - } - } - - return Changed; -} - -bool runImpl(Module &M) { - if (!EnableARCOpts) - return false; - - // If nothing in the Module uses ARC, don't do anything. - if (!ModuleHasARC(M)) - return false; - // Find the llvm.global_ctors variable, as the first step in - // identifying the global constructors. In theory, unnecessary autorelease - // pools could occur anywhere, but in practice it's pretty rare. Global - // ctors are a place where autorelease pools get inserted automatically, - // so it's pretty common for them to be unnecessary, and it's pretty - // profitable to eliminate them. 
- GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); - if (!GV) - return false; - - assert(GV->hasDefinitiveInitializer() && - "llvm.global_ctors is uncooperative!"); - - bool Changed = false; - - // Dig the constructor functions out of GV's initializer. - ConstantArray *Init = cast<ConstantArray>(GV->getInitializer()); - for (User::op_iterator OI = Init->op_begin(), OE = Init->op_end(); - OI != OE; ++OI) { - Value *Op = *OI; - // llvm.global_ctors is an array of three-field structs where the second - // members are constructor functions. - Function *F = dyn_cast<Function>(cast<ConstantStruct>(Op)->getOperand(1)); - // If the user used a constructor function with the wrong signature and - // it got bitcasted or whatever, look the other way. - if (!F) - continue; - // Only look at function definitions. - if (F->isDeclaration()) - continue; - // Only look at functions with one basic block. - if (std::next(F->begin()) != F->end()) - continue; - // Ok, a single-block constructor function definition. Try to optimize it. - Changed |= OptimizeBB(&F->front()); - } - - return Changed; -} - -} // namespace - -PreservedAnalyses ObjCARCAPElimPass::run(Module &M, ModuleAnalysisManager &AM) { - if (!runImpl(M)) - return PreservedAnalyses::all(); - PreservedAnalyses PA; - PA.preserveSet<CFGAnalyses>(); - return PA; -} diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp index 84d1c0b..9220abb 100644 --- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -1593,11 +1593,8 @@ bool MemCpyOptPass::performStackMoveOptzn(Instruction *Load, Instruction *Store, // since both llvm.lifetime.start and llvm.lifetime.end intrinsics // practically fill all the bytes of the alloca with an undefined // value, although conceptually marked as alive/dead. - int64_t Size = cast<ConstantInt>(UI->getOperand(0))->getSExtValue(); - if (Size < 0 || Size == DestSize) { - LifetimeMarkers.push_back(UI); - continue; - } + LifetimeMarkers.push_back(UI); + continue; } AAMetadataInstrs.insert(UI); @@ -1614,9 +1611,8 @@ bool MemCpyOptPass::performStackMoveOptzn(Instruction *Load, Instruction *Store, return true; }; - // Check that dest has no Mod/Ref, from the alloca to the Store, except full - // size lifetime intrinsics. And collect modref inst for the reachability - // check. + // Check that dest has no Mod/Ref, from the alloca to the Store. And collect + // modref inst for the reachability check. ModRefInfo DestModRef = ModRefInfo::NoModRef; MemoryLocation DestLoc(DestAlloca, LocationSize::precise(Size)); SmallVector<BasicBlock *, 8> ReachabilityWorklist; diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 99a96a8..6616e61f 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -2021,6 +2021,9 @@ public: /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR /// outside VPlan. 
std::pair<Value *, BasicBlock *> getMemRuntimeChecks() { + using namespace llvm::PatternMatch; + if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt())) + return {nullptr, nullptr}; return {MemRuntimeCheckCond, MemCheckBlock}; } @@ -7276,6 +7279,7 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan( VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader()); VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE); VPlanTransforms::simplifyRecipes(BestVPlan, *Legal->getWidestInductionType()); + VPlanTransforms::removeBranchOnConst(BestVPlan); VPlanTransforms::narrowInterleaveGroups( BestVPlan, BestVF, TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)); @@ -10072,12 +10076,6 @@ bool LoopVectorizePass::processLoop(Loop *L) { // Get user vectorization factor and interleave count. ElementCount UserVF = Hints.getWidth(); unsigned UserIC = Hints.getInterleave(); - if (LVL.hasUncountableEarlyExit() && UserIC != 1) { - UserIC = 1; - reportVectorizationInfo("Interleaving not supported for loops " - "with uncountable early exits", - "InterleaveEarlyExitDisabled", ORE, L); - } // Plan how to best vectorize. LVP.plan(UserVF, UserIC); @@ -10095,9 +10093,20 @@ bool LoopVectorizePass::processLoop(Loop *L) { unsigned SelectedIC = std::max(IC, UserIC); // Optimistically generate runtime checks if they are needed. Drop them if // they turn out to not be profitable. - if (VF.Width.isVector() || SelectedIC > 1) + if (VF.Width.isVector() || SelectedIC > 1) { Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC); + // Bail out early if either the SCEV or memory runtime checks are known to + // fail. In that case, the vector loop would never execute. + using namespace llvm::PatternMatch; + if (Checks.getSCEVChecks().first && + match(Checks.getSCEVChecks().first, m_One())) + return false; + if (Checks.getMemRuntimeChecks().first && + match(Checks.getMemRuntimeChecks().first, m_One())) + return false; + } + // Check if it is profitable to vectorize with runtime checks. bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled; @@ -10228,6 +10237,11 @@ bool LoopVectorizePass::processLoop(Loop *L) { L, PSE, LI, DT, TLI, TTI, AC, ORE, ElementCount::getFixed(1), ElementCount::getFixed(1), IC, &CM, BFI, PSI, Checks, BestPlan); + // TODO: Move to general VPlan pipeline once epilogue loops are also + // supported. + VPlanTransforms::runPass(VPlanTransforms::materializeVectorTripCount, + BestPlan, VF.Width, IC, PSE); + LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT, false); ORE->emit([&]() { @@ -10295,6 +10309,11 @@ bool LoopVectorizePass::processLoop(Loop *L) { InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, VF.MinProfitableTripCount, IC, &CM, BFI, PSI, Checks, BestPlan); + // TODO: Move to general VPlan pipeline once epilogue loops are also + // supported. 
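// [Editor's note: illustrative sketch, not part of the patch.] The hunks above
// give constant-folded runtime-check conditions special treatment: a memory
// check condition folded to false (m_ZeroInt) means the checks trivially pass,
// so no check block is needed, while a SCEV or memory check condition folded
// to true (m_One) means the checks always fail and the vector loop could never
// execute, so vectorization is abandoned. A minimal standalone helper using
// the same llvm::PatternMatch facilities could look like this; the function
// name is invented for illustration.
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

// Returns true if CheckCond is statically known to be true, i.e. the runtime
// checks are guaranteed to fail and the vector loop would never be reached.
static bool runtimeChecksKnownToFail(llvm::Value *CheckCond) {
  using namespace llvm::PatternMatch;
  return CheckCond && match(CheckCond, m_One());
}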
+ VPlanTransforms::runPass(VPlanTransforms::materializeVectorTripCount, + BestPlan, VF.Width, IC, PSE); + LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false); ++LoopsVectorized; diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp index 40a5565..25b9616 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp @@ -962,7 +962,11 @@ void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV, BackedgeTakenCount->setUnderlyingValue(TCMO); } - VectorTripCount.setUnderlyingValue(VectorTripCountV); + if (!VectorTripCount.getUnderlyingValue()) + VectorTripCount.setUnderlyingValue(VectorTripCountV); + else + assert(VectorTripCount.getUnderlyingValue() == VectorTripCountV && + "VectorTripCount set earlier must match VectorTripCountV"); IRBuilder<> Builder(State.CFG.PrevBB->getTerminator()); // FIXME: Model VF * UF computation completely in VPlan. diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 6655149..a5de593 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -1012,6 +1012,10 @@ public: ReductionStartVector, // Creates a step vector starting from 0 to VF with a step of 1. StepVector, + /// Extracts a single lane (first operand) from a set of vector operands. + /// The lane specifies an index into a vector formed by combining all vector + /// operands (all operands after the first one). + ExtractLane, }; @@ -1837,6 +1841,10 @@ public: getGEPNoWrapFlags(), getDebugLoc()); } + /// Return true if this VPVectorPointerRecipe corresponds to part 0. Note that + /// this is only accurate after the VPlan has been unrolled. + bool isFirstPart() const { return getUnrollPart(*this) == 0; } + /// Return the cost of this VPHeaderPHIRecipe. InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override { @@ -2304,14 +2312,15 @@ public: /// respective masks, ordered [I0, M0, I1, M1, I2, M2, ...]. Note that M0 can /// be omitted (implied by passing an odd number of operands) in which case /// all other incoming values are merged into it. - VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands) - : VPSingleDefRecipe(VPDef::VPBlendSC, Operands, Phi, Phi->getDebugLoc()) { + VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands, DebugLoc DL) + : VPSingleDefRecipe(VPDef::VPBlendSC, Operands, Phi, DL) { assert(Operands.size() > 0 && "Expected at least one operand!"); } VPBlendRecipe *clone() override { SmallVector<VPValue *> Ops(operands()); - return new VPBlendRecipe(cast<PHINode>(getUnderlyingValue()), Ops); + return new VPBlendRecipe(cast_or_null<PHINode>(getUnderlyingValue()), Ops, + getDebugLoc()); } VP_CLASSOF_IMPL(VPDef::VPBlendSC) @@ -4056,6 +4065,10 @@ public: /// Returns VF * UF of the vector loop region.
VPValue &getVFxUF() { return VFxUF; } + LLVMContext &getContext() const { + return getScalarHeader()->getIRBasicBlock()->getContext(); + } + void addVF(ElementCount VF) { VFs.insert(VF); } void setVF(ElementCount VF) { diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 3499e65..16072f2 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -110,6 +110,8 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) { case VPInstruction::BuildStructVector: case VPInstruction::BuildVector: return SetResultTyFromOp(); + case VPInstruction::ExtractLane: + return inferScalarType(R->getOperand(1)); case VPInstruction::FirstActiveLane: return Type::getIntNTy(Ctx, 64); case VPInstruction::ExtractLastElement: diff --git a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp index 194874a..6c1f53b 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp @@ -437,9 +437,12 @@ static void addCanonicalIVRecipes(VPlan &Plan, VPBasicBlock *HeaderVPBB, // We are about to replace the branch to exit the region. Remove the original // BranchOnCond, if there is any. + DebugLoc LatchDL = DL; if (!LatchVPBB->empty() && - match(&LatchVPBB->back(), m_BranchOnCond(m_VPValue()))) + match(&LatchVPBB->back(), m_BranchOnCond(m_VPValue()))) { + LatchDL = LatchVPBB->getTerminator()->getDebugLoc(); LatchVPBB->getTerminator()->eraseFromParent(); + } VPBuilder Builder(LatchVPBB); // Add a VPInstruction to increment the scalar canonical IV by VF * UF. @@ -452,7 +455,8 @@ static void addCanonicalIVRecipes(VPlan &Plan, VPBasicBlock *HeaderVPBB, // Add the BranchOnCount VPInstruction to the latch. Builder.createNaryOp(VPInstruction::BranchOnCount, - {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); + {CanonicalIVIncrement, &Plan.getVectorTripCount()}, + LatchDL); } void VPlanTransforms::prepareForVectorization( @@ -462,28 +466,27 @@ void VPlanTransforms::prepareForVectorization( VPDominatorTree VPDT; VPDT.recalculate(Plan); - VPBlockBase *HeaderVPB = Plan.getEntry()->getSingleSuccessor(); - canonicalHeaderAndLatch(HeaderVPB, VPDT); - VPBlockBase *LatchVPB = HeaderVPB->getPredecessors()[1]; + auto *HeaderVPBB = cast<VPBasicBlock>(Plan.getEntry()->getSingleSuccessor()); + canonicalHeaderAndLatch(HeaderVPBB, VPDT); + auto *LatchVPBB = cast<VPBasicBlock>(HeaderVPBB->getPredecessors()[1]); VPBasicBlock *VecPreheader = Plan.createVPBasicBlock("vector.ph"); VPBlockUtils::insertBlockAfter(VecPreheader, Plan.getEntry()); VPBasicBlock *MiddleVPBB = Plan.createVPBasicBlock("middle.block"); - // The canonical LatchVPB has the header block as last successor. If it has + // The canonical LatchVPBB has the header block as last successor. If it has // another successor, this successor is an exit block - insert middle block on // its edge. Otherwise, add middle block as another successor retaining header // as last. 
- if (LatchVPB->getNumSuccessors() == 2) { - VPBlockBase *LatchExitVPB = LatchVPB->getSuccessors()[0]; - VPBlockUtils::insertOnEdge(LatchVPB, LatchExitVPB, MiddleVPBB); + if (LatchVPBB->getNumSuccessors() == 2) { + VPBlockBase *LatchExitVPB = LatchVPBB->getSuccessors()[0]; + VPBlockUtils::insertOnEdge(LatchVPBB, LatchExitVPB, MiddleVPBB); } else { - VPBlockUtils::connectBlocks(LatchVPB, MiddleVPBB); - LatchVPB->swapSuccessors(); + VPBlockUtils::connectBlocks(LatchVPBB, MiddleVPBB); + LatchVPBB->swapSuccessors(); } - addCanonicalIVRecipes(Plan, cast<VPBasicBlock>(HeaderVPB), - cast<VPBasicBlock>(LatchVPB), InductionTy, IVDL); + addCanonicalIVRecipes(Plan, HeaderVPBB, LatchVPBB, InductionTy, IVDL); [[maybe_unused]] bool HandledUncountableEarlyExit = false; // Disconnect all early exits from the loop leaving it with a single exit from @@ -499,8 +502,7 @@ void VPlanTransforms::prepareForVectorization( assert(!HandledUncountableEarlyExit && "can handle exactly one uncountable early exit"); handleUncountableEarlyExit(cast<VPBasicBlock>(Pred), EB, Plan, - cast<VPBasicBlock>(HeaderVPB), - cast<VPBasicBlock>(LatchVPB), Range); + HeaderVPBB, LatchVPBB, Range); HandledUncountableEarlyExit = true; } else { for (VPRecipeBase &R : EB->phis()) @@ -564,15 +566,15 @@ void VPlanTransforms::prepareForVectorization( // the corresponding compare because they may have ended up with different // line numbers and we want to avoid awkward line stepping while debugging. // E.g., if the compare has got a line number inside the loop. - DebugLoc LatchDL = TheLoop->getLoopLatch()->getTerminator()->getDebugLoc(); + DebugLoc LatchDL = LatchVPBB->getTerminator()->getDebugLoc(); VPBuilder Builder(MiddleVPBB); VPValue *Cmp; if (!RequiresScalarEpilogueCheck) - Cmp = Plan.getOrAddLiveIn(ConstantInt::getFalse( - IntegerType::getInt1Ty(TripCount->getType()->getContext()))); + Cmp = Plan.getOrAddLiveIn( + ConstantInt::getFalse(IntegerType::getInt1Ty(Plan.getContext()))); else if (TailFolded) - Cmp = Plan.getOrAddLiveIn(ConstantInt::getTrue( - IntegerType::getInt1Ty(TripCount->getType()->getContext()))); + Cmp = Plan.getOrAddLiveIn( + ConstantInt::getTrue(IntegerType::getInt1Ty(Plan.getContext()))); else Cmp = Builder.createICmp(CmpInst::ICMP_EQ, Plan.getTripCount(), &Plan.getVectorTripCount(), LatchDL, "cmp.n"); @@ -646,7 +648,7 @@ void VPlanTransforms::attachCheckBlock(VPlan &Plan, Value *Cond, .createNaryOp(VPInstruction::BranchOnCond, {CondVPV}, Plan.getCanonicalIV()->getDebugLoc()); if (AddBranchWeights) { - MDBuilder MDB(Plan.getScalarHeader()->getIRBasicBlock()->getContext()); + MDBuilder MDB(Plan.getContext()); MDNode *BranchWeights = MDB.createBranchWeights(CheckBypassWeights, /*IsExpected=*/false); Term->addMetadata(LLVMContext::MD_prof, BranchWeights); diff --git a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp index fc8458c..3b3bbc3 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp @@ -251,8 +251,9 @@ void VPPredicator::convertPhisToBlends(VPBasicBlock *VPBB) { } OperandsWithMask.push_back(EdgeMask); } - PHINode *IRPhi = cast<PHINode>(PhiR->getUnderlyingValue()); - auto *Blend = new VPBlendRecipe(IRPhi, OperandsWithMask); + PHINode *IRPhi = cast_or_null<PHINode>(PhiR->getUnderlyingValue()); + auto *Blend = + new VPBlendRecipe(IRPhi, OperandsWithMask, PhiR->getDebugLoc()); Builder.insert(Blend); PhiR->replaceAllUsesWith(Blend); PhiR->eraseFromParent(); diff --git 
a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 0d6152b..225658b 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -427,6 +427,7 @@ unsigned VPUnrollPartAccessor<PartOpIdx>::getUnrollPart(const VPUser &U) const { } namespace llvm { +template class VPUnrollPartAccessor<1>; template class VPUnrollPartAccessor<2>; template class VPUnrollPartAccessor<3>; } @@ -863,6 +864,31 @@ Value *VPInstruction::generate(VPTransformState &State) { Res = Builder.CreateOr(Res, State.get(Op)); return State.VF.isScalar() ? Res : Builder.CreateOrReduce(Res); } + case VPInstruction::ExtractLane: { + Value *LaneToExtract = State.get(getOperand(0), true); + Type *IdxTy = State.TypeAnalysis.inferScalarType(getOperand(0)); + Value *Res = nullptr; + Value *RuntimeVF = getRuntimeVF(State.Builder, IdxTy, State.VF); + + for (unsigned Idx = 1; Idx != getNumOperands(); ++Idx) { + Value *VectorStart = + Builder.CreateMul(RuntimeVF, ConstantInt::get(IdxTy, Idx - 1)); + Value *VectorIdx = Idx == 1 + ? LaneToExtract + : Builder.CreateSub(LaneToExtract, VectorStart); + Value *Ext = State.VF.isScalar() + ? State.get(getOperand(Idx)) + : Builder.CreateExtractElement( + State.get(getOperand(Idx)), VectorIdx); + if (Res) { + Value *Cmp = Builder.CreateICmpUGE(LaneToExtract, VectorStart); + Res = Builder.CreateSelect(Cmp, Ext, Res); + } else { + Res = Ext; + } + } + return Res; + } case VPInstruction::FirstActiveLane: { if (getNumOperands() == 1) { Value *Mask = State.get(getOperand(0)); @@ -921,7 +947,8 @@ InstructionCost VPInstruction::computeCost(ElementCount VF, } switch (getOpcode()) { - case Instruction::ExtractElement: { + case Instruction::ExtractElement: + case VPInstruction::ExtractLane: { // Add on the cost of extracting the element. 
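// [Editor's note: illustrative sketch, not part of the patch.] The ExtractLane
// handling in generate() above selects one scalar from the logical
// concatenation of its vector operands (operand 0 is the lane index, operands
// 1..N are the per-part vectors). For a fixed vector width this reduces to
// plain index arithmetic; the scalable-VF case is what the runtime-VF multiply
// and compare/select chain above implement. A minimal LLVM-free model, with an
// invented name:
#include <cstddef>
#include <vector>

// Lane L of the concatenation of Parts (each of length VF) is element
// L % VF of part L / VF.
static int extractLaneModel(std::size_t Lane,
                            const std::vector<std::vector<int>> &Parts) {
  std::size_t VF = Parts.front().size();
  return Parts[Lane / VF][Lane % VF];
}
// e.g. extractLaneModel(5, {{0, 1, 2, 3}, {4, 5, 6, 7}}) == 5.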
auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF); return Ctx.TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, @@ -983,6 +1010,7 @@ bool VPInstruction::isVectorToScalar() const { return getOpcode() == VPInstruction::ExtractLastElement || getOpcode() == VPInstruction::ExtractPenultimateElement || getOpcode() == Instruction::ExtractElement || + getOpcode() == VPInstruction::ExtractLane || getOpcode() == VPInstruction::FirstActiveLane || getOpcode() == VPInstruction::ComputeAnyOfResult || getOpcode() == VPInstruction::ComputeFindIVResult || @@ -1048,6 +1076,7 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const { case VPInstruction::BuildVector: case VPInstruction::CalculateTripCountMinusVF: case VPInstruction::CanonicalIVIncrementForPart: + case VPInstruction::ExtractLane: case VPInstruction::ExtractLastElement: case VPInstruction::ExtractPenultimateElement: case VPInstruction::FirstActiveLane: @@ -1097,6 +1126,8 @@ bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const { case VPInstruction::ComputeAnyOfResult: case VPInstruction::ComputeFindIVResult: return Op == getOperand(1); + case VPInstruction::ExtractLane: + return Op == getOperand(0); }; llvm_unreachable("switch should return"); } @@ -1176,6 +1207,9 @@ void VPInstruction::print(raw_ostream &O, const Twine &Indent, case VPInstruction::BuildVector: O << "buildvector"; break; + case VPInstruction::ExtractLane: + O << "extract-lane"; + break; case VPInstruction::ExtractLastElement: O << "extract-last-element"; break; diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 3372bcc..935a4e4 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -774,10 +774,10 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan, using namespace VPlanPatternMatch; VPValue *Incoming, *Mask; - if (!match(Op, m_VPInstruction<Instruction::ExtractElement>( - m_VPValue(Incoming), + if (!match(Op, m_VPInstruction<VPInstruction::ExtractLane>( m_VPInstruction<VPInstruction::FirstActiveLane>( - m_VPValue(Mask))))) + m_VPValue(Mask)), + m_VPValue(Incoming)))) return nullptr; auto *WideIV = getOptimizableIVOf(Incoming); @@ -1172,6 +1172,14 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) { if (!Plan->isUnrolled()) return; + // VPVectorPointer for part 0 can be replaced by their start pointer. + if (auto *VecPtr = dyn_cast<VPVectorPointerRecipe>(&R)) { + if (VecPtr->isFirstPart()) { + VecPtr->replaceAllUsesWith(VecPtr->getOperand(0)); + return; + } + } + // VPScalarIVSteps for part 0 can be replaced by their start value, if only // the first lane is demanded. 
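// [Editor's note: illustrative sketch, not part of the patch.] The two part-0
// simplifications here (the VPVectorPointer rule just above and the
// VPScalarIVSteps rule that follows) rely on the same fact: after unrolling,
// part P of a consecutive access or IV step sequence is offset by P * VF from
// the original value, so part 0 is simply the original base pointer or start
// value. A trivial model of the pointer case, with an invented name:
#include <cstddef>

// With fixed vector width VF, unroll part `Part` covers elements
// [Part * VF, (Part + 1) * VF); part 0 therefore starts at Base itself.
template <typename T>
static T *vectorPointerForPartModel(T *Base, std::size_t VF, std::size_t Part) {
  return Base + Part * VF;
}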
if (auto *Steps = dyn_cast<VPScalarIVStepsRecipe>(Def)) { @@ -1307,8 +1315,9 @@ static void simplifyBlends(VPlan &Plan) { OperandsWithMask.push_back(Blend->getMask(I)); } - auto *NewBlend = new VPBlendRecipe( - cast<PHINode>(Blend->getUnderlyingValue()), OperandsWithMask); + auto *NewBlend = + new VPBlendRecipe(cast_or_null<PHINode>(Blend->getUnderlyingValue()), + OperandsWithMask, Blend->getDebugLoc()); NewBlend->insertBefore(&R); VPValue *DeadMask = Blend->getMask(StartIndex); @@ -1361,7 +1370,7 @@ static bool optimizeVectorInductionWidthForTCAndVFUF(VPlan &Plan, unsigned NewBitWidth = ComputeBitWidth(TC->getValue(), BestVF.getKnownMinValue() * BestUF); - LLVMContext &Ctx = Plan.getCanonicalIV()->getScalarType()->getContext(); + LLVMContext &Ctx = Plan.getContext(); auto *NewIVTy = IntegerType::get(Ctx, NewBitWidth); bool MadeChange = false; @@ -1883,9 +1892,7 @@ void VPlanTransforms::truncateToMinimalBitwidths( } } -/// Remove BranchOnCond recipes with true or false conditions together with -/// removing dead edges to their successors. -static void removeBranchOnConst(VPlan &Plan) { +void VPlanTransforms::removeBranchOnConst(VPlan &Plan) { using namespace llvm::VPlanPatternMatch; for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>( vp_depth_first_shallow(Plan.getEntry()))) { @@ -1908,12 +1915,9 @@ static void removeBranchOnConst(VPlan &Plan) { "There must be a single edge between VPBB and its successor"); // Values coming from VPBB into phi recipes of RemoveSucc are removed from // these recipes. - for (VPRecipeBase &R : RemovedSucc->phis()) { - auto *Phi = cast<VPPhiAccessors>(&R); - assert((!isa<VPIRPhi>(&R) || RemovedSucc->getNumPredecessors() == 1) && - "VPIRPhis must have a single predecessor"); - Phi->removeIncomingValueFor(VPBB); - } + for (VPRecipeBase &R : RemovedSucc->phis()) + cast<VPPhiAccessors>(&R)->removeIncomingValueFor(VPBB); + // Disconnect blocks and remove the terminator. RemovedSucc will be deleted // automatically on VPlan destruction if it becomes unreachable. VPBlockUtils::disconnectBlocks(VPBB, RemovedSucc); @@ -2515,8 +2519,8 @@ void VPlanTransforms::createInterleaveGroups( DL.getTypeAllocSize(getLoadStoreType(IRInsertPos)) * IG->getIndex(IRInsertPos), /*IsSigned=*/true); - VPValue *OffsetVPV = Plan.getOrAddLiveIn( - ConstantInt::get(IRInsertPos->getParent()->getContext(), -Offset)); + VPValue *OffsetVPV = + Plan.getOrAddLiveIn(ConstantInt::get(Plan.getContext(), -Offset)); VPBuilder B(InsertPos); Addr = InBounds ? B.createInBoundsPtrAdd(InsertPos->getAddr(), OffsetVPV) : B.createPtrAdd(InsertPos->getAddr(), OffsetVPV); @@ -2842,7 +2846,7 @@ void VPlanTransforms::handleUncountableEarlyExit( VPInstruction::FirstActiveLane, {CondToEarlyExit}, nullptr, "first.active.lane"); IncomingFromEarlyExit = EarlyExitB.createNaryOp( - Instruction::ExtractElement, {IncomingFromEarlyExit, FirstActiveLane}, + VPInstruction::ExtractLane, {FirstActiveLane, IncomingFromEarlyExit}, nullptr, "early.exit.value"); ExitIRI->setOperand(EarlyExitIdx, IncomingFromEarlyExit); } @@ -3093,6 +3097,29 @@ void VPlanTransforms::materializeBroadcasts(VPlan &Plan) { } } +void VPlanTransforms::materializeVectorTripCount( + VPlan &Plan, ElementCount BestVF, unsigned BestUF, + PredicatedScalarEvolution &PSE) { + assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan"); + assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan"); + + VPValue *TC = Plan.getTripCount(); + // Skip cases for which the trip count may be non-trivial to materialize. 
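// [Editor's note: illustrative sketch, not part of the patch.] The computation
// a few lines below materializes the vector trip count of a constant trip
// count TC as (TC udiv (VF * UF)) * (VF * UF), i.e. TC rounded down to a
// multiple of VF * UF. For example, TC = 1003 with VF = 4 and UF = 2 gives
// VFxUF = 8 and a vector trip count of 125 * 8 = 1000; the remaining 3
// iterations are left to the scalar epilogue. A standalone model with an
// invented name:
#include <cstdint>

// Round TC down to a multiple of VF * UF, mirroring the SCEV expression
// (TC /u VFxUF) * VFxUF built below for constant trip counts.
static uint64_t vectorTripCountModel(uint64_t TC, uint64_t VF, uint64_t UF) {
  uint64_t VFxUF = VF * UF;
  return (TC / VFxUF) * VFxUF;
}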
+ if (!Plan.hasScalarTail() || + Plan.getMiddleBlock()->getSingleSuccessor() == + Plan.getScalarPreheader() || + !TC->isLiveIn()) + return; + // Materialize vector trip counts for constants early if it can simply + // be computed as (Original TC / VF * UF) * VF * UF. + ScalarEvolution &SE = *PSE.getSE(); + auto *TCScev = SE.getSCEV(TC->getLiveInIRValue()); + const SCEV *VFxUF = SE.getElementCount(TCScev->getType(), BestVF * BestUF); + auto VecTCScev = SE.getMulExpr(SE.getUDivExpr(TCScev, VFxUF), VFxUF); + if (auto *NewC = dyn_cast<SCEVConstant>(VecTCScev)) + Plan.getVectorTripCount().setUnderlyingValue(NewC->getValue()); +} + /// Returns true if \p V is VPWidenLoadRecipe or VPInterleaveRecipe that can be /// converted to a narrower recipe. \p V is used by a wide recipe that feeds a /// store interleave group at index \p Idx, \p WideMember0 is the recipe feeding @@ -3350,7 +3377,7 @@ void VPlanTransforms::addBranchWeightToMiddleTerminator( if (VF.isScalable() && VScaleForTuning.has_value()) VectorStep *= *VScaleForTuning; assert(VectorStep > 0 && "trip count should not be zero"); - MDBuilder MDB(Plan.getScalarHeader()->getIRBasicBlock()->getContext()); + MDBuilder MDB(Plan.getContext()); MDNode *BranchWeights = MDB.createBranchWeights({1, VectorStep - 1}, /*IsExpected=*/false); MiddleTerm->addMetadata(LLVMContext::MD_prof, BranchWeights); diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index ab189f6..d5af6cd 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -224,6 +224,10 @@ struct VPlanTransforms { /// CanonicalIVTy as type for all un-typed live-ins in VPTypeAnalysis. static void simplifyRecipes(VPlan &Plan, Type &CanonicalIVTy); + /// Remove BranchOnCond recipes with true or false conditions together with + /// removing dead edges to their successors. + static void removeBranchOnConst(VPlan &Plan); + /// If there's a single exit block, optimize its phi recipes that use exiting /// IV values by feeding them precomputed end values instead, possibly taken /// one step backwards. @@ -234,6 +238,12 @@ struct VPlanTransforms { /// Add explicit broadcasts for live-ins and VPValues defined in \p Plan's entry block if they are used as vectors. static void materializeBroadcasts(VPlan &Plan); + // Materialize vector trip counts for constants early if it can simply be + // computed as (Original TC / VF * UF) * VF * UF. 
+ static void materializeVectorTripCount(VPlan &Plan, ElementCount BestVF, + unsigned BestUF, + PredicatedScalarEvolution &PSE); + /// Try to convert a plan with interleave groups with VF elements to a plan /// with the interleave groups replaced by wide loads and stores processing VF /// elements, if all transformed interleave groups access the full vector diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp index b89cd21..871e37e 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp @@ -363,6 +363,13 @@ void UnrollState::unrollBlock(VPBlockBase *VPB) { continue; } VPValue *Op0; + if (match(&R, m_VPInstruction<VPInstruction::ExtractLane>( + m_VPValue(Op0), m_VPValue(Op1)))) { + addUniformForAllParts(cast<VPInstruction>(&R)); + for (unsigned Part = 1; Part != UF; ++Part) + R.addOperand(getValueForPart(Op1, Part)); + continue; + } if (match(&R, m_VPInstruction<VPInstruction::ExtractLastElement>( m_VPValue(Op0))) || match(&R, m_VPInstruction<VPInstruction::ExtractPenultimateElement>( |