Diffstat (limited to 'llvm/lib')
-rw-r--r--   llvm/lib/MC/MCAssembler.cpp                      | 118
-rw-r--r--   llvm/lib/MC/MCExpr.cpp                           |  10
-rw-r--r--   llvm/lib/MC/MCFragment.cpp                       |  12
-rw-r--r--   llvm/lib/MC/MCObjectStreamer.cpp                 |   5
-rw-r--r--   llvm/lib/MC/MCStreamer.cpp                       |   2
-rw-r--r--   llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp   |  24
6 files changed, 126 insertions, 45 deletions
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index ad30b5c..62baeb9 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -298,6 +298,43 @@ bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
 
   return IsResolved;
 }
 
+/// Check if the branch crosses the boundary.
+///
+/// \param StartAddr start address of the fused/unfused branch.
+/// \param Size size of the fused/unfused branch.
+/// \param BoundaryAlignment alignment requirement of the branch.
+/// \returns true if the branch cross the boundary.
+static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
+                             Align BoundaryAlignment) {
+  uint64_t EndAddr = StartAddr + Size;
+  return (StartAddr >> Log2(BoundaryAlignment)) !=
+         ((EndAddr - 1) >> Log2(BoundaryAlignment));
+}
+
+/// Check if the branch is against the boundary.
+///
+/// \param StartAddr start address of the fused/unfused branch.
+/// \param Size size of the fused/unfused branch.
+/// \param BoundaryAlignment alignment requirement of the branch.
+/// \returns true if the branch is against the boundary.
+static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
+                              Align BoundaryAlignment) {
+  uint64_t EndAddr = StartAddr + Size;
+  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
+}
+
+/// Check if the branch needs padding.
+///
+/// \param StartAddr start address of the fused/unfused branch.
+/// \param Size size of the fused/unfused branch.
+/// \param BoundaryAlignment alignment requirement of the branch.
+/// \returns true if the branch needs padding.
+static bool needPadding(uint64_t StartAddr, uint64_t Size,
+                        Align BoundaryAlignment) {
+  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
+         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
+}
+
 uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                           const MCFragment &F) const {
   assert(getBackendPtr() && "Requires assembler backend");
@@ -358,6 +395,41 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
     return Size;
   }
 
+  case MCFragment::FT_NeverAlign: {
+    // Disclaimer: NeverAlign fragment size depends on the size of its immediate
+    // successor, but NeverAlign need not be a MCRelaxableFragment.
+    // NeverAlign fragment size is recomputed if the successor is relaxed:
+    // - If RelaxableFragment is relaxed, it gets invalidated by marking its
+    //   predecessor as LastValidFragment.
+    // - This forces the assembler to call MCAsmLayout::layoutFragment on that
+    //   relaxable fragment, which in turn will always ask the predecessor to
+    //   compute its size (see "computeFragmentSize(prev)" in layoutFragment).
+    //
+    // In short, the simplest way to ensure that computeFragmentSize() is sane
+    // is to establish the following rule: it should never examine fragments
+    // after the current fragment in the section. If we logically need to
+    // examine any fragment after the current fragment, we need to do that using
+    // relaxation, inside MCAssembler::layoutSectionOnce.
+    const MCNeverAlignFragment &NAF = cast<MCNeverAlignFragment>(F);
+    const MCFragment *NF = F.getNextNode();
+    uint64_t Offset = Layout.getFragmentOffset(&NAF);
+    size_t NextFragSize = 0;
+    if (const auto *NextFrag = dyn_cast<MCRelaxableFragment>(NF)) {
+      NextFragSize = NextFrag->getContents().size();
+    } else if (const auto *NextFrag = dyn_cast<MCDataFragment>(NF)) {
+      NextFragSize = NextFrag->getContents().size();
+    } else {
+      llvm_unreachable("Didn't find the expected fragment after NeverAlign");
+    }
+    // Check if the next fragment ends at the alignment we want to avoid.
+    if (isAgainstBoundary(Offset, NextFragSize, Align(NAF.getAlignment()))) {
+      // Avoid this alignment by introducing minimum nop.
+      assert(getBackend().getMinimumNopSize() != NAF.getAlignment());
+      return getBackend().getMinimumNopSize();
+    }
+    return 0;
+  }
+
   case MCFragment::FT_Org: {
     const MCOrgFragment &OF = cast<MCOrgFragment>(F);
     MCValue Value;
@@ -581,6 +653,15 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
     break;
   }
 
+  case MCFragment::FT_NeverAlign: {
+    const MCNeverAlignFragment &NAF = cast<MCNeverAlignFragment>(F);
+    if (!Asm.getBackend().writeNopData(OS, FragmentSize,
+                                       &NAF.getSubtargetInfo()))
+      report_fatal_error("unable to write nop sequence of " +
+                         Twine(FragmentSize) + " bytes");
+    break;
+  }
+
   case MCFragment::FT_Data:
     ++stats::EmittedDataFragments;
     OS << cast<MCDataFragment>(F).getContents();
@@ -1052,43 +1133,6 @@ bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
   return OldSize != LF.getContents().size();
 }
 
-/// Check if the branch crosses the boundary.
-///
-/// \param StartAddr start address of the fused/unfused branch.
-/// \param Size size of the fused/unfused branch.
-/// \param BoundaryAlignment alignment requirement of the branch.
-/// \returns true if the branch cross the boundary.
-static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
-                             Align BoundaryAlignment) {
-  uint64_t EndAddr = StartAddr + Size;
-  return (StartAddr >> Log2(BoundaryAlignment)) !=
-         ((EndAddr - 1) >> Log2(BoundaryAlignment));
-}
-
-/// Check if the branch is against the boundary.
-///
-/// \param StartAddr start address of the fused/unfused branch.
-/// \param Size size of the fused/unfused branch.
-/// \param BoundaryAlignment alignment requirement of the branch.
-/// \returns true if the branch is against the boundary.
-static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
-                              Align BoundaryAlignment) {
-  uint64_t EndAddr = StartAddr + Size;
-  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
-}
-
-/// Check if the branch needs padding.
-///
-/// \param StartAddr start address of the fused/unfused branch.
-/// \param Size size of the fused/unfused branch.
-/// \param BoundaryAlignment alignment requirement of the branch.
-/// \returns true if the branch needs padding.
-static bool needPadding(uint64_t StartAddr, uint64_t Size,
-                        Align BoundaryAlignment) {
-  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
-         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
-}
-
 bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout,
                                      MCBoundaryAlignFragment &BF) {
   // BoundaryAlignFragment that doesn't need to align any fragment should not be
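Editorial aside, not part of the patch: the standalone C++ sketch below mirrors the arithmetic of mayCrossBoundary/isAgainstBoundary and the FT_NeverAlign sizing decision in the MCAssembler.cpp hunks above. It uses a plain power-of-two alignment value and a hypothetical 1-byte minimum NOP in place of llvm::Align and getBackend().getMinimumNopSize(); the concrete addresses are illustrative only.

// Standalone illustration, not LLVM code. Alignment is assumed to be a
// power of two; MinNopSize stands in for the backend's minimum NOP size.
#include <cassert>
#include <cstdint>

static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             uint64_t Alignment) {
  uint64_t EndAddr = StartAddr + Size;
  // True when the first and last byte fall into different Alignment windows.
  return (StartAddr / Alignment) != ((EndAddr - 1) / Alignment);
}

static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              uint64_t Alignment) {
  // True when the last byte ends exactly on a multiple of Alignment.
  return ((StartAddr + Size) & (Alignment - 1)) == 0;
}

// Mirrors the FT_NeverAlign case: pad with a minimum NOP only when the
// successor fragment would otherwise end exactly on the avoided boundary.
static uint64_t neverAlignPadding(uint64_t Offset, uint64_t NextFragSize,
                                  uint64_t Alignment, uint64_t MinNopSize) {
  return isAgainstBoundary(Offset, NextFragSize, Alignment) ? MinNopSize : 0;
}

int main() {
  // A 4-byte instruction at offset 0x3e straddles the 64-byte boundary at 0x40.
  assert(mayCrossBoundary(0x3e, 4, 64));
  // A 4-byte instruction at offset 0x3c ends exactly at 0x40, so a NeverAlign
  // fragment in front of it reports one minimum NOP of padding.
  assert(!mayCrossBoundary(0x3c, 4, 64) && isAgainstBoundary(0x3c, 4, 64));
  assert(neverAlignPadding(0x3c, 4, 64, 1) == 1);
  // An instruction ending inside the window needs no padding.
  assert(neverAlignPadding(0x40, 4, 64, 1) == 0);
  return 0;
}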
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index b065d03..9add574 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -675,14 +675,8 @@ static void AttemptToFoldSymbolOffsetDifference(
     if (FA == FB) {
       Reverse = SA.getOffset() < SB.getOffset();
     } else if (!isa<MCDummyFragment>(FA)) {
-      // Testing FA < FB is slow. Use setLayoutOrder to speed up computation.
-      // The formal layout order will be finalized in MCAssembler::layout.
-      if (FA->getLayoutOrder() == 0 || FB->getLayoutOrder() == 0) {
-        unsigned LayoutOrder = 0;
-        for (MCFragment &F : *FA->getParent())
-          F.setLayoutOrder(++LayoutOrder);
-      }
-      Reverse = FA->getLayoutOrder() < FB->getLayoutOrder();
+      Reverse = std::find_if(std::next(FA->getIterator()), SecA.end(),
+                             [&](auto &I) { return &I == FB; }) != SecA.end();
     }
 
     uint64_t SAOffset = SA.getOffset(), SBOffset = SB.getOffset();
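A note on the MCExpr.cpp change above: instead of consulting cached layout orders, fragment order is now decided by scanning forward from FA to the end of the section and checking whether FB is encountered. The editorial sketch below (not LLVM code) shows the same idea on a std::list, where element identity plays the role of fragment identity.

// Standalone illustration of deciding "does b come after a?" by a forward scan.
#include <algorithm>
#include <cassert>
#include <iterator>
#include <list>

// Returns true if the element pointed to by `b` appears strictly after `a`.
static bool comesAfter(const std::list<int> &l, const int *a, const int *b) {
  auto itA = std::find_if(l.begin(), l.end(),
                          [&](const int &x) { return &x == a; });
  assert(itA != l.end() && "a must be an element of the list");
  return std::find_if(std::next(itA), l.end(),
                      [&](const int &x) { return &x == b; }) != l.end();
}

int main() {
  std::list<int> l{10, 20, 30};
  const int *first = &l.front();
  const int *last = &l.back();
  assert(comesAfter(l, first, last));   // 30 follows 10
  assert(!comesAfter(l, last, first));  // 10 does not follow 30
  return 0;
}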
diff --git a/llvm/lib/MC/MCFragment.cpp b/llvm/lib/MC/MCFragment.cpp
index 7d08268..6e09caa 100644
--- a/llvm/lib/MC/MCFragment.cpp
+++ b/llvm/lib/MC/MCFragment.cpp
@@ -274,6 +274,9 @@ void MCFragment::destroy() {
     case FT_Align:
       delete cast<MCAlignFragment>(this);
       return;
+    case FT_NeverAlign:
+      delete cast<MCNeverAlignFragment>(this);
+      return;
     case FT_Data:
       delete cast<MCDataFragment>(this);
       return;
@@ -342,6 +345,9 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
   OS << "<";
   switch (getKind()) {
   case MCFragment::FT_Align: OS << "MCAlignFragment"; break;
+  case MCFragment::FT_NeverAlign:
+    OS << "MCNeverAlignFragment";
+    break;
   case MCFragment::FT_Data: OS << "MCDataFragment"; break;
   case MCFragment::FT_CompactEncodedInst:
     OS << "MCCompactEncodedInstFragment"; break;
@@ -381,6 +387,12 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
        << " MaxBytesToEmit:" << AF->getMaxBytesToEmit() << ">";
     break;
   }
+  case MCFragment::FT_NeverAlign: {
+    const MCNeverAlignFragment *NAF = cast<MCNeverAlignFragment>(this);
+    OS << "\n       ";
+    OS << " Alignment:" << NAF->getAlignment() << ">";
+    break;
+  }
   case MCFragment::FT_Data: {
     const auto *DF = cast<MCDataFragment>(this);
     OS << "\n       ";
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index 0ccade9..117475b 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -658,6 +658,11 @@ void MCObjectStreamer::emitCodeAlignment(Align Alignment,
     cast<MCAlignFragment>(getCurrentFragment())->setEmitNops(true, STI);
 }
 
+void MCObjectStreamer::emitNeverAlignCodeAtEnd(unsigned ByteAlignment,
+                                               const MCSubtargetInfo &STI) {
+  insert(new MCNeverAlignFragment(ByteAlignment, STI));
+}
+
 void MCObjectStreamer::emitValueToOffset(const MCExpr *Offset,
                                          unsigned char Value,
                                          SMLoc Loc) {
diff --git a/llvm/lib/MC/MCStreamer.cpp b/llvm/lib/MC/MCStreamer.cpp
index 199d865..a97cba6 100644
--- a/llvm/lib/MC/MCStreamer.cpp
+++ b/llvm/lib/MC/MCStreamer.cpp
@@ -1235,6 +1235,8 @@ void MCStreamer::emitValueToAlignment(Align Alignment, int64_t Value,
                                       unsigned MaxBytesToEmit) {}
 void MCStreamer::emitCodeAlignment(Align Alignment, const MCSubtargetInfo *STI,
                                    unsigned MaxBytesToEmit) {}
+void MCStreamer::emitNeverAlignCodeAtEnd(unsigned ByteAlignment,
+                                         const MCSubtargetInfo &STI) {}
 void MCStreamer::emitValueToOffset(const MCExpr *Offset, unsigned char Value,
                                    SMLoc Loc) {}
 void MCStreamer::emitBundleAlignMode(Align Alignment) {}
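The three files above follow LLVM's usual plumbing for a new directive: fragment bookkeeping in MCFragment (destroy/dump), a no-op virtual hook on the base MCStreamer, and an override on MCObjectStreamer that records a fragment. The minimal editorial sketch below shows that shape with invented class names; it is not LLVM code.

// Standalone illustration of the "no-op base hook, fragment-creating override"
// pattern. Names here (Streamer, ObjectStreamer, Fragment) are hypothetical.
#include <cstdio>
#include <memory>
#include <vector>

struct Fragment {
  unsigned Alignment; // the boundary to avoid ending on
};

struct Streamer {
  virtual ~Streamer() = default;
  // No-op by default, mirroring the empty MCStreamer implementation.
  virtual void emitNeverAlignCodeAtEnd(unsigned ByteAlignment) {}
};

struct ObjectStreamer : Streamer {
  std::vector<Fragment> Section;
  // Object emission records a fragment, mirroring MCObjectStreamer.
  void emitNeverAlignCodeAtEnd(unsigned ByteAlignment) override {
    Section.push_back(Fragment{ByteAlignment});
  }
};

int main() {
  std::unique_ptr<Streamer> S = std::make_unique<ObjectStreamer>();
  S->emitNeverAlignCodeAtEnd(64); // the parser calls through the base interface
  std::printf("fragments recorded: %zu\n",
              static_cast<ObjectStreamer &>(*S).Section.size());
  return 0;
}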
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 6623106..6c6bd2c 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -1153,6 +1153,7 @@ private:
   bool parseDirectiveArch();
   bool parseDirectiveNops(SMLoc L);
   bool parseDirectiveEven(SMLoc L);
+  bool parseDirectiveAvoidEndAlign(SMLoc L);
   bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
 
   /// CodeView FPO data directives.
@@ -4601,6 +4602,8 @@ bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
     return false;
   } else if (IDVal == ".nops")
     return parseDirectiveNops(DirectiveID.getLoc());
+  else if (IDVal == ".avoid_end_align")
+    return parseDirectiveAvoidEndAlign(DirectiveID.getLoc());
   else if (IDVal == ".even")
     return parseDirectiveEven(DirectiveID.getLoc());
   else if (IDVal == ".cv_fpo_proc")
@@ -4695,6 +4698,27 @@ bool X86AsmParser::parseDirectiveEven(SMLoc L) {
   return false;
 }
 
+/// Directive for NeverAlign fragment testing, not for general usage!
+/// parseDirectiveAvoidEndAlign
+///  ::= .avoid_end_align alignment
+bool X86AsmParser::parseDirectiveAvoidEndAlign(SMLoc L) {
+  int64_t Alignment = 0;
+  SMLoc AlignmentLoc;
+  AlignmentLoc = getTok().getLoc();
+  if (getParser().checkForValidSection() ||
+      getParser().parseAbsoluteExpression(Alignment))
+    return true;
+
+  if (getParser().parseEOL("unexpected token in directive"))
+    return true;
+
+  if (Alignment <= 0)
+    return Error(AlignmentLoc, "expected a positive alignment");
+
+  getParser().getStreamer().emitNeverAlignCodeAtEnd(Alignment, getSTI());
+  return false;
+}
+
 /// ParseDirectiveCode
 /// ::= .code16 | .code32 | .code64
 bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
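Finally, an editorial sketch (not LLVM code) of the operand handling that parseDirectiveAvoidEndAlign performs above: `.avoid_end_align` takes a single absolute expression, for example `.avoid_end_align 64`, which must be positive before the streamer hook is invoked. The diagnostic text mirrors the patch; everything else is illustrative.

// Standalone illustration of the directive's validation, reduced to plain C++.
#include <cstdio>

// Returns true on error, matching the MCAsmParser convention used in the patch.
static bool handleAvoidEndAlign(long Alignment) {
  if (Alignment <= 0) {
    std::fprintf(stderr, "error: expected a positive alignment\n");
    return true;
  }
  // In the real parser, this is the point where
  //   getParser().getStreamer().emitNeverAlignCodeAtEnd(Alignment, getSTI());
  // inserts the MCNeverAlignFragment into the current section.
  std::printf(".avoid_end_align %ld accepted\n", Alignment);
  return false;
}

int main() {
  handleAvoidEndAlign(64); // e.g. ".avoid_end_align 64" in an x86 assembly file
  handleAvoidEndAlign(0);  // rejected: the alignment must be positive
  return 0;
}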