author     Jinyang He <hejinyang@loongson.cn>        2023-12-20 10:54:51 +0800
committer  GitHub <noreply@github.com>               2023-12-20 10:54:51 +0800
commit     a8081ed8ff0fd11fb8d5f4c83df49da909e49612 (patch)
tree       e48280252b9702cdee02ca66765ba7dd17895fa4 /llvm/lib
parent     e6a7175c6d72e8c50534cb5494d2d2542e011fe5 (diff)
[LoongArch] Allow delayed decision for ADD/SUB relocations (#72960)
As with RISC-V [1], LoongArch also needs a delayed decision for ADD/SUB
relocations. In handleAddSubRelocations, we return directly if SecA !=
SecB; handleFixup will usually finish the rest of the work of creating
PCRel relocations. Otherwise, whether we emit relocations depends on
whether relaxation is enabled: if it is not, we return true and avoid
recording ADD/SUB relocations.
Now a difference between two symbols separated by an alignment directive
is returned without folding the symbol offset in
AttemptToFoldSymbolOffsetDifference, which has the same effect as when
relaxation is enabled.
[1] https://reviews.llvm.org/D155357
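As a concrete illustration, here is a minimal assembly sketch (not taken from the patch; the labels and the 32-bit width are assumptions). The distance between the two labels spans an alignment directive, so it cannot be computed at assembly time once relaxation may change the padding; assembling with relaxation enabled (e.g. llvm-mc -triple=loongarch64 -mattr=+relax -filetype=obj) should emit an ADD/SUB relocation pair for the difference:

    .L1:
        nop
        .p2align 4          # amount of padding may change during relaxation
    .L2:
        .word .L2 - .L1     # same section: expect R_LARCH_ADD32 + R_LARCH_SUB32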
Diffstat (limited to 'llvm/lib')
4 files changed, 90 insertions, 4 deletions
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 73e6569..061f2ad 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -632,7 +632,8 @@ static void AttemptToFoldSymbolOffsetDifference(
   // instructions and InSet is false (not expressions in directive like
   // .size/.fill), disable the fast path.
   if (Layout && (InSet || !SecA.hasInstructions() ||
-                 !Asm->getContext().getTargetTriple().isRISCV())) {
+                 !(Asm->getContext().getTargetTriple().isRISCV() ||
+                   Asm->getContext().getTargetTriple().isLoongArch()))) {
     // If both symbols are in the same fragment, return the difference of their
     // offsets. canGetFragmentOffset(FA) may be false.
     if (FA == FB && !SA.isVariable() && !SB.isVariable()) {
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
index 14bcef7..6d8ef1b 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
@@ -177,6 +177,34 @@ bool LoongArchAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
   }
 }
 
+static inline std::pair<MCFixupKind, MCFixupKind>
+getRelocPairForSize(unsigned Size) {
+  switch (Size) {
+  default:
+    llvm_unreachable("unsupported fixup size");
+  case 6:
+    return std::make_pair(
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD6),
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB6));
+  case 8:
+    return std::make_pair(
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD8),
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB8));
+  case 16:
+    return std::make_pair(
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD16),
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB16));
+  case 32:
+    return std::make_pair(
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD32),
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB32));
+  case 64:
+    return std::make_pair(
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD64),
+        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB64));
+  }
+}
+
 bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                        const MCSubtargetInfo *STI) const {
   // We mostly follow binutils' convention here: align to 4-byte boundary with a
@@ -191,6 +219,56 @@ bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
   return true;
 }
 
+bool LoongArchAsmBackend::handleAddSubRelocations(const MCAsmLayout &Layout,
+                                                  const MCFragment &F,
+                                                  const MCFixup &Fixup,
+                                                  const MCValue &Target,
+                                                  uint64_t &FixedValue) const {
+  std::pair<MCFixupKind, MCFixupKind> FK;
+  uint64_t FixedValueA, FixedValueB;
+  const MCSection &SecA = Target.getSymA()->getSymbol().getSection();
+  const MCSection &SecB = Target.getSymB()->getSymbol().getSection();
+
+  // We need to record a relocation if SecA != SecB. Usually SecB is the same
+  // as the section of Fixup, in which case the relocation is recorded as
+  // PCRel; if SecB differs from the section of Fixup, an error is reported.
+  // Just return false here and let handleFixup finish the rest of the work.
+  if (&SecA != &SecB)
+    return false;
+
+  // In the SecA == SecB case, if linker relaxation is enabled, we need to
+  // record the ADD and SUB relocations. Otherwise the FixedValue has already
+  // been calculated in evaluateFixup, so return true and avoid recording
+  // relocations.
+  if (!STI.hasFeature(LoongArch::FeatureRelax))
+    return true;
+
+  switch (Fixup.getKind()) {
+  case llvm::FK_Data_1:
+    FK = getRelocPairForSize(8);
+    break;
+  case llvm::FK_Data_2:
+    FK = getRelocPairForSize(16);
+    break;
+  case llvm::FK_Data_4:
+    FK = getRelocPairForSize(32);
+    break;
+  case llvm::FK_Data_8:
+    FK = getRelocPairForSize(64);
+    break;
+  default:
+    llvm_unreachable("unsupported fixup size");
+  }
+  MCValue A = MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
+  MCValue B = MCValue::get(Target.getSymB());
+  auto FA = MCFixup::create(Fixup.getOffset(), nullptr, std::get<0>(FK));
+  auto FB = MCFixup::create(Fixup.getOffset(), nullptr, std::get<1>(FK));
+  auto &Asm = Layout.getAssembler();
+  Asm.getWriter().recordRelocation(Asm, Layout, &F, FA, A, FixedValueA);
+  Asm.getWriter().recordRelocation(Asm, Layout, &F, FB, B, FixedValueB);
+  FixedValue = FixedValueA - FixedValueB;
+  return true;
+}
+
 std::unique_ptr<MCObjectTargetWriter>
 LoongArchAsmBackend::createObjectTargetWriter() const {
   return createLoongArchELFObjectWriter(
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
index d1fbf78..fef0e84 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
@@ -31,10 +31,15 @@ class LoongArchAsmBackend : public MCAsmBackend {
 public:
   LoongArchAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI, bool Is64Bit,
                       const MCTargetOptions &Options)
-      : MCAsmBackend(llvm::endianness::little), STI(STI), OSABI(OSABI),
-        Is64Bit(Is64Bit), TargetOptions(Options) {}
+      : MCAsmBackend(llvm::endianness::little,
+                     LoongArch::fixup_loongarch_relax),
+        STI(STI), OSABI(OSABI), Is64Bit(Is64Bit), TargetOptions(Options) {}
   ~LoongArchAsmBackend() override {}
 
+  bool handleAddSubRelocations(const MCAsmLayout &Layout, const MCFragment &F,
+                               const MCFixup &Fixup, const MCValue &Target,
+                               uint64_t &FixedValue) const override;
+
   void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                   const MCValue &Target, MutableArrayRef<char> Data,
                   uint64_t Value, bool IsResolved,
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchFixupKinds.h b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchFixupKinds.h
index ba2d671..178fa6e 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchFixupKinds.h
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchFixupKinds.h
@@ -106,7 +106,9 @@ enum Fixups {
   // 20-bit fixup corresponding to %gd_pc_hi20(foo) for instruction pcalau12i.
   fixup_loongarch_tls_gd_pc_hi20,
   // 20-bit fixup corresponding to %gd_hi20(foo) for instruction lu12i.w.
-  fixup_loongarch_tls_gd_hi20
+  fixup_loongarch_tls_gd_hi20,
+  // Generate an R_LARCH_RELAX which indicates the linker may relax here.
+  fixup_loongarch_relax = FirstLiteralRelocationKind + ELF::R_LARCH_RELAX
 };
 } // end namespace LoongArch
 } // end namespace llvm
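For the SecA != SecB early return in handleAddSubRelocations, a hedged sketch of the cross-section case (illustrative labels, not from the commit's tests): the subtrahend's section matches the section containing the fixup, so handleAddSubRelocations returns false and, per the commit message, handleFixup is expected to record the difference as a PCRel relocation (e.g. R_LARCH_32_PCREL for a 32-bit value) rather than an ADD/SUB pair:

        .text
    a:
        nop
    
        .data
    b:
        .word a - b         # SecA (.text) != SecB (.data): handleAddSubRelocations
                            # returns false; handleFixup records a PCRel relocation

The resulting relocations can be inspected with llvm-readobj -r on the assembled object file.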