Index: head/contrib/llvm-project/lld/ELF/InputSection.cpp =================================================================== --- head/contrib/llvm-project/lld/ELF/InputSection.cpp (revision 359084) +++ head/contrib/llvm-project/lld/ELF/InputSection.cpp (revision 359085) @@ -1,1366 +1,1367 @@ //===- InputSection.cpp ---------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "InputSection.h" #include "Config.h" #include "EhFrame.h" #include "InputFiles.h" #include "LinkerScript.h" #include "OutputSections.h" #include "Relocations.h" #include "SymbolTable.h" #include "Symbols.h" #include "SyntheticSections.h" #include "Target.h" #include "Thunks.h" #include "lld/Common/ErrorHandler.h" #include "lld/Common/Memory.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Compression.h" #include "llvm/Support/Endian.h" #include "llvm/Support/Threading.h" #include "llvm/Support/xxhash.h" #include #include #include #include using namespace llvm; using namespace llvm::ELF; using namespace llvm::object; using namespace llvm::support; using namespace llvm::support::endian; using namespace llvm::sys; namespace lld { // Returns a string to construct an error message. 
std::string toString(const elf::InputSectionBase *sec) { return (toString(sec->file) + ":(" + sec->name + ")").str(); } namespace elf { std::vector inputSections; template static ArrayRef getSectionContents(ObjFile &file, const typename ELFT::Shdr &hdr) { if (hdr.sh_type == SHT_NOBITS) return makeArrayRef(nullptr, hdr.sh_size); return check(file.getObj().getSectionContents(&hdr)); } InputSectionBase::InputSectionBase(InputFile *file, uint64_t flags, uint32_t type, uint64_t entsize, uint32_t link, uint32_t info, uint32_t alignment, ArrayRef data, StringRef name, Kind sectionKind) : SectionBase(sectionKind, name, flags, entsize, alignment, type, info, link), file(file), rawData(data) { // In order to reduce memory allocation, we assume that mergeable // sections are smaller than 4 GiB, which is not an unreasonable // assumption as of 2017. if (sectionKind == SectionBase::Merge && rawData.size() > UINT32_MAX) error(toString(this) + ": section too large"); numRelocations = 0; areRelocsRela = false; // The ELF spec states that a value of 0 means the section has // no alignment constraints. uint32_t v = std::max(alignment, 1); if (!isPowerOf2_64(v)) fatal(toString(this) + ": sh_addralign is not a power of 2"); this->alignment = v; // In ELF, each section can be compressed by zlib, and if compressed, // section name may be mangled by appending "z" (e.g. ".zdebug_info"). // If that's the case, demangle section name so that we can handle a // section as if it weren't compressed. if ((flags & SHF_COMPRESSED) || name.startswith(".zdebug")) { if (!zlib::isAvailable()) error(toString(file) + ": contains a compressed section, " + "but zlib is not available"); parseCompressedHeader(); } } // Drop SHF_GROUP bit unless we are producing a re-linkable object file. // SHF_GROUP is a marker that a section belongs to some comdat group. // That flag doesn't make sense in an executable. 
static uint64_t getFlags(uint64_t flags) { flags &= ~(uint64_t)SHF_INFO_LINK; if (!config->relocatable) flags &= ~(uint64_t)SHF_GROUP; return flags; } // GNU assembler 2.24 and LLVM 4.0.0's MC (the newest release as of // March 2017) fail to infer section types for sections starting with // ".init_array." or ".fini_array.". They set SHT_PROGBITS instead of // SHF_INIT_ARRAY. As a result, the following assembler directive // creates ".init_array.100" with SHT_PROGBITS, for example. // // .section .init_array.100, "aw" // // This function forces SHT_{INIT,FINI}_ARRAY so that we can handle // incorrect inputs as if they were correct from the beginning. static uint64_t getType(uint64_t type, StringRef name) { if (type == SHT_PROGBITS && name.startswith(".init_array.")) return SHT_INIT_ARRAY; if (type == SHT_PROGBITS && name.startswith(".fini_array.")) return SHT_FINI_ARRAY; return type; } template InputSectionBase::InputSectionBase(ObjFile &file, const typename ELFT::Shdr &hdr, StringRef name, Kind sectionKind) : InputSectionBase(&file, getFlags(hdr.sh_flags), getType(hdr.sh_type, name), hdr.sh_entsize, hdr.sh_link, hdr.sh_info, hdr.sh_addralign, getSectionContents(file, hdr), name, sectionKind) { // We reject object files having insanely large alignments even though // they are allowed by the spec. I think 4GB is a reasonable limitation. // We might want to relax this in the future. 
if (hdr.sh_addralign > UINT32_MAX) fatal(toString(&file) + ": section sh_addralign is too large"); } size_t InputSectionBase::getSize() const { if (auto *s = dyn_cast(this)) return s->getSize(); if (uncompressedSize >= 0) return uncompressedSize; return rawData.size(); } void InputSectionBase::uncompress() const { size_t size = uncompressedSize; char *uncompressedBuf; { static std::mutex mu; std::lock_guard lock(mu); uncompressedBuf = bAlloc.Allocate(size); } if (Error e = zlib::uncompress(toStringRef(rawData), uncompressedBuf, size)) fatal(toString(this) + ": uncompress failed: " + llvm::toString(std::move(e))); rawData = makeArrayRef((uint8_t *)uncompressedBuf, size); uncompressedSize = -1; } uint64_t InputSectionBase::getOffsetInFile() const { const uint8_t *fileStart = (const uint8_t *)file->mb.getBufferStart(); const uint8_t *secStart = data().begin(); return secStart - fileStart; } uint64_t SectionBase::getOffset(uint64_t offset) const { switch (kind()) { case Output: { auto *os = cast(this); // For output sections we treat offset -1 as the end of the section. return offset == uint64_t(-1) ? os->size : offset; } case Regular: case Synthetic: return cast(this)->getOffset(offset); case EHFrame: // The file crtbeginT.o has relocations pointing to the start of an empty // .eh_frame that is known to be the first in the link. It does that to // identify the start of the output .eh_frame. return offset; case Merge: const MergeInputSection *ms = cast(this); if (InputSection *isec = ms->getParent()) return isec->getOffset(ms->getParentOffset(offset)); return ms->getParentOffset(offset); } llvm_unreachable("invalid section kind"); } uint64_t SectionBase::getVA(uint64_t offset) const { const OutputSection *out = getOutputSection(); return (out ? 
out->addr : 0) + getOffset(offset); } OutputSection *SectionBase::getOutputSection() { InputSection *sec; if (auto *isec = dyn_cast(this)) sec = isec; else if (auto *ms = dyn_cast(this)) sec = ms->getParent(); else if (auto *eh = dyn_cast(this)) sec = eh->getParent(); else return cast(this); return sec ? sec->getParent() : nullptr; } // When a section is compressed, `rawData` consists with a header followed // by zlib-compressed data. This function parses a header to initialize // `uncompressedSize` member and remove the header from `rawData`. void InputSectionBase::parseCompressedHeader() { using Chdr64 = typename ELF64LE::Chdr; using Chdr32 = typename ELF32LE::Chdr; // Old-style header if (name.startswith(".zdebug")) { if (!toStringRef(rawData).startswith("ZLIB")) { error(toString(this) + ": corrupted compressed section header"); return; } rawData = rawData.slice(4); if (rawData.size() < 8) { error(toString(this) + ": corrupted compressed section header"); return; } uncompressedSize = read64be(rawData.data()); rawData = rawData.slice(8); // Restore the original section name. // (e.g. ".zdebug_info" -> ".debug_info") name = saver.save("." 
+ name.substr(2)); return; } assert(flags & SHF_COMPRESSED); flags &= ~(uint64_t)SHF_COMPRESSED; // New-style 64-bit header if (config->is64) { if (rawData.size() < sizeof(Chdr64)) { error(toString(this) + ": corrupted compressed section"); return; } auto *hdr = reinterpret_cast(rawData.data()); if (hdr->ch_type != ELFCOMPRESS_ZLIB) { error(toString(this) + ": unsupported compression type"); return; } uncompressedSize = hdr->ch_size; alignment = std::max(hdr->ch_addralign, 1); rawData = rawData.slice(sizeof(*hdr)); return; } // New-style 32-bit header if (rawData.size() < sizeof(Chdr32)) { error(toString(this) + ": corrupted compressed section"); return; } auto *hdr = reinterpret_cast(rawData.data()); if (hdr->ch_type != ELFCOMPRESS_ZLIB) { error(toString(this) + ": unsupported compression type"); return; } uncompressedSize = hdr->ch_size; alignment = std::max(hdr->ch_addralign, 1); rawData = rawData.slice(sizeof(*hdr)); } InputSection *InputSectionBase::getLinkOrderDep() const { assert(link); assert(flags & SHF_LINK_ORDER); return cast(file->getSections()[link]); } // Find a function symbol that encloses a given location. template Defined *InputSectionBase::getEnclosingFunction(uint64_t offset) { for (Symbol *b : file->getSymbols()) if (Defined *d = dyn_cast(b)) if (d->section == this && d->type == STT_FUNC && d->value <= offset && offset < d->value + d->size) return d; return nullptr; } // Returns a source location string. Used to construct an error message. template std::string InputSectionBase::getLocation(uint64_t offset) { std::string secAndOffset = (name + "+0x" + utohexstr(offset)).str(); // We don't have file for synthetic sections. if (getFile() == nullptr) return (config->outputFile + ":(" + secAndOffset + ")") .str(); // First check if we can get desired values from debugging information. 
if (Optional info = getFile()->getDILineInfo(this, offset)) return info->FileName + ":" + std::to_string(info->Line) + ":(" + secAndOffset + ")"; // File->sourceFile contains STT_FILE symbol that contains a // source file name. If it's missing, we use an object file name. std::string srcFile = getFile()->sourceFile; if (srcFile.empty()) srcFile = toString(file); if (Defined *d = getEnclosingFunction(offset)) return srcFile + ":(function " + toString(*d) + ": " + secAndOffset + ")"; // If there's no symbol, print out the offset in the section. return (srcFile + ":(" + secAndOffset + ")"); } // This function is intended to be used for constructing an error message. // The returned message looks like this: // // foo.c:42 (/home/alice/possibly/very/long/path/foo.c:42) // // Returns an empty string if there's no way to get line info. std::string InputSectionBase::getSrcMsg(const Symbol &sym, uint64_t offset) { return file->getSrcMsg(sym, *this, offset); } // Returns a filename string along with an optional section name. This // function is intended to be used for constructing an error // message. The returned message looks like this: // // path/to/foo.o:(function bar) // // or // // path/to/foo.o:(function bar) in archive path/to/bar.a std::string InputSectionBase::getObjMsg(uint64_t off) { std::string filename = file->getName(); std::string archive; if (!file->archiveName.empty()) archive = " in archive " + file->archiveName; // Find a symbol that encloses a given location. for (Symbol *b : file->getSymbols()) if (auto *d = dyn_cast(b)) if (d->section == this && d->value <= off && off < d->value + d->size) return filename + ":(" + toString(*d) + ")" + archive; // If there's no symbol, print out the offset in the section. 
return (filename + ":(" + name + "+0x" + utohexstr(off) + ")" + archive) .str(); } InputSection InputSection::discarded(nullptr, 0, 0, 0, ArrayRef(), ""); InputSection::InputSection(InputFile *f, uint64_t flags, uint32_t type, uint32_t alignment, ArrayRef data, StringRef name, Kind k) : InputSectionBase(f, flags, type, /*Entsize*/ 0, /*Link*/ 0, /*Info*/ 0, alignment, data, name, k) {} template InputSection::InputSection(ObjFile &f, const typename ELFT::Shdr &header, StringRef name) : InputSectionBase(f, header, name, InputSectionBase::Regular) {} bool InputSection::classof(const SectionBase *s) { return s->kind() == SectionBase::Regular || s->kind() == SectionBase::Synthetic; } OutputSection *InputSection::getParent() const { return cast_or_null(parent); } // Copy SHT_GROUP section contents. Used only for the -r option. template void InputSection::copyShtGroup(uint8_t *buf) { // ELFT::Word is the 32-bit integral type in the target endianness. using u32 = typename ELFT::Word; ArrayRef from = getDataAs(); auto *to = reinterpret_cast(buf); // The first entry is not a section number but a flag. *to++ = from[0]; // Adjust section numbers because section numbers in an input object // files are different in the output. ArrayRef sections = file->getSections(); for (uint32_t idx : from.slice(1)) *to++ = sections[idx]->getOutputSection()->sectionIndex; } InputSectionBase *InputSection::getRelocatedSection() const { if (!file || (type != SHT_RELA && type != SHT_REL)) return nullptr; ArrayRef sections = file->getSections(); return sections[info]; } // This is used for -r and --emit-relocs. We can't use memcpy to copy // relocations because we need to update symbol table offset and section index // for each relocation. So we copy relocations one by one. 
template void InputSection::copyRelocations(uint8_t *buf, ArrayRef rels) { InputSectionBase *sec = getRelocatedSection(); for (const RelTy &rel : rels) { RelType type = rel.getType(config->isMips64EL); const ObjFile *file = getFile(); Symbol &sym = file->getRelocTargetSym(rel); auto *p = reinterpret_cast(buf); buf += sizeof(RelTy); if (RelTy::IsRela) p->r_addend = getAddend(rel); // Output section VA is zero for -r, so r_offset is an offset within the // section, but for --emit-relocs it is a virtual address. p->r_offset = sec->getVA(rel.r_offset); p->setSymbolAndType(in.symTab->getSymbolIndex(&sym), type, config->isMips64EL); if (sym.type == STT_SECTION) { // We combine multiple section symbols into only one per // section. This means we have to update the addend. That is // trivial for Elf_Rela, but for Elf_Rel we have to write to the // section data. We do that by adding to the Relocation vector. // .eh_frame is horribly special and can reference discarded sections. To // avoid having to parse and recreate .eh_frame, we just replace any // relocation in it pointing to discarded sections with R_*_NONE, which // hopefully creates a frame that is ignored at runtime. Also, don't warn // on .gcc_except_table and debug sections. // - // See the comment in maybeReportUndefined for PPC64 .toc . 
+ // See the comment in maybeReportUndefined for PPC32 .got2 and PPC64 .toc auto *d = dyn_cast(&sym); if (!d) { if (!sec->name.startswith(".debug") && !sec->name.startswith(".zdebug") && sec->name != ".eh_frame" && - sec->name != ".gcc_except_table" && sec->name != ".toc") { + sec->name != ".gcc_except_table" && sec->name != ".got2" && + sec->name != ".toc") { uint32_t secIdx = cast(sym).discardedSecIdx; Elf_Shdr_Impl sec = CHECK(file->getObj().sections(), file)[secIdx]; warn("relocation refers to a discarded section: " + CHECK(file->getObj().getSectionName(&sec), file) + "\n>>> referenced by " + getObjMsg(p->r_offset)); } p->setSymbolAndType(0, 0, false); continue; } SectionBase *section = d->section->repl; if (!section->isLive()) { p->setSymbolAndType(0, 0, false); continue; } int64_t addend = getAddend(rel); const uint8_t *bufLoc = sec->data().begin() + rel.r_offset; if (!RelTy::IsRela) addend = target->getImplicitAddend(bufLoc, type); if (config->emachine == EM_MIPS && config->relocatable && target->getRelExpr(type, sym, bufLoc) == R_MIPS_GOTREL) { // Some MIPS relocations depend on "gp" value. By default, // this value has 0x7ff0 offset from a .got section. But // relocatable files produced by a compiler or a linker // might redefine this default value and we must use it // for a calculation of the relocation result. When we // generate EXE or DSO it's trivial. Generating a relocatable // output is more difficult case because the linker does // not calculate relocations in this mode and loses // individual "gp" values used by each input object file. // As a workaround we add the "gp" value to the relocation // addend and save it back to the file. 
addend += sec->getFile()->mipsGp0; } if (RelTy::IsRela) p->r_addend = sym.getVA(addend) - section->getOutputSection()->addr; else if (config->relocatable && type != target->noneRel) sec->relocations.push_back({R_ABS, type, rel.r_offset, addend, &sym}); } else if (config->emachine == EM_PPC && type == R_PPC_PLTREL24 && p->r_addend >= 0x8000) { // Similar to R_MIPS_GPREL{16,32}. If the addend of R_PPC_PLTREL24 // indicates that r30 is relative to the input section .got2 // (r_addend>=0x8000), after linking, r30 should be relative to the output // section .got2 . To compensate for the shift, adjust r_addend by // ppc32Got2OutSecOff. p->r_addend += sec->file->ppc32Got2OutSecOff; } } } // The ARM and AArch64 ABI handle pc-relative relocations to undefined weak // references specially. The general rule is that the value of the symbol in // this context is the address of the place P. A further special case is that // branch relocations to an undefined weak reference resolve to the next // instruction. static uint32_t getARMUndefinedRelativeWeakVA(RelType type, uint32_t a, uint32_t p) { switch (type) { // Unresolved branch relocations to weak references resolve to next // instruction, this will be either 2 or 4 bytes on from P. case R_ARM_THM_JUMP11: return p + 2 + a; case R_ARM_CALL: case R_ARM_JUMP24: case R_ARM_PC24: case R_ARM_PLT32: case R_ARM_PREL31: case R_ARM_THM_JUMP19: case R_ARM_THM_JUMP24: return p + 4 + a; case R_ARM_THM_CALL: // We don't want an interworking BLX to ARM return p + 5 + a; // Unresolved non branch pc-relative relocations // R_ARM_TARGET2 which can be resolved relatively is not present as it never // targets a weak-reference. case R_ARM_MOVW_PREL_NC: case R_ARM_MOVT_PREL: case R_ARM_REL32: case R_ARM_THM_MOVW_PREL_NC: case R_ARM_THM_MOVT_PREL: return p + a; } llvm_unreachable("ARM pc-relative relocation expected\n"); } // The comment above getARMUndefinedRelativeWeakVA applies to this function. 
static uint64_t getAArch64UndefinedRelativeWeakVA(uint64_t type, uint64_t a, uint64_t p) { switch (type) { // Unresolved branch relocations to weak references resolve to next // instruction, this is 4 bytes on from P. case R_AARCH64_CALL26: case R_AARCH64_CONDBR19: case R_AARCH64_JUMP26: case R_AARCH64_TSTBR14: return p + 4 + a; // Unresolved non branch pc-relative relocations case R_AARCH64_PREL16: case R_AARCH64_PREL32: case R_AARCH64_PREL64: case R_AARCH64_ADR_PREL_LO21: case R_AARCH64_LD_PREL_LO19: return p + a; } llvm_unreachable("AArch64 pc-relative relocation expected\n"); } // ARM SBREL relocations are of the form S + A - B where B is the static base // The ARM ABI defines base to be "addressing origin of the output segment // defining the symbol S". We defined the "addressing origin"/static base to be // the base of the PT_LOAD segment containing the Sym. // The procedure call standard only defines a Read Write Position Independent // RWPI variant so in practice we should expect the static base to be the base // of the RW segment. static uint64_t getARMStaticBase(const Symbol &sym) { OutputSection *os = sym.getOutputSection(); if (!os || !os->ptLoad || !os->ptLoad->firstSec) fatal("SBREL relocation to " + sym.getName() + " without static base"); return os->ptLoad->firstSec->addr; } // For R_RISCV_PC_INDIRECT (R_RISCV_PCREL_LO12_{I,S}), the symbol actually // points the corresponding R_RISCV_PCREL_HI20 relocation, and the target VA // is calculated using PCREL_HI20's symbol. // // This function returns the R_RISCV_PCREL_HI20 relocation from // R_RISCV_PCREL_LO12's symbol and addend. 
static Relocation *getRISCVPCRelHi20(const Symbol *sym, uint64_t addend) { const Defined *d = cast(sym); if (!d->section) { error("R_RISCV_PCREL_LO12 relocation points to an absolute symbol: " + sym->getName()); return nullptr; } InputSection *isec = cast(d->section); if (addend != 0) warn("Non-zero addend in R_RISCV_PCREL_LO12 relocation to " + isec->getObjMsg(d->value) + " is ignored"); // Relocations are sorted by offset, so we can use std::equal_range to do // binary search. Relocation r; r.offset = d->value; auto range = std::equal_range(isec->relocations.begin(), isec->relocations.end(), r, [](const Relocation &lhs, const Relocation &rhs) { return lhs.offset < rhs.offset; }); for (auto it = range.first; it != range.second; ++it) if (it->type == R_RISCV_PCREL_HI20 || it->type == R_RISCV_GOT_HI20 || it->type == R_RISCV_TLS_GD_HI20 || it->type == R_RISCV_TLS_GOT_HI20) return &*it; error("R_RISCV_PCREL_LO12 relocation points to " + isec->getObjMsg(d->value) + " without an associated R_RISCV_PCREL_HI20 relocation"); return nullptr; } // A TLS symbol's virtual address is relative to the TLS segment. Add a // target-specific adjustment to produce a thread-pointer-relative offset. static int64_t getTlsTpOffset(const Symbol &s) { // On targets that support TLSDESC, _TLS_MODULE_BASE_@tpoff = 0. if (&s == ElfSym::tlsModuleBase) return 0; // There are 2 TLS layouts. Among targets we support, x86 uses TLS Variant 2 // while most others use Variant 1. At run time TP will be aligned to p_align. // Variant 1. TP will be followed by an optional gap (which is the size of 2 // pointers on ARM/AArch64, 0 on other targets), followed by alignment // padding, then the static TLS blocks. The alignment padding is added so that // (TP + gap + padding) is congruent to p_vaddr modulo p_align. // // Variant 2. Static TLS blocks, followed by alignment padding are placed // before TP. The alignment padding is added so that (TP - padding - // p_memsz) is congruent to p_vaddr modulo p_align. 
PhdrEntry *tls = Out::tlsPhdr; switch (config->emachine) { // Variant 1. case EM_ARM: case EM_AARCH64: return s.getVA(0) + config->wordsize * 2 + ((tls->p_vaddr - config->wordsize * 2) & (tls->p_align - 1)); case EM_MIPS: case EM_PPC: case EM_PPC64: // Adjusted Variant 1. TP is placed with a displacement of 0x7000, which is // to allow a signed 16-bit offset to reach 0x1000 of TCB/thread-library // data and 0xf000 of the program's TLS segment. return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000; case EM_RISCV: return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1)); // Variant 2. case EM_HEXAGON: case EM_386: case EM_X86_64: return s.getVA(0) - tls->p_memsz - ((-tls->p_vaddr - tls->p_memsz) & (tls->p_align - 1)); default: llvm_unreachable("unhandled Config->EMachine"); } } static uint64_t getRelocTargetVA(const InputFile *file, RelType type, int64_t a, uint64_t p, const Symbol &sym, RelExpr expr) { switch (expr) { case R_ABS: case R_DTPREL: case R_RELAX_TLS_LD_TO_LE_ABS: case R_RELAX_GOT_PC_NOPIC: case R_RISCV_ADD: return sym.getVA(a); case R_ADDEND: return a; case R_ARM_SBREL: return sym.getVA(a) - getARMStaticBase(sym); case R_GOT: case R_RELAX_TLS_GD_TO_IE_ABS: return sym.getGotVA() + a; case R_GOTONLY_PC: return in.got->getVA() + a - p; case R_GOTPLTONLY_PC: return in.gotPlt->getVA() + a - p; case R_GOTREL: case R_PPC64_RELAX_TOC: return sym.getVA(a) - in.got->getVA(); case R_GOTPLTREL: return sym.getVA(a) - in.gotPlt->getVA(); case R_GOTPLT: case R_RELAX_TLS_GD_TO_IE_GOTPLT: return sym.getGotVA() + a - in.gotPlt->getVA(); case R_TLSLD_GOT_OFF: case R_GOT_OFF: case R_RELAX_TLS_GD_TO_IE_GOT_OFF: return sym.getGotOffset() + a; case R_AARCH64_GOT_PAGE_PC: case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC: return getAArch64Page(sym.getGotVA() + a) - getAArch64Page(p); case R_GOT_PC: case R_RELAX_TLS_GD_TO_IE: return sym.getGotVA() + a - p; case R_MIPS_GOTREL: return sym.getVA(a) - in.mipsGot->getGp(file); case R_MIPS_GOT_GP: return in.mipsGot->getGp(file) + a; 
case R_MIPS_GOT_GP_PC: { // R_MIPS_LO16 expression has R_MIPS_GOT_GP_PC type iif the target // is _gp_disp symbol. In that case we should use the following // formula for calculation "AHL + GP - P + 4". For details see p. 4-19 at // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf // microMIPS variants of these relocations use slightly different // expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi() // to correctly handle less-sugnificant bit of the microMIPS symbol. uint64_t v = in.mipsGot->getGp(file) + a - p; if (type == R_MIPS_LO16 || type == R_MICROMIPS_LO16) v += 4; if (type == R_MICROMIPS_LO16 || type == R_MICROMIPS_HI16) v -= 1; return v; } case R_MIPS_GOT_LOCAL_PAGE: // If relocation against MIPS local symbol requires GOT entry, this entry // should be initialized by 'page address'. This address is high 16-bits // of sum the symbol's value and the addend. return in.mipsGot->getVA() + in.mipsGot->getPageEntryOffset(file, sym, a) - in.mipsGot->getGp(file); case R_MIPS_GOT_OFF: case R_MIPS_GOT_OFF32: // In case of MIPS if a GOT relocation has non-zero addend this addend // should be applied to the GOT entry content not to the GOT entry offset. // That is why we use separate expression type. return in.mipsGot->getVA() + in.mipsGot->getSymEntryOffset(file, sym, a) - in.mipsGot->getGp(file); case R_MIPS_TLSGD: return in.mipsGot->getVA() + in.mipsGot->getGlobalDynOffset(file, sym) - in.mipsGot->getGp(file); case R_MIPS_TLSLD: return in.mipsGot->getVA() + in.mipsGot->getTlsIndexOffset(file) - in.mipsGot->getGp(file); case R_AARCH64_PAGE_PC: { uint64_t val = sym.isUndefWeak() ? 
p + a : sym.getVA(a); return getAArch64Page(val) - getAArch64Page(p); } case R_RISCV_PC_INDIRECT: { if (const Relocation *hiRel = getRISCVPCRelHi20(&sym, a)) return getRelocTargetVA(file, hiRel->type, hiRel->addend, sym.getVA(), *hiRel->sym, hiRel->expr); return 0; } case R_PC: { uint64_t dest; if (sym.isUndefWeak()) { // On ARM and AArch64 a branch to an undefined weak resolves to the // next instruction, otherwise the place. if (config->emachine == EM_ARM) dest = getARMUndefinedRelativeWeakVA(type, a, p); else if (config->emachine == EM_AARCH64) dest = getAArch64UndefinedRelativeWeakVA(type, a, p); else if (config->emachine == EM_PPC) dest = p; else dest = sym.getVA(a); } else { dest = sym.getVA(a); } return dest - p; } case R_PLT: return sym.getPltVA() + a; case R_PLT_PC: case R_PPC64_CALL_PLT: return sym.getPltVA() + a - p; case R_PPC32_PLTREL: // R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30 // stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for // target VA computation. return sym.getPltVA() - p; case R_PPC64_CALL: { uint64_t symVA = sym.getVA(a); // If we have an undefined weak symbol, we might get here with a symbol // address of zero. That could overflow, but the code must be unreachable, // so don't bother doing anything at all. if (!symVA) return 0; // PPC64 V2 ABI describes two entry points to a function. The global entry // point is used for calls where the caller and callee (may) have different // TOC base pointers and r2 needs to be modified to hold the TOC base for // the callee. For local calls the caller and callee share the same // TOC base and so the TOC pointer initialization code should be skipped by // branching to the local entry point. 
return symVA - p + getPPC64GlobalEntryToLocalEntryOffset(sym.stOther); } case R_PPC64_TOCBASE: return getPPC64TocBase() + a; case R_RELAX_GOT_PC: return sym.getVA(a) - p; case R_RELAX_TLS_GD_TO_LE: case R_RELAX_TLS_IE_TO_LE: case R_RELAX_TLS_LD_TO_LE: case R_TLS: // It is not very clear what to return if the symbol is undefined. With // --noinhibit-exec, even a non-weak undefined reference may reach here. // Just return A, which matches R_ABS, and the behavior of some dynamic // loaders. if (sym.isUndefined()) return a; return getTlsTpOffset(sym) + a; case R_RELAX_TLS_GD_TO_LE_NEG: case R_NEG_TLS: if (sym.isUndefined()) return a; return -getTlsTpOffset(sym) + a; case R_SIZE: return sym.getSize() + a; case R_TLSDESC: return in.got->getGlobalDynAddr(sym) + a; case R_TLSDESC_PC: return in.got->getGlobalDynAddr(sym) + a - p; case R_AARCH64_TLSDESC_PAGE: return getAArch64Page(in.got->getGlobalDynAddr(sym) + a) - getAArch64Page(p); case R_TLSGD_GOT: return in.got->getGlobalDynOffset(sym) + a; case R_TLSGD_GOTPLT: return in.got->getVA() + in.got->getGlobalDynOffset(sym) + a - in.gotPlt->getVA(); case R_TLSGD_PC: return in.got->getGlobalDynAddr(sym) + a - p; case R_TLSLD_GOTPLT: return in.got->getVA() + in.got->getTlsIndexOff() + a - in.gotPlt->getVA(); case R_TLSLD_GOT: return in.got->getTlsIndexOff() + a; case R_TLSLD_PC: return in.got->getTlsIndexVA() + a - p; default: llvm_unreachable("invalid expression"); } } // This function applies relocations to sections without SHF_ALLOC bit. // Such sections are never mapped to memory at runtime. Debug sections are // an example. Relocations in non-alloc sections are much easier to // handle than in allocated sections because it will never need complex // treatment such as GOT or PLT (because at runtime no one refers them). // So, we handle relocations for non-alloc sections directly in this // function as a performance optimization. 
// Apply relocations to a section without the SHF_ALLOC bit (e.g. debug
// info). Such sections are never mapped at runtime, so every relocation is
// resolved right here without any GOT/PLT machinery.
//
// NOTE(review): angle-bracketed template argument lists appear to have been
// stripped from this chunk by extraction (e.g. "template <class ELFT, class
// RelTy>", "ArrayRef<RelTy>", "getAddend<ELFT>", "SignExtend64<bits>") --
// restore against upstream before compiling.
template void InputSection::relocateNonAlloc(uint8_t *buf, ArrayRef rels) {
  // Bit width of the target's address type; used below for sign extension.
  const unsigned bits = sizeof(typename ELFT::uint) * 8;

  for (const RelTy &rel : rels) {
    RelType type = rel.getType(config->isMips64EL);

    // GCC 8.0 or earlier have a bug that they emit R_386_GOTPC relocations
    // against _GLOBAL_OFFSET_TABLE_ for .debug_info. The bug has been fixed
    // in 2017 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82630), but we
    // need to keep this bug-compatible code for a while.
    if (config->emachine == EM_386 && type == R_386_GOTPC)
      continue;

    uint64_t offset = getOffset(rel.r_offset);
    uint8_t *bufLoc = buf + offset;
    int64_t addend = getAddend(rel);
    // REL-format targets keep the addend in the section contents itself.
    if (!RelTy::IsRela)
      addend += target->getImplicitAddend(bufLoc, type);

    Symbol &sym = getFile()->getRelocTargetSym(rel);
    RelExpr expr = target->getRelExpr(type, sym, bufLoc);
    if (expr == R_NONE)
      continue;

    if (expr != R_ABS && expr != R_DTPREL && expr != R_RISCV_ADD) {
      std::string msg = getLocation(offset) + ": has non-ABS relocation " +
                        toString(type) + " against symbol '" + toString(sym) +
                        "'";
      if (expr != R_PC) {
        error(msg);
        return;
      }

      // If the control reaches here, we found a PC-relative relocation in a
      // non-ALLOC section. Since non-ALLOC section is not loaded into memory
      // at runtime, the notion of PC-relative doesn't make sense here. So,
      // this is a usage error. However, GNU linkers historically accept such
      // relocations without any errors and relocate them as if they were at
      // address 0. For bug-compatibilty, we accept them with warnings. We
      // know Steel Bank Common Lisp as of 2018 have this bug.
      warn(msg);
      target->relocateOne(bufLoc, type,
                          SignExtend64(sym.getVA(addend - offset)));
      continue;
    }

    // A TLS symbol with no TLS program header in the output resolves to 0.
    if (sym.isTls() && !Out::tlsPhdr)
      target->relocateOne(bufLoc, type, 0);
    else
      target->relocateOne(bufLoc, type, SignExtend64(sym.getVA(addend)));
  }
}

// This is used when '-r' is given.
// For REL targets, InputSection::copyRelocations() may store artificial
// relocations aimed to update addends.
// They are handled in relocateAlloc()
// for allocatable sections, and this function does the same for
// non-allocatable sections, such as sections with debug information.
//
// Resolve the synthetic R_ABS relocations that copyRelocations() stored for
// -r links, for non-allocatable (e.g. debug) sections.
static void relocateNonAllocForRelocatable(InputSection *sec, uint8_t *buf) {
  const unsigned bits = config->is64 ? 64 : 32;
  for (const Relocation &rel : sec->relocations) {
    // InputSection::copyRelocations() adds only R_ABS relocations.
    assert(rel.expr == R_ABS);
    uint8_t *bufLoc = buf + rel.offset + sec->outSecOff;
    uint64_t targetVA = SignExtend64(rel.sym->getVA(rel.addend), bits);
    target->relocateOne(bufLoc, rel.type, targetVA);
  }
}

// Top-level relocation driver: optionally rewrites split-stack prologues for
// executable sections, then dispatches to the allocatable or non-allocatable
// relocation path.
// NOTE(review): template argument lists (e.g. "template <class ELFT>",
// "cast<InputSection>", "relas<ELFT>") appear stripped by extraction here.
template void InputSectionBase::relocate(uint8_t *buf, uint8_t *bufEnd) {
  if (flags & SHF_EXECINSTR)
    adjustSplitStackFunctionPrologues(buf, bufEnd);

  if (flags & SHF_ALLOC) {
    relocateAlloc(buf, bufEnd);
    return;
  }

  auto *sec = cast(this);
  if (config->relocatable)
    relocateNonAllocForRelocatable(sec, buf);
  else if (sec->areRelocsRela)
    sec->relocateNonAlloc(buf, sec->template relas());
  else
    sec->relocateNonAlloc(buf, sec->template rels());
}

// Apply the previously-scanned relocations of an allocatable section,
// performing the relaxations (GOT, TLS, PPC64 TOC) decided during scanning.
void InputSectionBase::relocateAlloc(uint8_t *buf, uint8_t *bufEnd) {
  assert(flags & SHF_ALLOC);
  const unsigned bits = config->wordsize * 8;

  for (const Relocation &rel : relocations) {
    uint64_t offset = rel.offset;
    // InputSections are relocated relative to their output section start.
    if (auto *sec = dyn_cast(this))
      offset += sec->outSecOff;
    uint8_t *bufLoc = buf + offset;
    RelType type = rel.type;

    uint64_t addrLoc = getOutputSection()->addr + offset;
    RelExpr expr = rel.expr;
    uint64_t targetVA = SignExtend64(
        getRelocTargetVA(file, type, rel.addend, addrLoc, *rel.sym, expr),
        bits);

    switch (expr) {
    case R_RELAX_GOT_PC:
    case R_RELAX_GOT_PC_NOPIC:
      target->relaxGot(bufLoc, type, targetVA);
      break;
    case R_PPC64_RELAX_TOC:
      // Fall back to a plain relocation if the TOC indirection cannot be
      // relaxed.
      if (!tryRelaxPPC64TocIndirection(type, rel, bufLoc))
        target->relocateOne(bufLoc, type, targetVA);
      break;
    case R_RELAX_TLS_IE_TO_LE:
      target->relaxTlsIeToLe(bufLoc, type, targetVA);
      break;
    case R_RELAX_TLS_LD_TO_LE:
    case R_RELAX_TLS_LD_TO_LE_ABS:
      target->relaxTlsLdToLe(bufLoc, type, targetVA);
break; case R_RELAX_TLS_GD_TO_LE: case R_RELAX_TLS_GD_TO_LE_NEG: target->relaxTlsGdToLe(bufLoc, type, targetVA); break; case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC: case R_RELAX_TLS_GD_TO_IE: case R_RELAX_TLS_GD_TO_IE_ABS: case R_RELAX_TLS_GD_TO_IE_GOT_OFF: case R_RELAX_TLS_GD_TO_IE_GOTPLT: target->relaxTlsGdToIe(bufLoc, type, targetVA); break; case R_PPC64_CALL: // If this is a call to __tls_get_addr, it may be part of a TLS // sequence that has been relaxed and turned into a nop. In this // case, we don't want to handle it as a call. if (read32(bufLoc) == 0x60000000) // nop break; // Patch a nop (0x60000000) to a ld. if (rel.sym->needsTocRestore) { // gcc/gfortran 5.4, 6.3 and earlier versions do not add nop for // recursive calls even if the function is preemptible. This is not // wrong in the common case where the function is not preempted at // runtime. Just ignore. if ((bufLoc + 8 > bufEnd || read32(bufLoc + 4) != 0x60000000) && rel.sym->file != file) { // Use substr(6) to remove the "__plt_" prefix. errorOrWarn(getErrorLocation(bufLoc) + "call to " + lld::toString(*rel.sym).substr(6) + " lacks nop, can't restore toc"); break; } write32(bufLoc + 4, 0xe8410018); // ld %r2, 24(%r1) } target->relocateOne(bufLoc, type, targetVA); break; default: target->relocateOne(bufLoc, type, targetVA); break; } } } // For each function-defining prologue, find any calls to __morestack, // and replace them with calls to __morestack_non_split. static void switchMorestackCallsToMorestackNonSplit( DenseSet &prologues, std::vector &morestackCalls) { // If the target adjusted a function's prologue, all calls to // __morestack inside that function should be switched to // __morestack_non_split. Symbol *moreStackNonSplit = symtab->find("__morestack_non_split"); if (!moreStackNonSplit) { error("Mixing split-stack objects requires a definition of " "__morestack_non_split"); return; } // Sort both collections to compare addresses efficiently. 
llvm::sort(morestackCalls, [](const Relocation *l, const Relocation *r) { return l->offset < r->offset; }); std::vector functions(prologues.begin(), prologues.end()); llvm::sort(functions, [](const Defined *l, const Defined *r) { return l->value < r->value; }); auto it = morestackCalls.begin(); for (Defined *f : functions) { // Find the first call to __morestack within the function. while (it != morestackCalls.end() && (*it)->offset < f->value) ++it; // Adjust all calls inside the function. while (it != morestackCalls.end() && (*it)->offset < f->value + f->size) { (*it)->sym = moreStackNonSplit; ++it; } } } static bool enclosingPrologueAttempted(uint64_t offset, const DenseSet &prologues) { for (Defined *f : prologues) if (f->value <= offset && offset < f->value + f->size) return true; return false; } // If a function compiled for split stack calls a function not // compiled for split stack, then the caller needs its prologue // adjusted to ensure that the called function will have enough stack // available. Find those functions, and adjust their prologues. template void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf, uint8_t *end) { if (!getFile()->splitStack) return; DenseSet prologues; std::vector morestackCalls; for (Relocation &rel : relocations) { // Local symbols can't possibly be cross-calls, and should have been // resolved long before this line. if (rel.sym->isLocal()) continue; // Ignore calls into the split-stack api. if (rel.sym->getName().startswith("__morestack")) { if (rel.sym->getName().equals("__morestack")) morestackCalls.push_back(&rel); continue; } // A relocation to non-function isn't relevant. Sometimes // __morestack is not marked as a function, so this check comes // after the name check. if (rel.sym->type != STT_FUNC) continue; // If the callee's-file was compiled with split stack, nothing to do. In // this context, a "Defined" symbol is one "defined by the binary currently // being produced". 
So an "undefined" symbol might be provided by a shared // library. It is not possible to tell how such symbols were compiled, so be // conservative. if (Defined *d = dyn_cast(rel.sym)) if (InputSection *isec = cast_or_null(d->section)) if (!isec || !isec->getFile() || isec->getFile()->splitStack) continue; if (enclosingPrologueAttempted(rel.offset, prologues)) continue; if (Defined *f = getEnclosingFunction(rel.offset)) { prologues.insert(f); if (target->adjustPrologueForCrossSplitStack(buf + getOffset(f->value), end, f->stOther)) continue; if (!getFile()->someNoSplitStack) error(toString(this) + ": " + f->getName() + " (with -fsplit-stack) calls " + rel.sym->getName() + " (without -fsplit-stack), but couldn't adjust its prologue"); } } if (target->needsMoreStackNonSplit) switchMorestackCallsToMorestackNonSplit(prologues, morestackCalls); } template void InputSection::writeTo(uint8_t *buf) { if (type == SHT_NOBITS) return; if (auto *s = dyn_cast(this)) { s->writeTo(buf + outSecOff); return; } // If -r or --emit-relocs is given, then an InputSection // may be a relocation section. if (type == SHT_RELA) { copyRelocations(buf + outSecOff, getDataAs()); return; } if (type == SHT_REL) { copyRelocations(buf + outSecOff, getDataAs()); return; } // If -r is given, we may have a SHT_GROUP section. if (type == SHT_GROUP) { copyShtGroup(buf + outSecOff); return; } // If this is a compressed section, uncompress section contents directly // to the buffer. if (uncompressedSize >= 0) { size_t size = uncompressedSize; if (Error e = zlib::uncompress(toStringRef(rawData), (char *)(buf + outSecOff), size)) fatal(toString(this) + ": uncompress failed: " + llvm::toString(std::move(e))); uint8_t *bufEnd = buf + outSecOff + size; relocate(buf, bufEnd); return; } // Copy section contents from source object file to output file // and then apply relocations. 
memcpy(buf + outSecOff, data().data(), data().size()); uint8_t *bufEnd = buf + outSecOff + data().size(); relocate(buf, bufEnd); } void InputSection::replace(InputSection *other) { alignment = std::max(alignment, other->alignment); // When a section is replaced with another section that was allocated to // another partition, the replacement section (and its associated sections) // need to be placed in the main partition so that both partitions will be // able to access it. if (partition != other->partition) { partition = 1; for (InputSection *isec : dependentSections) isec->partition = 1; } other->repl = repl; other->markDead(); } template EhInputSection::EhInputSection(ObjFile &f, const typename ELFT::Shdr &header, StringRef name) : InputSectionBase(f, header, name, InputSectionBase::EHFrame) {} SyntheticSection *EhInputSection::getParent() const { return cast_or_null(parent); } // Returns the index of the first relocation that points to a region between // Begin and Begin+Size. template static unsigned getReloc(IntTy begin, IntTy size, const ArrayRef &rels, unsigned &relocI) { // Start search from RelocI for fast access. That works because the // relocations are sorted in .eh_frame. for (unsigned n = rels.size(); relocI < n; ++relocI) { const RelTy &rel = rels[relocI]; if (rel.r_offset < begin) continue; if (rel.r_offset < begin + size) return relocI; return -1; } return -1; } // .eh_frame is a sequence of CIE or FDE records. // This function splits an input section into records and returns them. template void EhInputSection::split() { if (areRelocsRela) split(relas()); else split(rels()); } template void EhInputSection::split(ArrayRef rels) { unsigned relI = 0; for (size_t off = 0, end = data().size(); off != end;) { size_t size = readEhRecordSize(this, off); pieces.emplace_back(off, this, size, getReloc(off, size, rels, relI)); // The empty record is the end marker. 
if (size == 4)
  // (tail of EhInputSection::split, whose head is in the preceding chunk)
  // A length field of 4 is the empty record that terminates .eh_frame.
  break;
off += size;
  }
}

// Returns the offset of the first null entry of width entSize in s, or
// StringRef::npos if there is none. Used to split string-merge sections.
static size_t findNull(StringRef s, size_t entSize) {
  // Optimize the common case.
  if (entSize == 1)
    return s.find(0);

  for (unsigned i = 0, n = s.size(); i != n; i += entSize) {
    const char *b = s.begin() + i;
    if (std::all_of(b, b + entSize, [](char c) { return c == 0; }))
      return i;
  }
  return StringRef::npos;
}

// NOTE(review): template argument lists (e.g. "cast_or_null<...>",
// "ArrayRef<uint8_t>", "ObjFile<ELFT>") appear stripped by extraction in
// this chunk; restore from upstream before compiling.
SyntheticSection *MergeInputSection::getParent() const {
  return cast_or_null(parent);
}

// Split SHF_STRINGS section. Such section is a sequence of
// null-terminated strings.
void MergeInputSection::splitStrings(ArrayRef data, size_t entSize) {
  size_t off = 0;
  bool isAlloc = flags & SHF_ALLOC;
  StringRef s = toStringRef(data);

  while (!s.empty()) {
    size_t end = findNull(s, entSize);
    if (end == StringRef::npos)
      fatal(toString(this) + ": string is not null terminated");
    size_t size = end + entSize;
    // Each piece is hashed up front; the last argument presumably marks
    // non-SHF_ALLOC pieces as live from the start -- confirm against
    // SectionPiece's constructor.
    pieces.emplace_back(off, xxHash64(s.substr(0, size)), !isAlloc);
    s = s.substr(size);
    off += size;
  }
}

// Split non-SHF_STRINGS section. Such section is a sequence of
// fixed size records.
void MergeInputSection::splitNonStrings(ArrayRef data, size_t entSize) {
  size_t size = data.size();
  assert((size % entSize) == 0);
  bool isAlloc = flags & SHF_ALLOC;

  for (size_t i = 0; i != size; i += entSize)
    pieces.emplace_back(i, xxHash64(data.slice(i, entSize)), !isAlloc);
}

template
MergeInputSection::MergeInputSection(ObjFile &f,
                                     const typename ELFT::Shdr &header,
                                     StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Merge) {}

// Constructor for synthetic mergeable sections created by the linker itself
// (no backing input file).
MergeInputSection::MergeInputSection(uint64_t flags, uint32_t type,
                                     uint64_t entsize, ArrayRef data,
                                     StringRef name)
    : InputSectionBase(nullptr, flags, type, entsize, /*Link*/ 0, /*Info*/ 0,
                       /*Alignment*/ entsize, data, name, SectionBase::Merge) {}

// This function is called after we obtain a complete list of input sections
// that need to be linked. This is responsible to split section contents
// into small chunks for further processing.
//
// Note that this function is called from parallelForEach.
// This must be
// thread-safe (i.e. no memory allocation from the pools).
void MergeInputSection::splitIntoPieces() {
  assert(pieces.empty());

  if (flags & SHF_STRINGS)
    splitStrings(data(), entsize);
  else
    splitNonStrings(data(), entsize);
}

// Returns the SectionPiece containing a given input-section offset.
// Aborts the link if the offset lies outside the section.
SectionPiece *MergeInputSection::getSectionPiece(uint64_t offset) {
  if (this->data().size() <= offset)
    fatal(toString(this) + ": offset is outside the section");

  // If Offset is not at beginning of a section piece, it is not in the map.
  // In that case we need to do a binary search of the original section piece
  // vector: partition_point finds the first piece starting past the offset,
  // so the piece just before it is the one containing the offset.
  auto it = partition_point(
      pieces, [=](SectionPiece p) { return p.inputOff <= offset; });
  return &it[-1];
}

// Returns the offset in an output section for a given input offset.
// Because contents of a mergeable section is not contiguous in output,
// it is not just an addition to a base output offset.
uint64_t MergeInputSection::getParentOffset(uint64_t offset) const {
  // If Offset is not at beginning of a section piece, it is not in the map.
  // In that case we need to search from the original section piece vector.
const SectionPiece &piece = *(const_cast(this)->getSectionPiece (offset)); uint64_t addend = offset - piece.inputOff; return piece.outputOff + addend; } template InputSection::InputSection(ObjFile &, const ELF32LE::Shdr &, StringRef); template InputSection::InputSection(ObjFile &, const ELF32BE::Shdr &, StringRef); template InputSection::InputSection(ObjFile &, const ELF64LE::Shdr &, StringRef); template InputSection::InputSection(ObjFile &, const ELF64BE::Shdr &, StringRef); template std::string InputSectionBase::getLocation(uint64_t); template std::string InputSectionBase::getLocation(uint64_t); template std::string InputSectionBase::getLocation(uint64_t); template std::string InputSectionBase::getLocation(uint64_t); template void InputSection::writeTo(uint8_t *); template void InputSection::writeTo(uint8_t *); template void InputSection::writeTo(uint8_t *); template void InputSection::writeTo(uint8_t *); template MergeInputSection::MergeInputSection(ObjFile &, const ELF32LE::Shdr &, StringRef); template MergeInputSection::MergeInputSection(ObjFile &, const ELF32BE::Shdr &, StringRef); template MergeInputSection::MergeInputSection(ObjFile &, const ELF64LE::Shdr &, StringRef); template MergeInputSection::MergeInputSection(ObjFile &, const ELF64BE::Shdr &, StringRef); template EhInputSection::EhInputSection(ObjFile &, const ELF32LE::Shdr &, StringRef); template EhInputSection::EhInputSection(ObjFile &, const ELF32BE::Shdr &, StringRef); template EhInputSection::EhInputSection(ObjFile &, const ELF64LE::Shdr &, StringRef); template EhInputSection::EhInputSection(ObjFile &, const ELF64BE::Shdr &, StringRef); template void EhInputSection::split(); template void EhInputSection::split(); template void EhInputSection::split(); template void EhInputSection::split(); } // namespace elf } // namespace lld Index: head/contrib/llvm-project/lld/ELF/Relocations.cpp =================================================================== --- 
head/contrib/llvm-project/lld/ELF/Relocations.cpp (revision 359084) +++ head/contrib/llvm-project/lld/ELF/Relocations.cpp (revision 359085) @@ -1,1969 +1,1973 @@ //===- Relocations.cpp ----------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file contains platform-independent functions to process relocations. // I'll describe the overview of this file here. // // Simple relocations are easy to handle for the linker. For example, // for R_X86_64_PC64 relocs, the linker just has to fix up locations // with the relative offsets to the target symbols. It would just be // reading records from relocation sections and applying them to output. // // But not all relocations are that easy to handle. For example, for // R_386_GOTOFF relocs, the linker has to create new GOT entries for // symbols if they don't exist, and fix up locations with GOT entry // offsets from the beginning of GOT section. So there is more than // fixing addresses in relocation processing. // // ELF defines a large number of complex relocations. // // The functions in this file analyze relocations and do whatever needs // to be done. It includes, but not limited to, the following. 
// // - create GOT/PLT entries // - create new relocations in .dynsym to let the dynamic linker resolve // them at runtime (since ELF supports dynamic linking, not all // relocations can be resolved at link-time) // - create COPY relocs and reserve space in .bss // - replace expensive relocs (in terms of runtime cost) with cheap ones // - error out infeasible combinations such as PIC and non-relative relocs // // Note that the functions in this file don't actually apply relocations // because it doesn't know about the output file nor the output file buffer. // It instead stores Relocation objects to InputSection's Relocations // vector to let it apply later in InputSection::writeTo. // //===----------------------------------------------------------------------===// #include "Relocations.h" #include "Config.h" #include "LinkerScript.h" #include "OutputSections.h" #include "SymbolTable.h" #include "Symbols.h" #include "SyntheticSections.h" #include "Target.h" #include "Thunks.h" #include "lld/Common/ErrorHandler.h" #include "lld/Common/Memory.h" #include "lld/Common/Strings.h" #include "llvm/ADT/SmallSet.h" #include "llvm/Demangle/Demangle.h" #include "llvm/Support/Endian.h" #include "llvm/Support/raw_ostream.h" #include using namespace llvm; using namespace llvm::ELF; using namespace llvm::object; using namespace llvm::support::endian; namespace lld { namespace elf { static Optional getLinkerScriptLocation(const Symbol &sym) { for (BaseCommand *base : script->sectionCommands) if (auto *cmd = dyn_cast(base)) if (cmd->sym == &sym) return cmd->location; return None; } // Construct a message in the following format. 
// // >>> defined in /home/alice/src/foo.o // >>> referenced by bar.c:12 (/home/alice/src/bar.c:12) // >>> /home/alice/src/bar.o:(.text+0x1) static std::string getLocation(InputSectionBase &s, const Symbol &sym, uint64_t off) { std::string msg = "\n>>> defined in "; if (sym.file) msg += toString(sym.file); else if (Optional loc = getLinkerScriptLocation(sym)) msg += *loc; msg += "\n>>> referenced by "; std::string src = s.getSrcMsg(sym, off); if (!src.empty()) msg += src + "\n>>> "; return msg + s.getObjMsg(off); } namespace { // Build a bitmask with one bit set for each RelExpr. // // Constexpr function arguments can't be used in static asserts, so we // use template arguments to build the mask. // But function template partial specializations don't exist (needed // for base case of the recursion), so we need a dummy struct. template struct RelExprMaskBuilder { static inline uint64_t build() { return 0; } }; // Specialization for recursive case. template struct RelExprMaskBuilder { static inline uint64_t build() { static_assert(0 <= Head && Head < 64, "RelExpr is too large for 64-bit mask!"); return (uint64_t(1) << Head) | RelExprMaskBuilder::build(); } }; } // namespace // Return true if `Expr` is one of `Exprs`. // There are fewer than 64 RelExpr's, so we can represent any set of // RelExpr's as a constant bit mask and test for membership with a // couple cheap bitwise operations. template bool oneof(RelExpr expr) { assert(0 <= expr && (int)expr < 64 && "RelExpr is too large for 64-bit mask!"); return (uint64_t(1) << expr) & RelExprMaskBuilder::build(); } // This function is similar to the `handleTlsRelocation`. MIPS does not // support any relaxations for TLS relocations so by factoring out MIPS // handling in to the separate function we can simplify the code and do not // pollute other `handleTlsRelocation` by MIPS `ifs` statements. // Mips has a custom MipsGotSection that handles the writing of GOT entries // without dynamic relocations. 
static unsigned handleMipsTlsRelocation(RelType type, Symbol &sym, InputSectionBase &c, uint64_t offset, int64_t addend, RelExpr expr) { if (expr == R_MIPS_TLSLD) { in.mipsGot->addTlsIndex(*c.file); c.relocations.push_back({expr, type, offset, addend, &sym}); return 1; } if (expr == R_MIPS_TLSGD) { in.mipsGot->addDynTlsEntry(*c.file, sym); c.relocations.push_back({expr, type, offset, addend, &sym}); return 1; } return 0; } // Notes about General Dynamic and Local Dynamic TLS models below. They may // require the generation of a pair of GOT entries that have associated dynamic // relocations. The pair of GOT entries created are of the form GOT[e0] Module // Index (Used to find pointer to TLS block at run-time) GOT[e1] Offset of // symbol in TLS block. // // Returns the number of relocations processed. template static unsigned handleTlsRelocation(RelType type, Symbol &sym, InputSectionBase &c, typename ELFT::uint offset, int64_t addend, RelExpr expr) { if (!sym.isTls()) return 0; if (config->emachine == EM_MIPS) return handleMipsTlsRelocation(type, sym, c, offset, addend, expr); if (oneof( expr) && config->shared) { if (in.got->addDynTlsEntry(sym)) { uint64_t off = in.got->getGlobalDynOffset(sym); mainPart->relaDyn->addReloc( {target->tlsDescRel, in.got, off, !sym.isPreemptible, &sym, 0}); } if (expr != R_TLSDESC_CALL) c.relocations.push_back({expr, type, offset, addend, &sym}); return 1; } bool canRelax = config->emachine != EM_ARM && config->emachine != EM_HEXAGON && config->emachine != EM_RISCV; // If we are producing an executable and the symbol is non-preemptable, it // must be defined and the code sequence can be relaxed to use Local-Exec. // // ARM and RISC-V do not support any relaxations for TLS relocations, however, // we can omit the DTPMOD dynamic relocations and resolve them at link time // because them are always 1. This may be necessary for static linking as // DTPMOD may not be expected at load time. 
bool isLocalInExecutable = !sym.isPreemptible && !config->shared; // Local Dynamic is for access to module local TLS variables, while still // being suitable for being dynamically loaded via dlopen. GOT[e0] is the // module index, with a special value of 0 for the current module. GOT[e1] is // unused. There only needs to be one module index entry. if (oneof( expr)) { // Local-Dynamic relocs can be relaxed to Local-Exec. if (canRelax && !config->shared) { c.relocations.push_back( {target->adjustRelaxExpr(type, nullptr, R_RELAX_TLS_LD_TO_LE), type, offset, addend, &sym}); return target->getTlsGdRelaxSkip(type); } if (expr == R_TLSLD_HINT) return 1; if (in.got->addTlsIndex()) { if (isLocalInExecutable) in.got->relocations.push_back( {R_ADDEND, target->symbolicRel, in.got->getTlsIndexOff(), 1, &sym}); else mainPart->relaDyn->addReloc(target->tlsModuleIndexRel, in.got, in.got->getTlsIndexOff(), nullptr); } c.relocations.push_back({expr, type, offset, addend, &sym}); return 1; } // Local-Dynamic relocs can be relaxed to Local-Exec. if (expr == R_DTPREL && !config->shared) { c.relocations.push_back( {target->adjustRelaxExpr(type, nullptr, R_RELAX_TLS_LD_TO_LE), type, offset, addend, &sym}); return 1; } // Local-Dynamic sequence where offset of tls variable relative to dynamic // thread pointer is stored in the got. This cannot be relaxed to Local-Exec. if (expr == R_TLSLD_GOT_OFF) { if (!sym.isInGot()) { in.got->addEntry(sym); uint64_t off = sym.getGotOffset(); in.got->relocations.push_back( {R_ABS, target->tlsOffsetRel, off, 0, &sym}); } c.relocations.push_back({expr, type, offset, addend, &sym}); return 1; } if (oneof(expr)) { if (!canRelax || config->shared) { if (in.got->addDynTlsEntry(sym)) { uint64_t off = in.got->getGlobalDynOffset(sym); if (isLocalInExecutable) // Write one to the GOT slot. 
in.got->relocations.push_back( {R_ADDEND, target->symbolicRel, off, 1, &sym}); else mainPart->relaDyn->addReloc(target->tlsModuleIndexRel, in.got, off, &sym); // If the symbol is preemptible we need the dynamic linker to write // the offset too. uint64_t offsetOff = off + config->wordsize; if (sym.isPreemptible) mainPart->relaDyn->addReloc(target->tlsOffsetRel, in.got, offsetOff, &sym); else in.got->relocations.push_back( {R_ABS, target->tlsOffsetRel, offsetOff, 0, &sym}); } c.relocations.push_back({expr, type, offset, addend, &sym}); return 1; } // Global-Dynamic relocs can be relaxed to Initial-Exec or Local-Exec // depending on the symbol being locally defined or not. if (sym.isPreemptible) { c.relocations.push_back( {target->adjustRelaxExpr(type, nullptr, R_RELAX_TLS_GD_TO_IE), type, offset, addend, &sym}); if (!sym.isInGot()) { in.got->addEntry(sym); mainPart->relaDyn->addReloc(target->tlsGotRel, in.got, sym.getGotOffset(), &sym); } } else { c.relocations.push_back( {target->adjustRelaxExpr(type, nullptr, R_RELAX_TLS_GD_TO_LE), type, offset, addend, &sym}); } return target->getTlsGdRelaxSkip(type); } // Initial-Exec relocs can be relaxed to Local-Exec if the symbol is locally // defined. if (oneof(expr) && canRelax && isLocalInExecutable) { c.relocations.push_back({R_RELAX_TLS_IE_TO_LE, type, offset, addend, &sym}); return 1; } if (expr == R_TLSIE_HINT) return 1; return 0; } static RelType getMipsPairType(RelType type, bool isLocal) { switch (type) { case R_MIPS_HI16: return R_MIPS_LO16; case R_MIPS_GOT16: // In case of global symbol, the R_MIPS_GOT16 relocation does not // have a pair. Each global symbol has a unique entry in the GOT // and a corresponding instruction with help of the R_MIPS_GOT16 // relocation loads an address of the symbol. In case of local // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold // the high 16 bits of the symbol's value. A paired R_MIPS_LO16 // relocations handle low 16 bits of the address. 
// That allows
// to allocate only one GOT entry for every 64 KBytes of local data.
// (tail of getMipsPairType, whose switch head is in the preceding chunk)
    return isLocal ? R_MIPS_LO16 : R_MIPS_NONE;
  case R_MICROMIPS_GOT16:
    return isLocal ? R_MICROMIPS_LO16 : R_MIPS_NONE;
  case R_MIPS_PCHI16:
    return R_MIPS_PCLO16;
  case R_MICROMIPS_HI16:
    return R_MICROMIPS_LO16;
  default:
    return R_MIPS_NONE;
  }
}

// True if non-preemptable symbol always has the same value regardless of where
// the DSO is loaded.
// NOTE(review): template argument lists (e.g. "dyn_cast<Defined>", the
// oneof<...> expression lists, "template <class ELFT>") were stripped from
// this chunk by extraction; restore from upstream before compiling.
static bool isAbsolute(const Symbol &sym) {
  if (sym.isUndefWeak())
    return true;
  if (const auto *dr = dyn_cast(&sym))
    return dr->section == nullptr; // Absolute symbol.
  return false;
}

// TLS symbols are also treated as position-independent values here.
static bool isAbsoluteValue(const Symbol &sym) {
  return isAbsolute(sym) || sym.isTls();
}

// Returns true if Expr refers a PLT entry.
static bool needsPlt(RelExpr expr) {
  return oneof(expr);
}

// Returns true if Expr refers a GOT entry. Note that this function
// returns false for TLS variables even though they need GOT, because
// TLS variables uses GOT differently than the regular variables.
static bool needsGot(RelExpr expr) {
  return oneof(
      expr);
}

// True if this expression is of the form Sym - X, where X is a position in the
// file (PC, or GOT for example).
static bool isRelExpr(RelExpr expr) {
  return oneof(expr);
}

// Returns true if a given relocation can be computed at link-time.
//
// For instance, we know the offset from a relocation to its target at
// link-time if the relocation is PC-relative and refers a
// non-interposable function in the same executable. This function
// will return true for such relocation.
//
// If this function returns false, that means we need to emit a
// dynamic relocation so that the relocation will be fixed at load-time.
static bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
                                     InputSectionBase &s, uint64_t relOff) {
  // These expressions always compute a constant
  if (oneof(
          e))
    return true;

  // These never do, except if the entire file is position dependent or if
  // only the low bits are used.
  if (e == R_GOT || e == R_PLT || e == R_TLSDESC)
    return target->usesOnlyLowPageBits(type) || !config->isPic;

  if (sym.isPreemptible)
    return false;
  if (!config->isPic)
    return true;

  // The size of a non preemptible symbol is a constant.
  if (e == R_SIZE)
    return true;

  // For the target and the relocation, we want to know if they are
  // absolute or relative.
  bool absVal = isAbsoluteValue(sym);
  bool relE = isRelExpr(e);
  if (absVal && !relE)
    return true;
  if (!absVal && relE)
    return true;
  if (!absVal && !relE)
    return target->usesOnlyLowPageBits(type);

  assert(absVal && relE);

  // Allow R_PLT_PC (optimized to R_PC here) to a hidden undefined weak symbol
  // in PIC mode. This is a little strange, but it allows us to link function
  // calls to such symbols (e.g. glibc/stdlib/exit.c:__run_exit_handlers).
  // Normally such a call will be guarded with a comparison, which will load a
  // zero from the GOT.
  if (sym.isUndefWeak())
    return true;

  // We set the final symbols values for linker script defined symbols later.
  // They always can be computed as a link time constant.
  if (sym.scriptDefined)
    return true;

  error("relocation " + toString(type) + " cannot refer to absolute symbol: " +
        toString(sym) + getLocation(s, sym, relOff));
  return true;
}

// Map an expression to its PLT-using variant, for references that must be
// routed through the PLT.
static RelExpr toPlt(RelExpr expr) {
  switch (expr) {
  case R_PPC64_CALL:
    return R_PPC64_CALL_PLT;
  case R_PC:
    return R_PLT_PC;
  case R_ABS:
    return R_PLT;
  default:
    return expr;
  }
}

static RelExpr fromPlt(RelExpr expr) {
  // We decided not to use a plt. Optimize a reference to the plt to a
  // reference to the symbol itself.
  switch (expr) {
  case R_PLT_PC:
  case R_PPC32_PLTREL:
    return R_PC;
  case R_PPC64_CALL_PLT:
    return R_PPC64_CALL;
  case R_PLT:
    return R_ABS;
  default:
    return expr;
  }
}

// Returns true if a given shared symbol is in a read-only segment in a DSO.
template static bool isReadOnly(SharedSymbol &ss) {
  using Elf_Phdr = typename ELFT::Phdr;

  // Determine if the symbol is read-only by scanning the DSO's program headers.
const SharedFile &file = ss.getFile(); for (const Elf_Phdr &phdr : check(file.template getObj().program_headers())) if ((phdr.p_type == ELF::PT_LOAD || phdr.p_type == ELF::PT_GNU_RELRO) && !(phdr.p_flags & ELF::PF_W) && ss.value >= phdr.p_vaddr && ss.value < phdr.p_vaddr + phdr.p_memsz) return true; return false; } // Returns symbols at the same offset as a given symbol, including SS itself. // // If two or more symbols are at the same offset, and at least one of // them are copied by a copy relocation, all of them need to be copied. // Otherwise, they would refer to different places at runtime. template static SmallSet getSymbolsAt(SharedSymbol &ss) { using Elf_Sym = typename ELFT::Sym; SharedFile &file = ss.getFile(); SmallSet ret; for (const Elf_Sym &s : file.template getGlobalELFSyms()) { if (s.st_shndx == SHN_UNDEF || s.st_shndx == SHN_ABS || s.getType() == STT_TLS || s.st_value != ss.value) continue; StringRef name = check(s.getName(file.getStringTable())); Symbol *sym = symtab->find(name); if (auto *alias = dyn_cast_or_null(sym)) ret.insert(alias); } return ret; } // When a symbol is copy relocated or we create a canonical plt entry, it is // effectively a defined symbol. In the case of copy relocation the symbol is // in .bss and in the case of a canonical plt entry it is in .plt. This function // replaces the existing symbol with a Defined pointing to the appropriate // location. static void replaceWithDefined(Symbol &sym, SectionBase *sec, uint64_t value, uint64_t size) { Symbol old = sym; sym.replace(Defined{sym.file, sym.getName(), sym.binding, sym.stOther, sym.type, value, size, sec}); sym.pltIndex = old.pltIndex; sym.gotIndex = old.gotIndex; sym.verdefIndex = old.verdefIndex; sym.exportDynamic = true; sym.isUsedInRegularObj = true; } // Reserve space in .bss or .bss.rel.ro for copy relocation. // // The copy relocation is pretty much a hack. 
If you use a copy relocation // in your program, not only the symbol name but the symbol's size, RW/RO // bit and alignment become part of the ABI. In addition to that, if the // symbol has aliases, the aliases become part of the ABI. That's subtle, // but if you violate that implicit ABI, that can cause very counter- // intuitive consequences. // // So, what is the copy relocation? It's for linking non-position // independent code to DSOs. In an ideal world, all references to data // exported by DSOs should go indirectly through GOT. But if object files // are compiled as non-PIC, all data references are direct. There is no // way for the linker to transform the code to use GOT, as machine // instructions are already set in stone in object files. This is where // the copy relocation takes a role. // // A copy relocation instructs the dynamic linker to copy data from a DSO // to a specified address (which is usually in .bss) at load-time. If the // static linker (that's us) finds a direct data reference to a DSO // symbol, it creates a copy relocation, so that the symbol can be // resolved as if it were in .bss rather than in a DSO. // // As you can see in this function, we create a copy relocation for the // dynamic linker, and the relocation contains not only symbol name but // various other information about the symbol. So, such attributes become a // part of the ABI. // // Note for application developers: I can give you a piece of advice if // you are writing a shared library. You probably should export only // functions from your library. You shouldn't export variables. // // As an example what can happen when you export variables without knowing // the semantics of copy relocations, assume that you have an exported // variable of type T. It is an ABI-breaking change to add new members at // end of T even though doing that doesn't change the layout of the // existing members. 
That's because the space for the new members are not // reserved in .bss unless you recompile the main program. That means they // are likely to overlap with other data that happens to be laid out next // to the variable in .bss. This kind of issue is sometimes very hard to // debug. What's a solution? Instead of exporting a variable V from a DSO, // define an accessor getV(). template static void addCopyRelSymbol(SharedSymbol &ss) { // Copy relocation against zero-sized symbol doesn't make sense. uint64_t symSize = ss.getSize(); if (symSize == 0 || ss.alignment == 0) fatal("cannot create a copy relocation for symbol " + toString(ss)); // See if this symbol is in a read-only segment. If so, preserve the symbol's // memory protection by reserving space in the .bss.rel.ro section. bool isRO = isReadOnly(ss); BssSection *sec = make(isRO ? ".bss.rel.ro" : ".bss", symSize, ss.alignment); OutputSection *osec = (isRO ? in.bssRelRo : in.bss)->getParent(); // At this point, sectionBases has been migrated to sections. Append sec to // sections. if (osec->sectionCommands.empty() || !isa(osec->sectionCommands.back())) osec->sectionCommands.push_back(make("")); auto *isd = cast(osec->sectionCommands.back()); isd->sections.push_back(sec); osec->commitSection(sec); // Look through the DSO's dynamic symbol table for aliases and create a // dynamic symbol for each one. This causes the copy relocation to correctly // interpose any aliases. for (SharedSymbol *sym : getSymbolsAt(ss)) replaceWithDefined(*sym, sec, 0, sym->size); mainPart->relaDyn->addReloc(target->copyRel, sec, 0, &ss); } // MIPS has an odd notion of "paired" relocations to calculate addends. // For example, if a relocation is of R_MIPS_HI16, there must be a // R_MIPS_LO16 relocation after that, and an addend is calculated using // the two relocations. 
template static int64_t computeMipsAddend(const RelTy &rel, const RelTy *end, InputSectionBase &sec, RelExpr expr, bool isLocal) { if (expr == R_MIPS_GOTREL && isLocal) return sec.getFile()->mipsGp0; // The ABI says that the paired relocation is used only for REL. // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf if (RelTy::IsRela) return 0; RelType type = rel.getType(config->isMips64EL); uint32_t pairTy = getMipsPairType(type, isLocal); if (pairTy == R_MIPS_NONE) return 0; const uint8_t *buf = sec.data().data(); uint32_t symIndex = rel.getSymbol(config->isMips64EL); // To make things worse, paired relocations might not be contiguous in // the relocation table, so we need to do linear search. *sigh* for (const RelTy *ri = &rel; ri != end; ++ri) if (ri->getType(config->isMips64EL) == pairTy && ri->getSymbol(config->isMips64EL) == symIndex) return target->getImplicitAddend(buf + ri->r_offset, pairTy); warn("can't find matching " + toString(pairTy) + " relocation for " + toString(type)); return 0; } // Returns an addend of a given relocation. If it is RELA, an addend // is in a relocation itself. If it is REL, we need to read it from an // input section. template static int64_t computeAddend(const RelTy &rel, const RelTy *end, InputSectionBase &sec, RelExpr expr, bool isLocal) { int64_t addend; RelType type = rel.getType(config->isMips64EL); if (RelTy::IsRela) { addend = getAddend(rel); } else { const uint8_t *buf = sec.data().data(); addend = target->getImplicitAddend(buf + rel.r_offset, type); } if (config->emachine == EM_PPC64 && config->isPic && type == R_PPC64_TOC) addend += getPPC64TocBase(); if (config->emachine == EM_MIPS) addend += computeMipsAddend(rel, end, sec, expr, isLocal); return addend; } // Custom error message if Sym is defined in a discarded section. 
template static std::string maybeReportDiscarded(Undefined &sym) { auto *file = dyn_cast_or_null>(sym.file); if (!file || !sym.discardedSecIdx || file->getSections()[sym.discardedSecIdx] != &InputSection::discarded) return ""; ArrayRef> objSections = CHECK(file->getObj().sections(), file); std::string msg; if (sym.type == ELF::STT_SECTION) { msg = "relocation refers to a discarded section: "; msg += CHECK( file->getObj().getSectionName(&objSections[sym.discardedSecIdx]), file); } else { msg = "relocation refers to a symbol in a discarded section: " + toString(sym); } msg += "\n>>> defined in " + toString(file); Elf_Shdr_Impl elfSec = objSections[sym.discardedSecIdx - 1]; if (elfSec.sh_type != SHT_GROUP) return msg; // If the discarded section is a COMDAT. StringRef signature = file->getShtGroupSignature(objSections, elfSec); if (const InputFile *prevailing = symtab->comdatGroups.lookup(CachedHashStringRef(signature))) msg += "\n>>> section group signature: " + signature.str() + "\n>>> prevailing definition is in " + toString(prevailing); return msg; } // Undefined diagnostics are collected in a vector and emitted once all of // them are known, so that some postprocessing on the list of undefined symbols // can happen before lld emits diagnostics. struct UndefinedDiag { Symbol *sym; struct Loc { InputSectionBase *sec; uint64_t offset; }; std::vector locs; bool isWarning; }; static std::vector undefs; // Check whether the definition name def is a mangled function name that matches // the reference name ref. static bool canSuggestExternCForCXX(StringRef ref, StringRef def) { llvm::ItaniumPartialDemangler d; std::string name = def.str(); if (d.partialDemangle(name.c_str())) return false; char *buf = d.getFunctionName(nullptr, nullptr); if (!buf) return false; bool ret = ref == buf; free(buf); return ret; } // Suggest an alternative spelling of an "undefined symbol" diagnostic. 
Returns // the suggested symbol, which is either in the symbol table, or in the same // file of sym. template static const Symbol *getAlternativeSpelling(const Undefined &sym, std::string &pre_hint, std::string &post_hint) { DenseMap map; if (auto *file = dyn_cast_or_null>(sym.file)) { // If sym is a symbol defined in a discarded section, maybeReportDiscarded() // will give an error. Don't suggest an alternative spelling. if (file && sym.discardedSecIdx != 0 && file->getSections()[sym.discardedSecIdx] == &InputSection::discarded) return nullptr; // Build a map of local defined symbols. for (const Symbol *s : sym.file->getSymbols()) if (s->isLocal() && s->isDefined()) map.try_emplace(s->getName(), s); } auto suggest = [&](StringRef newName) -> const Symbol * { // If defined locally. if (const Symbol *s = map.lookup(newName)) return s; // If in the symbol table and not undefined. if (const Symbol *s = symtab->find(newName)) if (!s->isUndefined()) return s; return nullptr; }; // This loop enumerates all strings of Levenshtein distance 1 as typo // correction candidates and suggests the one that exists as a non-undefined // symbol. StringRef name = sym.getName(); for (size_t i = 0, e = name.size(); i != e + 1; ++i) { // Insert a character before name[i]. std::string newName = (name.substr(0, i) + "0" + name.substr(i)).str(); for (char c = '0'; c <= 'z'; ++c) { newName[i] = c; if (const Symbol *s = suggest(newName)) return s; } if (i == e) break; // Substitute name[i]. newName = name; for (char c = '0'; c <= 'z'; ++c) { newName[i] = c; if (const Symbol *s = suggest(newName)) return s; } // Transpose name[i] and name[i+1]. This is of edit distance 2 but it is // common. if (i + 1 < e) { newName[i] = name[i + 1]; newName[i + 1] = name[i]; if (const Symbol *s = suggest(newName)) return s; } // Delete name[i]. newName = (name.substr(0, i) + name.substr(i + 1)).str(); if (const Symbol *s = suggest(newName)) return s; } // Case mismatch, e.g. Foo vs FOO. 
for (auto &it : map) if (name.equals_lower(it.first)) return it.second; for (Symbol *sym : symtab->symbols()) if (!sym->isUndefined() && name.equals_lower(sym->getName())) return sym; // The reference may be a mangled name while the definition is not. Suggest a // missing extern "C". if (name.startswith("_Z")) { std::string buf = name.str(); llvm::ItaniumPartialDemangler d; if (!d.partialDemangle(buf.c_str())) if (char *buf = d.getFunctionName(nullptr, nullptr)) { const Symbol *s = suggest(buf); free(buf); if (s) { pre_hint = ": extern \"C\" "; return s; } } } else { const Symbol *s = nullptr; for (auto &it : map) if (canSuggestExternCForCXX(name, it.first)) { s = it.second; break; } if (!s) for (Symbol *sym : symtab->symbols()) if (canSuggestExternCForCXX(name, sym->getName())) { s = sym; break; } if (s) { pre_hint = " to declare "; post_hint = " as extern \"C\"?"; return s; } } return nullptr; } template static void reportUndefinedSymbol(const UndefinedDiag &undef, bool correctSpelling) { Symbol &sym = *undef.sym; auto visibility = [&]() -> std::string { switch (sym.visibility) { case STV_INTERNAL: return "internal "; case STV_HIDDEN: return "hidden "; case STV_PROTECTED: return "protected "; default: return ""; } }; std::string msg = maybeReportDiscarded(cast(sym)); if (msg.empty()) msg = "undefined " + visibility() + "symbol: " + toString(sym); const size_t maxUndefReferences = 10; size_t i = 0; for (UndefinedDiag::Loc l : undef.locs) { if (i >= maxUndefReferences) break; InputSectionBase &sec = *l.sec; uint64_t offset = l.offset; msg += "\n>>> referenced by "; std::string src = sec.getSrcMsg(sym, offset); if (!src.empty()) msg += src + "\n>>> "; msg += sec.getObjMsg(offset); i++; } if (i < undef.locs.size()) msg += ("\n>>> referenced " + Twine(undef.locs.size() - i) + " more times") .str(); if (correctSpelling) { std::string pre_hint = ": ", post_hint; if (const Symbol *corrected = getAlternativeSpelling( cast(sym), pre_hint, post_hint)) { msg += "\n>>> did 
you mean" + pre_hint + toString(*corrected) + post_hint; if (corrected->file) msg += "\n>>> defined in: " + toString(corrected->file); } } if (sym.getName().startswith("_ZTV")) msg += "\nthe vtable symbol may be undefined because the class is missing " "its key function (see https://lld.llvm.org/missingkeyfunction)"; if (undef.isWarning) warn(msg); else error(msg); } template void reportUndefinedSymbols() { // Find the first "undefined symbol" diagnostic for each diagnostic, and // collect all "referenced from" lines at the first diagnostic. DenseMap firstRef; for (UndefinedDiag &undef : undefs) { assert(undef.locs.size() == 1); if (UndefinedDiag *canon = firstRef.lookup(undef.sym)) { canon->locs.push_back(undef.locs[0]); undef.locs.clear(); } else firstRef[undef.sym] = &undef; } // Enable spell corrector for the first 2 diagnostics. for (auto it : enumerate(undefs)) if (!it.value().locs.empty()) reportUndefinedSymbol(it.value(), it.index() < 2); undefs.clear(); } // Report an undefined symbol if necessary. // Returns true if the undefined symbol will produce an error message. static bool maybeReportUndefined(Symbol &sym, InputSectionBase &sec, uint64_t offset) { if (!sym.isUndefined() || sym.isWeak()) return false; bool canBeExternal = !sym.isLocal() && sym.visibility == STV_DEFAULT; if (config->unresolvedSymbols == UnresolvedPolicy::Ignore && canBeExternal) return false; // clang (as of 2019-06-12) / gcc (as of 8.2.1) PPC64 may emit a .rela.toc // which references a switch table in a discarded .rodata/.text section. The // .toc and the .rela.toc are incorrectly not placed in the comdat. The ELF // spec says references from outside the group to a STB_LOCAL symbol are not // allowed. Work around the bug. - if (config->emachine == EM_PPC64 && - cast(sym).discardedSecIdx != 0 && sec.name == ".toc") + // + // PPC32 .got2 is similar but cannot be fixed. 
Multiple .got2 is infeasible + // because .LC0-.LTOC is not representable if the two labels are in different + // .got2 + if (cast(sym).discardedSecIdx != 0 && + (sec.name == ".got2" || sec.name == ".toc")) return false; bool isWarning = (config->unresolvedSymbols == UnresolvedPolicy::Warn && canBeExternal) || config->noinhibitExec; undefs.push_back({&sym, {{&sec, offset}}, isWarning}); return !isWarning; } // MIPS N32 ABI treats series of successive relocations with the same offset // as a single relocation. The similar approach used by N64 ABI, but this ABI // packs all relocations into the single relocation record. Here we emulate // this for the N32 ABI. Iterate over relocation with the same offset and put // theirs types into the single bit-set. template static RelType getMipsN32RelType(RelTy *&rel, RelTy *end) { RelType type = 0; uint64_t offset = rel->r_offset; int n = 0; while (rel != end && rel->r_offset == offset) type |= (rel++)->getType(config->isMips64EL) << (8 * n++); return type; } // .eh_frame sections are mergeable input sections, so their input // offsets are not linearly mapped to output section. For each input // offset, we need to find a section piece containing the offset and // add the piece's base address to the input offset to compute the // output offset. That isn't cheap. // // This class is to speed up the offset computation. When we process // relocations, we access offsets in the monotonically increasing // order. So we can optimize for that access pattern. // // For sections other than .eh_frame, this class doesn't do anything. namespace { class OffsetGetter { public: explicit OffsetGetter(InputSectionBase &sec) { if (auto *eh = dyn_cast(&sec)) pieces = eh->pieces; } // Translates offsets in input sections to offsets in output sections. // Given offset must increase monotonically. We assume that Piece is // sorted by inputOff. 
uint64_t get(uint64_t off) { if (pieces.empty()) return off; while (i != pieces.size() && pieces[i].inputOff + pieces[i].size <= off) ++i; if (i == pieces.size()) fatal(".eh_frame: relocation is not in any piece"); // Pieces must be contiguous, so there must be no holes in between. assert(pieces[i].inputOff <= off && "Relocation not in any piece"); // Offset -1 means that the piece is dead (i.e. garbage collected). if (pieces[i].outputOff == -1) return -1; return pieces[i].outputOff + off - pieces[i].inputOff; } private: ArrayRef pieces; size_t i = 0; }; } // namespace static void addRelativeReloc(InputSectionBase *isec, uint64_t offsetInSec, Symbol *sym, int64_t addend, RelExpr expr, RelType type) { Partition &part = isec->getPartition(); // Add a relative relocation. If relrDyn section is enabled, and the // relocation offset is guaranteed to be even, add the relocation to // the relrDyn section, otherwise add it to the relaDyn section. // relrDyn sections don't support odd offsets. Also, relrDyn sections // don't store the addend values, so we must write it to the relocated // address. if (part.relrDyn && isec->alignment >= 2 && offsetInSec % 2 == 0) { isec->relocations.push_back({expr, type, offsetInSec, addend, sym}); part.relrDyn->relocs.push_back({isec, offsetInSec}); return; } part.relaDyn->addReloc(target->relativeRel, isec, offsetInSec, sym, addend, expr, type); } template static void addPltEntry(PltSection *plt, GotPltSection *gotPlt, RelocationBaseSection *rel, RelType type, Symbol &sym) { plt->addEntry(sym); gotPlt->addEntry(sym); rel->addReloc( {type, gotPlt, sym.getGotPltOffset(), !sym.isPreemptible, &sym, 0}); } static void addGotEntry(Symbol &sym) { in.got->addEntry(sym); RelExpr expr = sym.isTls() ? R_TLS : R_ABS; uint64_t off = sym.getGotOffset(); // If a GOT slot value can be calculated at link-time, which is now, // we can just fill that out. 
// // (We don't actually write a value to a GOT slot right now, but we // add a static relocation to a Relocations vector so that // InputSection::relocate will do the work for us. We may be able // to just write a value now, but it is a TODO.) bool isLinkTimeConstant = !sym.isPreemptible && (!config->isPic || isAbsolute(sym)); if (isLinkTimeConstant) { in.got->relocations.push_back({expr, target->symbolicRel, off, 0, &sym}); return; } // Otherwise, we emit a dynamic relocation to .rel[a].dyn so that // the GOT slot will be fixed at load-time. if (!sym.isTls() && !sym.isPreemptible && config->isPic && !isAbsolute(sym)) { addRelativeReloc(in.got, off, &sym, 0, R_ABS, target->symbolicRel); return; } mainPart->relaDyn->addReloc( sym.isTls() ? target->tlsGotRel : target->gotRel, in.got, off, &sym, 0, sym.isPreemptible ? R_ADDEND : R_ABS, target->symbolicRel); } // Return true if we can define a symbol in the executable that // contains the value/function of a symbol defined in a shared // library. static bool canDefineSymbolInExecutable(Symbol &sym) { // If the symbol has default visibility the symbol defined in the // executable will preempt it. // Note that we want the visibility of the shared symbol itself, not // the visibility of the symbol in the output file we are producing. That is // why we use Sym.stOther. if ((sym.stOther & 0x3) == STV_DEFAULT) return true; // If we are allowed to break address equality of functions, defining // a plt entry will allow the program to call the function in the // .so, but the .so and the executable will no agree on the address // of the function. Similar logic for objects. return ((sym.isFunc() && config->ignoreFunctionAddressEquality) || (sym.isObject() && config->ignoreDataAddressEquality)); } // The reason we have to do this early scan is as follows // * To mmap the output file, we need to know the size // * For that, we need to know how many dynamic relocs we will have. 
// It might be possible to avoid this by outputting the file with write: // * Write the allocated output sections, computing addresses. // * Apply relocations, recording which ones require a dynamic reloc. // * Write the dynamic relocations. // * Write the rest of the file. // This would have some drawbacks. For example, we would only know if .rela.dyn // is needed after applying relocations. If it is, it will go after rw and rx // sections. Given that it is ro, we will need an extra PT_LOAD. This // complicates things for the dynamic linker and means we would have to reserve // space for the extra PT_LOAD even if we end up not using it. template static void processRelocAux(InputSectionBase &sec, RelExpr expr, RelType type, uint64_t offset, Symbol &sym, const RelTy &rel, int64_t addend) { // If the relocation is known to be a link-time constant, we know no dynamic // relocation will be created, pass the control to relocateAlloc() or // relocateNonAlloc() to resolve it. // // The behavior of an undefined weak reference is implementation defined. If // the relocation is to a weak undef, and we are producing an executable, let // relocate{,Non}Alloc() resolve it. if (isStaticLinkTimeConstant(expr, type, sym, sec, offset) || (!config->shared && sym.isUndefWeak())) { sec.relocations.push_back({expr, type, offset, addend, &sym}); return; } bool canWrite = (sec.flags & SHF_WRITE) || !config->zText; if (canWrite) { RelType rel = target->getDynRel(type); if (expr == R_GOT || (rel == target->symbolicRel && !sym.isPreemptible)) { addRelativeReloc(&sec, offset, &sym, addend, expr, type); return; } else if (rel != 0) { if (config->emachine == EM_MIPS && rel == target->symbolicRel) rel = target->relativeRel; sec.getPartition().relaDyn->addReloc(rel, &sec, offset, &sym, addend, R_ADDEND, type); // MIPS ABI turns using of GOT and dynamic relocations inside out. 
// While regular ABI uses dynamic relocations to fill up GOT entries // MIPS ABI requires dynamic linker to fills up GOT entries using // specially sorted dynamic symbol table. This affects even dynamic // relocations against symbols which do not require GOT entries // creation explicitly, i.e. do not have any GOT-relocations. So if // a preemptible symbol has a dynamic relocation we anyway have // to create a GOT entry for it. // If a non-preemptible symbol has a dynamic relocation against it, // dynamic linker takes it st_value, adds offset and writes down // result of the dynamic relocation. In case of preemptible symbol // dynamic linker performs symbol resolution, writes the symbol value // to the GOT entry and reads the GOT entry when it needs to perform // a dynamic relocation. // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19 if (config->emachine == EM_MIPS) in.mipsGot->addEntry(*sec.file, sym, addend, expr); return; } } // When producing an executable, we can perform copy relocations (for // STT_OBJECT) and canonical PLT (for STT_FUNC). if (!config->shared) { if (!canDefineSymbolInExecutable(sym)) { errorOrWarn("cannot preempt symbol: " + toString(sym) + getLocation(sec, sym, offset)); return; } if (sym.isObject()) { // Produce a copy relocation. if (auto *ss = dyn_cast(&sym)) { if (!config->zCopyreloc) error("unresolvable relocation " + toString(type) + " against symbol '" + toString(*ss) + "'; recompile with -fPIC or remove '-z nocopyreloc'" + getLocation(sec, sym, offset)); addCopyRelSymbol(*ss); } sec.relocations.push_back({expr, type, offset, addend, &sym}); return; } // This handles a non PIC program call to function in a shared library. In // an ideal world, we could just report an error saying the relocation can // overflow at runtime. In the real world with glibc, crt1.o has a // R_X86_64_PC32 pointing to libc.so. // // The general idea on how to handle such cases is to create a PLT entry and // use that as the function value. 
// // For the static linking part, we just return a plt expr and everything // else will use the PLT entry as the address. // // The remaining problem is making sure pointer equality still works. We // need the help of the dynamic linker for that. We let it know that we have // a direct reference to a so symbol by creating an undefined symbol with a // non zero st_value. Seeing that, the dynamic linker resolves the symbol to // the value of the symbol we created. This is true even for got entries, so // pointer equality is maintained. To avoid an infinite loop, the only entry // that points to the real function is a dedicated got entry used by the // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT, // R_386_JMP_SLOT, etc). // For position independent executable on i386, the plt entry requires ebx // to be set. This causes two problems: // * If some code has a direct reference to a function, it was probably // compiled without -fPIE/-fPIC and doesn't maintain ebx. // * If a library definition gets preempted to the executable, it will have // the wrong ebx value. if (sym.isFunc()) { if (config->pie && config->emachine == EM_386) errorOrWarn("symbol '" + toString(sym) + "' cannot be preempted; recompile with -fPIE" + getLocation(sec, sym, offset)); if (!sym.isInPlt()) addPltEntry(in.plt, in.gotPlt, in.relaPlt, target->pltRel, sym); if (!sym.isDefined()) { replaceWithDefined( sym, in.plt, target->pltHeaderSize + target->pltEntrySize * sym.pltIndex, 0); if (config->emachine == EM_PPC) { // PPC32 canonical PLT entries are at the beginning of .glink cast(sym).value = in.plt->headerSize; in.plt->headerSize += 16; cast(in.plt)->canonical_plts.push_back(&sym); } } sym.needsPltAddr = true; sec.relocations.push_back({expr, type, offset, addend, &sym}); return; } } if (config->isPic) { if (!canWrite && !isRelExpr(expr)) errorOrWarn( "can't create dynamic relocation " + toString(type) + " against " + (sym.getName().empty() ? 
"local symbol" : "symbol: " + toString(sym)) + " in readonly segment; recompile object files with -fPIC " "or pass '-Wl,-z,notext' to allow text relocations in the output" + getLocation(sec, sym, offset)); else errorOrWarn( "relocation " + toString(type) + " cannot be used against " + (sym.getName().empty() ? "local symbol" : "symbol " + toString(sym)) + "; recompile with -fPIC" + getLocation(sec, sym, offset)); return; } errorOrWarn("symbol '" + toString(sym) + "' has no type" + getLocation(sec, sym, offset)); } template static void scanReloc(InputSectionBase &sec, OffsetGetter &getOffset, RelTy *&i, RelTy *end) { const RelTy &rel = *i; uint32_t symIndex = rel.getSymbol(config->isMips64EL); Symbol &sym = sec.getFile()->getSymbol(symIndex); RelType type; // Deal with MIPS oddity. if (config->mipsN32Abi) { type = getMipsN32RelType(i, end); } else { type = rel.getType(config->isMips64EL); ++i; } // Get an offset in an output section this relocation is applied to. uint64_t offset = getOffset.get(rel.r_offset); if (offset == uint64_t(-1)) return; // Error if the target symbol is undefined. Symbol index 0 may be used by // marker relocations, e.g. R_*_NONE and R_ARM_V4BX. Don't error on them. if (symIndex != 0 && maybeReportUndefined(sym, sec, rel.r_offset)) return; const uint8_t *relocatedAddr = sec.data().begin() + rel.r_offset; RelExpr expr = target->getRelExpr(type, sym, relocatedAddr); // Ignore R_*_NONE and other marker relocations. if (expr == R_NONE) return; // We can separate the small code model relocations into 2 categories: // 1) Those that access the compiler generated .toc sections. // 2) Those that access the linker allocated got entries. // lld allocates got entries to symbols on demand. Since we don't try to sort // the got entries in any way, we don't have to track which objects have // got-based small code model relocs. 
The .toc sections get placed after the // end of the linker allocated .got section and we do sort those so sections // addressed with small code model relocations come first. if (config->emachine == EM_PPC64 && isPPC64SmallCodeModelTocReloc(type)) sec.file->ppc64SmallCodeModelTocRelocs = true; if (sym.isGnuIFunc() && !config->zText && config->warnIfuncTextrel) { warn("using ifunc symbols when text relocations are allowed may produce " "a binary that will segfault, if the object file is linked with " "old version of glibc (glibc 2.28 and earlier). If this applies to " "you, consider recompiling the object files without -fPIC and " "without -Wl,-z,notext option. Use -no-warn-ifunc-textrel to " "turn off this warning." + getLocation(sec, sym, offset)); } // Read an addend. int64_t addend = computeAddend(rel, end, sec, expr, sym.isLocal()); // Relax relocations. // // If we know that a PLT entry will be resolved within the same ELF module, we // can skip PLT access and directly jump to the destination function. For // example, if we are linking a main executable, all dynamic symbols that can // be resolved within the executable will actually be resolved that way at // runtime, because the main executable is always at the beginning of a search // list. We can leverage that fact. if (!sym.isPreemptible && (!sym.isGnuIFunc() || config->zIfuncNoplt)) { if (expr == R_GOT_PC && !isAbsoluteValue(sym)) { expr = target->adjustRelaxExpr(type, relocatedAddr, expr); } else { // The 0x8000 bit of r_addend of R_PPC_PLTREL24 is used to choose call // stub type. It should be ignored if optimized to R_PC. if (config->emachine == EM_PPC && expr == R_PPC32_PLTREL) addend &= ~0x8000; expr = fromPlt(expr); } } // If the relocation does not emit a GOT or GOTPLT entry but its computation // uses their addresses, we need GOT or GOTPLT to be created. // // The 4 types that relative GOTPLT are all x86 and x86-64 specific. 
if (oneof(expr)) { in.gotPlt->hasGotPltOffRel = true; } else if (oneof( expr)) { in.got->hasGotOffRel = true; } // Process some TLS relocations, including relaxing TLS relocations. // Note that this function does not handle all TLS relocations. if (unsigned processed = handleTlsRelocation(type, sym, sec, offset, addend, expr)) { i += (processed - 1); return; } // We were asked not to generate PLT entries for ifuncs. Instead, pass the // direct relocation on through. if (sym.isGnuIFunc() && config->zIfuncNoplt) { sym.exportDynamic = true; mainPart->relaDyn->addReloc(type, &sec, offset, &sym, addend, R_ADDEND, type); return; } // Non-preemptible ifuncs require special handling. First, handle the usual // case where the symbol isn't one of these. if (!sym.isGnuIFunc() || sym.isPreemptible) { // If a relocation needs PLT, we create PLT and GOTPLT slots for the symbol. if (needsPlt(expr) && !sym.isInPlt()) addPltEntry(in.plt, in.gotPlt, in.relaPlt, target->pltRel, sym); // Create a GOT slot if a relocation needs GOT. if (needsGot(expr)) { if (config->emachine == EM_MIPS) { // MIPS ABI has special rules to process GOT entries and doesn't // require relocation entries for them. A special case is TLS // relocations. In that case dynamic loader applies dynamic // relocations to initialize TLS GOT entries. // See "Global Offset Table" in Chapter 5 in the following document // for detailed description: // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf in.mipsGot->addEntry(*sec.file, sym, addend, expr); } else if (!sym.isInGot()) { addGotEntry(sym); } } } else { // Handle a reference to a non-preemptible ifunc. These are special in a // few ways: // // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not have // a fixed value. But assuming that all references to the ifunc are // GOT-generating or PLT-generating, the handling of an ifunc is // relatively straightforward. 
We create a PLT entry in Iplt, which is // usually at the end of .plt, which makes an indirect call using a // matching GOT entry in igotPlt, which is usually at the end of .got.plt. // The GOT entry is relocated using an IRELATIVE relocation in relaIplt, // which is usually at the end of .rela.plt. Unlike most relocations in // .rela.plt, which may be evaluated lazily without -z now, dynamic // loaders evaluate IRELATIVE relocs eagerly, which means that for // IRELATIVE relocs only, GOT-generating relocations can point directly to // .got.plt without requiring a separate GOT entry. // // - Despite the fact that an ifunc does not have a fixed value, compilers // that are not passed -fPIC will assume that they do, and will emit // direct (non-GOT-generating, non-PLT-generating) relocations to the // symbol. This means that if a direct relocation to the symbol is // seen, the linker must set a value for the symbol, and this value must // be consistent no matter what type of reference is made to the symbol. // This can be done by creating a PLT entry for the symbol in the way // described above and making it canonical, that is, making all references // point to the PLT entry instead of the resolver. In lld we also store // the address of the PLT entry in the dynamic symbol table, which means // that the symbol will also have the same value in other modules. // Because the value loaded from the GOT needs to be consistent with // the value computed using a direct relocation, a non-preemptible ifunc // may end up with two GOT entries, one in .got.plt that points to the // address returned by the resolver and is used only by the PLT entry, // and another in .got that points to the PLT entry and is used by // GOT-generating relocations. // // - The fact that these symbols do not have a fixed value makes them an // exception to the general rule that a statically linked executable does // not require any form of dynamic relocation. 
To handle these relocations // correctly, the IRELATIVE relocations are stored in an array which a // statically linked executable's startup code must enumerate using the // linker-defined symbols __rela?_iplt_{start,end}. if (!sym.isInPlt()) { // Create PLT and GOTPLT slots for the symbol. sym.isInIplt = true; // Create a copy of the symbol to use as the target of the IRELATIVE // relocation in the igotPlt. This is in case we make the PLT canonical // later, which would overwrite the original symbol. // // FIXME: Creating a copy of the symbol here is a bit of a hack. All // that's really needed to create the IRELATIVE is the section and value, // so ideally we should just need to copy those. auto *directSym = make(cast(sym)); addPltEntry(in.iplt, in.igotPlt, in.relaIplt, target->iRelativeRel, *directSym); sym.pltIndex = directSym->pltIndex; } if (needsGot(expr)) { // Redirect GOT accesses to point to the Igot. // // This field is also used to keep track of whether we ever needed a GOT // entry. If we did and we make the PLT canonical later, we'll need to // create a GOT entry pointing to the PLT entry for Sym. sym.gotInIgot = true; } else if (!needsPlt(expr)) { // Make the ifunc's PLT entry canonical by changing the value of its // symbol to redirect all references to point to it. auto &d = cast(sym); d.section = in.iplt; d.value = sym.pltIndex * target->ipltEntrySize; d.size = 0; // It's important to set the symbol type here so that dynamic loaders // don't try to call the PLT as if it were an ifunc resolver. d.type = STT_FUNC; if (sym.gotInIgot) { // We previously encountered a GOT generating reference that we // redirected to the Igot. Now that the PLT entry is canonical we must // clear the redirection to the Igot and add a GOT entry. As we've // changed the symbol type to STT_FUNC future GOT generating references // will naturally use this GOT entry. // // We don't need to worry about creating a MIPS GOT here because ifuncs // aren't a thing on MIPS. 
sym.gotInIgot = false; addGotEntry(sym); } } } processRelocAux(sec, expr, type, offset, sym, rel, addend); } template static void scanRelocs(InputSectionBase &sec, ArrayRef rels) { OffsetGetter getOffset(sec); // Not all relocations end up in Sec.Relocations, but a lot do. sec.relocations.reserve(rels.size()); for (auto i = rels.begin(), end = rels.end(); i != end;) scanReloc(sec, getOffset, i, end); // Sort relocations by offset for more efficient searching for // R_RISCV_PCREL_HI20 and R_PPC64_ADDR64. if (config->emachine == EM_RISCV || (config->emachine == EM_PPC64 && sec.name == ".toc")) llvm::stable_sort(sec.relocations, [](const Relocation &lhs, const Relocation &rhs) { return lhs.offset < rhs.offset; }); } template void scanRelocations(InputSectionBase &s) { if (s.areRelocsRela) scanRelocs(s, s.relas()); else scanRelocs(s, s.rels()); } static bool mergeCmp(const InputSection *a, const InputSection *b) { // std::merge requires a strict weak ordering. if (a->outSecOff < b->outSecOff) return true; if (a->outSecOff == b->outSecOff) { auto *ta = dyn_cast(a); auto *tb = dyn_cast(b); // Check if Thunk is immediately before any specific Target // InputSection for example Mips LA25 Thunks. if (ta && ta->getTargetInputSection() == b) return true; // Place Thunk Sections without specific targets before // non-Thunk Sections. if (ta && !tb && !ta->getTargetInputSection()) return true; } return false; } // Call Fn on every executable InputSection accessed via the linker script // InputSectionDescription::Sections. 
static void forEachInputSectionDescription( ArrayRef outputSections, llvm::function_ref fn) { for (OutputSection *os : outputSections) { if (!(os->flags & SHF_ALLOC) || !(os->flags & SHF_EXECINSTR)) continue; for (BaseCommand *bc : os->sectionCommands) if (auto *isd = dyn_cast(bc)) fn(os, isd); } } // Thunk Implementation // // Thunks (sometimes called stubs, veneers or branch islands) are small pieces // of code that the linker inserts inbetween a caller and a callee. The thunks // are added at link time rather than compile time as the decision on whether // a thunk is needed, such as the caller and callee being out of range, can only // be made at link time. // // It is straightforward to tell given the current state of the program when a // thunk is needed for a particular call. The more difficult part is that // the thunk needs to be placed in the program such that the caller can reach // the thunk and the thunk can reach the callee; furthermore, adding thunks to // the program alters addresses, which can mean more thunks etc. // // In lld we have a synthetic ThunkSection that can hold many Thunks. // The decision to have a ThunkSection act as a container means that we can // more easily handle the most common case of a single block of contiguous // Thunks by inserting just a single ThunkSection. // // The implementation of Thunks in lld is split across these areas // Relocations.cpp : Framework for creating and placing thunks // Thunks.cpp : The code generated for each supported thunk // Target.cpp : Target specific hooks that the framework uses to decide when // a thunk is used // Synthetic.cpp : Implementation of ThunkSection // Writer.cpp : Iteratively call framework until no more Thunks added // // Thunk placement requirements: // Mips LA25 thunks. These must be placed immediately before the callee section // We can assume that the caller is in range of the Thunk. 
These are modelled // by Thunks that return the section they must precede with // getTargetInputSection(). // // ARM interworking and range extension thunks. These thunks must be placed // within range of the caller. All implemented ARM thunks can always reach the // callee as they use an indirect jump via a register that has no range // restrictions. // // Thunk placement algorithm: // For Mips LA25 ThunkSections; the placement is explicit, it has to be before // getTargetInputSection(). // // For thunks that must be placed within range of the caller there are many // possible choices given that the maximum range from the caller is usually // much larger than the average InputSection size. Desirable properties include: // - Maximize reuse of thunks by multiple callers // - Minimize number of ThunkSections to simplify insertion // - Handle impact of already added Thunks on addresses // - Simple to understand and implement // // In lld for the first pass, we pre-create one or more ThunkSections per // InputSectionDescription at Target specific intervals. A ThunkSection is // placed so that the estimated end of the ThunkSection is within range of the // start of the InputSectionDescription or the previous ThunkSection. For // example: // InputSectionDescription // Section 0 // ... // Section N // ThunkSection 0 // Section N + 1 // ... // Section N + K // Thunk Section 1 // // The intention is that we can add a Thunk to a ThunkSection that is well // spaced enough to service a number of callers without having to do a lot // of work. An important principle is that it is not an error if a Thunk cannot // be placed in a pre-created ThunkSection; when this happens we create a new // ThunkSection placed next to the caller. This allows us to handle the vast // majority of thunks simply, but also handle rare cases where the branch range // is smaller than the target specific spacing. 
// // The algorithm is expected to create all the thunks that are needed in a // single pass, with a small number of programs needing a second pass due to // the insertion of thunks in the first pass increasing the offset between // callers and callees that were only just in range. // // A consequence of allowing new ThunkSections to be created outside of the // pre-created ThunkSections is that in rare cases calls to Thunks that were in // range in pass K, are out of range in some pass > K due to the insertion of // more Thunks in between the caller and callee. When this happens we retarget // the relocation back to the original target and create another Thunk. // Remove ThunkSections that are empty, this should only be the initial set // precreated on pass 0. // Insert the Thunks for OutputSection OS into their designated place // in the Sections vector, and recalculate the InputSection output section // offsets. // This may invalidate any output section offsets stored outside of InputSection void ThunkCreator::mergeThunks(ArrayRef outputSections) { forEachInputSectionDescription( outputSections, [&](OutputSection *os, InputSectionDescription *isd) { if (isd->thunkSections.empty()) return; // Remove any zero sized precreated Thunks. llvm::erase_if(isd->thunkSections, [](const std::pair &ts) { return ts.first->getSize() == 0; }); // ISD->ThunkSections contains all created ThunkSections, including // those inserted in previous passes. Extract the Thunks created this // pass and order them in ascending outSecOff. 
std::vector newThunks; for (std::pair ts : isd->thunkSections) if (ts.second == pass) newThunks.push_back(ts.first); llvm::stable_sort(newThunks, [](const ThunkSection *a, const ThunkSection *b) { return a->outSecOff < b->outSecOff; }); // Merge sorted vectors of Thunks and InputSections by outSecOff std::vector tmp; tmp.reserve(isd->sections.size() + newThunks.size()); std::merge(isd->sections.begin(), isd->sections.end(), newThunks.begin(), newThunks.end(), std::back_inserter(tmp), mergeCmp); isd->sections = std::move(tmp); }); } // Find or create a ThunkSection within the InputSectionDescription (ISD) that // is in range of Src. An ISD maps to a range of InputSections described by a // linker script section pattern such as { .text .text.* }. ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os, InputSection *isec, InputSectionDescription *isd, uint32_t type, uint64_t src) { for (std::pair tp : isd->thunkSections) { ThunkSection *ts = tp.first; uint64_t tsBase = os->addr + ts->outSecOff; uint64_t tsLimit = tsBase + ts->getSize(); if (target->inBranchRange(type, src, (src > tsLimit) ? tsBase : tsLimit)) return ts; } // No suitable ThunkSection exists. This can happen when there is a branch // with lower range than the ThunkSection spacing or when there are too // many Thunks. Create a new ThunkSection as close to the InputSection as // possible. Error if InputSection is so large we cannot place ThunkSection // anywhere in Range. uint64_t thunkSecOff = isec->outSecOff; if (!target->inBranchRange(type, src, os->addr + thunkSecOff)) { thunkSecOff = isec->outSecOff + isec->getSize(); if (!target->inBranchRange(type, src, os->addr + thunkSecOff)) fatal("InputSection too large for range extension thunk " + isec->getObjMsg(src - (os->addr + isec->outSecOff))); } return addThunkSection(os, isd, thunkSecOff); } // Add a Thunk that needs to be placed in a ThunkSection that immediately // precedes its Target. 
ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) {
  // Reuse the ThunkSection if we have already created one for this target.
  ThunkSection *ts = thunkedSections.lookup(isec);
  if (ts)
    return ts;

  // Find InputSectionRange within Target Output Section (TOS) that the
  // InputSection (IS) that we need to precede is in.
  OutputSection *tos = isec->getParent();
  for (BaseCommand *bc : tos->sectionCommands) {
    auto *isd = dyn_cast<InputSectionDescription>(bc);
    if (!isd || isd->sections.empty())
      continue;

    InputSection *first = isd->sections.front();
    InputSection *last = isd->sections.back();

    // isec is in this InputSectionDescription iff its offset lies within
    // [first, last]; otherwise keep looking.
    if (isec->outSecOff < first->outSecOff || last->outSecOff < isec->outSecOff)
      continue;

    ts = addThunkSection(tos, isd, isec->outSecOff);
    thunkedSections[isec] = ts;
    return ts;
  }

  return nullptr;
}

// Create one or more ThunkSections per OS that can be used to place Thunks.
// We attempt to place the ThunkSections using the following desirable
// properties:
// - Within range of the maximum number of callers
// - Minimise the number of ThunkSections
//
// We follow a simple but conservative heuristic to place ThunkSections at
// offsets that are multiples of a Target specific branch range.
// For an InputSectionDescription that is smaller than the range, a single
// ThunkSection at the end of the range will do.
//
// For an InputSectionDescription that is more than twice the size of the range,
// we place the last ThunkSection at range bytes from the end of the
// InputSectionDescription in order to increase the likelihood that the
// distance from a thunk to its target will be sufficiently small to
// allow for the creation of a short thunk.
void ThunkCreator::createInitialThunkSections( ArrayRef outputSections) { uint32_t thunkSectionSpacing = target->getThunkSectionSpacing(); forEachInputSectionDescription( outputSections, [&](OutputSection *os, InputSectionDescription *isd) { if (isd->sections.empty()) return; uint32_t isdBegin = isd->sections.front()->outSecOff; uint32_t isdEnd = isd->sections.back()->outSecOff + isd->sections.back()->getSize(); uint32_t lastThunkLowerBound = -1; if (isdEnd - isdBegin > thunkSectionSpacing * 2) lastThunkLowerBound = isdEnd - thunkSectionSpacing; uint32_t isecLimit; uint32_t prevIsecLimit = isdBegin; uint32_t thunkUpperBound = isdBegin + thunkSectionSpacing; for (const InputSection *isec : isd->sections) { isecLimit = isec->outSecOff + isec->getSize(); if (isecLimit > thunkUpperBound) { addThunkSection(os, isd, prevIsecLimit); thunkUpperBound = prevIsecLimit + thunkSectionSpacing; } if (isecLimit > lastThunkLowerBound) break; prevIsecLimit = isecLimit; } addThunkSection(os, isd, isecLimit); }); } ThunkSection *ThunkCreator::addThunkSection(OutputSection *os, InputSectionDescription *isd, uint64_t off) { auto *ts = make(os, off); ts->partition = os->partition; if ((config->fixCortexA53Errata843419 || config->fixCortexA8) && !isd->sections.empty()) { // The errata fixes are sensitive to addresses modulo 4 KiB. When we add // thunks we disturb the base addresses of sections placed after the thunks // this makes patches we have generated redundant, and may cause us to // generate more patches as different instructions are now in sensitive // locations. When we generate more patches we may force more branches to // go out of range, causing more thunks to be generated. In pathological // cases this can cause the address dependent content pass not to converge. 
// We fix this by rounding up the size of the ThunkSection to 4KiB, this // limits the insertion of a ThunkSection on the addresses modulo 4 KiB, // which means that adding Thunks to the section does not invalidate // errata patches for following code. // Rounding up the size to 4KiB has consequences for code-size and can // trip up linker script defined assertions. For example the linux kernel // has an assertion that what LLD represents as an InputSectionDescription // does not exceed 4 KiB even if the overall OutputSection is > 128 Mib. // We use the heuristic of rounding up the size when both of the following // conditions are true: // 1.) The OutputSection is larger than the ThunkSectionSpacing. This // accounts for the case where no single InputSectionDescription is // larger than the OutputSection size. This is conservative but simple. // 2.) The InputSectionDescription is larger than 4 KiB. This will prevent // any assertion failures that an InputSectionDescription is < 4 KiB // in size. uint64_t isdSize = isd->sections.back()->outSecOff + isd->sections.back()->getSize() - isd->sections.front()->outSecOff; if (os->size > target->getThunkSectionSpacing() && isdSize > 4096) ts->roundUpSizeForErrata = true; } isd->thunkSections.push_back({ts, pass}); return ts; } static bool isThunkSectionCompatible(InputSection *source, SectionBase *target) { // We can't reuse thunks in different loadable partitions because they might // not be loaded. But partition 1 (the main partition) will always be loaded. 
if (source->partition != target->partition) return target->partition == 1; return true; } static int64_t getPCBias(RelType type) { if (config->emachine != EM_ARM) return 0; switch (type) { case R_ARM_THM_JUMP19: case R_ARM_THM_JUMP24: case R_ARM_THM_CALL: return 4; default: return 8; } } std::pair ThunkCreator::getThunk(InputSection *isec, Relocation &rel, uint64_t src) { std::vector *thunkVec = nullptr; int64_t addend = rel.addend + getPCBias(rel.type); // We use a ((section, offset), addend) pair to find the thunk position if // possible so that we create only one thunk for aliased symbols or ICFed // sections. There may be multiple relocations sharing the same (section, // offset + addend) pair. We may revert the relocation back to its original // non-Thunk target, so we cannot fold offset + addend. if (auto *d = dyn_cast(rel.sym)) if (!d->isInPlt() && d->section) thunkVec = &thunkedSymbolsBySectionAndAddend[{ {d->section->repl, d->value}, addend}]; if (!thunkVec) thunkVec = &thunkedSymbols[{rel.sym, addend}]; // Check existing Thunks for Sym to see if they can be reused for (Thunk *t : *thunkVec) if (isThunkSectionCompatible(isec, t->getThunkTargetSym()->section) && t->isCompatibleWith(*isec, rel) && target->inBranchRange(rel.type, src, t->getThunkTargetSym()->getVA(rel.addend) + getPCBias(rel.type))) return std::make_pair(t, false); // No existing compatible Thunk in range, create a new one Thunk *t = addThunk(*isec, rel); thunkVec->push_back(t); return std::make_pair(t, true); } // Return true if the relocation target is an in range Thunk. // Return false if the relocation is not to a Thunk. If the relocation target // was originally to a Thunk, but is no longer in range we revert the // relocation back to its original non-Thunk target. 
bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) { if (Thunk *t = thunks.lookup(rel.sym)) { if (target->inBranchRange(rel.type, src, rel.sym->getVA(rel.addend) + getPCBias(rel.type))) return true; rel.sym = &t->destination; rel.addend = t->addend; if (rel.sym->isInPlt()) rel.expr = toPlt(rel.expr); } return false; } // Process all relocations from the InputSections that have been assigned // to InputSectionDescriptions and redirect through Thunks if needed. The // function should be called iteratively until it returns false. // // PreConditions: // All InputSections that may need a Thunk are reachable from // OutputSectionCommands. // // All OutputSections have an address and all InputSections have an offset // within the OutputSection. // // The offsets between caller (relocation place) and callee // (relocation target) will not be modified outside of createThunks(). // // PostConditions: // If return value is true then ThunkSections have been inserted into // OutputSections. All relocations that needed a Thunk based on the information // available to createThunks() on entry have been redirected to a Thunk. Note // that adding Thunks changes offsets between caller and callee so more Thunks // may be required. // // If return value is false then no more Thunks are needed, and createThunks has // made no changes. If the target requires range extension thunks, currently // ARM, then any future change in offset between caller and callee risks a // relocation out of range error. bool ThunkCreator::createThunks(ArrayRef outputSections) { bool addressesChanged = false; if (pass == 0 && target->getThunkSectionSpacing()) createInitialThunkSections(outputSections); // Create all the Thunks and insert them into synthetic ThunkSections. The // ThunkSections are later inserted back into InputSectionDescriptions. 
// We separate the creation of ThunkSections from the insertion of the // ThunkSections as ThunkSections are not always inserted into the same // InputSectionDescription as the caller. forEachInputSectionDescription( outputSections, [&](OutputSection *os, InputSectionDescription *isd) { for (InputSection *isec : isd->sections) for (Relocation &rel : isec->relocations) { uint64_t src = isec->getVA(rel.offset); // If we are a relocation to an existing Thunk, check if it is // still in range. If not then Rel will be altered to point to its // original target so another Thunk can be generated. if (pass > 0 && normalizeExistingThunk(rel, src)) continue; if (!target->needsThunk(rel.expr, rel.type, isec->file, src, *rel.sym, rel.addend)) continue; Thunk *t; bool isNew; std::tie(t, isNew) = getThunk(isec, rel, src); if (isNew) { // Find or create a ThunkSection for the new Thunk ThunkSection *ts; if (auto *tis = t->getTargetInputSection()) ts = getISThunkSec(tis); else ts = getISDThunkSec(os, isec, isd, rel.type, src); ts->addThunk(t); thunks[t->getThunkTargetSym()] = t; } // Redirect relocation to Thunk, we never go via the PLT to a Thunk rel.sym = t->getThunkTargetSym(); rel.expr = fromPlt(rel.expr); // On AArch64 and PPC, a jump/call relocation may be encoded as // STT_SECTION + non-zero addend, clear the addend after // redirection. 
if (config->emachine != EM_MIPS) rel.addend = -getPCBias(rel.type); } for (auto &p : isd->thunkSections) addressesChanged |= p.first->assignOffsets(); }); for (auto &p : thunkedSections) addressesChanged |= p.second->assignOffsets(); // Merge all created synthetic ThunkSections back into OutputSection mergeThunks(outputSections); ++pass; return addressesChanged; } template void scanRelocations(InputSectionBase &); template void scanRelocations(InputSectionBase &); template void scanRelocations(InputSectionBase &); template void scanRelocations(InputSectionBase &); template void reportUndefinedSymbols(); template void reportUndefinedSymbols(); template void reportUndefinedSymbols(); template void reportUndefinedSymbols(); } // namespace elf } // namespace lld