Index: head/contrib/llvm/tools/lld/ELF/Config.h
===================================================================
--- head/contrib/llvm/tools/lld/ELF/Config.h	(revision 350466)
+++ head/contrib/llvm/tools/lld/ELF/Config.h	(revision 350467)
@@ -1,298 +1,298 @@
 //===- Config.h -------------------------------------------------*- C++ -*-===//
 //
 //                             The LLVM Linker
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 
 #ifndef LLD_ELF_CONFIG_H
 #define LLD_ELF_CONFIG_H
 
 #include "lld/Common/ErrorHandler.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSet.h"
 #include "llvm/BinaryFormat/ELF.h"
 #include "llvm/Support/CachePruning.h"
 #include "llvm/Support/CodeGen.h"
 #include "llvm/Support/Endian.h"
 #include <atomic>
 #include <vector>
 
 namespace lld {
 namespace elf {
 
 class InputFile;
 class InputSectionBase;
 
 enum ELFKind {
   ELFNoneKind,
   ELF32LEKind,
   ELF32BEKind,
   ELF64LEKind,
   ELF64BEKind
 };
 
 // For --build-id.
 enum class BuildIdKind { None, Fast, Md5, Sha1, Hexstring, Uuid };
 
 // For --discard-{all,locals,none}.
 enum class DiscardPolicy { Default, All, Locals, None };
 
 // For --icf={none,safe,all}.
 enum class ICFLevel { None, Safe, All };
 
 // For --strip-{all,debug}.
 enum class StripPolicy { None, All, Debug };
 
 // For --unresolved-symbols.
 enum class UnresolvedPolicy { ReportError, Warn, Ignore };
 
 // For --orphan-handling.
 enum class OrphanHandlingPolicy { Place, Warn, Error };
 
 // For --sort-section and linkerscript sorting rules.
 enum class SortSectionPolicy { Default, None, Alignment, Name, Priority };
 
 // For --target2
 enum class Target2Policy { Abs, Rel, GotRel };
 
 // For tracking ARM Float Argument PCS
 enum class ARMVFPArgKind { Default, Base, VFP, ToolChain };
 
 struct SymbolVersion {
   llvm::StringRef Name;
   bool IsExternCpp;
   bool HasWildcard;
 };
 
 // This struct contains the symbol version definitions that can be
 // found in the version script, if one is used for the link.
 struct VersionDefinition {
   llvm::StringRef Name;
   uint16_t Id = 0;
   std::vector<SymbolVersion> Globals;
   size_t NameOff = 0; // Offset in the string table
 };
 
 // This struct contains the global configuration for the linker.
 // Most fields are direct mappings from the command line options,
 // and such fields have the same names as the corresponding options.
 // Most fields are initialized by the driver.
 struct Configuration {
   std::atomic<bool> HasStaticTlsModel{false};
   uint8_t OSABI = 0;
   llvm::CachePruningPolicy ThinLTOCachePolicy;
   llvm::StringMap<uint64_t> SectionStartMap;
   llvm::StringRef Chroot;
   llvm::StringRef DynamicLinker;
   llvm::StringRef DwoDir;
   llvm::StringRef Entry;
   llvm::StringRef Emulation;
   llvm::StringRef Fini;
   llvm::StringRef Init;
   llvm::StringRef LTOAAPipeline;
   llvm::StringRef LTONewPmPasses;
   llvm::StringRef LTOObjPath;
   llvm::StringRef LTOSampleProfile;
   llvm::StringRef MapFile;
   llvm::StringRef OutputFile;
   llvm::StringRef OptRemarksFilename;
   llvm::StringRef ProgName;
   llvm::StringRef SoName;
   llvm::StringRef Sysroot;
   llvm::StringRef ThinLTOCacheDir;
   llvm::StringRef ThinLTOIndexOnlyArg;
   std::pair<llvm::StringRef, llvm::StringRef> ThinLTOObjectSuffixReplace;
   std::pair<llvm::StringRef, llvm::StringRef> ThinLTOPrefixReplace;
   std::string Rpath;
   std::vector<VersionDefinition> VersionDefinitions;
   std::vector<llvm::StringRef> AuxiliaryList;
   std::vector<llvm::StringRef> FilterList;
   std::vector<llvm::StringRef> SearchPaths;
   std::vector<llvm::StringRef> SymbolOrderingFile;
   std::vector<llvm::StringRef> Undefined;
   std::vector<SymbolVersion> DynamicList;
   std::vector<SymbolVersion> VersionScriptGlobals;
   std::vector<SymbolVersion> VersionScriptLocals;
   std::vector<uint8_t> BuildIdVector;
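   // Call graph profile edge weights, keyed by (from, to) input section pairs
   // and used to order sections when call graph profile sorting is enabled.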
   llvm::MapVector<std::pair<const InputSectionBase *, const InputSectionBase *>,
                   uint64_t>
       CallGraphProfile;
   bool AllowMultipleDefinition;
   bool AllowShlibUndefined;
   bool AndroidPackDynRelocs;
   bool ARMHasBlx = false;
   bool ARMHasMovtMovw = false;
   bool ARMJ1J2BranchEncoding = false;
   bool AsNeeded = false;
   bool Bsymbolic;
   bool BsymbolicFunctions;
   bool CallGraphProfileSort;
   bool CheckSections;
   bool CompressDebugSections;
   bool Cref;
   bool DefineCommon;
   bool Demangle = true;
   bool DisableVerify;
   bool EhFrameHdr;
   bool EmitLLVM;
   bool EmitRelocs;
   bool EnableNewDtags;
   bool ExecuteOnly;
   bool ExportDynamic;
   bool FixCortexA53Errata843419;
   bool FormatBinary = false;
   bool GcSections;
   bool GdbIndex;
   bool GnuHash = false;
   bool GnuUnique;
   bool HasDynamicList = false;
   bool HasDynSymTab;
   bool IgnoreDataAddressEquality;
   bool IgnoreFunctionAddressEquality;
   bool LTODebugPassManager;
   bool LTONewPassManager;
   bool MergeArmExidx;
   bool MipsN32Abi = false;
   bool NoinhibitExec;
   bool Nostdlib;
   bool OFormatBinary;
   bool Omagic;
   bool OptRemarksWithHotness;
   bool PicThunk;
   bool Pie;
   bool PrintGcSections;
   bool PrintIcfSections;
   bool Relocatable;
   bool RelrPackDynRelocs;
   bool SaveTemps;
   bool SingleRoRx;
   bool Shared;
   bool Static = false;
   bool SysvHash = false;
   bool Target1Rel;
   bool Trace;
   bool ThinLTOEmitImportsFiles;
   bool ThinLTOIndexOnly;
   bool TocOptimize;
   bool UndefinedVersion;
   bool UseAndroidRelrTags = false;
   bool WarnBackrefs;
   bool WarnCommon;
   bool WarnIfuncTextrel;
   bool WarnMissingEntry;
   bool WarnSymbolOrdering;
   bool WriteAddends;
   bool ZCombreloc;
   bool ZCopyreloc;
   bool ZExecstack;
   bool ZGlobal;
   bool ZHazardplt;
-  bool ZIfuncnoplt;
+  bool ZIfuncNoplt;
   bool ZInitfirst;
   bool ZInterpose;
   bool ZKeepTextSectionPrefix;
   bool ZNodefaultlib;
   bool ZNodelete;
   bool ZNodlopen;
   bool ZNow;
   bool ZOrigin;
   bool ZRelro;
   bool ZRodynamic;
   bool ZText;
   bool ZRetpolineplt;
   bool ZWxneeded;
   DiscardPolicy Discard;
   ICFLevel ICF;
   OrphanHandlingPolicy OrphanHandling;
   SortSectionPolicy SortSection;
   StripPolicy Strip;
   UnresolvedPolicy UnresolvedSymbols;
   Target2Policy Target2;
   ARMVFPArgKind ARMVFPArgs = ARMVFPArgKind::Default;
   BuildIdKind BuildId = BuildIdKind::None;
   ELFKind EKind = ELFNoneKind;
   uint16_t DefaultSymbolVersion = llvm::ELF::VER_NDX_GLOBAL;
   uint16_t EMachine = llvm::ELF::EM_NONE;
   llvm::Optional<uint64_t> ImageBase;
   uint64_t MaxPageSize;
   uint64_t MipsGotSize;
   uint64_t ZStackSize;
   unsigned LTOPartitions;
   unsigned LTOO;
   unsigned Optimize;
   unsigned ThinLTOJobs;
   int32_t SplitStackAdjustSize;
 
   // The following config options do not directly correspond to any
   // particular command line options.
 
   // True if we need to pass through relocations in input files to the
   // output file. Usually false because we consume relocations.
   bool CopyRelocs;
 
   // True if the target is ELF64. False if ELF32.
   bool Is64;
 
   // True if the target is little-endian. False if big-endian.
   bool IsLE;
 
   // endianness::little if IsLE is true. endianness::big otherwise.
   llvm::support::endianness Endianness;
 
   // True if the target is the little-endian MIPS64.
   //
   // The reason why we have this variable only for MIPS is that we use it
   // often.  Some ELF headers for MIPS64EL are in a mixed-endian format
   // (which is horrible and I'd say that's a serious spec
   // bug), and we need to know whether we are reading MIPS ELF files or
   // not in various places.
   //
   // (Note that MIPS64EL is not a typo for MIPS64LE. This is the official
   // name whatever that means. A fun hypothesis is that "EL" is short for
   // little-endian written in the little-endian order, but I don't know
   // if that's true.)
   bool IsMips64EL;
 
   // Holds set of ELF header flags for the target.
   uint32_t EFlags = 0;
 
   // The ELF spec defines two types of relocation table entries, RELA and
   // REL. RELA is a triplet of (offset, info, addend) while REL is a
   // tuple of (offset, info). Addends for REL are implicit and read from
   // the location where the relocations are applied. So, REL is more
   // compact than RELA but requires a bit more work to process.
   //
   // (From the linker writer's view, this distinction is not necessary.
   // If ELF had chosen either one and stuck with it, it would have
   // been easier to write code to process relocations, but it's too late
   // to change the spec.)
   //
   // Each ABI defines its relocation type. IsRela is true if the target
   // uses RELA. As far as we know, all 64-bit ABIs are using RELA. A
   // few 32-bit ABIs are using RELA too.
   bool IsRela;
 
   // True if we are creating position-independent code.
   bool Pic;
 
   // 4 for ELF32, 8 for ELF64.
   int Wordsize;
 };
 
 // The only instance of Configuration struct.
 extern Configuration *Config;
 
 static inline void errorOrWarn(const Twine &Msg) {
   if (!Config->NoinhibitExec)
     error(Msg);
   else
     warn(Msg);
 }
 } // namespace elf
 } // namespace lld
 
 #endif
Index: head/contrib/llvm/tools/lld/ELF/Driver.cpp
===================================================================
--- head/contrib/llvm/tools/lld/ELF/Driver.cpp	(revision 350466)
+++ head/contrib/llvm/tools/lld/ELF/Driver.cpp	(revision 350467)
@@ -1,1654 +1,1656 @@
 //===- Driver.cpp ---------------------------------------------------------===//
 //
 //                             The LLVM Linker
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
 // The driver drives the entire linking process. It is responsible for
 // parsing command line options and doing whatever it is instructed to do.
 //
 // One notable thing about LLD's driver when compared to other linkers is
 // that LLD's driver is agnostic about the host operating system.
 // Other linkers usually have implicit default values (such as a dynamic
 // linker path or library paths) for each host OS.
 //
 // I don't think implicit default values are useful because they are
 // usually explicitly specified by the compiler driver. They can even
 // be harmful when you are doing cross-linking. Therefore, in LLD, we
 // simply trust the compiler driver to pass all required options and
 // don't try to make any effort on our side.
 //
 //===----------------------------------------------------------------------===//
 
 #include "Driver.h"
 #include "Config.h"
 #include "Filesystem.h"
 #include "ICF.h"
 #include "InputFiles.h"
 #include "InputSection.h"
 #include "LinkerScript.h"
 #include "MarkLive.h"
 #include "OutputSections.h"
 #include "ScriptParser.h"
 #include "SymbolTable.h"
 #include "Symbols.h"
 #include "SyntheticSections.h"
 #include "Target.h"
 #include "Writer.h"
 #include "lld/Common/Args.h"
 #include "lld/Common/Driver.h"
 #include "lld/Common/ErrorHandler.h"
 #include "lld/Common/Memory.h"
 #include "lld/Common/Strings.h"
 #include "lld/Common/TargetOptionsCommandFlags.h"
 #include "lld/Common/Threads.h"
 #include "lld/Common/Version.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compression.h"
 #include "llvm/Support/LEB128.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/TarWriter.h"
 #include "llvm/Support/TargetSelect.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cstdlib>
 #include <utility>
 
 using namespace llvm;
 using namespace llvm::ELF;
 using namespace llvm::object;
 using namespace llvm::sys;
 using namespace llvm::support;
 
 using namespace lld;
 using namespace lld::elf;
 
 Configuration *elf::Config;
 LinkerDriver *elf::Driver;
 
 static void setConfigs(opt::InputArgList &Args);
 
 bool elf::link(ArrayRef<const char *> Args, bool CanExitEarly,
                raw_ostream &Error) {
   errorHandler().LogName = args::getFilenameWithoutExe(Args[0]);
   errorHandler().ErrorLimitExceededMsg =
       "too many errors emitted, stopping now (use "
       "-error-limit=0 to see all errors)";
   errorHandler().ErrorOS = &Error;
   errorHandler().ExitEarly = CanExitEarly;
   errorHandler().ColorDiagnostics = Error.has_colors();
 
   InputSections.clear();
   OutputSections.clear();
   BinaryFiles.clear();
   BitcodeFiles.clear();
   ObjectFiles.clear();
   SharedFiles.clear();
 
   Config = make<Configuration>();
   Driver = make<LinkerDriver>();
   Script = make<LinkerScript>();
   Symtab = make<SymbolTable>();
 
   Tar = nullptr;
   memset(&In, 0, sizeof(In));
 
   Config->ProgName = Args[0];
 
   Driver->main(Args);
 
   // Exit immediately if we don't need to return to the caller.
   // This saves time because the overhead of calling destructors
   // for all globally-allocated objects is not negligible.
   if (CanExitEarly)
     exitLld(errorCount() ? 1 : 0);
 
   freeArena();
   return !errorCount();
 }
 
 // Parses a linker -m option.
 static std::tuple<ELFKind, uint16_t, uint8_t> parseEmulation(StringRef Emul) {
   uint8_t OSABI = 0;
   StringRef S = Emul;
   if (S.endswith("_fbsd")) {
     S = S.drop_back(5);
     OSABI = ELFOSABI_FREEBSD;
   }
 
   std::pair<ELFKind, uint16_t> Ret =
       StringSwitch<std::pair<ELFKind, uint16_t>>(S)
           .Cases("aarch64elf", "aarch64linux", "aarch64_elf64_le_vec",
                  {ELF64LEKind, EM_AARCH64})
           .Cases("armelf", "armelf_linux_eabi", {ELF32LEKind, EM_ARM})
           .Case("elf32_x86_64", {ELF32LEKind, EM_X86_64})
           .Cases("elf32btsmip", "elf32btsmipn32", {ELF32BEKind, EM_MIPS})
           .Cases("elf32ltsmip", "elf32ltsmipn32", {ELF32LEKind, EM_MIPS})
           .Case("elf32lriscv", {ELF32LEKind, EM_RISCV})
           .Cases("elf32ppc", "elf32ppclinux", {ELF32BEKind, EM_PPC})
           .Case("elf64btsmip", {ELF64BEKind, EM_MIPS})
           .Case("elf64ltsmip", {ELF64LEKind, EM_MIPS})
           .Case("elf64lriscv", {ELF64LEKind, EM_RISCV})
           .Case("elf64ppc", {ELF64BEKind, EM_PPC64})
           .Case("elf64lppc", {ELF64LEKind, EM_PPC64})
           .Cases("elf_amd64", "elf_x86_64", {ELF64LEKind, EM_X86_64})
           .Case("elf_i386", {ELF32LEKind, EM_386})
           .Case("elf_iamcu", {ELF32LEKind, EM_IAMCU})
           .Default({ELFNoneKind, EM_NONE});
 
   if (Ret.first == ELFNoneKind)
     error("unknown emulation: " + Emul);
   return std::make_tuple(Ret.first, Ret.second, OSABI);
 }
 
 // Returns slices of MB by parsing MB as an archive file.
 // Each slice consists of a member file in the archive.
 std::vector<std::pair<MemoryBufferRef, uint64_t>> static getArchiveMembers(
     MemoryBufferRef MB) {
   std::unique_ptr<Archive> File =
       CHECK(Archive::create(MB),
             MB.getBufferIdentifier() + ": failed to parse archive");
 
   std::vector<std::pair<MemoryBufferRef, uint64_t>> V;
   Error Err = Error::success();
   bool AddToTar = File->isThin() && Tar;
   for (const ErrorOr<Archive::Child> &COrErr : File->children(Err)) {
     Archive::Child C =
         CHECK(COrErr, MB.getBufferIdentifier() +
                           ": could not get the child of the archive");
     MemoryBufferRef MBRef =
         CHECK(C.getMemoryBufferRef(),
               MB.getBufferIdentifier() +
                   ": could not get the buffer for a child of the archive");
     if (AddToTar)
       Tar->append(relativeToRoot(check(C.getFullName())), MBRef.getBuffer());
     V.push_back(std::make_pair(MBRef, C.getChildOffset()));
   }
   if (Err)
     fatal(MB.getBufferIdentifier() + ": Archive::children failed: " +
           toString(std::move(Err)));
 
   // Take ownership of memory buffers created for members of thin archives.
   for (std::unique_ptr<MemoryBuffer> &MB : File->takeThinBuffers())
     make<std::unique_ptr<MemoryBuffer>>(std::move(MB));
 
   return V;
 }
 
 // Opens a file and creates a file object. Path has to be resolved already.
 void LinkerDriver::addFile(StringRef Path, bool WithLOption) {
   using namespace sys::fs;
 
   Optional<MemoryBufferRef> Buffer = readFile(Path);
   if (!Buffer.hasValue())
     return;
   MemoryBufferRef MBRef = *Buffer;
 
   if (Config->FormatBinary) {
     Files.push_back(make<BinaryFile>(MBRef));
     return;
   }
 
   switch (identify_magic(MBRef.getBuffer())) {
   case file_magic::unknown:
     readLinkerScript(MBRef);
     return;
   case file_magic::archive: {
     // Handle -whole-archive.
     if (InWholeArchive) {
       for (const auto &P : getArchiveMembers(MBRef))
         Files.push_back(createObjectFile(P.first, Path, P.second));
       return;
     }
 
     std::unique_ptr<Archive> File =
         CHECK(Archive::create(MBRef), Path + ": failed to parse archive");
 
     // If an archive file has no symbol table, it is likely that a user
     // is attempting LTO and using a default ar command that doesn't
     // understand the LLVM bitcode file. It is a pretty common error, so
     // we'll handle it as if it had a symbol table.
     if (!File->isEmpty() && !File->hasSymbolTable()) {
       for (const auto &P : getArchiveMembers(MBRef))
         Files.push_back(make<LazyObjFile>(P.first, Path, P.second));
       return;
     }
 
     // Handle the regular case.
     Files.push_back(make<ArchiveFile>(std::move(File)));
     return;
   }
   case file_magic::elf_shared_object:
     if (Config->Static || Config->Relocatable) {
       error("attempted static link of dynamic object " + Path);
       return;
     }
 
     // DSOs usually have DT_SONAME tags in their ELF headers, and the
     // sonames are used to identify DSOs. But if they are missing,
     // they are identified by filenames. We don't know whether the new
     // file has a DT_SONAME or not because we haven't parsed it yet.
     // Here, we set the default soname for the file because we might
     // need it later.
     //
     // If a file was specified by -lfoo, the directory part is not
     // significant, as a user did not specify it. This behavior is
     // compatible with GNU.
     Files.push_back(
         createSharedFile(MBRef, WithLOption ? path::filename(Path) : Path));
     return;
   case file_magic::bitcode:
   case file_magic::elf_relocatable:
     if (InLib)
       Files.push_back(make<LazyObjFile>(MBRef, "", 0));
     else
       Files.push_back(createObjectFile(MBRef));
     break;
   default:
     error(Path + ": unknown file type");
   }
 }
 
 // Adds a given library by searching for it in the input search paths.
 void LinkerDriver::addLibrary(StringRef Name) {
   if (Optional<std::string> Path = searchLibrary(Name))
     addFile(*Path, /*WithLOption=*/true);
   else
     error("unable to find library -l" + Name);
 }
 
 // This function is called on startup. We need this for LTO since
 // LTO calls LLVM functions to compile bitcode files to native code.
 // Technically this can be delayed until we read bitcode files, but
 // we don't bother to do it lazily because the initialization is fast.
 static void initLLVM() {
   InitializeAllTargets();
   InitializeAllTargetMCs();
   InitializeAllAsmPrinters();
   InitializeAllAsmParsers();
 }
 
 // Some command line options or some combinations of them are not allowed.
 // This function checks for such errors.
 static void checkOptions() {
   // The MIPS ABI as of 2016 does not support the GNU-style symbol lookup
   // table which is a relatively new feature.
   if (Config->EMachine == EM_MIPS && Config->GnuHash)
     error("the .gnu.hash section is not compatible with the MIPS target");
 
   if (Config->FixCortexA53Errata843419 && Config->EMachine != EM_AARCH64)
     error("--fix-cortex-a53-843419 is only supported on AArch64 targets");
 
   if (Config->TocOptimize && Config->EMachine != EM_PPC64)
     error("--toc-optimize is only supported on the PowerPC64 target");
 
   if (Config->Pie && Config->Shared)
     error("-shared and -pie may not be used together");
 
   if (!Config->Shared && !Config->FilterList.empty())
     error("-F may not be used without -shared");
 
   if (!Config->Shared && !Config->AuxiliaryList.empty())
     error("-f may not be used without -shared");
 
   if (!Config->Relocatable && !Config->DefineCommon)
     error("-no-define-common not supported in non relocatable output");
 
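+  // -z ifunc-noplt resolves ifunc relocations at load time via relocations
+  // against the text segment instead of through PLT entries, so it requires
+  // text relocations, which -z text forbids.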
+  if (Config->ZText && Config->ZIfuncNoplt)
+    error("-z text and -z ifunc-noplt may not be used together");
+
   if (Config->Relocatable) {
     if (Config->Shared)
       error("-r and -shared may not be used together");
     if (Config->GcSections)
       error("-r and --gc-sections may not be used together");
     if (Config->GdbIndex)
       error("-r and --gdb-index may not be used together");
     if (Config->ICF != ICFLevel::None)
       error("-r and --icf may not be used together");
     if (Config->Pie)
       error("-r and -pie may not be used together");
   }
 
   if (Config->ExecuteOnly) {
     if (Config->EMachine != EM_AARCH64)
       error("-execute-only is only supported on AArch64 targets");
 
     if (Config->SingleRoRx && !Script->HasSectionsCommand)
       error("-execute-only and -no-rosegment cannot be used together");
   }
 }
 
 static const char *getReproduceOption(opt::InputArgList &Args) {
   if (auto *Arg = Args.getLastArg(OPT_reproduce))
     return Arg->getValue();
   return getenv("LLD_REPRODUCE");
 }
 
 static bool hasZOption(opt::InputArgList &Args, StringRef Key) {
   for (auto *Arg : Args.filtered(OPT_z))
     if (Key == Arg->getValue())
       return true;
   return false;
 }
 
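 // Returns true if the last of the -z K1/K2 keywords on the command line is
 // K1, false if it is K2, and Default if neither appears.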
 static bool getZFlag(opt::InputArgList &Args, StringRef K1, StringRef K2,
                      bool Default) {
   for (auto *Arg : Args.filtered_reverse(OPT_z)) {
     if (K1 == Arg->getValue())
       return true;
     if (K2 == Arg->getValue())
       return false;
   }
   return Default;
 }
 
 static bool isKnownZFlag(StringRef S) {
   return S == "combreloc" || S == "copyreloc" || S == "defs" ||
          S == "execstack" || S == "global" || S == "hazardplt" ||
-         S == "ifunc-noplt" ||
-         S == "initfirst" || S == "interpose" ||
+         S == "ifunc-noplt" || S == "initfirst" || S == "interpose" ||
          S == "keep-text-section-prefix" || S == "lazy" || S == "muldefs" ||
          S == "nocombreloc" || S == "nocopyreloc" || S == "nodefaultlib" ||
          S == "nodelete" || S == "nodlopen" || S == "noexecstack" ||
          S == "nokeep-text-section-prefix" || S == "norelro" || S == "notext" ||
          S == "now" || S == "origin" || S == "relro" || S == "retpolineplt" ||
          S == "rodynamic" || S == "text" || S == "wxneeded" ||
          S.startswith("max-page-size=") || S.startswith("stack-size=");
 }
 
 // Report an error for an unknown -z option.
 static void checkZOptions(opt::InputArgList &Args) {
   for (auto *Arg : Args.filtered(OPT_z))
     if (!isKnownZFlag(Arg->getValue()))
       error("unknown -z value: " + StringRef(Arg->getValue()));
 }
 
 void LinkerDriver::main(ArrayRef<const char *> ArgsArr) {
   ELFOptTable Parser;
   opt::InputArgList Args = Parser.parse(ArgsArr.slice(1));
 
   // Interpret this flag early because error() depends on it.
   errorHandler().ErrorLimit = args::getInteger(Args, OPT_error_limit, 20);
 
   // Handle -help
   if (Args.hasArg(OPT_help)) {
     printHelp();
     return;
   }
 
   // Handle -v or -version.
   //
   // A note about the "compatible with GNU linkers" message: this is a hack for
   // scripts generated by GNU Libtool 2.4.6 (released in February 2014 and
   // still the newest version in March 2017) or earlier to recognize LLD as
   // a GNU compatible linker. As long as an output for the -v option
   // contains "GNU" or "with BFD", they recognize us as GNU-compatible.
   //
   // This is a somewhat ugly hack, but in reality, we had no choice other
   // than doing this. Considering the very long release cycle of Libtool,
   // it is not easy to improve it to recognize LLD as a GNU compatible
   // linker in a timely manner. Even if we can make it, there are still a
   // lot of "configure" scripts out there that are generated by old versions
   // of Libtool. We cannot convince every software developer to migrate to
   // the latest version and re-generate scripts. So we have this hack.
   if (Args.hasArg(OPT_v) || Args.hasArg(OPT_version))
     message(getLLDVersion() + " (compatible with GNU linkers)");
 
   if (const char *Path = getReproduceOption(Args)) {
     // Note that --reproduce is a debug option so you can ignore it
     // if you are trying to understand the whole picture of the code.
     Expected<std::unique_ptr<TarWriter>> ErrOrWriter =
         TarWriter::create(Path, path::stem(Path));
     if (ErrOrWriter) {
       Tar = std::move(*ErrOrWriter);
       Tar->append("response.txt", createResponseFile(Args));
       Tar->append("version.txt", getLLDVersion() + "\n");
     } else {
       error("--reproduce: " + toString(ErrOrWriter.takeError()));
     }
   }
 
   readConfigs(Args);
   checkZOptions(Args);
 
   // The behavior of -v or --version is a bit strange, but this is
   // needed for compatibility with GNU linkers.
   if (Args.hasArg(OPT_v) && !Args.hasArg(OPT_INPUT))
     return;
   if (Args.hasArg(OPT_version))
     return;
 
   initLLVM();
   createFiles(Args);
   if (errorCount())
     return;
 
   inferMachineType();
   setConfigs(Args);
   checkOptions();
   if (errorCount())
     return;
 
   switch (Config->EKind) {
   case ELF32LEKind:
     link<ELF32LE>(Args);
     return;
   case ELF32BEKind:
     link<ELF32BE>(Args);
     return;
   case ELF64LEKind:
     link<ELF64LE>(Args);
     return;
   case ELF64BEKind:
     link<ELF64BE>(Args);
     return;
   default:
     llvm_unreachable("unknown Config->EKind");
   }
 }
 
 static std::string getRpath(opt::InputArgList &Args) {
   std::vector<StringRef> V = args::getStrings(Args, OPT_rpath);
   return llvm::join(V.begin(), V.end(), ":");
 }
 
 // Determines what we should do if there are remaining unresolved
 // symbols after the name resolution.
 static UnresolvedPolicy getUnresolvedSymbolPolicy(opt::InputArgList &Args) {
   UnresolvedPolicy ErrorOrWarn = Args.hasFlag(OPT_error_unresolved_symbols,
                                               OPT_warn_unresolved_symbols, true)
                                      ? UnresolvedPolicy::ReportError
                                      : UnresolvedPolicy::Warn;
 
   // Process the last of -unresolved-symbols, -no-undefined or -z defs.
   for (auto *Arg : llvm::reverse(Args)) {
     switch (Arg->getOption().getID()) {
     case OPT_unresolved_symbols: {
       StringRef S = Arg->getValue();
       if (S == "ignore-all" || S == "ignore-in-object-files")
         return UnresolvedPolicy::Ignore;
       if (S == "ignore-in-shared-libs" || S == "report-all")
         return ErrorOrWarn;
       error("unknown --unresolved-symbols value: " + S);
       continue;
     }
     case OPT_no_undefined:
       return ErrorOrWarn;
     case OPT_z:
       if (StringRef(Arg->getValue()) == "defs")
         return ErrorOrWarn;
       continue;
     }
   }
 
   // -shared implies -unresolved-symbols=ignore-all because missing
   // symbols are likely to be resolved at runtime using other DSOs.
   if (Config->Shared)
     return UnresolvedPolicy::Ignore;
   return ErrorOrWarn;
 }
 
 static Target2Policy getTarget2(opt::InputArgList &Args) {
   StringRef S = Args.getLastArgValue(OPT_target2, "got-rel");
   if (S == "rel")
     return Target2Policy::Rel;
   if (S == "abs")
     return Target2Policy::Abs;
   if (S == "got-rel")
     return Target2Policy::GotRel;
   error("unknown --target2 option: " + S);
   return Target2Policy::GotRel;
 }
 
 static bool isOutputFormatBinary(opt::InputArgList &Args) {
   StringRef S = Args.getLastArgValue(OPT_oformat, "elf");
   if (S == "binary")
     return true;
   if (!S.startswith("elf"))
     error("unknown --oformat value: " + S);
   return false;
 }
 
 static DiscardPolicy getDiscard(opt::InputArgList &Args) {
   if (Args.hasArg(OPT_relocatable))
     return DiscardPolicy::None;
 
   auto *Arg =
       Args.getLastArg(OPT_discard_all, OPT_discard_locals, OPT_discard_none);
   if (!Arg)
     return DiscardPolicy::Default;
   if (Arg->getOption().getID() == OPT_discard_all)
     return DiscardPolicy::All;
   if (Arg->getOption().getID() == OPT_discard_locals)
     return DiscardPolicy::Locals;
   return DiscardPolicy::None;
 }
 
 static StringRef getDynamicLinker(opt::InputArgList &Args) {
   auto *Arg = Args.getLastArg(OPT_dynamic_linker, OPT_no_dynamic_linker);
   if (!Arg || Arg->getOption().getID() == OPT_no_dynamic_linker)
     return "";
   return Arg->getValue();
 }
 
 static ICFLevel getICF(opt::InputArgList &Args) {
   auto *Arg = Args.getLastArg(OPT_icf_none, OPT_icf_safe, OPT_icf_all);
   if (!Arg || Arg->getOption().getID() == OPT_icf_none)
     return ICFLevel::None;
   if (Arg->getOption().getID() == OPT_icf_safe)
     return ICFLevel::Safe;
   return ICFLevel::All;
 }
 
 static StripPolicy getStrip(opt::InputArgList &Args) {
   if (Args.hasArg(OPT_relocatable))
     return StripPolicy::None;
 
   auto *Arg = Args.getLastArg(OPT_strip_all, OPT_strip_debug);
   if (!Arg)
     return StripPolicy::None;
   if (Arg->getOption().getID() == OPT_strip_all)
     return StripPolicy::All;
   return StripPolicy::Debug;
 }
 
 static uint64_t parseSectionAddress(StringRef S, const opt::Arg &Arg) {
   uint64_t VA = 0;
   if (S.startswith("0x"))
     S = S.drop_front(2);
   if (!to_integer(S, VA, 16))
     error("invalid argument: " + toString(Arg));
   return VA;
 }
 
 static StringMap<uint64_t> getSectionStartMap(opt::InputArgList &Args) {
   StringMap<uint64_t> Ret;
   for (auto *Arg : Args.filtered(OPT_section_start)) {
     StringRef Name;
     StringRef Addr;
     std::tie(Name, Addr) = StringRef(Arg->getValue()).split('=');
     Ret[Name] = parseSectionAddress(Addr, *Arg);
   }
 
   if (auto *Arg = Args.getLastArg(OPT_Ttext))
     Ret[".text"] = parseSectionAddress(Arg->getValue(), *Arg);
   if (auto *Arg = Args.getLastArg(OPT_Tdata))
     Ret[".data"] = parseSectionAddress(Arg->getValue(), *Arg);
   if (auto *Arg = Args.getLastArg(OPT_Tbss))
     Ret[".bss"] = parseSectionAddress(Arg->getValue(), *Arg);
   return Ret;
 }
 
 static SortSectionPolicy getSortSection(opt::InputArgList &Args) {
   StringRef S = Args.getLastArgValue(OPT_sort_section);
   if (S == "alignment")
     return SortSectionPolicy::Alignment;
   if (S == "name")
     return SortSectionPolicy::Name;
   if (!S.empty())
     error("unknown --sort-section rule: " + S);
   return SortSectionPolicy::Default;
 }
 
 static OrphanHandlingPolicy getOrphanHandling(opt::InputArgList &Args) {
   StringRef S = Args.getLastArgValue(OPT_orphan_handling, "place");
   if (S == "warn")
     return OrphanHandlingPolicy::Warn;
   if (S == "error")
     return OrphanHandlingPolicy::Error;
   if (S != "place")
     error("unknown --orphan-handling mode: " + S);
   return OrphanHandlingPolicy::Place;
 }
 
 // Parse --build-id or --build-id=<style>. We handle "tree" as a
 // synonym for "sha1" because all our hash functions including
 // -build-id=sha1 are actually tree hashes for performance reasons.
 static std::pair<BuildIdKind, std::vector<uint8_t>>
 getBuildId(opt::InputArgList &Args) {
   auto *Arg = Args.getLastArg(OPT_build_id, OPT_build_id_eq);
   if (!Arg)
     return {BuildIdKind::None, {}};
 
   if (Arg->getOption().getID() == OPT_build_id)
     return {BuildIdKind::Fast, {}};
 
   StringRef S = Arg->getValue();
   if (S == "fast")
     return {BuildIdKind::Fast, {}};
   if (S == "md5")
     return {BuildIdKind::Md5, {}};
   if (S == "sha1" || S == "tree")
     return {BuildIdKind::Sha1, {}};
   if (S == "uuid")
     return {BuildIdKind::Uuid, {}};
   if (S.startswith("0x"))
     return {BuildIdKind::Hexstring, parseHex(S.substr(2))};
 
   if (S != "none")
     error("unknown --build-id style: " + S);
   return {BuildIdKind::None, {}};
 }
 
 static std::pair<bool, bool> getPackDynRelocs(opt::InputArgList &Args) {
   StringRef S = Args.getLastArgValue(OPT_pack_dyn_relocs, "none");
   if (S == "android")
     return {true, false};
   if (S == "relr")
     return {false, true};
   if (S == "android+relr")
     return {true, true};
 
   if (S != "none")
     error("unknown -pack-dyn-relocs format: " + S);
   return {false, false};
 }
 
 static void readCallGraph(MemoryBufferRef MB) {
   // Build a map from symbol name to symbol
   DenseMap<StringRef, Symbol *> Map;
   for (InputFile *File : ObjectFiles)
     for (Symbol *Sym : File->getSymbols())
       Map[Sym->getName()] = Sym;
 
   auto FindSection = [&](StringRef Name) -> InputSectionBase * {
     Symbol *Sym = Map.lookup(Name);
     if (!Sym) {
       if (Config->WarnSymbolOrdering)
         warn(MB.getBufferIdentifier() + ": no such symbol: " + Name);
       return nullptr;
     }
     maybeWarnUnorderableSymbol(Sym);
 
     if (Defined *DR = dyn_cast_or_null<Defined>(Sym))
       return dyn_cast_or_null<InputSectionBase>(DR->Section);
     return nullptr;
   };
 
   for (StringRef Line : args::getLines(MB)) {
     SmallVector<StringRef, 3> Fields;
     Line.split(Fields, ' ');
     uint64_t Count;
 
     if (Fields.size() != 3 || !to_integer(Fields[2], Count)) {
       error(MB.getBufferIdentifier() + ": parse error");
       return;
     }
 
     if (InputSectionBase *From = FindSection(Fields[0]))
       if (InputSectionBase *To = FindSection(Fields[1]))
         Config->CallGraphProfile[std::make_pair(From, To)] += Count;
   }
 }
 
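 // Merges the call graph profile entries embedded in object files into
 // Config->CallGraphProfile.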
 template <class ELFT> static void readCallGraphsFromObjectFiles() {
   for (auto File : ObjectFiles) {
     auto *Obj = cast<ObjFile<ELFT>>(File);
 
     for (const Elf_CGProfile_Impl<ELFT> &CGPE : Obj->CGProfile) {
       auto *FromSym = dyn_cast<Defined>(&Obj->getSymbol(CGPE.cgp_from));
       auto *ToSym = dyn_cast<Defined>(&Obj->getSymbol(CGPE.cgp_to));
       if (!FromSym || !ToSym)
         continue;
 
       auto *From = dyn_cast_or_null<InputSectionBase>(FromSym->Section);
       auto *To = dyn_cast_or_null<InputSectionBase>(ToSym->Section);
       if (From && To)
         Config->CallGraphProfile[{From, To}] += CGPE.cgp_weight;
     }
   }
 }
 
 static bool getCompressDebugSections(opt::InputArgList &Args) {
   StringRef S = Args.getLastArgValue(OPT_compress_debug_sections, "none");
   if (S == "none")
     return false;
   if (S != "zlib")
     error("unknown --compress-debug-sections value: " + S);
   if (!zlib::isAvailable())
     error("--compress-debug-sections: zlib is not available");
   return true;
 }
 
 static std::pair<StringRef, StringRef> getOldNewOptions(opt::InputArgList &Args,
                                                         unsigned Id) {
   auto *Arg = Args.getLastArg(Id);
   if (!Arg)
     return {"", ""};
 
   StringRef S = Arg->getValue();
   std::pair<StringRef, StringRef> Ret = S.split(';');
   if (Ret.second.empty())
     error(Arg->getSpelling() + " expects 'old;new' format, but got " + S);
   return Ret;
 }
 
 // Parse the symbol ordering file and warn for any duplicate entries.
 static std::vector<StringRef> getSymbolOrderingFile(MemoryBufferRef MB) {
   SetVector<StringRef> Names;
   for (StringRef S : args::getLines(MB))
     if (!Names.insert(S) && Config->WarnSymbolOrdering)
       warn(MB.getBufferIdentifier() + ": duplicate ordered symbol: " + S);
 
   return Names.takeVector();
 }
 
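 // Parses a single LLVM option (used for -mllvm and -plugin-opt values) with
 // cl::ParseCommandLineOptions, reporting Msg on failure.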
 static void parseClangOption(StringRef Opt, const Twine &Msg) {
   std::string Err;
   raw_string_ostream OS(Err);
 
   const char *Argv[] = {Config->ProgName.data(), Opt.data()};
   if (cl::ParseCommandLineOptions(2, Argv, "", &OS))
     return;
   OS.flush();
   error(Msg + ": " + StringRef(Err).trim());
 }
 
 // Initializes Config members from the command line options.
 void LinkerDriver::readConfigs(opt::InputArgList &Args) {
   errorHandler().Verbose = Args.hasArg(OPT_verbose);
   errorHandler().FatalWarnings =
       Args.hasFlag(OPT_fatal_warnings, OPT_no_fatal_warnings, false);
   ThreadsEnabled = Args.hasFlag(OPT_threads, OPT_no_threads, true);
 
   Config->AllowMultipleDefinition =
       Args.hasFlag(OPT_allow_multiple_definition,
                    OPT_no_allow_multiple_definition, false) ||
       hasZOption(Args, "muldefs");
   Config->AllowShlibUndefined =
       Args.hasFlag(OPT_allow_shlib_undefined, OPT_no_allow_shlib_undefined,
                    Args.hasArg(OPT_shared));
   Config->AuxiliaryList = args::getStrings(Args, OPT_auxiliary);
   Config->Bsymbolic = Args.hasArg(OPT_Bsymbolic);
   Config->BsymbolicFunctions = Args.hasArg(OPT_Bsymbolic_functions);
   Config->CheckSections =
       Args.hasFlag(OPT_check_sections, OPT_no_check_sections, true);
   Config->Chroot = Args.getLastArgValue(OPT_chroot);
   Config->CompressDebugSections = getCompressDebugSections(Args);
   Config->Cref = Args.hasFlag(OPT_cref, OPT_no_cref, false);
   Config->DefineCommon = Args.hasFlag(OPT_define_common, OPT_no_define_common,
                                       !Args.hasArg(OPT_relocatable));
   Config->Demangle = Args.hasFlag(OPT_demangle, OPT_no_demangle, true);
   Config->DisableVerify = Args.hasArg(OPT_disable_verify);
   Config->Discard = getDiscard(Args);
   Config->DwoDir = Args.getLastArgValue(OPT_plugin_opt_dwo_dir_eq);
   Config->DynamicLinker = getDynamicLinker(Args);
   Config->EhFrameHdr =
       Args.hasFlag(OPT_eh_frame_hdr, OPT_no_eh_frame_hdr, false);
   Config->EmitLLVM = Args.hasArg(OPT_plugin_opt_emit_llvm, false);
   Config->EmitRelocs = Args.hasArg(OPT_emit_relocs);
   Config->CallGraphProfileSort = Args.hasFlag(
       OPT_call_graph_profile_sort, OPT_no_call_graph_profile_sort, true);
   Config->EnableNewDtags =
       Args.hasFlag(OPT_enable_new_dtags, OPT_disable_new_dtags, true);
   Config->Entry = Args.getLastArgValue(OPT_entry);
   Config->ExecuteOnly =
       Args.hasFlag(OPT_execute_only, OPT_no_execute_only, false);
   Config->ExportDynamic =
       Args.hasFlag(OPT_export_dynamic, OPT_no_export_dynamic, false);
   Config->FilterList = args::getStrings(Args, OPT_filter);
   Config->Fini = Args.getLastArgValue(OPT_fini, "_fini");
   Config->FixCortexA53Errata843419 = Args.hasArg(OPT_fix_cortex_a53_843419);
   Config->GcSections = Args.hasFlag(OPT_gc_sections, OPT_no_gc_sections, false);
   Config->GnuUnique = Args.hasFlag(OPT_gnu_unique, OPT_no_gnu_unique, true);
   Config->GdbIndex = Args.hasFlag(OPT_gdb_index, OPT_no_gdb_index, false);
   Config->ICF = getICF(Args);
   Config->IgnoreDataAddressEquality =
       Args.hasArg(OPT_ignore_data_address_equality);
   Config->IgnoreFunctionAddressEquality =
       Args.hasArg(OPT_ignore_function_address_equality);
   Config->Init = Args.getLastArgValue(OPT_init, "_init");
   Config->LTOAAPipeline = Args.getLastArgValue(OPT_lto_aa_pipeline);
   Config->LTODebugPassManager = Args.hasArg(OPT_lto_debug_pass_manager);
   Config->LTONewPassManager = Args.hasArg(OPT_lto_new_pass_manager);
   Config->LTONewPmPasses = Args.getLastArgValue(OPT_lto_newpm_passes);
   Config->LTOO = args::getInteger(Args, OPT_lto_O, 2);
   Config->LTOObjPath = Args.getLastArgValue(OPT_plugin_opt_obj_path_eq);
   Config->LTOPartitions = args::getInteger(Args, OPT_lto_partitions, 1);
   Config->LTOSampleProfile = Args.getLastArgValue(OPT_lto_sample_profile);
   Config->MapFile = Args.getLastArgValue(OPT_Map);
   Config->MipsGotSize = args::getInteger(Args, OPT_mips_got_size, 0xfff0);
   Config->MergeArmExidx =
       Args.hasFlag(OPT_merge_exidx_entries, OPT_no_merge_exidx_entries, true);
   Config->NoinhibitExec = Args.hasArg(OPT_noinhibit_exec);
   Config->Nostdlib = Args.hasArg(OPT_nostdlib);
   Config->OFormatBinary = isOutputFormatBinary(Args);
   Config->Omagic = Args.hasFlag(OPT_omagic, OPT_no_omagic, false);
   Config->OptRemarksFilename = Args.getLastArgValue(OPT_opt_remarks_filename);
   Config->OptRemarksWithHotness = Args.hasArg(OPT_opt_remarks_with_hotness);
   Config->Optimize = args::getInteger(Args, OPT_O, 1);
   Config->OrphanHandling = getOrphanHandling(Args);
   Config->OutputFile = Args.getLastArgValue(OPT_o);
   Config->Pie = Args.hasFlag(OPT_pie, OPT_no_pie, false);
   Config->PrintIcfSections =
       Args.hasFlag(OPT_print_icf_sections, OPT_no_print_icf_sections, false);
   Config->PrintGcSections =
       Args.hasFlag(OPT_print_gc_sections, OPT_no_print_gc_sections, false);
   Config->Rpath = getRpath(Args);
   Config->Relocatable = Args.hasArg(OPT_relocatable);
   Config->SaveTemps = Args.hasArg(OPT_save_temps);
   Config->SearchPaths = args::getStrings(Args, OPT_library_path);
   Config->SectionStartMap = getSectionStartMap(Args);
   Config->Shared = Args.hasArg(OPT_shared);
   Config->SingleRoRx = Args.hasArg(OPT_no_rosegment);
   Config->SoName = Args.getLastArgValue(OPT_soname);
   Config->SortSection = getSortSection(Args);
   Config->SplitStackAdjustSize = args::getInteger(Args, OPT_split_stack_adjust_size, 16384);
   Config->Strip = getStrip(Args);
   Config->Sysroot = Args.getLastArgValue(OPT_sysroot);
   Config->Target1Rel = Args.hasFlag(OPT_target1_rel, OPT_target1_abs, false);
   Config->Target2 = getTarget2(Args);
   Config->ThinLTOCacheDir = Args.getLastArgValue(OPT_thinlto_cache_dir);
   Config->ThinLTOCachePolicy = CHECK(
       parseCachePruningPolicy(Args.getLastArgValue(OPT_thinlto_cache_policy)),
       "--thinlto-cache-policy: invalid cache policy");
   Config->ThinLTOEmitImportsFiles =
       Args.hasArg(OPT_plugin_opt_thinlto_emit_imports_files);
   Config->ThinLTOIndexOnly = Args.hasArg(OPT_plugin_opt_thinlto_index_only) ||
                              Args.hasArg(OPT_plugin_opt_thinlto_index_only_eq);
   Config->ThinLTOIndexOnlyArg =
       Args.getLastArgValue(OPT_plugin_opt_thinlto_index_only_eq);
   Config->ThinLTOJobs = args::getInteger(Args, OPT_thinlto_jobs, -1u);
   Config->ThinLTOObjectSuffixReplace =
       getOldNewOptions(Args, OPT_plugin_opt_thinlto_object_suffix_replace_eq);
   Config->ThinLTOPrefixReplace =
       getOldNewOptions(Args, OPT_plugin_opt_thinlto_prefix_replace_eq);
   Config->Trace = Args.hasArg(OPT_trace);
   Config->Undefined = args::getStrings(Args, OPT_undefined);
   Config->UndefinedVersion =
       Args.hasFlag(OPT_undefined_version, OPT_no_undefined_version, true);
   Config->UseAndroidRelrTags = Args.hasFlag(
       OPT_use_android_relr_tags, OPT_no_use_android_relr_tags, false);
   Config->UnresolvedSymbols = getUnresolvedSymbolPolicy(Args);
   Config->WarnBackrefs =
       Args.hasFlag(OPT_warn_backrefs, OPT_no_warn_backrefs, false);
   Config->WarnCommon = Args.hasFlag(OPT_warn_common, OPT_no_warn_common, false);
   Config->WarnIfuncTextrel =
       Args.hasFlag(OPT_warn_ifunc_textrel, OPT_no_warn_ifunc_textrel, false);
   Config->WarnSymbolOrdering =
       Args.hasFlag(OPT_warn_symbol_ordering, OPT_no_warn_symbol_ordering, true);
   Config->ZCombreloc = getZFlag(Args, "combreloc", "nocombreloc", true);
   Config->ZCopyreloc = getZFlag(Args, "copyreloc", "nocopyreloc", true);
   Config->ZExecstack = getZFlag(Args, "execstack", "noexecstack", false);
   Config->ZGlobal = hasZOption(Args, "global");
   Config->ZHazardplt = hasZOption(Args, "hazardplt");
-  Config->ZIfuncnoplt = hasZOption(Args, "ifunc-noplt");
+  Config->ZIfuncNoplt = hasZOption(Args, "ifunc-noplt");
   Config->ZInitfirst = hasZOption(Args, "initfirst");
   Config->ZInterpose = hasZOption(Args, "interpose");
   Config->ZKeepTextSectionPrefix = getZFlag(
       Args, "keep-text-section-prefix", "nokeep-text-section-prefix", false);
   Config->ZNodefaultlib = hasZOption(Args, "nodefaultlib");
   Config->ZNodelete = hasZOption(Args, "nodelete");
   Config->ZNodlopen = hasZOption(Args, "nodlopen");
   Config->ZNow = getZFlag(Args, "now", "lazy", false);
   Config->ZOrigin = hasZOption(Args, "origin");
   Config->ZRelro = getZFlag(Args, "relro", "norelro", true);
   Config->ZRetpolineplt = hasZOption(Args, "retpolineplt");
   Config->ZRodynamic = hasZOption(Args, "rodynamic");
   Config->ZStackSize = args::getZOptionValue(Args, OPT_z, "stack-size", 0);
   Config->ZText = getZFlag(Args, "text", "notext", true);
   Config->ZWxneeded = hasZOption(Args, "wxneeded");
 
   // Parse LTO options.
   if (auto *Arg = Args.getLastArg(OPT_plugin_opt_mcpu_eq))
     parseClangOption(Saver.save("-mcpu=" + StringRef(Arg->getValue())),
                      Arg->getSpelling());
 
   for (auto *Arg : Args.filtered(OPT_plugin_opt))
     parseClangOption(Arg->getValue(), Arg->getSpelling());
 
   // Parse -mllvm options.
   for (auto *Arg : Args.filtered(OPT_mllvm))
     parseClangOption(Arg->getValue(), Arg->getSpelling());
 
   if (Config->LTOO > 3)
     error("invalid optimization level for LTO: " + Twine(Config->LTOO));
   if (Config->LTOPartitions == 0)
     error("--lto-partitions: number of threads must be > 0");
   if (Config->ThinLTOJobs == 0)
     error("--thinlto-jobs: number of threads must be > 0");
 
   if (Config->SplitStackAdjustSize < 0)
     error("--split-stack-adjust-size: size must be >= 0");
 
   // Parse ELF{32,64}{LE,BE} and CPU type.
   if (auto *Arg = Args.getLastArg(OPT_m)) {
     StringRef S = Arg->getValue();
     std::tie(Config->EKind, Config->EMachine, Config->OSABI) =
         parseEmulation(S);
     Config->MipsN32Abi = (S == "elf32btsmipn32" || S == "elf32ltsmipn32");
     Config->Emulation = S;
   }
 
   // Parse -hash-style={sysv,gnu,both}.
   if (auto *Arg = Args.getLastArg(OPT_hash_style)) {
     StringRef S = Arg->getValue();
     if (S == "sysv")
       Config->SysvHash = true;
     else if (S == "gnu")
       Config->GnuHash = true;
     else if (S == "both")
       Config->SysvHash = Config->GnuHash = true;
     else
       error("unknown -hash-style: " + S);
   }
 
   if (Args.hasArg(OPT_print_map))
     Config->MapFile = "-";
 
   // --omagic is an option to create old-fashioned executables in which
   // .text segments are writable. Today, the option is still in use to
   // create special-purpose programs such as boot loaders. It doesn't
   // make sense to create PT_GNU_RELRO for such executables.
   if (Config->Omagic)
     Config->ZRelro = false;
 
   std::tie(Config->BuildId, Config->BuildIdVector) = getBuildId(Args);
 
   std::tie(Config->AndroidPackDynRelocs, Config->RelrPackDynRelocs) =
       getPackDynRelocs(Args);
 
   if (auto *Arg = Args.getLastArg(OPT_symbol_ordering_file))
     if (Optional<MemoryBufferRef> Buffer = readFile(Arg->getValue()))
       Config->SymbolOrderingFile = getSymbolOrderingFile(*Buffer);
 
   // If --retain-symbols-file is used, we'll keep only the symbols listed in
   // the file and discard all others.
   if (auto *Arg = Args.getLastArg(OPT_retain_symbols_file)) {
     Config->DefaultSymbolVersion = VER_NDX_LOCAL;
     if (Optional<MemoryBufferRef> Buffer = readFile(Arg->getValue()))
       for (StringRef S : args::getLines(*Buffer))
         Config->VersionScriptGlobals.push_back(
             {S, /*IsExternCpp*/ false, /*HasWildcard*/ false});
   }
 
   bool HasExportDynamic =
       Args.hasFlag(OPT_export_dynamic, OPT_no_export_dynamic, false);
 
   // Parses -dynamic-list and -export-dynamic-symbol. They make some
   // symbols private. Note that -export-dynamic takes precedence over them
   // as it says all symbols should be exported.
   if (!HasExportDynamic) {
     for (auto *Arg : Args.filtered(OPT_dynamic_list))
       if (Optional<MemoryBufferRef> Buffer = readFile(Arg->getValue()))
         readDynamicList(*Buffer);
 
     for (auto *Arg : Args.filtered(OPT_export_dynamic_symbol))
       Config->DynamicList.push_back(
           {Arg->getValue(), /*IsExternCpp*/ false, /*HasWildcard*/ false});
   }
 
   // If --export-dynamic-symbol=foo is given and symbol foo is defined in
   // an object file in an archive file, that object file should be pulled
   // out and linked. (It doesn't have to behave like that from a technical
   // point of view, but this is needed for compatibility with GNU.)
   for (auto *Arg : Args.filtered(OPT_export_dynamic_symbol))
     Config->Undefined.push_back(Arg->getValue());
 
   for (auto *Arg : Args.filtered(OPT_version_script))
     if (Optional<std::string> Path = searchScript(Arg->getValue())) {
       if (Optional<MemoryBufferRef> Buffer = readFile(*Path))
         readVersionScript(*Buffer);
     } else {
       error(Twine("cannot find version script ") + Arg->getValue());
     }
 }
 
 // Some Config members do not directly correspond to any particular
 // command line options, but are computed based on other Config values.
 // This function initializes such members. See Config.h for the details
 // of these values.
 static void setConfigs(opt::InputArgList &Args) {
   ELFKind K = Config->EKind;
   uint16_t M = Config->EMachine;
 
   Config->CopyRelocs = (Config->Relocatable || Config->EmitRelocs);
   Config->Is64 = (K == ELF64LEKind || K == ELF64BEKind);
   Config->IsLE = (K == ELF32LEKind || K == ELF64LEKind);
   Config->Endianness = Config->IsLE ? endianness::little : endianness::big;
   Config->IsMips64EL = (K == ELF64LEKind && M == EM_MIPS);
   Config->Pic = Config->Pie || Config->Shared;
   Config->PicThunk = Args.hasArg(OPT_pic_veneer, Config->Pic);
   Config->Wordsize = Config->Is64 ? 8 : 4;
 
   // ELF defines two different ways to store relocation addends as shown below:
   //
   //  Rel:  Addends are stored to the location where relocations are applied.
   //  Rela: Addends are stored as part of relocation entry.
   //
   // In other words, Rela makes it easy to read addends at the price of an
   // extra 4 or 8 bytes per relocation entry. We don't know why ELF defined two
   // different mechanisms in the first place, but this is how the spec is
   // defined.
   //
   // You cannot choose which one, Rel or Rela, you want to use. Instead each
   // ABI defines which one you need to use. The following expression expresses
   // that.
   Config->IsRela = M == EM_AARCH64 || M == EM_AMDGPU || M == EM_HEXAGON ||
                    M == EM_PPC || M == EM_PPC64 || M == EM_RISCV ||
                    M == EM_X86_64;
 
   // If the output uses REL relocations we must store the dynamic relocation
   // addends to the output sections. We also store addends for RELA relocations
   // if --apply-dynamic-relocs is used.
   // We default to not writing the addends when using RELA relocations since
   // any standard conforming tool can find them in r_addend.
   Config->WriteAddends = Args.hasFlag(OPT_apply_dynamic_relocs,
                                       OPT_no_apply_dynamic_relocs, false) ||
                          !Config->IsRela;
 
   Config->TocOptimize =
       Args.hasFlag(OPT_toc_optimize, OPT_no_toc_optimize, M == EM_PPC64);
 }
 
 // Returns a value of "-format" option.
 static bool isFormatBinary(StringRef S) {
   if (S == "binary")
     return true;
   if (S == "elf" || S == "default")
     return false;
   error("unknown -format value: " + S +
         " (supported formats: elf, default, binary)");
   return false;
 }
 
 void LinkerDriver::createFiles(opt::InputArgList &Args) {
   // For --{push,pop}-state.
   std::vector<std::tuple<bool, bool, bool>> Stack;
 
   // Iterate over argv to process input files and positional arguments.
   for (auto *Arg : Args) {
     switch (Arg->getOption().getUnaliasedOption().getID()) {
     case OPT_library:
       addLibrary(Arg->getValue());
       break;
     case OPT_INPUT:
       addFile(Arg->getValue(), /*WithLOption=*/false);
       break;
     case OPT_defsym: {
       StringRef From;
       StringRef To;
       std::tie(From, To) = StringRef(Arg->getValue()).split('=');
       if (From.empty() || To.empty())
         error("-defsym: syntax error: " + StringRef(Arg->getValue()));
       else
         readDefsym(From, MemoryBufferRef(To, "-defsym"));
       break;
     }
     case OPT_script:
       if (Optional<std::string> Path = searchScript(Arg->getValue())) {
         if (Optional<MemoryBufferRef> MB = readFile(*Path))
           readLinkerScript(*MB);
         break;
       }
       error(Twine("cannot find linker script ") + Arg->getValue());
       break;
     case OPT_as_needed:
       Config->AsNeeded = true;
       break;
     case OPT_format:
       Config->FormatBinary = isFormatBinary(Arg->getValue());
       break;
     case OPT_no_as_needed:
       Config->AsNeeded = false;
       break;
     case OPT_Bstatic:
       Config->Static = true;
       break;
     case OPT_Bdynamic:
       Config->Static = false;
       break;
     case OPT_whole_archive:
       InWholeArchive = true;
       break;
     case OPT_no_whole_archive:
       InWholeArchive = false;
       break;
     case OPT_just_symbols:
       if (Optional<MemoryBufferRef> MB = readFile(Arg->getValue())) {
         Files.push_back(createObjectFile(*MB));
         Files.back()->JustSymbols = true;
       }
       break;
     case OPT_start_group:
       if (InputFile::IsInGroup)
         error("nested --start-group");
       InputFile::IsInGroup = true;
       break;
     case OPT_end_group:
       if (!InputFile::IsInGroup)
         error("stray --end-group");
       InputFile::IsInGroup = false;
       ++InputFile::NextGroupId;
       break;
     case OPT_start_lib:
       if (InLib)
         error("nested --start-lib");
       if (InputFile::IsInGroup)
         error("may not nest --start-lib in --start-group");
       InLib = true;
       InputFile::IsInGroup = true;
       break;
     case OPT_end_lib:
       if (!InLib)
         error("stray --end-lib");
       InLib = false;
       InputFile::IsInGroup = false;
       ++InputFile::NextGroupId;
       break;
     case OPT_push_state:
       Stack.emplace_back(Config->AsNeeded, Config->Static, InWholeArchive);
       break;
     case OPT_pop_state:
       if (Stack.empty()) {
         error("unbalanced --push-state/--pop-state");
         break;
       }
       std::tie(Config->AsNeeded, Config->Static, InWholeArchive) = Stack.back();
       Stack.pop_back();
       break;
     }
   }
 
   if (Files.empty() && errorCount() == 0)
     error("no input files");
 }
 
 // If -m <machine_type> was not given, infer it from object files.
 void LinkerDriver::inferMachineType() {
   if (Config->EKind != ELFNoneKind)
     return;
 
   for (InputFile *F : Files) {
     if (F->EKind == ELFNoneKind)
       continue;
     Config->EKind = F->EKind;
     Config->EMachine = F->EMachine;
     Config->OSABI = F->OSABI;
     Config->MipsN32Abi = Config->EMachine == EM_MIPS && isMipsN32Abi(F);
     return;
   }
   error("target emulation unknown: -m or at least one .o file required");
 }
 
 // Parse -z max-page-size=<value>. The default value is defined by
 // each target.
 static uint64_t getMaxPageSize(opt::InputArgList &Args) {
   uint64_t Val = args::getZOptionValue(Args, OPT_z, "max-page-size",
                                        Target->DefaultMaxPageSize);
   if (!isPowerOf2_64(Val))
     error("max-page-size: value isn't a power of 2");
   return Val;
 }
 
 // Parses -image-base option.
 static Optional<uint64_t> getImageBase(opt::InputArgList &Args) {
   // Because we are using "Config->MaxPageSize" here, this function has to be
   // called after the variable is initialized.
   auto *Arg = Args.getLastArg(OPT_image_base);
   if (!Arg)
     return None;
 
   StringRef S = Arg->getValue();
   uint64_t V;
   if (!to_integer(S, V)) {
     error("-image-base: number expected, but got " + S);
     return 0;
   }
   if ((V % Config->MaxPageSize) != 0)
     warn("-image-base: address isn't multiple of page size: " + S);
   return V;
 }
 
 // Parses `--exclude-libs=lib,lib,...`.
 // The library names may be delimited by commas or colons.
 static DenseSet<StringRef> getExcludeLibs(opt::InputArgList &Args) {
   DenseSet<StringRef> Ret;
   for (auto *Arg : Args.filtered(OPT_exclude_libs)) {
     StringRef S = Arg->getValue();
     for (;;) {
       size_t Pos = S.find_first_of(",:");
       if (Pos == StringRef::npos)
         break;
       Ret.insert(S.substr(0, Pos));
       S = S.substr(Pos + 1);
     }
     Ret.insert(S);
   }
   return Ret;
 }
 
 // Handles the -exclude-libs option. If a static library file is specified
 // by the -exclude-libs option, all public symbols from the archive become
 // private unless otherwise specified by version scripts or something.
 // A special library name "ALL" means all archive files.
 //
 // This is not a popular option, but some programs such as bionic libc use it.
 template <class ELFT>
 static void excludeLibs(opt::InputArgList &Args) {
   DenseSet<StringRef> Libs = getExcludeLibs(Args);
   bool All = Libs.count("ALL");
 
   auto Visit = [&](InputFile *File) {
     if (!File->ArchiveName.empty())
       if (All || Libs.count(path::filename(File->ArchiveName)))
         for (Symbol *Sym : File->getSymbols())
           if (!Sym->isLocal() && Sym->File == File)
             Sym->VersionId = VER_NDX_LOCAL;
   };
 
   for (InputFile *File : ObjectFiles)
     Visit(File);
 
   for (BitcodeFile *File : BitcodeFiles)
     Visit(File);
 }
 
 // Force Sym to be entered in the output. Used for -u or equivalent.
 template <class ELFT> static void handleUndefined(StringRef Name) {
   Symbol *Sym = Symtab->find(Name);
   if (!Sym)
     return;
 
   // Since symbol S may not be used inside the program, LTO may
   // eliminate it. Mark the symbol as "used" to prevent it.
   Sym->IsUsedInRegularObj = true;
 
   if (Sym->isLazy())
     Symtab->fetchLazy<ELFT>(Sym);
 }
 
 template <class ELFT> static void handleLibcall(StringRef Name) {
   Symbol *Sym = Symtab->find(Name);
   if (!Sym || !Sym->isLazy())
     return;
 
   MemoryBufferRef MB;
   if (auto *LO = dyn_cast<LazyObject>(Sym))
     MB = LO->File->MB;
   else
     MB = cast<LazyArchive>(Sym)->getMemberBuffer();
 
   if (isBitcode(MB))
     Symtab->fetchLazy<ELFT>(Sym);
 }
 
 // If all references to a DSO happen to be weak, the DSO is not added
 // to DT_NEEDED. If that happens, we need to eliminate shared symbols
 // created from the DSO. Otherwise, they become dangling references
 // that point to a non-existent DSO.
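  // The loop below does that by replacing each such shared symbol with an
  // undefined weak symbol of the same name, preserving its Used flag.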
 template <class ELFT> static void demoteSharedSymbols() {
   for (Symbol *Sym : Symtab->getSymbols()) {
     if (auto *S = dyn_cast<SharedSymbol>(Sym)) {
       if (!S->getFile<ELFT>().IsNeeded) {
         bool Used = S->Used;
         replaceSymbol<Undefined>(S, nullptr, S->getName(), STB_WEAK, S->StOther,
                                  S->Type);
         S->Used = Used;
       }
     }
   }
 }
 
 // The section referred to by S is considered address-significant. Set the
 // KeepUnique flag on the section if appropriate.
 static void markAddrsig(Symbol *S) {
   if (auto *D = dyn_cast_or_null<Defined>(S))
     if (D->Section)
       // We don't need to keep text sections unique under --icf=all even if they
       // are address-significant.
       if (Config->ICF == ICFLevel::Safe || !(D->Section->Flags & SHF_EXECINSTR))
         D->Section->KeepUnique = true;
 }
 
 // Record sections that define symbols mentioned in --keep-unique <symbol>
 // and symbols referred to by address-significance tables. These sections are
 // ineligible for ICF.
 template <class ELFT>
 static void findKeepUniqueSections(opt::InputArgList &Args) {
   for (auto *Arg : Args.filtered(OPT_keep_unique)) {
     StringRef Name = Arg->getValue();
     auto *D = dyn_cast_or_null<Defined>(Symtab->find(Name));
     if (!D || !D->Section) {
       warn("could not find symbol " + Name + " to keep unique");
       continue;
     }
     D->Section->KeepUnique = true;
   }
 
   // --icf=all --ignore-data-address-equality means that we can ignore
   // the dynsym and address-significance tables entirely.
   if (Config->ICF == ICFLevel::All && Config->IgnoreDataAddressEquality)
     return;
 
   // Symbols in the dynsym could be address-significant in other executables
   // or DSOs, so we conservatively mark them as address-significant.
   for (Symbol *S : Symtab->getSymbols())
     if (S->includeInDynsym())
       markAddrsig(S);
 
   // Visit the address-significance table in each object file and mark each
   // referenced symbol as address-significant.
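    // A SHT_LLVM_ADDRSIG section body is simply a sequence of ULEB128-encoded
    // symbol table indices, so we decode one index at a time below.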
   for (InputFile *F : ObjectFiles) {
     auto *Obj = cast<ObjFile<ELFT>>(F);
     ArrayRef<Symbol *> Syms = Obj->getSymbols();
     if (Obj->AddrsigSec) {
       ArrayRef<uint8_t> Contents =
           check(Obj->getObj().getSectionContents(Obj->AddrsigSec));
       const uint8_t *Cur = Contents.begin();
       while (Cur != Contents.end()) {
         unsigned Size;
         const char *Err;
         uint64_t SymIndex = decodeULEB128(Cur, &Size, Contents.end(), &Err);
         if (Err)
           fatal(toString(F) + ": could not decode addrsig section: " + Err);
         markAddrsig(Syms[SymIndex]);
         Cur += Size;
       }
     } else {
       // If an object file does not have an address-significance table,
       // conservatively mark all of its symbols as address-significant.
       for (Symbol *S : Syms)
         markAddrsig(S);
     }
   }
 }
 
 template <class ELFT> static Symbol *addUndefined(StringRef Name) {
   return Symtab->addUndefined<ELFT>(Name, STB_GLOBAL, STV_DEFAULT, 0, false,
                                     nullptr);
 }
 
  // The --wrap option is a feature to rename symbols so that you can write
  // wrappers for existing functions. If you pass `-wrap=foo`, all
  // occurrences of symbol `foo` are resolved to `__wrap_foo` (so, you are
  // expected to write a `__wrap_foo` function as a wrapper). The original
  // symbol becomes accessible as `__real_foo`, so you can call that from your
  // wrapper.
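  // For example, with `--wrap=malloc`, a call to `malloc` in the input ends up
  // calling `__wrap_malloc`, and the wrapper can reach the original allocator
  // through `__real_malloc`.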
 //
 // This data structure is instantiated for each -wrap option.
 struct WrappedSymbol {
   Symbol *Sym;
   Symbol *Real;
   Symbol *Wrap;
 };
 
 // Handles -wrap option.
 //
 // This function instantiates wrapper symbols. At this point, they seem
 // like they are not being used at all, so we explicitly set some flags so
 // that LTO won't eliminate them.
 template <class ELFT>
 static std::vector<WrappedSymbol> addWrappedSymbols(opt::InputArgList &Args) {
   std::vector<WrappedSymbol> V;
   DenseSet<StringRef> Seen;
 
   for (auto *Arg : Args.filtered(OPT_wrap)) {
     StringRef Name = Arg->getValue();
     if (!Seen.insert(Name).second)
       continue;
 
     Symbol *Sym = Symtab->find(Name);
     if (!Sym)
       continue;
 
     Symbol *Real = addUndefined<ELFT>(Saver.save("__real_" + Name));
     Symbol *Wrap = addUndefined<ELFT>(Saver.save("__wrap_" + Name));
     V.push_back({Sym, Real, Wrap});
 
     // We want to tell LTO not to inline symbols to be overwritten
     // because LTO doesn't know the final symbol contents after renaming.
     Real->CanInline = false;
     Sym->CanInline = false;
 
     // Tell LTO not to eliminate these symbols.
     Sym->IsUsedInRegularObj = true;
     Wrap->IsUsedInRegularObj = true;
   }
   return V;
 }
 
 // Do renaming for -wrap by updating pointers to symbols.
 //
  // When this function is executed, only the input files and the symbol table
  // contain pointers to symbol objects. We visit them to replace those
  // pointers so that symbols are wrapped as instructed on the command line.
 template <class ELFT> static void wrapSymbols(ArrayRef<WrappedSymbol> Wrapped) {
   DenseMap<Symbol *, Symbol *> Map;
   for (const WrappedSymbol &W : Wrapped) {
     Map[W.Sym] = W.Wrap;
     Map[W.Real] = W.Sym;
   }
 
   // Update pointers in input files.
   parallelForEach(ObjectFiles, [&](InputFile *File) {
     std::vector<Symbol *> &Syms = File->getMutableSymbols();
     for (size_t I = 0, E = Syms.size(); I != E; ++I)
       if (Symbol *S = Map.lookup(Syms[I]))
         Syms[I] = S;
   });
 
   // Update pointers in the symbol table.
   for (const WrappedSymbol &W : Wrapped)
     Symtab->wrap(W.Sym, W.Real, W.Wrap);
 }
 
 static const char *LibcallRoutineNames[] = {
 #define HANDLE_LIBCALL(code, name) name,
 #include "llvm/IR/RuntimeLibcalls.def"
 #undef HANDLE_LIBCALL
 };
 
 // Do actual linking. Note that when this function is called,
 // all linker scripts have already been parsed.
 template <class ELFT> void LinkerDriver::link(opt::InputArgList &Args) {
   Target = getTarget();
   InX<ELFT>::VerSym = nullptr;
   InX<ELFT>::VerNeed = nullptr;
 
   Config->MaxPageSize = getMaxPageSize(Args);
   Config->ImageBase = getImageBase(Args);
 
   // If a -hash-style option was not given, set to a default value,
   // which varies depending on the target.
   if (!Args.hasArg(OPT_hash_style)) {
     if (Config->EMachine == EM_MIPS)
       Config->SysvHash = true;
     else
       Config->SysvHash = Config->GnuHash = true;
   }
 
   // Default output filename is "a.out" by the Unix tradition.
   if (Config->OutputFile.empty())
     Config->OutputFile = "a.out";
 
    // Fail early if the output file or map file is not writable. If a user has
    // a long link, e.g. due to a large LTO link, they do not want to run it and
    // only then find that it failed because of a mistake in their command line.
   if (auto E = tryCreateFile(Config->OutputFile))
     error("cannot open output file " + Config->OutputFile + ": " + E.message());
   if (auto E = tryCreateFile(Config->MapFile))
     error("cannot open map file " + Config->MapFile + ": " + E.message());
   if (errorCount())
     return;
 
    // Use the default entry point name if no name was given via the command
    // line or linker scripts. For some reason, the MIPS entry point name is
   // different from others.
   Config->WarnMissingEntry =
       (!Config->Entry.empty() || (!Config->Shared && !Config->Relocatable));
   if (Config->Entry.empty() && !Config->Relocatable)
     Config->Entry = (Config->EMachine == EM_MIPS) ? "__start" : "_start";
 
   // Handle --trace-symbol.
   for (auto *Arg : Args.filtered(OPT_trace_symbol))
     Symtab->trace(Arg->getValue());
 
   // Add all files to the symbol table. This will add almost all
   // symbols that we need to the symbol table.
   for (InputFile *F : Files)
     Symtab->addFile<ELFT>(F);
 
   // Now that we have every file, we can decide if we will need a
   // dynamic symbol table.
   // We need one if we were asked to export dynamic symbols or if we are
   // producing a shared library.
   // We also need one if any shared libraries are used and for pie executables
   // (probably because the dynamic linker needs it).
   Config->HasDynSymTab =
       !SharedFiles.empty() || Config->Pic || Config->ExportDynamic;
 
   // Some symbols (such as __ehdr_start) are defined lazily only when there
   // are undefined symbols for them, so we add these to trigger that logic.
   for (StringRef Name : Script->ReferencedSymbols)
     addUndefined<ELFT>(Name);
 
   // Handle the `--undefined <sym>` options.
   for (StringRef S : Config->Undefined)
     handleUndefined<ELFT>(S);
 
   // If an entry symbol is in a static archive, pull out that file now.
   handleUndefined<ELFT>(Config->Entry);
 
   // If any of our inputs are bitcode files, the LTO code generator may create
   // references to certain library functions that might not be explicit in the
   // bitcode file's symbol table. If any of those library functions are defined
   // in a bitcode file in an archive member, we need to arrange to use LTO to
   // compile those archive members by adding them to the link beforehand.
   //
   // However, adding all libcall symbols to the link can have undesired
   // consequences. For example, the libgcc implementation of
   // __sync_val_compare_and_swap_8 on 32-bit ARM pulls in an .init_array entry
   // that aborts the program if the Linux kernel does not support 64-bit
   // atomics, which would prevent the program from running even if it does not
   // use 64-bit atomics.
   //
   // Therefore, we only add libcall symbols to the link before LTO if we have
   // to, i.e. if the symbol's definition is in bitcode. Any other required
   // libcall symbols will be added to the link after LTO when we add the LTO
   // object file to the link.
   if (!BitcodeFiles.empty())
     for (const char *S : LibcallRoutineNames)
       handleLibcall<ELFT>(S);
 
   // Return if there were name resolution errors.
   if (errorCount())
     return;
 
    // Now that we have read all script files, we want to finalize the order of
    // linker script commands, which may not yet be final because of INSERT
    // commands.
   Script->processInsertCommands();
 
   // We want to declare linker script's symbols early,
   // so that we can version them.
   // They also might be exported if referenced by DSOs.
   Script->declareSymbols();
 
   // Handle the -exclude-libs option.
   if (Args.hasArg(OPT_exclude_libs))
     excludeLibs<ELFT>(Args);
 
   // Create ElfHeader early. We need a dummy section in
   // addReservedSymbols to mark the created symbols as not absolute.
   Out::ElfHeader = make<OutputSection>("", 0, SHF_ALLOC);
   Out::ElfHeader->Size = sizeof(typename ELFT::Ehdr);
 
   // Create wrapped symbols for -wrap option.
   std::vector<WrappedSymbol> Wrapped = addWrappedSymbols<ELFT>(Args);
 
   // We need to create some reserved symbols such as _end. Create them.
   if (!Config->Relocatable)
     addReservedSymbols();
 
   // Apply version scripts.
   //
   // For a relocatable output, version scripts don't make sense, and
    // parsing a symbol version string (e.g. dropping "@ver1" from a symbol
    // name "foo@ver1") would do more harm than good, so we don't call this
    // if -r is given.
   if (!Config->Relocatable)
     Symtab->scanVersionScript();
 
   // Do link-time optimization if given files are LLVM bitcode files.
   // This compiles bitcode files into real object files.
   //
   // With this the symbol table should be complete. After this, no new names
   // except a few linker-synthesized ones will be added to the symbol table.
   Symtab->addCombinedLTOObject<ELFT>();
   if (errorCount())
     return;
 
   // If -thinlto-index-only is given, we should create only "index
   // files" and not object files. Index file creation is already done
   // in addCombinedLTOObject, so we are done if that's the case.
   if (Config->ThinLTOIndexOnly)
     return;
 
   // Likewise, --plugin-opt=emit-llvm is an option to make LTO create
   // an output file in bitcode and exit, so that you can just get a
   // combined bitcode file.
   if (Config->EmitLLVM)
     return;
 
   // Apply symbol renames for -wrap.
   if (!Wrapped.empty())
     wrapSymbols<ELFT>(Wrapped);
 
    // Now we have a complete list of input files, and no new files will be
    // added beyond this point. Aggregate all input sections into one place.
   for (InputFile *F : ObjectFiles)
     for (InputSectionBase *S : F->getSections())
       if (S && S != &InputSection::Discarded)
         InputSections.push_back(S);
   for (BinaryFile *F : BinaryFiles)
     for (InputSectionBase *S : F->getSections())
       InputSections.push_back(cast<InputSection>(S));
 
   // We do not want to emit debug sections if --strip-all
   // or -strip-debug are given.
   if (Config->Strip != StripPolicy::None)
     llvm::erase_if(InputSections, [](InputSectionBase *S) {
       return S->Name.startswith(".debug") || S->Name.startswith(".zdebug");
     });
 
   Config->EFlags = Target->calcEFlags();
 
   if (Config->EMachine == EM_ARM) {
     // FIXME: These warnings can be removed when lld only uses these features
     // when the input objects have been compiled with an architecture that
     // supports them.
      if (!Config->ARMHasBlx)
       warn("lld uses blx instruction, no object with architecture supporting "
            "feature detected");
   }
 
   // This adds a .comment section containing a version string. We have to add it
   // before mergeSections because the .comment section is a mergeable section.
   if (!Config->Relocatable)
     InputSections.push_back(createCommentSection());
 
   // Do size optimizations: garbage collection, merging of SHF_MERGE sections
   // and identical code folding.
   splitSections<ELFT>();
   markLive<ELFT>();
   demoteSharedSymbols<ELFT>();
   mergeSections();
   if (Config->ICF != ICFLevel::None) {
     findKeepUniqueSections<ELFT>(Args);
     doIcf<ELFT>();
   }
 
    // Read the call graph now that we know what was GCed or ICFed.
   if (Config->CallGraphProfileSort) {
     if (auto *Arg = Args.getLastArg(OPT_call_graph_ordering_file))
       if (Optional<MemoryBufferRef> Buffer = readFile(Arg->getValue()))
         readCallGraph(*Buffer);
     readCallGraphsFromObjectFiles<ELFT>();
   }
 
   // Write the result to the file.
   writeResult<ELFT>();
 }
Index: head/contrib/llvm/tools/lld/ELF/InputSection.cpp
===================================================================
--- head/contrib/llvm/tools/lld/ELF/InputSection.cpp	(revision 350466)
+++ head/contrib/llvm/tools/lld/ELF/InputSection.cpp	(revision 350467)
@@ -1,1281 +1,1275 @@
 //===- InputSection.cpp ---------------------------------------------------===//
 //
 //                             The LLVM Linker
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 
 #include "InputSection.h"
 #include "Config.h"
 #include "EhFrame.h"
 #include "InputFiles.h"
 #include "LinkerScript.h"
 #include "OutputSections.h"
 #include "Relocations.h"
 #include "SymbolTable.h"
 #include "Symbols.h"
 #include "SyntheticSections.h"
 #include "Target.h"
 #include "Thunks.h"
 #include "lld/Common/ErrorHandler.h"
 #include "lld/Common/Memory.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Compression.h"
 #include "llvm/Support/Endian.h"
 #include "llvm/Support/Threading.h"
 #include "llvm/Support/xxhash.h"
 #include <algorithm>
 #include <mutex>
 #include <set>
 #include <vector>
 
 using namespace llvm;
 using namespace llvm::ELF;
 using namespace llvm::object;
 using namespace llvm::support;
 using namespace llvm::support::endian;
 using namespace llvm::sys;
 
 using namespace lld;
 using namespace lld::elf;
 
 std::vector<InputSectionBase *> elf::InputSections;
 
 // Returns a string to construct an error message.
 std::string lld::toString(const InputSectionBase *Sec) {
   return (toString(Sec->File) + ":(" + Sec->Name + ")").str();
 }
 
 template <class ELFT>
 static ArrayRef<uint8_t> getSectionContents(ObjFile<ELFT> &File,
                                             const typename ELFT::Shdr &Hdr) {
   if (Hdr.sh_type == SHT_NOBITS)
     return makeArrayRef<uint8_t>(nullptr, Hdr.sh_size);
   return check(File.getObj().getSectionContents(&Hdr));
 }
 
 InputSectionBase::InputSectionBase(InputFile *File, uint64_t Flags,
                                    uint32_t Type, uint64_t Entsize,
                                    uint32_t Link, uint32_t Info,
                                    uint32_t Alignment, ArrayRef<uint8_t> Data,
                                    StringRef Name, Kind SectionKind)
     : SectionBase(SectionKind, Name, Flags, Entsize, Alignment, Type, Info,
                   Link),
       File(File), RawData(Data) {
   // In order to reduce memory allocation, we assume that mergeable
   // sections are smaller than 4 GiB, which is not an unreasonable
   // assumption as of 2017.
   if (SectionKind == SectionBase::Merge && RawData.size() > UINT32_MAX)
     error(toString(this) + ": section too large");
 
   NumRelocations = 0;
   AreRelocsRela = false;
 
   // The ELF spec states that a value of 0 means the section has
    // no alignment constraints.
   uint32_t V = std::max<uint64_t>(Alignment, 1);
   if (!isPowerOf2_64(V))
     fatal(toString(File) + ": section sh_addralign is not a power of 2");
   this->Alignment = V;
 
    // In ELF, each section can be compressed by zlib, and if compressed,
    // the section name may be mangled by inserting "z" after the leading dot
    // (e.g. ".zdebug_info"). If that's the case, demangle the section name so
    // that we can handle the section as if it weren't compressed.
   if ((Flags & SHF_COMPRESSED) || Name.startswith(".zdebug")) {
     if (!zlib::isAvailable())
       error(toString(File) + ": contains a compressed section, " +
             "but zlib is not available");
     parseCompressedHeader();
   }
 }
 
 // Drop SHF_GROUP bit unless we are producing a re-linkable object file.
 // SHF_GROUP is a marker that a section belongs to some comdat group.
 // That flag doesn't make sense in an executable.
 static uint64_t getFlags(uint64_t Flags) {
   Flags &= ~(uint64_t)SHF_INFO_LINK;
   if (!Config->Relocatable)
     Flags &= ~(uint64_t)SHF_GROUP;
   return Flags;
 }
 
 // GNU assembler 2.24 and LLVM 4.0.0's MC (the newest release as of
 // March 2017) fail to infer section types for sections starting with
 // ".init_array." or ".fini_array.". They set SHT_PROGBITS instead of
  // SHT_INIT_ARRAY. As a result, the following assembler directive
 // creates ".init_array.100" with SHT_PROGBITS, for example.
 //
 //   .section .init_array.100, "aw"
 //
 // This function forces SHT_{INIT,FINI}_ARRAY so that we can handle
 // incorrect inputs as if they were correct from the beginning.
 static uint64_t getType(uint64_t Type, StringRef Name) {
   if (Type == SHT_PROGBITS && Name.startswith(".init_array."))
     return SHT_INIT_ARRAY;
   if (Type == SHT_PROGBITS && Name.startswith(".fini_array."))
     return SHT_FINI_ARRAY;
   return Type;
 }
 
 template <class ELFT>
 InputSectionBase::InputSectionBase(ObjFile<ELFT> &File,
                                    const typename ELFT::Shdr &Hdr,
                                    StringRef Name, Kind SectionKind)
     : InputSectionBase(&File, getFlags(Hdr.sh_flags),
                        getType(Hdr.sh_type, Name), Hdr.sh_entsize, Hdr.sh_link,
                        Hdr.sh_info, Hdr.sh_addralign,
                        getSectionContents(File, Hdr), Name, SectionKind) {
   // We reject object files having insanely large alignments even though
   // they are allowed by the spec. I think 4GB is a reasonable limitation.
   // We might want to relax this in the future.
   if (Hdr.sh_addralign > UINT32_MAX)
     fatal(toString(&File) + ": section sh_addralign is too large");
 }
 
 size_t InputSectionBase::getSize() const {
   if (auto *S = dyn_cast<SyntheticSection>(this))
     return S->getSize();
   if (UncompressedSize >= 0)
     return UncompressedSize;
   return RawData.size();
 }
 
 void InputSectionBase::uncompress() const {
   size_t Size = UncompressedSize;
   UncompressedBuf.reset(new char[Size]);
 
   if (Error E =
           zlib::uncompress(toStringRef(RawData), UncompressedBuf.get(), Size))
     fatal(toString(this) +
           ": uncompress failed: " + llvm::toString(std::move(E)));
   RawData = makeArrayRef((uint8_t *)UncompressedBuf.get(), Size);
 }
 
 uint64_t InputSectionBase::getOffsetInFile() const {
   const uint8_t *FileStart = (const uint8_t *)File->MB.getBufferStart();
   const uint8_t *SecStart = data().begin();
   return SecStart - FileStart;
 }
 
 uint64_t SectionBase::getOffset(uint64_t Offset) const {
   switch (kind()) {
   case Output: {
     auto *OS = cast<OutputSection>(this);
     // For output sections we treat offset -1 as the end of the section.
     return Offset == uint64_t(-1) ? OS->Size : Offset;
   }
   case Regular:
   case Synthetic:
     return cast<InputSection>(this)->getOffset(Offset);
   case EHFrame:
     // The file crtbeginT.o has relocations pointing to the start of an empty
     // .eh_frame that is known to be the first in the link. It does that to
     // identify the start of the output .eh_frame.
     return Offset;
   case Merge:
     const MergeInputSection *MS = cast<MergeInputSection>(this);
     if (InputSection *IS = MS->getParent())
       return IS->getOffset(MS->getParentOffset(Offset));
     return MS->getParentOffset(Offset);
   }
   llvm_unreachable("invalid section kind");
 }
 
 uint64_t SectionBase::getVA(uint64_t Offset) const {
   const OutputSection *Out = getOutputSection();
   return (Out ? Out->Addr : 0) + getOffset(Offset);
 }
 
 OutputSection *SectionBase::getOutputSection() {
   InputSection *Sec;
   if (auto *IS = dyn_cast<InputSection>(this))
     Sec = IS;
   else if (auto *MS = dyn_cast<MergeInputSection>(this))
     Sec = MS->getParent();
   else if (auto *EH = dyn_cast<EhInputSection>(this))
     Sec = EH->getParent();
   else
     return cast<OutputSection>(this);
   return Sec ? Sec->getParent() : nullptr;
 }
 
  // When a section is compressed, `RawData` consists of a header followed
  // by zlib-compressed data. This function parses the header to initialize
  // the `UncompressedSize` member and removes the header from `RawData`.
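  // An old-style ".zdebug_*" section begins with the 4-byte magic "ZLIB"
  // followed by the uncompressed size as a 64-bit big-endian integer; the
  // new-style SHF_COMPRESSED form carries an Elf32_Chdr/Elf64_Chdr instead.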
 void InputSectionBase::parseCompressedHeader() {
   typedef typename ELF64LE::Chdr Chdr64;
   typedef typename ELF32LE::Chdr Chdr32;
 
   // Old-style header
   if (Name.startswith(".zdebug")) {
     if (!toStringRef(RawData).startswith("ZLIB")) {
       error(toString(this) + ": corrupted compressed section header");
       return;
     }
     RawData = RawData.slice(4);
 
     if (RawData.size() < 8) {
       error(toString(this) + ": corrupted compressed section header");
       return;
     }
 
     UncompressedSize = read64be(RawData.data());
     RawData = RawData.slice(8);
 
     // Restore the original section name.
     // (e.g. ".zdebug_info" -> ".debug_info")
     Name = Saver.save("." + Name.substr(2));
     return;
   }
 
   assert(Flags & SHF_COMPRESSED);
   Flags &= ~(uint64_t)SHF_COMPRESSED;
 
   // New-style 64-bit header
   if (Config->Is64) {
     if (RawData.size() < sizeof(Chdr64)) {
       error(toString(this) + ": corrupted compressed section");
       return;
     }
 
     auto *Hdr = reinterpret_cast<const Chdr64 *>(RawData.data());
     if (Hdr->ch_type != ELFCOMPRESS_ZLIB) {
       error(toString(this) + ": unsupported compression type");
       return;
     }
 
     UncompressedSize = Hdr->ch_size;
     Alignment = std::max<uint64_t>(Hdr->ch_addralign, 1);
     RawData = RawData.slice(sizeof(*Hdr));
     return;
   }
 
   // New-style 32-bit header
   if (RawData.size() < sizeof(Chdr32)) {
     error(toString(this) + ": corrupted compressed section");
     return;
   }
 
   auto *Hdr = reinterpret_cast<const Chdr32 *>(RawData.data());
   if (Hdr->ch_type != ELFCOMPRESS_ZLIB) {
     error(toString(this) + ": unsupported compression type");
     return;
   }
 
   UncompressedSize = Hdr->ch_size;
   Alignment = std::max<uint64_t>(Hdr->ch_addralign, 1);
   RawData = RawData.slice(sizeof(*Hdr));
 }
 
 InputSection *InputSectionBase::getLinkOrderDep() const {
   assert(Link);
   assert(Flags & SHF_LINK_ORDER);
   return cast<InputSection>(File->getSections()[Link]);
 }
 
 // Find a function symbol that encloses a given location.
 template <class ELFT>
 Defined *InputSectionBase::getEnclosingFunction(uint64_t Offset) {
   for (Symbol *B : File->getSymbols())
     if (Defined *D = dyn_cast<Defined>(B))
       if (D->Section == this && D->Type == STT_FUNC && D->Value <= Offset &&
           Offset < D->Value + D->Size)
         return D;
   return nullptr;
 }
 
 // Returns a source location string. Used to construct an error message.
 template <class ELFT>
 std::string InputSectionBase::getLocation(uint64_t Offset) {
   std::string SecAndOffset = (Name + "+0x" + utohexstr(Offset)).str();
 
    // We don't have a file for synthetic sections.
    if (getFile<ELFT>() == nullptr)
      return (Config->OutputFile + ":(" + SecAndOffset + ")").str();
 
   // First check if we can get desired values from debugging information.
   if (Optional<DILineInfo> Info = getFile<ELFT>()->getDILineInfo(this, Offset))
     return Info->FileName + ":" + std::to_string(Info->Line) + ":(" +
            SecAndOffset + ")";
 
   // File->SourceFile contains STT_FILE symbol that contains a
   // source file name. If it's missing, we use an object file name.
   std::string SrcFile = getFile<ELFT>()->SourceFile;
   if (SrcFile.empty())
     SrcFile = toString(File);
 
   if (Defined *D = getEnclosingFunction<ELFT>(Offset))
     return SrcFile + ":(function " + toString(*D) + ": " + SecAndOffset + ")";
 
   // If there's no symbol, print out the offset in the section.
   return (SrcFile + ":(" + SecAndOffset + ")");
 }
 
 // This function is intended to be used for constructing an error message.
 // The returned message looks like this:
 //
 //   foo.c:42 (/home/alice/possibly/very/long/path/foo.c:42)
 //
 //  Returns an empty string if there's no way to get line info.
 std::string InputSectionBase::getSrcMsg(const Symbol &Sym, uint64_t Offset) {
   return File->getSrcMsg(Sym, *this, Offset);
 }
 
 // Returns a filename string along with an optional section name. This
 // function is intended to be used for constructing an error
 // message. The returned message looks like this:
 //
 //   path/to/foo.o:(function bar)
 //
 // or
 //
 //   path/to/foo.o:(function bar) in archive path/to/bar.a
 std::string InputSectionBase::getObjMsg(uint64_t Off) {
   std::string Filename = File->getName();
 
   std::string Archive;
   if (!File->ArchiveName.empty())
     Archive = " in archive " + File->ArchiveName;
 
   // Find a symbol that encloses a given location.
   for (Symbol *B : File->getSymbols())
     if (auto *D = dyn_cast<Defined>(B))
       if (D->Section == this && D->Value <= Off && Off < D->Value + D->Size)
         return Filename + ":(" + toString(*D) + ")" + Archive;
 
   // If there's no symbol, print out the offset in the section.
   return (Filename + ":(" + Name + "+0x" + utohexstr(Off) + ")" + Archive)
       .str();
 }
 
 InputSection InputSection::Discarded(nullptr, 0, 0, 0, ArrayRef<uint8_t>(), "");
 
 InputSection::InputSection(InputFile *F, uint64_t Flags, uint32_t Type,
                            uint32_t Alignment, ArrayRef<uint8_t> Data,
                            StringRef Name, Kind K)
     : InputSectionBase(F, Flags, Type,
                        /*Entsize*/ 0, /*Link*/ 0, /*Info*/ 0, Alignment, Data,
                        Name, K) {}
 
 template <class ELFT>
 InputSection::InputSection(ObjFile<ELFT> &F, const typename ELFT::Shdr &Header,
                            StringRef Name)
     : InputSectionBase(F, Header, Name, InputSectionBase::Regular) {}
 
 bool InputSection::classof(const SectionBase *S) {
   return S->kind() == SectionBase::Regular ||
          S->kind() == SectionBase::Synthetic;
 }
 
 OutputSection *InputSection::getParent() const {
   return cast_or_null<OutputSection>(Parent);
 }
 
 // Copy SHT_GROUP section contents. Used only for the -r option.
 template <class ELFT> void InputSection::copyShtGroup(uint8_t *Buf) {
   // ELFT::Word is the 32-bit integral type in the target endianness.
   typedef typename ELFT::Word u32;
   ArrayRef<u32> From = getDataAs<u32>();
   auto *To = reinterpret_cast<u32 *>(Buf);
 
   // The first entry is not a section number but a flag.
   *To++ = From[0];
 
    // Adjust section numbers because section numbers in an input object
    // file are different from those in the output.
   ArrayRef<InputSectionBase *> Sections = File->getSections();
   for (uint32_t Idx : From.slice(1))
     *To++ = Sections[Idx]->getOutputSection()->SectionIndex;
 }
 
 InputSectionBase *InputSection::getRelocatedSection() const {
   if (!File || (Type != SHT_RELA && Type != SHT_REL))
     return nullptr;
   ArrayRef<InputSectionBase *> Sections = File->getSections();
   return Sections[Info];
 }
 
 // This is used for -r and --emit-relocs. We can't use memcpy to copy
 // relocations because we need to update symbol table offset and section index
 // for each relocation. So we copy relocations one by one.
 template <class ELFT, class RelTy>
 void InputSection::copyRelocations(uint8_t *Buf, ArrayRef<RelTy> Rels) {
   InputSectionBase *Sec = getRelocatedSection();
 
   for (const RelTy &Rel : Rels) {
     RelType Type = Rel.getType(Config->IsMips64EL);
     Symbol &Sym = getFile<ELFT>()->getRelocTargetSym(Rel);
 
     auto *P = reinterpret_cast<typename ELFT::Rela *>(Buf);
     Buf += sizeof(RelTy);
 
     if (RelTy::IsRela)
       P->r_addend = getAddend<ELFT>(Rel);
 
     // Output section VA is zero for -r, so r_offset is an offset within the
      // section, but for --emit-relocs it is a virtual address.
     P->r_offset = Sec->getVA(Rel.r_offset);
     P->setSymbolAndType(In.SymTab->getSymbolIndex(&Sym), Type,
                         Config->IsMips64EL);
 
     if (Sym.Type == STT_SECTION) {
       // We combine multiple section symbols into only one per
       // section. This means we have to update the addend. That is
       // trivial for Elf_Rela, but for Elf_Rel we have to write to the
       // section data. We do that by adding to the Relocation vector.
 
       // .eh_frame is horribly special and can reference discarded sections. To
       // avoid having to parse and recreate .eh_frame, we just replace any
       // relocation in it pointing to discarded sections with R_*_NONE, which
       // hopefully creates a frame that is ignored at runtime.
       auto *D = dyn_cast<Defined>(&Sym);
       if (!D) {
         error("STT_SECTION symbol should be defined");
         continue;
       }
       SectionBase *Section = D->Section->Repl;
       if (!Section->Live) {
         P->setSymbolAndType(0, 0, false);
         continue;
       }
 
       int64_t Addend = getAddend<ELFT>(Rel);
       const uint8_t *BufLoc = Sec->data().begin() + Rel.r_offset;
       if (!RelTy::IsRela)
         Addend = Target->getImplicitAddend(BufLoc, Type);
 
       if (Config->EMachine == EM_MIPS && Config->Relocatable &&
           Target->getRelExpr(Type, Sym, BufLoc) == R_MIPS_GOTREL) {
          // Some MIPS relocations depend on the "gp" value. By default,
          // this value has a 0x7ff0 offset from the .got section. But
          // relocatable files produced by a compiler or a linker
          // might redefine this default value and we must use it
          // in the calculation of the relocation result. When we
          // generate an EXE or DSO it's trivial. Generating a relocatable
          // output is a more difficult case because the linker does
          // not calculate relocations in this mode and loses
          // individual "gp" values used by each input object file.
          // As a workaround we add the "gp" value to the relocation
          // addend and save it back to the file.
         Addend += Sec->getFile<ELFT>()->MipsGp0;
       }
 
       if (RelTy::IsRela)
         P->r_addend = Sym.getVA(Addend) - Section->getOutputSection()->Addr;
       else if (Config->Relocatable)
         Sec->Relocations.push_back({R_ABS, Type, Rel.r_offset, Addend, &Sym});
     }
   }
 }
 
 // The ARM and AArch64 ABI handle pc-relative relocations to undefined weak
 // references specially. The general rule is that the value of the symbol in
 // this context is the address of the place P. A further special case is that
 // branch relocations to an undefined weak reference resolve to the next
 // instruction.
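  // For example, an unresolved branch to an undefined weak symbol becomes a
  // branch to the following instruction: P + 4 in ARM state and P + 2 for a
  // 16-bit Thumb branch such as R_ARM_THM_JUMP11.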
 static uint32_t getARMUndefinedRelativeWeakVA(RelType Type, uint32_t A,
                                               uint32_t P) {
   switch (Type) {
   // Unresolved branch relocations to weak references resolve to next
   // instruction, this will be either 2 or 4 bytes on from P.
   case R_ARM_THM_JUMP11:
     return P + 2 + A;
   case R_ARM_CALL:
   case R_ARM_JUMP24:
   case R_ARM_PC24:
   case R_ARM_PLT32:
   case R_ARM_PREL31:
   case R_ARM_THM_JUMP19:
   case R_ARM_THM_JUMP24:
     return P + 4 + A;
   case R_ARM_THM_CALL:
     // We don't want an interworking BLX to ARM
     return P + 5 + A;
   // Unresolved non branch pc-relative relocations
   // R_ARM_TARGET2 which can be resolved relatively is not present as it never
   // targets a weak-reference.
   case R_ARM_MOVW_PREL_NC:
   case R_ARM_MOVT_PREL:
   case R_ARM_REL32:
   case R_ARM_THM_MOVW_PREL_NC:
   case R_ARM_THM_MOVT_PREL:
     return P + A;
   }
   llvm_unreachable("ARM pc-relative relocation expected\n");
 }
 
 // The comment above getARMUndefinedRelativeWeakVA applies to this function.
 static uint64_t getAArch64UndefinedRelativeWeakVA(uint64_t Type, uint64_t A,
                                                   uint64_t P) {
   switch (Type) {
   // Unresolved branch relocations to weak references resolve to next
   // instruction, this is 4 bytes on from P.
   case R_AARCH64_CALL26:
   case R_AARCH64_CONDBR19:
   case R_AARCH64_JUMP26:
   case R_AARCH64_TSTBR14:
     return P + 4 + A;
   // Unresolved non branch pc-relative relocations
   case R_AARCH64_PREL16:
   case R_AARCH64_PREL32:
   case R_AARCH64_PREL64:
   case R_AARCH64_ADR_PREL_LO21:
   case R_AARCH64_LD_PREL_LO19:
     return P + A;
   }
   llvm_unreachable("AArch64 pc-relative relocation expected\n");
 }
 
  // ARM SBREL relocations are of the form S + A - B where B is the static base.
  // The ARM ABI defines the base to be the "addressing origin of the output
  // segment defining the symbol S". We define the "addressing origin"/static
  // base to be the base of the PT_LOAD segment containing the Sym.
  // The procedure call standard only defines a Read Write Position Independent
  // (RWPI) variant, so in practice we should expect the static base to be the
  // base of the RW segment.
 static uint64_t getARMStaticBase(const Symbol &Sym) {
   OutputSection *OS = Sym.getOutputSection();
   if (!OS || !OS->PtLoad || !OS->PtLoad->FirstSec)
     fatal("SBREL relocation to " + Sym.getName() + " without static base");
   return OS->PtLoad->FirstSec->Addr;
 }
 
  // For R_RISCV_PC_INDIRECT (R_RISCV_PCREL_LO12_{I,S}), the symbol actually
  // points to the corresponding R_RISCV_PCREL_HI20 relocation, and the target
  // VA is calculated using PCREL_HI20's symbol.
 //
 // This function returns the R_RISCV_PCREL_HI20 relocation from
 // R_RISCV_PCREL_LO12's symbol and addend.
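  // For example, in an `auipc`/`addi` pair the PCREL_LO12 relocation on the
  // `addi` points at the label of the `auipc`, and the real target is taken
  // from the PCREL_HI20 relocation found at that label's offset.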
 static Relocation *getRISCVPCRelHi20(const Symbol *Sym, uint64_t Addend) {
   const Defined *D = cast<Defined>(Sym);
   InputSection *IS = cast<InputSection>(D->Section);
 
   if (Addend != 0)
     warn("Non-zero addend in R_RISCV_PCREL_LO12 relocation to " +
          IS->getObjMsg(D->Value) + " is ignored");
 
   // Relocations are sorted by offset, so we can use std::equal_range to do
   // binary search.
   auto Range = std::equal_range(IS->Relocations.begin(), IS->Relocations.end(),
                                 D->Value, RelocationOffsetComparator{});
   for (auto It = std::get<0>(Range); It != std::get<1>(Range); ++It)
     if (isRelExprOneOf<R_PC>(It->Expr))
       return &*It;
 
   error("R_RISCV_PCREL_LO12 relocation points to " + IS->getObjMsg(D->Value) +
         " without an associated R_RISCV_PCREL_HI20 relocation");
   return nullptr;
 }
 
 // A TLS symbol's virtual address is relative to the TLS segment. Add a
 // target-specific adjustment to produce a thread-pointer-relative offset.
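  // For example, on AArch64 (variant 1) with an 8-byte word and a TLS segment
  // aligned to 16 bytes, the offset is alignTo(16, 16) == 16 bytes past the
  // thread pointer; on x86-64 (variant 2) it is simply -p_memsz.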
 static int64_t getTlsTpOffset() {
   switch (Config->EMachine) {
   case EM_ARM:
   case EM_AARCH64:
     // Variant 1. The thread pointer points to a TCB with a fixed 2-word size,
     // followed by a variable amount of alignment padding, followed by the TLS
     // segment.
     return alignTo(Config->Wordsize * 2, Out::TlsPhdr->p_align);
   case EM_386:
   case EM_X86_64:
     // Variant 2. The TLS segment is located just before the thread pointer.
     return -Out::TlsPhdr->p_memsz;
   case EM_PPC64:
     // The thread pointer points to a fixed offset from the start of the
     // executable's TLS segment. An offset of 0x7000 allows a signed 16-bit
     // offset to reach 0x1000 of TCB/thread-library data and 0xf000 of the
     // program's TLS segment.
     return -0x7000;
   default:
     llvm_unreachable("unhandled Config->EMachine");
   }
 }
 
 static uint64_t getRelocTargetVA(const InputFile *File, RelType Type, int64_t A,
                                  uint64_t P, const Symbol &Sym, RelExpr Expr) {
   switch (Expr) {
   case R_INVALID:
     return 0;
   case R_ABS:
   case R_RELAX_TLS_LD_TO_LE_ABS:
   case R_RELAX_GOT_PC_NOPIC:
     return Sym.getVA(A);
   case R_ADDEND:
     return A;
   case R_ARM_SBREL:
     return Sym.getVA(A) - getARMStaticBase(Sym);
   case R_GOT:
-  case R_GOT_PLT:
   case R_RELAX_TLS_GD_TO_IE_ABS:
     return Sym.getGotVA() + A;
   case R_GOTONLY_PC:
     return In.Got->getVA() + A - P;
   case R_GOTONLY_PC_FROM_END:
     return In.Got->getVA() + A - P + In.Got->getSize();
   case R_GOTREL:
     return Sym.getVA(A) - In.Got->getVA();
   case R_GOTREL_FROM_END:
     return Sym.getVA(A) - In.Got->getVA() - In.Got->getSize();
   case R_GOT_FROM_END:
   case R_RELAX_TLS_GD_TO_IE_END:
     return Sym.getGotOffset() + A - In.Got->getSize();
   case R_TLSLD_GOT_OFF:
   case R_GOT_OFF:
   case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
     return Sym.getGotOffset() + A;
   case R_AARCH64_GOT_PAGE_PC:
-  case R_AARCH64_GOT_PAGE_PC_PLT:
   case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
     return getAArch64Page(Sym.getGotVA() + A) - getAArch64Page(P);
   case R_GOT_PC:
   case R_RELAX_TLS_GD_TO_IE:
     return Sym.getGotVA() + A - P;
   case R_HEXAGON_GOT:
     return Sym.getGotVA() - In.GotPlt->getVA();
   case R_MIPS_GOTREL:
     return Sym.getVA(A) - In.MipsGot->getGp(File);
   case R_MIPS_GOT_GP:
     return In.MipsGot->getGp(File) + A;
   case R_MIPS_GOT_GP_PC: {
      // An R_MIPS_LO16 expression has the R_MIPS_GOT_GP_PC type iff the target
      // is the _gp_disp symbol. In that case we should use the formula
      // "AHL + GP - P + 4" for the calculation. For details see p. 4-19 at
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
      // microMIPS variants of these relocations use slightly different
      // expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi()
      // to correctly handle the least-significant bit of the microMIPS symbol.
     uint64_t V = In.MipsGot->getGp(File) + A - P;
     if (Type == R_MIPS_LO16 || Type == R_MICROMIPS_LO16)
       V += 4;
     if (Type == R_MICROMIPS_LO16 || Type == R_MICROMIPS_HI16)
       V -= 1;
     return V;
   }
   case R_MIPS_GOT_LOCAL_PAGE:
      // If a relocation against a MIPS local symbol requires a GOT entry, this
      // entry should be initialized by the 'page address', which is the high
      // 16 bits of the sum of the symbol's value and the addend.
     return In.MipsGot->getVA() + In.MipsGot->getPageEntryOffset(File, Sym, A) -
            In.MipsGot->getGp(File);
   case R_MIPS_GOT_OFF:
   case R_MIPS_GOT_OFF32:
      // On MIPS, if a GOT relocation has a non-zero addend, the addend
      // should be applied to the GOT entry contents, not to the GOT entry
      // offset. That is why we use a separate expression type.
     return In.MipsGot->getVA() + In.MipsGot->getSymEntryOffset(File, Sym, A) -
            In.MipsGot->getGp(File);
   case R_MIPS_TLSGD:
     return In.MipsGot->getVA() + In.MipsGot->getGlobalDynOffset(File, Sym) -
            In.MipsGot->getGp(File);
   case R_MIPS_TLSLD:
     return In.MipsGot->getVA() + In.MipsGot->getTlsIndexOffset(File) -
            In.MipsGot->getGp(File);
   case R_AARCH64_PAGE_PC: {
     uint64_t Val = Sym.isUndefWeak() ? P + A : Sym.getVA(A);
-    return getAArch64Page(Val) - getAArch64Page(P);
-  }
-  case R_AARCH64_PLT_PAGE_PC: {
-    uint64_t Val = Sym.isUndefWeak() ? P + A : Sym.getPltVA() + A;
     return getAArch64Page(Val) - getAArch64Page(P);
   }
   case R_RISCV_PC_INDIRECT: {
     if (const Relocation *HiRel = getRISCVPCRelHi20(&Sym, A))
       return getRelocTargetVA(File, HiRel->Type, HiRel->Addend, Sym.getVA(),
                               *HiRel->Sym, HiRel->Expr);
     return 0;
   }
   case R_PC: {
     uint64_t Dest;
     if (Sym.isUndefWeak()) {
        // On ARM and AArch64 a branch to an undefined weak resolves to the
        // next instruction, and other PC-relative relocations resolve to the
        // place itself.
       if (Config->EMachine == EM_ARM)
         Dest = getARMUndefinedRelativeWeakVA(Type, A, P);
       else if (Config->EMachine == EM_AARCH64)
         Dest = getAArch64UndefinedRelativeWeakVA(Type, A, P);
       else
         Dest = Sym.getVA(A);
     } else {
       Dest = Sym.getVA(A);
     }
     return Dest - P;
   }
   case R_PLT:
     return Sym.getPltVA() + A;
   case R_PLT_PC:
   case R_PPC_CALL_PLT:
     return Sym.getPltVA() + A - P;
   case R_PPC_CALL: {
     uint64_t SymVA = Sym.getVA(A);
     // If we have an undefined weak symbol, we might get here with a symbol
     // address of zero. That could overflow, but the code must be unreachable,
     // so don't bother doing anything at all.
     if (!SymVA)
       return 0;
 
     // PPC64 V2 ABI describes two entry points to a function. The global entry
     // point is used for calls where the caller and callee (may) have different
     // TOC base pointers and r2 needs to be modified to hold the TOC base for
     // the callee. For local calls the caller and callee share the same
     // TOC base and so the TOC pointer initialization code should be skipped by
     // branching to the local entry point.
     return SymVA - P + getPPC64GlobalEntryToLocalEntryOffset(Sym.StOther);
   }
   case R_PPC_TOC:
     return getPPC64TocBase() + A;
   case R_RELAX_GOT_PC:
     return Sym.getVA(A) - P;
   case R_RELAX_TLS_GD_TO_LE:
   case R_RELAX_TLS_IE_TO_LE:
   case R_RELAX_TLS_LD_TO_LE:
   case R_TLS:
     // A weak undefined TLS symbol resolves to the base of the TLS
     // block, i.e. gets a value of zero. If we pass --gc-sections to
     // lld and .tbss is not referenced, it gets reclaimed and we don't
     // create a TLS program header. Therefore, we resolve this
     // statically to zero.
     if (Sym.isTls() && Sym.isUndefWeak())
       return 0;
     return Sym.getVA(A) + getTlsTpOffset();
   case R_RELAX_TLS_GD_TO_LE_NEG:
   case R_NEG_TLS:
     return Out::TlsPhdr->p_memsz - Sym.getVA(A);
   case R_SIZE:
     return Sym.getSize() + A;
   case R_TLSDESC:
     return In.Got->getGlobalDynAddr(Sym) + A;
   case R_AARCH64_TLSDESC_PAGE:
     return getAArch64Page(In.Got->getGlobalDynAddr(Sym) + A) -
            getAArch64Page(P);
   case R_TLSGD_GOT:
     return In.Got->getGlobalDynOffset(Sym) + A;
   case R_TLSGD_GOT_FROM_END:
     return In.Got->getGlobalDynOffset(Sym) + A - In.Got->getSize();
   case R_TLSGD_PC:
     return In.Got->getGlobalDynAddr(Sym) + A - P;
   case R_TLSLD_GOT_FROM_END:
     return In.Got->getTlsIndexOff() + A - In.Got->getSize();
   case R_TLSLD_GOT:
     return In.Got->getTlsIndexOff() + A;
   case R_TLSLD_PC:
     return In.Got->getTlsIndexVA() + A - P;
   default:
     llvm_unreachable("invalid expression");
   }
 }
 
 // This function applies relocations to sections without SHF_ALLOC bit.
 // Such sections are never mapped to memory at runtime. Debug sections are
  // an example. Relocations in non-alloc sections are much easier to
  // handle than in allocated sections because they never need complex
  // treatment such as GOT or PLT (because at runtime no one refers to them).
 // So, we handle relocations for non-alloc sections directly in this
 // function as a performance optimization.
 template <class ELFT, class RelTy>
 void InputSection::relocateNonAlloc(uint8_t *Buf, ArrayRef<RelTy> Rels) {
   const unsigned Bits = sizeof(typename ELFT::uint) * 8;
 
   for (const RelTy &Rel : Rels) {
     RelType Type = Rel.getType(Config->IsMips64EL);
 
      // GCC 8.0 and earlier have a bug in which they emit R_386_GOTPC
      // relocations against _GLOBAL_OFFSET_TABLE_ for .debug_info. The bug was
      // fixed in 2017 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82630), but we
     // need to keep this bug-compatible code for a while.
     if (Config->EMachine == EM_386 && Type == R_386_GOTPC)
       continue;
 
     uint64_t Offset = getOffset(Rel.r_offset);
     uint8_t *BufLoc = Buf + Offset;
     int64_t Addend = getAddend<ELFT>(Rel);
     if (!RelTy::IsRela)
       Addend += Target->getImplicitAddend(BufLoc, Type);
 
     Symbol &Sym = getFile<ELFT>()->getRelocTargetSym(Rel);
     RelExpr Expr = Target->getRelExpr(Type, Sym, BufLoc);
     if (Expr == R_NONE)
       continue;
 
     if (Expr != R_ABS) {
       std::string Msg = getLocation<ELFT>(Offset) +
                         ": has non-ABS relocation " + toString(Type) +
                         " against symbol '" + toString(Sym) + "'";
       if (Expr != R_PC) {
         error(Msg);
         return;
       }
 
        // If control reaches here, we found a PC-relative relocation in a
        // non-ALLOC section. Since a non-ALLOC section is not loaded into
        // memory at runtime, the notion of PC-relative doesn't make sense here.
        // So, this is a usage error. However, GNU linkers historically accept
        // such relocations without any errors and relocate them as if they were
        // at address 0. For bug-compatibility, we accept them with warnings. We
        // know Steel Bank Common Lisp as of 2018 has this bug.
       warn(Msg);
       Target->relocateOne(BufLoc, Type,
                           SignExtend64<Bits>(Sym.getVA(Addend - Offset)));
       continue;
     }
 
     if (Sym.isTls() && !Out::TlsPhdr)
       Target->relocateOne(BufLoc, Type, 0);
     else
       Target->relocateOne(BufLoc, Type, SignExtend64<Bits>(Sym.getVA(Addend)));
   }
 }
 
 // This is used when '-r' is given.
 // For REL targets, InputSection::copyRelocations() may store artificial
 // relocations aimed to update addends. They are handled in relocateAlloc()
 // for allocatable sections, and this function does the same for
 // non-allocatable sections, such as sections with debug information.
 static void relocateNonAllocForRelocatable(InputSection *Sec, uint8_t *Buf) {
   const unsigned Bits = Config->Is64 ? 64 : 32;
 
   for (const Relocation &Rel : Sec->Relocations) {
     // InputSection::copyRelocations() adds only R_ABS relocations.
     assert(Rel.Expr == R_ABS);
     uint8_t *BufLoc = Buf + Rel.Offset + Sec->OutSecOff;
     uint64_t TargetVA = SignExtend64(Rel.Sym->getVA(Rel.Addend), Bits);
     Target->relocateOne(BufLoc, Rel.Type, TargetVA);
   }
 }
 
 template <class ELFT>
 void InputSectionBase::relocate(uint8_t *Buf, uint8_t *BufEnd) {
   if (Flags & SHF_EXECINSTR)
     adjustSplitStackFunctionPrologues<ELFT>(Buf, BufEnd);
 
   if (Flags & SHF_ALLOC) {
     relocateAlloc(Buf, BufEnd);
     return;
   }
 
   auto *Sec = cast<InputSection>(this);
   if (Config->Relocatable)
     relocateNonAllocForRelocatable(Sec, Buf);
   else if (Sec->AreRelocsRela)
     Sec->relocateNonAlloc<ELFT>(Buf, Sec->template relas<ELFT>());
   else
     Sec->relocateNonAlloc<ELFT>(Buf, Sec->template rels<ELFT>());
 }
 
 void InputSectionBase::relocateAlloc(uint8_t *Buf, uint8_t *BufEnd) {
   assert(Flags & SHF_ALLOC);
   const unsigned Bits = Config->Wordsize * 8;
 
   for (const Relocation &Rel : Relocations) {
     uint64_t Offset = Rel.Offset;
     if (auto *Sec = dyn_cast<InputSection>(this))
       Offset += Sec->OutSecOff;
     uint8_t *BufLoc = Buf + Offset;
     RelType Type = Rel.Type;
 
     uint64_t AddrLoc = getOutputSection()->Addr + Offset;
     RelExpr Expr = Rel.Expr;
     uint64_t TargetVA = SignExtend64(
         getRelocTargetVA(File, Type, Rel.Addend, AddrLoc, *Rel.Sym, Expr),
         Bits);
 
     switch (Expr) {
     case R_RELAX_GOT_PC:
     case R_RELAX_GOT_PC_NOPIC:
       Target->relaxGot(BufLoc, TargetVA);
       break;
     case R_RELAX_TLS_IE_TO_LE:
       Target->relaxTlsIeToLe(BufLoc, Type, TargetVA);
       break;
     case R_RELAX_TLS_LD_TO_LE:
     case R_RELAX_TLS_LD_TO_LE_ABS:
       Target->relaxTlsLdToLe(BufLoc, Type, TargetVA);
       break;
     case R_RELAX_TLS_GD_TO_LE:
     case R_RELAX_TLS_GD_TO_LE_NEG:
       Target->relaxTlsGdToLe(BufLoc, Type, TargetVA);
       break;
     case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
     case R_RELAX_TLS_GD_TO_IE:
     case R_RELAX_TLS_GD_TO_IE_ABS:
     case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
     case R_RELAX_TLS_GD_TO_IE_END:
       Target->relaxTlsGdToIe(BufLoc, Type, TargetVA);
       break;
     case R_PPC_CALL:
       // If this is a call to __tls_get_addr, it may be part of a TLS
       // sequence that has been relaxed and turned into a nop. In this
       // case, we don't want to handle it as a call.
       if (read32(BufLoc) == 0x60000000) // nop
         break;
 
       // Patch a nop (0x60000000) to a ld.
       if (Rel.Sym->NeedsTocRestore) {
         if (BufLoc + 8 > BufEnd || read32(BufLoc + 4) != 0x60000000) {
           error(getErrorLocation(BufLoc) + "call lacks nop, can't restore toc");
           break;
         }
         write32(BufLoc + 4, 0xe8410018); // ld %r2, 24(%r1)
       }
       Target->relocateOne(BufLoc, Type, TargetVA);
       break;
     default:
       Target->relocateOne(BufLoc, Type, TargetVA);
       break;
     }
   }
 }
 
 // For each function-defining prologue, find any calls to __morestack,
 // and replace them with calls to __morestack_non_split.
 static void switchMorestackCallsToMorestackNonSplit(
     DenseSet<Defined *> &Prologues, std::vector<Relocation *> &MorestackCalls) {
 
   // If the target adjusted a function's prologue, all calls to
   // __morestack inside that function should be switched to
   // __morestack_non_split.
   Symbol *MoreStackNonSplit = Symtab->find("__morestack_non_split");
   if (!MoreStackNonSplit) {
     error("Mixing split-stack objects requires a definition of "
           "__morestack_non_split");
     return;
   }
 
   // Sort both collections to compare addresses efficiently.
   llvm::sort(MorestackCalls, [](const Relocation *L, const Relocation *R) {
     return L->Offset < R->Offset;
   });
   std::vector<Defined *> Functions(Prologues.begin(), Prologues.end());
   llvm::sort(Functions, [](const Defined *L, const Defined *R) {
     return L->Value < R->Value;
   });
 
   auto It = MorestackCalls.begin();
   for (Defined *F : Functions) {
     // Find the first call to __morestack within the function.
     while (It != MorestackCalls.end() && (*It)->Offset < F->Value)
       ++It;
     // Adjust all calls inside the function.
     while (It != MorestackCalls.end() && (*It)->Offset < F->Value + F->Size) {
       (*It)->Sym = MoreStackNonSplit;
       ++It;
     }
   }
 }
 
 static bool enclosingPrologueAttempted(uint64_t Offset,
                                        const DenseSet<Defined *> &Prologues) {
   for (Defined *F : Prologues)
     if (F->Value <= Offset && Offset < F->Value + F->Size)
       return true;
   return false;
 }
 
 // If a function compiled for split stack calls a function not
 // compiled for split stack, then the caller needs its prologue
 // adjusted to ensure that the called function will have enough stack
 // available. Find those functions, and adjust their prologues.
 template <class ELFT>
 void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *Buf,
                                                          uint8_t *End) {
   if (!getFile<ELFT>()->SplitStack)
     return;
   DenseSet<Defined *> Prologues;
   std::vector<Relocation *> MorestackCalls;
 
   for (Relocation &Rel : Relocations) {
     // Local symbols can't possibly be cross-calls, and should have been
     // resolved long before this line.
     if (Rel.Sym->isLocal())
       continue;
 
     // Ignore calls into the split-stack api.
     if (Rel.Sym->getName().startswith("__morestack")) {
       if (Rel.Sym->getName().equals("__morestack"))
         MorestackCalls.push_back(&Rel);
       continue;
     }
 
      // A relocation to a non-function symbol isn't relevant. Sometimes
     // __morestack is not marked as a function, so this check comes
     // after the name check.
     if (Rel.Sym->Type != STT_FUNC)
       continue;
 
      // If the callee's file was compiled with split stack, there is nothing to do. In
     // this context, a "Defined" symbol is one "defined by the binary currently
     // being produced". So an "undefined" symbol might be provided by a shared
     // library. It is not possible to tell how such symbols were compiled, so be
     // conservative.
     if (Defined *D = dyn_cast<Defined>(Rel.Sym))
       if (InputSection *IS = cast_or_null<InputSection>(D->Section))
         if (!IS || !IS->getFile<ELFT>() || IS->getFile<ELFT>()->SplitStack)
           continue;
 
     if (enclosingPrologueAttempted(Rel.Offset, Prologues))
       continue;
 
     if (Defined *F = getEnclosingFunction<ELFT>(Rel.Offset)) {
       Prologues.insert(F);
       if (Target->adjustPrologueForCrossSplitStack(Buf + getOffset(F->Value),
                                                    End, F->StOther))
         continue;
       if (!getFile<ELFT>()->SomeNoSplitStack)
         error(lld::toString(this) + ": " + F->getName() +
               " (with -fsplit-stack) calls " + Rel.Sym->getName() +
               " (without -fsplit-stack), but couldn't adjust its prologue");
     }
   }
 
   if (Target->NeedsMoreStackNonSplit)
     switchMorestackCallsToMorestackNonSplit(Prologues, MorestackCalls);
 }
 
 template <class ELFT> void InputSection::writeTo(uint8_t *Buf) {
   if (Type == SHT_NOBITS)
     return;
 
   if (auto *S = dyn_cast<SyntheticSection>(this)) {
     S->writeTo(Buf + OutSecOff);
     return;
   }
 
   // If -r or --emit-relocs is given, then an InputSection
   // may be a relocation section.
   if (Type == SHT_RELA) {
     copyRelocations<ELFT>(Buf + OutSecOff, getDataAs<typename ELFT::Rela>());
     return;
   }
   if (Type == SHT_REL) {
     copyRelocations<ELFT>(Buf + OutSecOff, getDataAs<typename ELFT::Rel>());
     return;
   }
 
   // If -r is given, we may have a SHT_GROUP section.
   if (Type == SHT_GROUP) {
     copyShtGroup<ELFT>(Buf + OutSecOff);
     return;
   }
 
   // If this is a compressed section, uncompress section contents directly
   // to the buffer.
   if (UncompressedSize >= 0 && !UncompressedBuf) {
     size_t Size = UncompressedSize;
     if (Error E = zlib::uncompress(toStringRef(RawData),
                                    (char *)(Buf + OutSecOff), Size))
       fatal(toString(this) +
             ": uncompress failed: " + llvm::toString(std::move(E)));
     uint8_t *BufEnd = Buf + OutSecOff + Size;
     relocate<ELFT>(Buf, BufEnd);
     return;
   }
 
   // Copy section contents from source object file to output file
   // and then apply relocations.
   memcpy(Buf + OutSecOff, data().data(), data().size());
   uint8_t *BufEnd = Buf + OutSecOff + data().size();
   relocate<ELFT>(Buf, BufEnd);
 }
 
 void InputSection::replace(InputSection *Other) {
   Alignment = std::max(Alignment, Other->Alignment);
   Other->Repl = Repl;
   Other->Live = false;
 }
 
 template <class ELFT>
 EhInputSection::EhInputSection(ObjFile<ELFT> &F,
                                const typename ELFT::Shdr &Header,
                                StringRef Name)
     : InputSectionBase(F, Header, Name, InputSectionBase::EHFrame) {}
 
 SyntheticSection *EhInputSection::getParent() const {
   return cast_or_null<SyntheticSection>(Parent);
 }
 
 // Returns the index of the first relocation that points to a region between
 // Begin and Begin+Size.
 template <class IntTy, class RelTy>
 static unsigned getReloc(IntTy Begin, IntTy Size, const ArrayRef<RelTy> &Rels,
                          unsigned &RelocI) {
   // Start search from RelocI for fast access. That works because the
   // relocations are sorted in .eh_frame.
   for (unsigned N = Rels.size(); RelocI < N; ++RelocI) {
     const RelTy &Rel = Rels[RelocI];
     if (Rel.r_offset < Begin)
       continue;
 
     if (Rel.r_offset < Begin + Size)
       return RelocI;
     return -1;
   }
   return -1;
 }
 
 // .eh_frame is a sequence of CIE or FDE records.
 // This function splits an input section into records and returns them.
 template <class ELFT> void EhInputSection::split() {
   if (AreRelocsRela)
     split<ELFT>(relas<ELFT>());
   else
     split<ELFT>(rels<ELFT>());
 }
 
 template <class ELFT, class RelTy>
 void EhInputSection::split(ArrayRef<RelTy> Rels) {
   unsigned RelI = 0;
   for (size_t Off = 0, End = data().size(); Off != End;) {
     size_t Size = readEhRecordSize(this, Off);
     Pieces.emplace_back(Off, this, Size, getReloc(Off, Size, Rels, RelI));
     // The empty record is the end marker.
     if (Size == 4)
       break;
     Off += Size;
   }
 }
 
 static size_t findNull(StringRef S, size_t EntSize) {
   // Optimize the common case.
   if (EntSize == 1)
     return S.find(0);
 
   for (unsigned I = 0, N = S.size(); I != N; I += EntSize) {
     const char *B = S.begin() + I;
     if (std::all_of(B, B + EntSize, [](char C) { return C == 0; }))
       return I;
   }
   return StringRef::npos;
 }
 
 SyntheticSection *MergeInputSection::getParent() const {
   return cast_or_null<SyntheticSection>(Parent);
 }
 
 // Split a SHF_STRINGS section. Such a section is a sequence of
 // null-terminated strings.
 void MergeInputSection::splitStrings(ArrayRef<uint8_t> Data, size_t EntSize) {
   size_t Off = 0;
   bool IsAlloc = Flags & SHF_ALLOC;
   StringRef S = toStringRef(Data);
 
   while (!S.empty()) {
     size_t End = findNull(S, EntSize);
     if (End == StringRef::npos)
       fatal(toString(this) + ": string is not null terminated");
     size_t Size = End + EntSize;
 
     Pieces.emplace_back(Off, xxHash64(S.substr(0, Size)), !IsAlloc);
     S = S.substr(Size);
     Off += Size;
   }
 }
 
 // Split a non-SHF_STRINGS section. Such a section is a sequence of
 // fixed-size records.
 void MergeInputSection::splitNonStrings(ArrayRef<uint8_t> Data,
                                         size_t EntSize) {
   size_t Size = Data.size();
   assert((Size % EntSize) == 0);
   bool IsAlloc = Flags & SHF_ALLOC;
 
   for (size_t I = 0; I != Size; I += EntSize)
     Pieces.emplace_back(I, xxHash64(Data.slice(I, EntSize)), !IsAlloc);
 }
 
 template <class ELFT>
 MergeInputSection::MergeInputSection(ObjFile<ELFT> &F,
                                      const typename ELFT::Shdr &Header,
                                      StringRef Name)
     : InputSectionBase(F, Header, Name, InputSectionBase::Merge) {}
 
 MergeInputSection::MergeInputSection(uint64_t Flags, uint32_t Type,
                                      uint64_t Entsize, ArrayRef<uint8_t> Data,
                                      StringRef Name)
     : InputSectionBase(nullptr, Flags, Type, Entsize, /*Link*/ 0, /*Info*/ 0,
                        /*Alignment*/ Entsize, Data, Name, SectionBase::Merge) {}
 
 // This function is called after we obtain a complete list of input sections
 // that need to be linked. It is responsible for splitting section contents
 // into small chunks for further processing.
 //
 // Note that this function is called from parallelForEach. This must be
 // thread-safe (i.e. no memory allocation from the pools).
 void MergeInputSection::splitIntoPieces() {
   assert(Pieces.empty());
 
   if (Flags & SHF_STRINGS)
     splitStrings(data(), Entsize);
   else
     splitNonStrings(data(), Entsize);
 }
 
 SectionPiece *MergeInputSection::getSectionPiece(uint64_t Offset) {
   if (this->data().size() <= Offset)
     fatal(toString(this) + ": offset is outside the section");
 
   // If Offset is not at the beginning of a section piece, it is not in the
   // map. In that case we need to do a binary search of the original section
   // piece vector.
   auto It2 =
       llvm::upper_bound(Pieces, Offset, [](uint64_t Offset, SectionPiece P) {
         return Offset < P.InputOff;
       });
   return &It2[-1];
 }
 
 // Returns the offset in an output section for a given input offset.
 // Because the contents of a mergeable section are not contiguous in the
 // output, it is not just an addition to a base output offset.
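 // For example (purely illustrative), if a piece with InputOff 0x10 was
 // assigned OutputOff 0x40, then input offset 0x13 maps to output offset
 // 0x43 (Addend = 3).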
 uint64_t MergeInputSection::getParentOffset(uint64_t Offset) const {
   // If Offset is not at the beginning of a section piece, it is not in the
   // map. In that case we need to search the original section piece vector.
   const SectionPiece &Piece =
       *(const_cast<MergeInputSection *>(this)->getSectionPiece(Offset));
   uint64_t Addend = Offset - Piece.InputOff;
   return Piece.OutputOff + Addend;
 }
 
 template InputSection::InputSection(ObjFile<ELF32LE> &, const ELF32LE::Shdr &,
                                     StringRef);
 template InputSection::InputSection(ObjFile<ELF32BE> &, const ELF32BE::Shdr &,
                                     StringRef);
 template InputSection::InputSection(ObjFile<ELF64LE> &, const ELF64LE::Shdr &,
                                     StringRef);
 template InputSection::InputSection(ObjFile<ELF64BE> &, const ELF64BE::Shdr &,
                                     StringRef);
 
 template std::string InputSectionBase::getLocation<ELF32LE>(uint64_t);
 template std::string InputSectionBase::getLocation<ELF32BE>(uint64_t);
 template std::string InputSectionBase::getLocation<ELF64LE>(uint64_t);
 template std::string InputSectionBase::getLocation<ELF64BE>(uint64_t);
 
 template void InputSection::writeTo<ELF32LE>(uint8_t *);
 template void InputSection::writeTo<ELF32BE>(uint8_t *);
 template void InputSection::writeTo<ELF64LE>(uint8_t *);
 template void InputSection::writeTo<ELF64BE>(uint8_t *);
 
 template MergeInputSection::MergeInputSection(ObjFile<ELF32LE> &,
                                               const ELF32LE::Shdr &, StringRef);
 template MergeInputSection::MergeInputSection(ObjFile<ELF32BE> &,
                                               const ELF32BE::Shdr &, StringRef);
 template MergeInputSection::MergeInputSection(ObjFile<ELF64LE> &,
                                               const ELF64LE::Shdr &, StringRef);
 template MergeInputSection::MergeInputSection(ObjFile<ELF64BE> &,
                                               const ELF64BE::Shdr &, StringRef);
 
 template EhInputSection::EhInputSection(ObjFile<ELF32LE> &,
                                         const ELF32LE::Shdr &, StringRef);
 template EhInputSection::EhInputSection(ObjFile<ELF32BE> &,
                                         const ELF32BE::Shdr &, StringRef);
 template EhInputSection::EhInputSection(ObjFile<ELF64LE> &,
                                         const ELF64LE::Shdr &, StringRef);
 template EhInputSection::EhInputSection(ObjFile<ELF64BE> &,
                                         const ELF64BE::Shdr &, StringRef);
 
 template void EhInputSection::split<ELF32LE>();
 template void EhInputSection::split<ELF32BE>();
 template void EhInputSection::split<ELF64LE>();
 template void EhInputSection::split<ELF64BE>();
Index: head/contrib/llvm/tools/lld/ELF/Relocations.cpp
===================================================================
--- head/contrib/llvm/tools/lld/ELF/Relocations.cpp	(revision 350466)
+++ head/contrib/llvm/tools/lld/ELF/Relocations.cpp	(revision 350467)
@@ -1,1520 +1,1634 @@
 //===- Relocations.cpp ----------------------------------------------------===//
 //
 //                             The LLVM Linker
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
 // This file contains platform-independent functions to process relocations.
 // I'll describe the overview of this file here.
 //
 // Simple relocations are easy to handle for the linker. For example,
 // for R_X86_64_PC64 relocs, the linker just has to fix up locations
 // with the relative offsets to the target symbols. It would just be
 // reading records from relocation sections and applying them to output.
 //
 // But not all relocations are that easy to handle. For example, for
 // R_386_GOTOFF relocs, the linker has to create new GOT entries for
 // symbols if they don't exist, and fix up locations with GOT entry
 // offsets from the beginning of GOT section. So there is more than
 // fixing addresses in relocation processing.
 //
 // ELF defines a large number of complex relocations.
 //
 // The functions in this file analyze relocations and do whatever needs
 // to be done. It includes, but is not limited to, the following.
 //
 //  - create GOT/PLT entries
 //  - create new relocations in .dynsym to let the dynamic linker resolve
 //    them at runtime (since ELF supports dynamic linking, not all
 //    relocations can be resolved at link-time)
 //  - create COPY relocs and reserve space in .bss
 //  - replace expensive relocs (in terms of runtime cost) with cheap ones
 //  - error out on infeasible combinations such as PIC and non-relative relocs
 //
 // Note that the functions in this file don't actually apply relocations
 // because they don't know about the output file nor the output file buffer.
 // They instead store Relocation objects in InputSection's Relocations
 // vector so that they can be applied later in InputSection::writeTo.
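 //
 // As a rough sketch, a scanned relocation typically ends up recorded as
 //
 //   Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
 //
 // (see the push_back calls throughout this file), where Expr describes how
 // the value is computed and Type is the raw ELF relocation type.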
 //
 //===----------------------------------------------------------------------===//
 
 #include "Relocations.h"
 #include "Config.h"
 #include "LinkerScript.h"
 #include "OutputSections.h"
 #include "SymbolTable.h"
 #include "Symbols.h"
 #include "SyntheticSections.h"
 #include "Target.h"
 #include "Thunks.h"
 #include "lld/Common/ErrorHandler.h"
 #include "lld/Common/Memory.h"
 #include "lld/Common/Strings.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/Support/Endian.h"
 #include "llvm/Support/raw_ostream.h"
 #include <algorithm>
 
 using namespace llvm;
 using namespace llvm::ELF;
 using namespace llvm::object;
 using namespace llvm::support::endian;
 
 using namespace lld;
 using namespace lld::elf;
 
 static Optional<std::string> getLinkerScriptLocation(const Symbol &Sym) {
   for (BaseCommand *Base : Script->SectionCommands)
     if (auto *Cmd = dyn_cast<SymbolAssignment>(Base))
       if (Cmd->Sym == &Sym)
         return Cmd->Location;
   return None;
 }
 
 // Construct a message in the following format.
 //
 // >>> defined in /home/alice/src/foo.o
 // >>> referenced by bar.c:12 (/home/alice/src/bar.c:12)
 // >>>               /home/alice/src/bar.o:(.text+0x1)
 static std::string getLocation(InputSectionBase &S, const Symbol &Sym,
                                uint64_t Off) {
   std::string Msg = "\n>>> defined in ";
   if (Sym.File)
     Msg += toString(Sym.File);
   else if (Optional<std::string> Loc = getLinkerScriptLocation(Sym))
     Msg += *Loc;
 
   Msg += "\n>>> referenced by ";
   std::string Src = S.getSrcMsg(Sym, Off);
   if (!Src.empty())
     Msg += Src + "\n>>>               ";
   return Msg + S.getObjMsg(Off);
 }
 
 // This function is similar to `handleTlsRelocation`. MIPS does not support
 // any relaxations for TLS relocations, so by factoring MIPS handling out
 // into a separate function we can simplify the code and avoid polluting
 // `handleTlsRelocation` with MIPS-specific `if` statements.
 // Mips has a custom MipsGotSection that handles the writing of GOT entries
 // without dynamic relocations.
 static unsigned handleMipsTlsRelocation(RelType Type, Symbol &Sym,
                                         InputSectionBase &C, uint64_t Offset,
                                         int64_t Addend, RelExpr Expr) {
   if (Expr == R_MIPS_TLSLD) {
     In.MipsGot->addTlsIndex(*C.File);
     C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return 1;
   }
   if (Expr == R_MIPS_TLSGD) {
     In.MipsGot->addDynTlsEntry(*C.File, Sym);
     C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return 1;
   }
   return 0;
 }
 
 // This function is similar to the `handleMipsTlsRelocation`. ARM also does not
 // support any relaxations for TLS relocations. ARM is logically similar to Mips
 // in how it handles TLS, but Mips uses its own custom GOT which handles some
 // of the cases that ARM uses GOT relocations for.
 //
 // We look for TLS global dynamic and local dynamic relocations; these may
 // require the generation of a pair of GOT entries that have associated
 // dynamic relocations. When the results of the dynamic relocations can be
 // resolved at static link time we do so. This is necessary for static linking
 // as there will be no dynamic loader to resolve them at load-time.
 //
 // The pair of GOT entries created are of the form
 // GOT[e0] Module Index (Used to find pointer to TLS block at run-time)
 // GOT[e1] Offset of symbol in TLS block
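 //
 // For example (illustrative): in a non-shared link against a non-preemptible
 // symbol both values are known at static link time, so the pair can be
 // filled in directly:
 //   GOT[e0] = 1              ; module index of the executable
 //   GOT[e1] = <TLS offset>   ; offset of the symbol in the TLS block
 // For a preemptible symbol (or with -shared) dynamic relocations are emitted
 // instead, and the dynamic loader fills the entries in at load time.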
 template <class ELFT>
 static unsigned handleARMTlsRelocation(RelType Type, Symbol &Sym,
                                        InputSectionBase &C, uint64_t Offset,
                                        int64_t Addend, RelExpr Expr) {
   // The Dynamic TLS Module Index Relocation for a symbol defined in an
   // executable is always 1. If the target Symbol is not preemptible then
   // we know the offset into the TLS block at static link time.
   bool NeedDynId = Sym.IsPreemptible || Config->Shared;
   bool NeedDynOff = Sym.IsPreemptible;
 
   auto AddTlsReloc = [&](uint64_t Off, RelType Type, Symbol *Dest, bool Dyn) {
     if (Dyn)
       In.RelaDyn->addReloc(Type, In.Got, Off, Dest);
     else
       In.Got->Relocations.push_back({R_ABS, Type, Off, 0, Dest});
   };
 
   // Local Dynamic is for access to module local TLS variables, while still
   // being suitable for being dynamically loaded via dlopen.
   // GOT[e0] is the module index, with a special value of 0 for the current
   // module. GOT[e1] is unused. There only needs to be one module index entry.
   if (Expr == R_TLSLD_PC && In.Got->addTlsIndex()) {
     AddTlsReloc(In.Got->getTlsIndexOff(), Target->TlsModuleIndexRel,
                 NeedDynId ? nullptr : &Sym, NeedDynId);
     C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return 1;
   }
 
   // Global Dynamic is the most general purpose access model. When we know
   // the module index and offset of symbol in TLS block we can fill these in
   // using static GOT relocations.
   if (Expr == R_TLSGD_PC) {
     if (In.Got->addDynTlsEntry(Sym)) {
       uint64_t Off = In.Got->getGlobalDynOffset(Sym);
       AddTlsReloc(Off, Target->TlsModuleIndexRel, &Sym, NeedDynId);
       AddTlsReloc(Off + Config->Wordsize, Target->TlsOffsetRel, &Sym,
                   NeedDynOff);
     }
     C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return 1;
   }
   return 0;
 }
 
 // Returns the number of relocations processed.
 template <class ELFT>
 static unsigned
 handleTlsRelocation(RelType Type, Symbol &Sym, InputSectionBase &C,
                     typename ELFT::uint Offset, int64_t Addend, RelExpr Expr) {
   if (!Sym.isTls())
     return 0;
 
   if (Config->EMachine == EM_ARM)
     return handleARMTlsRelocation<ELFT>(Type, Sym, C, Offset, Addend, Expr);
   if (Config->EMachine == EM_MIPS)
     return handleMipsTlsRelocation(Type, Sym, C, Offset, Addend, Expr);
 
   if (isRelExprOneOf<R_TLSDESC, R_AARCH64_TLSDESC_PAGE, R_TLSDESC_CALL>(Expr) &&
       Config->Shared) {
     if (In.Got->addDynTlsEntry(Sym)) {
       uint64_t Off = In.Got->getGlobalDynOffset(Sym);
       In.RelaDyn->addReloc(
           {Target->TlsDescRel, In.Got, Off, !Sym.IsPreemptible, &Sym, 0});
     }
     if (Expr != R_TLSDESC_CALL)
       C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return 1;
   }
 
   if (isRelExprOneOf<R_TLSLD_GOT, R_TLSLD_GOT_FROM_END, R_TLSLD_PC,
                      R_TLSLD_HINT>(Expr)) {
     // Local-Dynamic relocs can be relaxed to Local-Exec.
     if (!Config->Shared) {
       C.Relocations.push_back(
           {Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_LD_TO_LE), Type,
            Offset, Addend, &Sym});
       return Target->TlsGdRelaxSkip;
     }
     if (Expr == R_TLSLD_HINT)
       return 1;
     if (In.Got->addTlsIndex())
       In.RelaDyn->addReloc(Target->TlsModuleIndexRel, In.Got,
                            In.Got->getTlsIndexOff(), nullptr);
     C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return 1;
   }
 
   // Local-Dynamic relocs can be relaxed to Local-Exec.
   if (Expr == R_ABS && !Config->Shared) {
     C.Relocations.push_back(
         {Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_LD_TO_LE), Type,
          Offset, Addend, &Sym});
     return 1;
   }
 
   // Local-Dynamic sequence where the offset of a TLS variable relative to the
   // dynamic thread pointer is stored in the GOT.
   if (Expr == R_TLSLD_GOT_OFF) {
     // Local-Dynamic relocs can be relaxed to Local-Exec.
     if (!Config->Shared) {
       C.Relocations.push_back({R_RELAX_TLS_LD_TO_LE, Type, Offset, Addend, &Sym});
       return 1;
     }
     if (!Sym.isInGot()) {
       In.Got->addEntry(Sym);
       uint64_t Off = Sym.getGotOffset();
       In.Got->Relocations.push_back(
           {R_ABS, Target->TlsOffsetRel, Off, 0, &Sym});
     }
     C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return 1;
   }
 
   if (isRelExprOneOf<R_TLSDESC, R_AARCH64_TLSDESC_PAGE, R_TLSDESC_CALL,
                      R_TLSGD_GOT, R_TLSGD_GOT_FROM_END, R_TLSGD_PC>(Expr)) {
     if (Config->Shared) {
       if (In.Got->addDynTlsEntry(Sym)) {
         uint64_t Off = In.Got->getGlobalDynOffset(Sym);
         In.RelaDyn->addReloc(Target->TlsModuleIndexRel, In.Got, Off, &Sym);
 
         // If the symbol is preemptible we need the dynamic linker to write
         // the offset too.
         uint64_t OffsetOff = Off + Config->Wordsize;
         if (Sym.IsPreemptible)
           In.RelaDyn->addReloc(Target->TlsOffsetRel, In.Got, OffsetOff, &Sym);
         else
           In.Got->Relocations.push_back(
               {R_ABS, Target->TlsOffsetRel, OffsetOff, 0, &Sym});
       }
       C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
       return 1;
     }
 
     // Global-Dynamic relocs can be relaxed to Initial-Exec or Local-Exec
     // depending on the symbol being locally defined or not.
     if (Sym.IsPreemptible) {
       C.Relocations.push_back(
           {Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_GD_TO_IE), Type,
            Offset, Addend, &Sym});
       if (!Sym.isInGot()) {
         In.Got->addEntry(Sym);
         In.RelaDyn->addReloc(Target->TlsGotRel, In.Got, Sym.getGotOffset(),
                              &Sym);
       }
     } else {
       C.Relocations.push_back(
           {Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_GD_TO_LE), Type,
            Offset, Addend, &Sym});
     }
     return Target->TlsGdRelaxSkip;
   }
 
   // Initial-Exec relocs can be relaxed to Local-Exec if the symbol is locally
   // defined.
   if (isRelExprOneOf<R_GOT, R_GOT_FROM_END, R_GOT_PC, R_AARCH64_GOT_PAGE_PC,
                      R_GOT_OFF, R_TLSIE_HINT>(Expr) &&
       !Config->Shared && !Sym.IsPreemptible) {
     C.Relocations.push_back({R_RELAX_TLS_IE_TO_LE, Type, Offset, Addend, &Sym});
     return 1;
   }
 
   if (Expr == R_TLSIE_HINT)
     return 1;
   return 0;
 }
 
 static RelType getMipsPairType(RelType Type, bool IsLocal) {
   switch (Type) {
   case R_MIPS_HI16:
     return R_MIPS_LO16;
   case R_MIPS_GOT16:
     // In case of a global symbol, the R_MIPS_GOT16 relocation does not
     // have a pair. Each global symbol has a unique entry in the GOT,
     // and a corresponding instruction, with the help of the R_MIPS_GOT16
     // relocation, loads the address of the symbol. In case of a local
     // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold
     // the high 16 bits of the symbol's value. A paired R_MIPS_LO16
     // relocation handles the low 16 bits of the address. That allows
     // allocating only one GOT entry for every 64 KBytes of local data.
     return IsLocal ? R_MIPS_LO16 : R_MIPS_NONE;
   case R_MICROMIPS_GOT16:
     return IsLocal ? R_MICROMIPS_LO16 : R_MIPS_NONE;
   case R_MIPS_PCHI16:
     return R_MIPS_PCLO16;
   case R_MICROMIPS_HI16:
     return R_MICROMIPS_LO16;
   default:
     return R_MIPS_NONE;
   }
 }
 
 // True if a non-preemptible symbol always has the same value regardless of
 // where the DSO is loaded.
 static bool isAbsolute(const Symbol &Sym) {
   if (Sym.isUndefWeak())
     return true;
   if (const auto *DR = dyn_cast<Defined>(&Sym))
     return DR->Section == nullptr; // Absolute symbol.
   return false;
 }
 
 static bool isAbsoluteValue(const Symbol &Sym) {
   return isAbsolute(Sym) || Sym.isTls();
 }
 
 // Returns true if Expr refers to a PLT entry.
 static bool needsPlt(RelExpr Expr) {
-  return isRelExprOneOf<R_PLT_PC, R_PPC_CALL_PLT, R_PLT, R_AARCH64_PLT_PAGE_PC,
-                        R_GOT_PLT, R_AARCH64_GOT_PAGE_PC_PLT>(Expr);
+  return isRelExprOneOf<R_PLT_PC, R_PPC_CALL_PLT, R_PLT>(Expr);
 }
 
 // Returns true if Expr refers to a GOT entry. Note that this function
 // returns false for TLS variables even though they need the GOT, because
 // TLS variables use the GOT differently than regular variables.
 static bool needsGot(RelExpr Expr) {
   return isRelExprOneOf<R_GOT, R_GOT_OFF, R_HEXAGON_GOT, R_MIPS_GOT_LOCAL_PAGE,
                         R_MIPS_GOT_OFF, R_MIPS_GOT_OFF32, R_AARCH64_GOT_PAGE_PC,
-                        R_AARCH64_GOT_PAGE_PC_PLT, R_GOT_PC, R_GOT_FROM_END,
-                        R_GOT_PLT>(Expr);
+                        R_GOT_PC, R_GOT_FROM_END>(Expr);
 }
 
 // True if this expression is of the form Sym - X, where X is a position in the
 // file (PC, or GOT for example).
 static bool isRelExpr(RelExpr Expr) {
   return isRelExprOneOf<R_PC, R_GOTREL, R_GOTREL_FROM_END, R_MIPS_GOTREL,
                         R_PPC_CALL, R_PPC_CALL_PLT, R_AARCH64_PAGE_PC,
-                        R_AARCH64_PLT_PAGE_PC, R_RELAX_GOT_PC>(Expr);
+                        R_RELAX_GOT_PC>(Expr);
 }
 
 // Returns true if a given relocation can be computed at link-time.
 //
 // For instance, we know the offset from a relocation to its target at
 // link-time if the relocation is PC-relative and refers to a
 // non-interposable function in the same executable. This function
 // will return true for such relocation.
 //
 // If this function returns false, that means we need to emit a
 // dynamic relocation so that the relocation will be fixed at load-time.
 static bool isStaticLinkTimeConstant(RelExpr E, RelType Type, const Symbol &Sym,
                                      InputSectionBase &S, uint64_t RelOff) {
   // These expressions always compute a constant.
   if (isRelExprOneOf<R_GOT_FROM_END, R_GOT_OFF, R_HEXAGON_GOT, R_TLSLD_GOT_OFF,
                      R_MIPS_GOT_LOCAL_PAGE, R_MIPS_GOTREL, R_MIPS_GOT_OFF,
                      R_MIPS_GOT_OFF32, R_MIPS_GOT_GP_PC, R_MIPS_TLSGD,
-                     R_AARCH64_GOT_PAGE_PC, R_AARCH64_GOT_PAGE_PC_PLT, R_GOT_PC,
-                     R_GOTONLY_PC, R_GOTONLY_PC_FROM_END, R_PLT_PC, R_TLSGD_GOT,
+                     R_AARCH64_GOT_PAGE_PC, R_GOT_PC, R_GOTONLY_PC,
+                     R_GOTONLY_PC_FROM_END, R_PLT_PC, R_TLSGD_GOT,
                      R_TLSGD_GOT_FROM_END, R_TLSGD_PC, R_PPC_CALL_PLT,
                      R_TLSDESC_CALL, R_AARCH64_TLSDESC_PAGE, R_HINT,
                      R_TLSLD_HINT, R_TLSIE_HINT>(E))
     return true;
 
-  // The computation involves output from the ifunc resolver.
-  if (Sym.isGnuIFunc() && Config->ZIfuncnoplt)
-    return false;
-
   // These never do, except if the entire file is position dependent or if
   // only the low bits are used.
-  if (E == R_GOT || E == R_GOT_PLT || E == R_PLT || E == R_TLSDESC)
+  if (E == R_GOT || E == R_PLT || E == R_TLSDESC)
     return Target->usesOnlyLowPageBits(Type) || !Config->Pic;
 
   if (Sym.IsPreemptible)
     return false;
   if (!Config->Pic)
     return true;
 
   // The size of a non-preemptible symbol is a constant.
   if (E == R_SIZE)
     return true;
 
   // For the target and the relocation, we want to know if they are
   // absolute or relative.
   bool AbsVal = isAbsoluteValue(Sym);
   bool RelE = isRelExpr(E);
   if (AbsVal && !RelE)
     return true;
   if (!AbsVal && RelE)
     return true;
   if (!AbsVal && !RelE)
     return Target->usesOnlyLowPageBits(Type);
 
   // Relative relocation to an absolute value. This is normally unrepresentable,
   // but if the relocation refers to a weak undefined symbol, we allow it to
   // resolve to the image base. This is a little strange, but it allows us to
   // link function calls to such symbols. Normally such a call will be guarded
   // with a comparison, which will load a zero from the GOT.
   // Another special case is the MIPS _gp_disp symbol, which represents the
   // offset between the start of a function and the '_gp' value and is defined
   // as absolute just to simplify the code.
   assert(AbsVal && RelE);
   if (Sym.isUndefWeak())
     return true;
 
   error("relocation " + toString(Type) + " cannot refer to absolute symbol: " +
         toString(Sym) + getLocation(S, Sym, RelOff));
   return true;
 }
 
 static RelExpr toPlt(RelExpr Expr) {
   switch (Expr) {
   case R_PPC_CALL:
     return R_PPC_CALL_PLT;
   case R_PC:
     return R_PLT_PC;
-  case R_AARCH64_PAGE_PC:
-    return R_AARCH64_PLT_PAGE_PC;
-  case R_AARCH64_GOT_PAGE_PC:
-    return R_AARCH64_GOT_PAGE_PC_PLT;
   case R_ABS:
     return R_PLT;
-  case R_GOT:
-    return R_GOT_PLT;
   default:
     return Expr;
   }
 }
 
 static RelExpr fromPlt(RelExpr Expr) {
   // We decided not to use a plt. Optimize a reference to the plt to a
   // reference to the symbol itself.
   switch (Expr) {
   case R_PLT_PC:
     return R_PC;
   case R_PPC_CALL_PLT:
     return R_PPC_CALL;
   case R_PLT:
     return R_ABS;
   default:
     return Expr;
   }
 }
 
 // Returns true if a given shared symbol is in a read-only segment in a DSO.
 template <class ELFT> static bool isReadOnly(SharedSymbol &SS) {
   typedef typename ELFT::Phdr Elf_Phdr;
 
   // Determine if the symbol is read-only by scanning the DSO's program headers.
   const SharedFile<ELFT> &File = SS.getFile<ELFT>();
   for (const Elf_Phdr &Phdr : check(File.getObj().program_headers()))
     if ((Phdr.p_type == ELF::PT_LOAD || Phdr.p_type == ELF::PT_GNU_RELRO) &&
         !(Phdr.p_flags & ELF::PF_W) && SS.Value >= Phdr.p_vaddr &&
         SS.Value < Phdr.p_vaddr + Phdr.p_memsz)
       return true;
   return false;
 }
 
 // Returns symbols at the same offset as a given symbol, including SS itself.
 //
 // If two or more symbols are at the same offset, and at least one of
 // them is copied by a copy relocation, all of them need to be copied.
 // Otherwise, they would refer to different places at runtime.
 template <class ELFT>
 static SmallSet<SharedSymbol *, 4> getSymbolsAt(SharedSymbol &SS) {
   typedef typename ELFT::Sym Elf_Sym;
 
   SharedFile<ELFT> &File = SS.getFile<ELFT>();
 
   SmallSet<SharedSymbol *, 4> Ret;
   for (const Elf_Sym &S : File.getGlobalELFSyms()) {
     if (S.st_shndx == SHN_UNDEF || S.st_shndx == SHN_ABS ||
         S.getType() == STT_TLS || S.st_value != SS.Value)
       continue;
     StringRef Name = check(S.getName(File.getStringTable()));
     Symbol *Sym = Symtab->find(Name);
     if (auto *Alias = dyn_cast_or_null<SharedSymbol>(Sym))
       Ret.insert(Alias);
   }
   return Ret;
 }
 
 // When a symbol is copy relocated or we create a canonical plt entry, it is
 // effectively a defined symbol. In the case of copy relocation the symbol is
 // in .bss and in the case of a canonical plt entry it is in .plt. This function
 // replaces the existing symbol with a Defined pointing to the appropriate
 // location.
 static void replaceWithDefined(Symbol &Sym, SectionBase *Sec, uint64_t Value,
                                uint64_t Size) {
   Symbol Old = Sym;
   replaceSymbol<Defined>(&Sym, Sym.File, Sym.getName(), Sym.Binding,
                          Sym.StOther, Sym.Type, Value, Size, Sec);
   Sym.PltIndex = Old.PltIndex;
   Sym.GotIndex = Old.GotIndex;
   Sym.VerdefIndex = Old.VerdefIndex;
   Sym.PPC64BranchltIndex = Old.PPC64BranchltIndex;
   Sym.IsPreemptible = true;
   Sym.ExportDynamic = true;
   Sym.IsUsedInRegularObj = true;
   Sym.Used = true;
 }
 
 // Reserve space in .bss or .bss.rel.ro for copy relocation.
 //
 // The copy relocation is pretty much a hack. If you use a copy relocation
 // in your program, not only the symbol name but the symbol's size, RW/RO
 // bit and alignment become part of the ABI. In addition to that, if the
 // symbol has aliases, the aliases become part of the ABI. That's subtle,
 // but if you violate that implicit ABI, that can cause very counter-
 // intuitive consequences.
 //
 // So, what is the copy relocation? It's for linking non-position
 // independent code to DSOs. In an ideal world, all references to data
 // exported by DSOs should go indirectly through GOT. But if object files
 // are compiled as non-PIC, all data references are direct. There is no
 // way for the linker to transform the code to use GOT, as machine
 // instructions are already set in stone in object files. This is where
 // the copy relocation takes a role.
 //
 // A copy relocation instructs the dynamic linker to copy data from a DSO
 // to a specified address (which is usually in .bss) at load-time. If the
 // static linker (that's us) finds a direct data reference to a DSO
 // symbol, it creates a copy relocation, so that the symbol can be
 // resolved as if it were in .bss rather than in a DSO.
 //
 // As you can see in this function, we create a copy relocation for the
 // dynamic linker, and the relocation contains not only the symbol name but
 // various other information about the symbol. So, such attributes become a
 // part of the ABI.
 //
 // Note for application developers: I can give you a piece of advice if
 // you are writing a shared library. You probably should export only
 // functions from your library. You shouldn't export variables.
 //
 // As an example of what can happen when you export variables without knowing
 // the semantics of copy relocations, assume that you have an exported
 // variable of type T. It is an ABI-breaking change to add new members at the
 // end of T even though doing that doesn't change the layout of the
 // existing members. That's because the space for the new members is not
 // reserved in .bss unless you recompile the main program. That means they
 // are likely to overlap with other data that happens to be laid out next
 // to the variable in .bss. This kind of issue is sometimes very hard to
 // debug. What's a solution? Instead of exporting a variable V from a DSO,
 // define an accessor getV().
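 //
 // A minimal sketch of that advice (illustrative only; the names are made up):
 //
 //   // In the DSO's public header: export an accessor, not the object.
 //   struct T { int A; int B; };
 //   T &getV();            // adding members to T later stays ABI-safe
 //
 // instead of "extern T V;", which a non-PIC executable would reference
 // through a copy relocation into its own .bss.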
 template <class ELFT> static void addCopyRelSymbol(SharedSymbol &SS) {
   // A copy relocation against a zero-sized symbol doesn't make sense.
   uint64_t SymSize = SS.getSize();
   if (SymSize == 0 || SS.Alignment == 0)
     fatal("cannot create a copy relocation for symbol " + toString(SS));
 
   // See if this symbol is in a read-only segment. If so, preserve the symbol's
   // memory protection by reserving space in the .bss.rel.ro section.
   bool IsReadOnly = isReadOnly<ELFT>(SS);
   BssSection *Sec = make<BssSection>(IsReadOnly ? ".bss.rel.ro" : ".bss",
                                      SymSize, SS.Alignment);
   if (IsReadOnly)
     In.BssRelRo->getParent()->addSection(Sec);
   else
     In.Bss->getParent()->addSection(Sec);
 
   // Look through the DSO's dynamic symbol table for aliases and create a
   // dynamic symbol for each one. This causes the copy relocation to correctly
   // interpose any aliases.
   for (SharedSymbol *Sym : getSymbolsAt<ELFT>(SS))
     replaceWithDefined(*Sym, Sec, 0, Sym->Size);
 
   In.RelaDyn->addReloc(Target->CopyRel, Sec, 0, &SS);
 }
 
 // MIPS has an odd notion of "paired" relocations to calculate addends.
 // For example, if a relocation is of R_MIPS_HI16, there must be a
 // R_MIPS_LO16 relocation after that, and an addend is calculated using
 // the two relocations.
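 //
 // As an illustration (not taken from the ABI document), a typical pair in a
 // REL object looks like
 //
 //   lui   $t0, %hi(sym)       # R_MIPS_HI16 against sym
 //   addiu $t0, $t0, %lo(sym)  # R_MIPS_LO16 against sym
 //
 // and the full addend is reconstructed from the immediates of both
 // instructions, which is why the paired relocation has to be found.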
 template <class ELFT, class RelTy>
 static int64_t computeMipsAddend(const RelTy &Rel, const RelTy *End,
                                  InputSectionBase &Sec, RelExpr Expr,
                                  bool IsLocal) {
   if (Expr == R_MIPS_GOTREL && IsLocal)
     return Sec.getFile<ELFT>()->MipsGp0;
 
   // The ABI says that the paired relocation is used only for REL.
   // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
   if (RelTy::IsRela)
     return 0;
 
   RelType Type = Rel.getType(Config->IsMips64EL);
   uint32_t PairTy = getMipsPairType(Type, IsLocal);
   if (PairTy == R_MIPS_NONE)
     return 0;
 
   const uint8_t *Buf = Sec.data().data();
   uint32_t SymIndex = Rel.getSymbol(Config->IsMips64EL);
 
   // To make things worse, paired relocations might not be contiguous in
   // the relocation table, so we need to do linear search. *sigh*
   for (const RelTy *RI = &Rel; RI != End; ++RI)
     if (RI->getType(Config->IsMips64EL) == PairTy &&
         RI->getSymbol(Config->IsMips64EL) == SymIndex)
       return Target->getImplicitAddend(Buf + RI->r_offset, PairTy);
 
   warn("can't find matching " + toString(PairTy) + " relocation for " +
        toString(Type));
   return 0;
 }
 
 // Returns an addend of a given relocation. If it is RELA, an addend
 // is in a relocation itself. If it is REL, we need to read it from an
 // input section.
 template <class ELFT, class RelTy>
 static int64_t computeAddend(const RelTy &Rel, const RelTy *End,
                              InputSectionBase &Sec, RelExpr Expr,
                              bool IsLocal) {
   int64_t Addend;
   RelType Type = Rel.getType(Config->IsMips64EL);
 
   if (RelTy::IsRela) {
     Addend = getAddend<ELFT>(Rel);
   } else {
     const uint8_t *Buf = Sec.data().data();
     Addend = Target->getImplicitAddend(Buf + Rel.r_offset, Type);
   }
 
   if (Config->EMachine == EM_PPC64 && Config->Pic && Type == R_PPC64_TOC)
     Addend += getPPC64TocBase();
   if (Config->EMachine == EM_MIPS)
     Addend += computeMipsAddend<ELFT>(Rel, End, Sec, Expr, IsLocal);
 
   return Addend;
 }
 
 // Report an undefined symbol if necessary.
 // Returns true if this function printed out an error message.
 static bool maybeReportUndefined(Symbol &Sym, InputSectionBase &Sec,
                                  uint64_t Offset) {
   if (Sym.isLocal() || !Sym.isUndefined() || Sym.isWeak())
     return false;
 
   bool CanBeExternal =
       Sym.computeBinding() != STB_LOCAL && Sym.Visibility == STV_DEFAULT;
   if (Config->UnresolvedSymbols == UnresolvedPolicy::Ignore && CanBeExternal)
     return false;
 
   std::string Msg =
       "undefined symbol: " + toString(Sym) + "\n>>> referenced by ";
 
   std::string Src = Sec.getSrcMsg(Sym, Offset);
   if (!Src.empty())
     Msg += Src + "\n>>>               ";
   Msg += Sec.getObjMsg(Offset);
 
   if (Sym.getName().startswith("_ZTV"))
     Msg += "\nthe vtable symbol may be undefined because the class is missing "
            "its key function (see https://lld.llvm.org/missingkeyfunction)";
 
   if ((Config->UnresolvedSymbols == UnresolvedPolicy::Warn && CanBeExternal) ||
       Config->NoinhibitExec) {
     warn(Msg);
     return false;
   }
 
   error(Msg);
   return true;
 }
 
 // The MIPS N32 ABI treats a series of successive relocations with the same
 // offset as a single relocation. A similar approach is used by the N64 ABI,
 // but that ABI packs all relocations into a single relocation record. Here we
 // emulate this for the N32 ABI: iterate over relocations with the same offset
 // and pack their types into a single bit-set.
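 //
 // For example (illustrative), three N32 relocations at the same offset with
 // types T1, T2 and T3 are combined into T1 | (T2 << 8) | (T3 << 16),
 // mirroring how the N64 ABI stores r_type, r_type2 and r_type3 in a single
 // record.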
 template <class RelTy>
 static RelType getMipsN32RelType(RelTy *&Rel, RelTy *End) {
   RelType Type = 0;
   uint64_t Offset = Rel->r_offset;
 
   int N = 0;
   while (Rel != End && Rel->r_offset == Offset)
     Type |= (Rel++)->getType(Config->IsMips64EL) << (8 * N++);
   return Type;
 }
 
 // .eh_frame sections are mergeable input sections, so their input
 // offsets are not linearly mapped to the output section. For each input
 // offset, we need to find a section piece containing the offset and
 // add the piece's base address to the input offset to compute the
 // output offset. That isn't cheap.
 //
 // This class is to speed up the offset computation. When we process
 // relocations, we access offsets in the monotonically increasing
 // order. So we can optimize for that access pattern.
 //
 // For sections other than .eh_frame, this class doesn't do anything.
 namespace {
 class OffsetGetter {
 public:
   explicit OffsetGetter(InputSectionBase &Sec) {
     if (auto *Eh = dyn_cast<EhInputSection>(&Sec))
       Pieces = Eh->Pieces;
   }
 
   // Translates offsets in input sections to offsets in output sections.
   // The given offset must increase monotonically across calls. We assume
   // that Pieces is sorted by InputOff.
   uint64_t get(uint64_t Off) {
     if (Pieces.empty())
       return Off;
 
     while (I != Pieces.size() && Pieces[I].InputOff + Pieces[I].Size <= Off)
       ++I;
     if (I == Pieces.size())
       fatal(".eh_frame: relocation is not in any piece");
 
     // Pieces must be contiguous, so there must be no holes in between.
     assert(Pieces[I].InputOff <= Off && "Relocation not in any piece");
 
     // Offset -1 means that the piece is dead (i.e. garbage collected).
     if (Pieces[I].OutputOff == -1)
       return -1;
     return Pieces[I].OutputOff + Off - Pieces[I].InputOff;
   }
 
 private:
   ArrayRef<EhSectionPiece> Pieces;
   size_t I = 0;
 };
 } // namespace
 
 static void addRelativeReloc(InputSectionBase *IS, uint64_t OffsetInSec,
                              Symbol *Sym, int64_t Addend, RelExpr Expr,
                              RelType Type) {
   // Add a relative relocation. If the RelrDyn section is enabled, and the
   // relocation offset is guaranteed to be even, add the relocation to
   // the RelrDyn section; otherwise add it to the RelaDyn section.
   // RelrDyn sections don't support odd offsets. Also, RelrDyn sections
   // don't store addend values, so we must write the addend to the relocated
   // address.
   if (In.RelrDyn && IS->Alignment >= 2 && OffsetInSec % 2 == 0) {
     IS->Relocations.push_back({Expr, Type, OffsetInSec, Addend, Sym});
     In.RelrDyn->Relocs.push_back({IS, OffsetInSec});
     return;
   }
   In.RelaDyn->addReloc(Target->RelativeRel, IS, OffsetInSec, Sym, Addend, Expr,
                        Type);
 }
 
 template <class ELFT, class GotPltSection>
 static void addPltEntry(PltSection *Plt, GotPltSection *GotPlt,
                         RelocationBaseSection *Rel, RelType Type, Symbol &Sym) {
   Plt->addEntry<ELFT>(Sym);
   GotPlt->addEntry(Sym);
   Rel->addReloc(
       {Type, GotPlt, Sym.getGotPltOffset(), !Sym.IsPreemptible, &Sym, 0});
 }
 
 template <class ELFT> static void addGotEntry(Symbol &Sym) {
   In.Got->addEntry(Sym);
 
-  RelExpr Expr;
-  if (Sym.isTls())
-    Expr = R_TLS;
-  else if (Sym.isGnuIFunc())
-    Expr = R_PLT;
-  else
-    Expr = R_ABS;
-
+  RelExpr Expr = Sym.isTls() ? R_TLS : R_ABS;
   uint64_t Off = Sym.getGotOffset();
 
   // If a GOT slot value can be calculated at link-time, which is now,
   // we can just fill that out.
   //
   // (We don't actually write a value to a GOT slot right now, but we
   // add a static relocation to a Relocations vector so that
   // InputSection::relocate will do the work for us. We may be able
   // to just write a value now, but it is a TODO.)
   bool IsLinkTimeConstant =
       !Sym.IsPreemptible && (!Config->Pic || isAbsolute(Sym));
   if (IsLinkTimeConstant) {
     In.Got->Relocations.push_back({Expr, Target->GotRel, Off, 0, &Sym});
     return;
   }
 
   // Otherwise, we emit a dynamic relocation to .rel[a].dyn so that
   // the GOT slot will be fixed at load-time.
   if (!Sym.isTls() && !Sym.IsPreemptible && Config->Pic && !isAbsolute(Sym)) {
     addRelativeReloc(In.Got, Off, &Sym, 0, R_ABS, Target->GotRel);
     return;
   }
   In.RelaDyn->addReloc(Sym.isTls() ? Target->TlsGotRel : Target->GotRel, In.Got,
                        Off, &Sym, 0, Sym.IsPreemptible ? R_ADDEND : R_ABS,
                        Target->GotRel);
 }
 
 // Return true if we can define a symbol in the executable that
 // contains the value/function of a symbol defined in a shared
 // library.
 static bool canDefineSymbolInExecutable(Symbol &Sym) {
   // If the symbol has default visibility the symbol defined in the
   // executable will preempt it.
   // Note that we want the visibility of the shared symbol itself, not
   // the visibility of the symbol in the output file we are producing. That is
   // why we use Sym.StOther.
   if ((Sym.StOther & 0x3) == STV_DEFAULT)
     return true;
 
   // If we are allowed to break address equality of functions, defining
   // a plt entry will allow the program to call the function in the
   // .so, but the .so and the executable will not agree on the address
   // of the function. Similar logic for objects.
   return ((Sym.isFunc() && Config->IgnoreFunctionAddressEquality) ||
           (Sym.isObject() && Config->IgnoreDataAddressEquality));
 }
 
 // The reason we have to do this early scan is as follows:
 // * To mmap the output file, we need to know the size
 // * For that, we need to know how many dynamic relocs we will have.
 // It might be possible to avoid this by outputting the file with write:
 // * Write the allocated output sections, computing addresses.
 // * Apply relocations, recording which ones require a dynamic reloc.
 // * Write the dynamic relocations.
 // * Write the rest of the file.
 // This would have some drawbacks. For example, we would only know if .rela.dyn
 // is needed after applying relocations. If it is, it will go after rw and rx
 // sections. Given that it is ro, we will need an extra PT_LOAD. This
 // complicates things for the dynamic linker and means we would have to reserve
 // space for the extra PT_LOAD even if we end up not using it.
 template <class ELFT, class RelTy>
 static void processRelocAux(InputSectionBase &Sec, RelExpr Expr, RelType Type,
                             uint64_t Offset, Symbol &Sym, const RelTy &Rel,
                             int64_t Addend) {
   if (isStaticLinkTimeConstant(Expr, Type, Sym, Sec, Offset)) {
     Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return;
   }
-  if (Sym.isGnuIFunc() && Config->ZIfuncnoplt) {
-    In.RelaDyn->addReloc(Type, &Sec, Offset, &Sym, Addend, R_ADDEND, Type);
-    return;
-  }
   bool CanWrite = (Sec.Flags & SHF_WRITE) || !Config->ZText;
   if (CanWrite) {
     // R_GOT refers to a position in the got, even if the symbol is preemptible.
     bool IsPreemptibleValue = Sym.IsPreemptible && Expr != R_GOT;
 
     if (!IsPreemptibleValue) {
       addRelativeReloc(&Sec, Offset, &Sym, Addend, Expr, Type);
       return;
     } else if (RelType Rel = Target->getDynRel(Type)) {
       In.RelaDyn->addReloc(Rel, &Sec, Offset, &Sym, Addend, R_ADDEND, Type);
 
       // The MIPS ABI turns the use of the GOT and dynamic relocations inside
       // out. While the regular ABI uses dynamic relocations to fill up GOT
       // entries, the MIPS ABI requires the dynamic linker to fill up GOT
       // entries using a specially sorted dynamic symbol table. This affects
       // even dynamic relocations against symbols which do not require GOT
       // entry creation explicitly, i.e. do not have any GOT-relocations. So
       // if a preemptible symbol has a dynamic relocation, we have to create
       // a GOT entry for it anyway.
       // If a non-preemptible symbol has a dynamic relocation against it, the
       // dynamic linker takes its st_value, adds the offset and writes down
       // the result of the dynamic relocation. In case of a preemptible
       // symbol, the dynamic linker performs symbol resolution, writes the
       // symbol value to the GOT entry and reads the GOT entry when it needs
       // to perform a dynamic relocation.
       // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
       if (Config->EMachine == EM_MIPS)
         In.MipsGot->addEntry(*Sec.File, Sym, Addend, Expr);
       return;
     }
   }
 
   // If the relocation is to a weak undef, and we are producing
   // an executable, give up on it and produce a non-preemptible 0.
   if (!Config->Shared && Sym.isUndefWeak()) {
     Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return;
   }
 
   if (!CanWrite && (Config->Pic && !isRelExpr(Expr))) {
     error(
         "can't create dynamic relocation " + toString(Type) + " against " +
         (Sym.getName().empty() ? "local symbol" : "symbol: " + toString(Sym)) +
         " in readonly segment; recompile object files with -fPIC "
         "or pass '-Wl,-z,notext' to allow text relocations in the output" +
         getLocation(Sec, Sym, Offset));
     return;
   }
 
   // Copy relocations are only possible if we are creating an executable.
   if (Config->Shared) {
     errorOrWarn("relocation " + toString(Type) +
                 " cannot be used against symbol " + toString(Sym) +
                 "; recompile with -fPIC" + getLocation(Sec, Sym, Offset));
     return;
   }
 
   // If the symbol is undefined we already reported any relevant errors.
   if (Sym.isUndefined())
     return;
 
   if (!canDefineSymbolInExecutable(Sym)) {
     error("cannot preempt symbol: " + toString(Sym) +
           getLocation(Sec, Sym, Offset));
     return;
   }
 
   if (Sym.isObject()) {
     // Produce a copy relocation.
     if (auto *SS = dyn_cast<SharedSymbol>(&Sym)) {
       if (!Config->ZCopyreloc)
         error("unresolvable relocation " + toString(Type) +
               " against symbol '" + toString(*SS) +
               "'; recompile with -fPIC or remove '-z nocopyreloc'" +
               getLocation(Sec, Sym, Offset));
       addCopyRelSymbol<ELFT>(*SS);
     }
     Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return;
   }
 
   if (Sym.isFunc()) {
     // This handles a non-PIC program call to a function in a shared library. In
     // an ideal world, we could just report an error saying the relocation can
     // overflow at runtime. In the real world with glibc, crt1.o has a
     // R_X86_64_PC32 pointing to libc.so.
     //
     // The general idea on how to handle such cases is to create a PLT entry and
     // use that as the function value.
     //
     // For the static linking part, we just return a plt expr and everything
     // else will use the PLT entry as the address.
     //
     // The remaining problem is making sure pointer equality still works. We
     // need the help of the dynamic linker for that. We let it know that we have
     // a direct reference to a so symbol by creating an undefined symbol with a
     // non zero st_value. Seeing that, the dynamic linker resolves the symbol to
     // the value of the symbol we created. This is true even for got entries, so
     // pointer equality is maintained. To avoid an infinite loop, the only entry
     // that points to the real function is a dedicated got entry used by the
     // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT,
     // R_386_JMP_SLOT, etc).
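     //
     // An illustrative sketch (not actual lld output): for a non-PIC
     // executable that both calls and takes the address of foo from a DSO,
     // we end up with roughly
     //
     //   .plt      foo@plt:  indirect jump through foo's .got.plt slot
     //   .dynsym   foo:      undefined, but st_value = address of foo@plt
     //
     // The dynamic linker sees the non-zero st_value and resolves foo to the
     // PLT entry everywhere, so &foo compares equal in the executable and in
     // the DSO; only the .got.plt slot used by the PLT (relocated with a
     // JUMP_SLOT relocation) points at the real function.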
 
     // For position independent executable on i386, the plt entry requires ebx
     // to be set. This causes two problems:
     // * If some code has a direct reference to a function, it was probably
     //   compiled without -fPIE/-fPIC and doesn't maintain ebx.
     // * If a library definition gets preempted to the executable, it will have
     //   the wrong ebx value.
     if (Config->Pie && Config->EMachine == EM_386)
       errorOrWarn("symbol '" + toString(Sym) +
                   "' cannot be preempted; recompile with -fPIE" +
                   getLocation(Sec, Sym, Offset));
     if (!Sym.isInPlt())
       addPltEntry<ELFT>(In.Plt, In.GotPlt, In.RelaPlt, Target->PltRel, Sym);
     if (!Sym.isDefined())
       replaceWithDefined(Sym, In.Plt, getPltEntryOffset(Sym.PltIndex), 0);
     Sym.NeedsPltAddr = true;
     Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
     return;
   }
 
   errorOrWarn("symbol '" + toString(Sym) + "' has no type" +
               getLocation(Sec, Sym, Offset));
 }
 
+struct IRelativeReloc {
+  RelType Type;
+  InputSectionBase *Sec;
+  uint64_t Offset;
+  Symbol *Sym;
+};
+
+static std::vector<IRelativeReloc> IRelativeRelocs;
+
 template <class ELFT, class RelTy>
 static void scanReloc(InputSectionBase &Sec, OffsetGetter &GetOffset, RelTy *&I,
                       RelTy *End) {
   const RelTy &Rel = *I;
   Symbol &Sym = Sec.getFile<ELFT>()->getRelocTargetSym(Rel);
   RelType Type;
 
   // Deal with MIPS oddity.
   if (Config->MipsN32Abi) {
     Type = getMipsN32RelType(I, End);
   } else {
     Type = Rel.getType(Config->IsMips64EL);
     ++I;
   }
 
   // Get an offset in an output section this relocation is applied to.
   uint64_t Offset = GetOffset.get(Rel.r_offset);
   if (Offset == uint64_t(-1))
     return;
 
   // Skip if the target symbol is an erroneous undefined symbol.
   if (maybeReportUndefined(Sym, Sec, Rel.r_offset))
     return;
 
   const uint8_t *RelocatedAddr = Sec.data().begin() + Rel.r_offset;
   RelExpr Expr = Target->getRelExpr(Type, Sym, RelocatedAddr);
 
   // Ignore "hint" relocations because they are only markers for relaxation.
   if (isRelExprOneOf<R_HINT, R_NONE>(Expr))
     return;
 
-  // Strenghten or relax relocations.
+  if (Sym.isGnuIFunc() && !Config->ZText && Config->WarnIfuncTextrel) {
+    warn("using ifunc symbols when text relocations are allowed may produce "
+         "a binary that will segfault, if the object file is linked with "
+         "old version of glibc (glibc 2.28 and earlier). If this applies to "
+         "you, consider recompiling the object files without -fPIC and "
+         "without -Wl,-z,notext option. Use -no-warn-ifunc-textrel to "
+         "turn off this warning." +
+         getLocation(Sec, Sym, Offset));
+  }
+
+  // Relax relocations.
   //
-  // GNU ifunc symbols must be accessed via PLT because their addresses
-  // are determined by runtime.
-  //
-  // On the other hand, if we know that a PLT entry will be resolved within
-  // the same ELF module, we can skip PLT access and directly jump to the
-  // destination function. For example, if we are linking a main exectuable,
-  // all dynamic symbols that can be resolved within the executable will
-  // actually be resolved that way at runtime, because the main exectuable
-  // is always at the beginning of a search list. We can leverage that fact.
-  if (Sym.isGnuIFunc() && !Config->ZIfuncnoplt) {
-    if (!Config->ZText && Config->WarnIfuncTextrel) {
-      warn("using ifunc symbols when text relocations are allowed may produce "
-           "a binary that will segfault, if the object file is linked with "
-           "old version of glibc (glibc 2.28 and earlier). If this applies to "
-           "you, consider recompiling the object files without -fPIC and "
-           "without -Wl,-z,notext option. Use -no-warn-ifunc-textrel to "
-           "turn off this warning." +
-           getLocation(Sec, Sym, Offset));
-    }
-    Expr = toPlt(Expr);
-  } else if (!Sym.IsPreemptible && Expr == R_GOT_PC && !isAbsoluteValue(Sym)) {
-    Expr = Target->adjustRelaxExpr(Type, RelocatedAddr, Expr);
-  } else if (!Sym.IsPreemptible) {
-    Expr = fromPlt(Expr);
+  // If we know that a PLT entry will be resolved within the same ELF module, we
+  // can skip PLT access and directly jump to the destination function. For
+  // example, if we are linking a main executable, all dynamic symbols that can
+  // be resolved within the executable will actually be resolved that way at
+  // runtime, because the main executable is always at the beginning of a search
+  // list. We can leverage that fact.
+  if (!Sym.IsPreemptible && (!Sym.isGnuIFunc() || Config->ZIfuncNoplt)) {
+    if (Expr == R_GOT_PC && !isAbsoluteValue(Sym))
+      Expr = Target->adjustRelaxExpr(Type, RelocatedAddr, Expr);
+    else
+      Expr = fromPlt(Expr);
   }
 
   // This relocation does not require a got entry, but it is relative to the
   // got and needs it to be created. Here we request that.
   if (isRelExprOneOf<R_GOTONLY_PC, R_GOTONLY_PC_FROM_END, R_GOTREL,
                      R_GOTREL_FROM_END, R_PPC_TOC>(Expr))
     In.Got->HasGotOffRel = true;
 
   // Read an addend.
   int64_t Addend = computeAddend<ELFT>(Rel, End, Sec, Expr, Sym.isLocal());
 
   // Process some TLS relocations, including relaxing TLS relocations.
   // Note that this function does not handle all TLS relocations.
   if (unsigned Processed =
           handleTlsRelocation<ELFT>(Type, Sym, Sec, Offset, Addend, Expr)) {
     I += (Processed - 1);
     return;
   }
 
-  // If a relocation needs PLT, we create PLT and GOTPLT slots for the symbol.
-  if (needsPlt(Expr) && !Sym.isInPlt()) {
-    if (Sym.isGnuIFunc() && !Sym.IsPreemptible)
-      addPltEntry<ELFT>(In.Iplt, In.IgotPlt, In.RelaIplt, Target->IRelativeRel,
-                        Sym);
-    else
-      addPltEntry<ELFT>(In.Plt, In.GotPlt, In.RelaPlt, Target->PltRel, Sym);
+  // We were asked not to generate PLT entries for ifuncs. Instead, pass the
+  // direct relocation on through.
+  if (Sym.isGnuIFunc() && Config->ZIfuncNoplt) {
+    Sym.ExportDynamic = true;
+    In.RelaDyn->addReloc(Type, &Sec, Offset, &Sym, Addend, R_ADDEND, Type);
+    return;
   }
 
-  // Create a GOT slot if a relocation needs GOT.
-  if (needsGot(Expr)) {
-    if (Config->EMachine == EM_MIPS) {
-      // MIPS ABI has special rules to process GOT entries and doesn't
-      // require relocation entries for them. A special case is TLS
-      // relocations. In that case dynamic loader applies dynamic
-      // relocations to initialize TLS GOT entries.
-      // See "Global Offset Table" in Chapter 5 in the following document
-      // for detailed description:
-      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
-      In.MipsGot->addEntry(*Sec.File, Sym, Addend, Expr);
-    } else if (!Sym.isInGot()) {
-      addGotEntry<ELFT>(Sym);
+  // Non-preemptible ifuncs require special handling. First, handle the usual
+  // case where the symbol isn't one of these.
+  if (!Sym.isGnuIFunc() || Sym.IsPreemptible) {
+    // If a relocation needs PLT, we create PLT and GOTPLT slots for the symbol.
+    if (needsPlt(Expr) && !Sym.isInPlt())
+      addPltEntry<ELFT>(In.Plt, In.GotPlt, In.RelaPlt, Target->PltRel, Sym);
+
+    // Create a GOT slot if a relocation needs GOT.
+    if (needsGot(Expr)) {
+      if (Config->EMachine == EM_MIPS) {
+        // MIPS ABI has special rules to process GOT entries and doesn't
+        // require relocation entries for them. A special case is TLS
+        // relocations. In that case dynamic loader applies dynamic
+        // relocations to initialize TLS GOT entries.
+        // See "Global Offset Table" in Chapter 5 in the following document
+        // for detailed description:
+        // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
+        In.MipsGot->addEntry(*Sec.File, Sym, Addend, Expr);
+      } else if (!Sym.isInGot()) {
+        addGotEntry<ELFT>(Sym);
+      }
     }
+  } else {
+    // Handle a reference to a non-preemptible ifunc. These are special in a
+    // few ways:
+    //
+    // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not have
+    //   a fixed value. But assuming that all references to the ifunc are
+    //   GOT-generating or PLT-generating, the handling of an ifunc is
+    //   relatively straightforward. We create a PLT entry in Iplt, which is
+    //   usually at the end of .plt, which makes an indirect call using a
+    //   matching GOT entry in IgotPlt, which is usually at the end of .got.plt.
+    //   The GOT entry is relocated using an IRELATIVE relocation in RelaIplt,
+    //   which is usually at the end of .rela.plt. Unlike most relocations in
+    //   .rela.plt, which may be evaluated lazily without -z now, dynamic
+    //   loaders evaluate IRELATIVE relocs eagerly, which means that for
+    //   IRELATIVE relocs only, GOT-generating relocations can point directly to
+    //   .got.plt without requiring a separate GOT entry.
+    //
+    // - Despite the fact that an ifunc does not have a fixed value, compilers
+    //   that are not passed -fPIC will assume that they do, and will emit
+    //   direct (non-GOT-generating, non-PLT-generating) relocations to the
+    //   symbol. This means that if a direct relocation to the symbol is
+    //   seen, the linker must set a value for the symbol, and this value must
+    //   be consistent no matter what type of reference is made to the symbol.
+    //   This can be done by creating a PLT entry for the symbol in the way
+    //   described above and making it canonical, that is, making all references
+    //   point to the PLT entry instead of the resolver. In lld we also store
+    //   the address of the PLT entry in the dynamic symbol table, which means
+    //   that the symbol will also have the same value in other modules.
+    //   Because the value loaded from the GOT needs to be consistent with
+    //   the value computed using a direct relocation, a non-preemptible ifunc
+    //   may end up with two GOT entries, one in .got.plt that points to the
+    //   address returned by the resolver and is used only by the PLT entry,
+    //   and another in .got that points to the PLT entry and is used by
+    //   GOT-generating relocations.
+    //
+    // - The fact that these symbols do not have a fixed value makes them an
+    //   exception to the general rule that a statically linked executable does
+    //   not require any form of dynamic relocation. To handle these relocations
+    //   correctly, the IRELATIVE relocations are stored in an array which a
+    //   statically linked executable's startup code must enumerate using the
+    //   linker-defined symbols __rela?_iplt_{start,end}.
+    //
+    // - An absolute relocation to a non-preemptible ifunc (such as a global
+    //   variable containing a pointer to the ifunc) needs to be relocated in
+    //   the exact same way as a GOT entry, so we can avoid needing to make the
+    //   PLT entry canonical by translating such relocations into IRELATIVE
+    //   relocations in the RelaIplt.
+    if (!Sym.isInPlt()) {
+      // Create PLT and GOTPLT slots for the symbol.
+      Sym.IsInIplt = true;
+
+      // Create a copy of the symbol to use as the target of the IRELATIVE
+      // relocation in the IgotPlt. This is in case we make the PLT canonical
+      // later, which would overwrite the original symbol.
+      //
+      // FIXME: Creating a copy of the symbol here is a bit of a hack. All
+      // that's really needed to create the IRELATIVE is the section and value,
+      // so ideally we should just need to copy those.
+      auto *DirectSym = make<Defined>(cast<Defined>(Sym));
+      addPltEntry<ELFT>(In.Iplt, In.IgotPlt, In.RelaIplt, Target->IRelativeRel,
+                        *DirectSym);
+      Sym.PltIndex = DirectSym->PltIndex;
+    }
+    if (Expr == R_ABS && Addend == 0 && (Sec.Flags & SHF_WRITE)) {
+      // We might be able to represent this as an IRELATIVE. But we don't know
+      // yet whether some later relocation will make the symbol point to a
+      // canonical PLT, which would make this either a dynamic RELATIVE (PIC) or
+      // static (non-PIC) relocation. So we keep a record of the information
+      // required to process the relocation, and after scanRelocs() has been
+      // called on all relocations, the relocation is resolved by
+      // addIRelativeRelocs().
+      IRelativeRelocs.push_back({Type, &Sec, Offset, &Sym});
+      return;
+    }
+    if (needsGot(Expr)) {
+      // Redirect GOT accesses to point to the Igot.
+      //
+      // This field is also used to keep track of whether we ever needed a GOT
+      // entry. If we did and we make the PLT canonical later, we'll need to
+      // create a GOT entry pointing to the PLT entry for Sym.
+      Sym.GotInIgot = true;
+    } else if (!needsPlt(Expr)) {
+      // Make the ifunc's PLT entry canonical by changing the value of its
+      // symbol to redirect all references to point to it.
+      unsigned EntryOffset = Sym.PltIndex * Target->PltEntrySize;
+      if (Config->ZRetpolineplt)
+        EntryOffset += Target->PltHeaderSize;
+
+      auto &D = cast<Defined>(Sym);
+      D.Section = In.Iplt;
+      D.Value = EntryOffset;
+      D.Size = 0;
+      // It's important to set the symbol type here so that dynamic loaders
+      // don't try to call the PLT as if it were an ifunc resolver.
+      D.Type = STT_FUNC;
+
+      if (Sym.GotInIgot) {
+        // We previously encountered a GOT generating reference that we
+        // redirected to the Igot. Now that the PLT entry is canonical we must
+        // clear the redirection to the Igot and add a GOT entry. As we've
+        // changed the symbol type to STT_FUNC future GOT generating references
+        // will naturally use this GOT entry.
+        //
+        // We don't need to worry about creating a MIPS GOT here because ifuncs
+        // aren't a thing on MIPS.
+        Sym.GotInIgot = false;
+        addGotEntry<ELFT>(Sym);
+      }
+    }
   }
 
   processRelocAux<ELFT>(Sec, Expr, Type, Offset, Sym, Rel, Addend);
 }
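
To make the decision flow above easier to follow in isolation, here is a minimal
standalone C++ sketch (hypothetical IfuncState/scanRef names, not lld code) that
models only the interplay between GOT-generating, PLT-generating and direct
references to a single non-preemptible ifunc, assuming each reference has
already been classified:

#include <cstdio>
#include <vector>

// Hypothetical per-symbol state tracked while scanning relocations against
// one non-preemptible ifunc.
enum class Ref { Got, Plt, Direct };

struct IfuncState {
  bool HasIpltEntry = false;  // A PLT entry in the Iplt has been created.
  bool GotInIgot = false;     // GOT accesses are redirected to the Igot.
  bool CanonicalPlt = false;  // Symbol value was redirected to the PLT entry.
  bool NeedsGotEntry = false; // A regular .got entry pointing at the PLT.
};

// Heavily simplified mirror of the decision structure in scanReloc(): every
// reference creates the Iplt entry once; GOT references use the Igot until a
// direct reference forces a canonical PLT, at which point GOT uses must be
// backed by a regular GOT entry instead.
static void scanRef(IfuncState &S, Ref R) {
  S.HasIpltEntry = true;
  if (R == Ref::Got) {
    if (S.CanonicalPlt)
      S.NeedsGotEntry = true;
    else
      S.GotInIgot = true;
  } else if (R == Ref::Direct && !S.CanonicalPlt) {
    S.CanonicalPlt = true;
    if (S.GotInIgot) {
      S.GotInIgot = false;
      S.NeedsGotEntry = true;
    }
  }
}

int main() {
  IfuncState S;
  for (Ref R : std::vector<Ref>{Ref::Got, Ref::Direct, Ref::Got})
    scanRef(S, R);
  std::printf("canonical=%d gotInIgot=%d gotEntry=%d\n", S.CanonicalPlt,
              S.GotInIgot, S.NeedsGotEntry);
}
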
 
 template <class ELFT, class RelTy>
 static void scanRelocs(InputSectionBase &Sec, ArrayRef<RelTy> Rels) {
   OffsetGetter GetOffset(Sec);
 
   // Not all relocations end up in Sec.Relocations, but a lot do.
   Sec.Relocations.reserve(Rels.size());
 
   for (auto I = Rels.begin(), End = Rels.end(); I != End;)
     scanReloc<ELFT>(Sec, GetOffset, I, End);
 
   // Sort relocations by offset to binary search for R_RISCV_PCREL_HI20
   if (Config->EMachine == EM_RISCV)
     std::stable_sort(Sec.Relocations.begin(), Sec.Relocations.end(),
                      RelocationOffsetComparator{});
 }
 
 template <class ELFT> void elf::scanRelocations(InputSectionBase &S) {
   if (S.AreRelocsRela)
     scanRelocs<ELFT>(S, S.relas<ELFT>());
   else
     scanRelocs<ELFT>(S, S.rels<ELFT>());
+}
+
+// Figure out which representation to use for any absolute relocs to
+// non-preemptible ifuncs that we visited during scanRelocs().
+void elf::addIRelativeRelocs() {
+  for (IRelativeReloc &R : IRelativeRelocs) {
+    if (R.Sym->Type == STT_GNU_IFUNC)
+      In.RelaIplt->addReloc(
+          {Target->IRelativeRel, R.Sec, R.Offset, true, R.Sym, 0});
+    else if (Config->Pic)
+      addRelativeReloc(R.Sec, R.Offset, R.Sym, 0, R_ABS, R.Type);
+    else
+      R.Sec->Relocations.push_back({R_ABS, R.Type, R.Offset, 0, R.Sym});
+  }
+  IRelativeRelocs.clear();
 }
 
 static bool mergeCmp(const InputSection *A, const InputSection *B) {
   // std::merge requires a strict weak ordering.
   if (A->OutSecOff < B->OutSecOff)
     return true;
 
   if (A->OutSecOff == B->OutSecOff) {
     auto *TA = dyn_cast<ThunkSection>(A);
     auto *TB = dyn_cast<ThunkSection>(B);
 
     // Check if Thunk is immediately before any specific Target
     // InputSection for example Mips LA25 Thunks.
     if (TA && TA->getTargetInputSection() == B)
       return true;
 
     // Place Thunk Sections without specific targets before
     // non-Thunk Sections.
     if (TA && !TB && !TA->getTargetInputSection())
       return true;
   }
 
   return false;
 }
 
 // Call Fn on every executable InputSection accessed via the linker script
 // InputSectionDescription::Sections.
 static void forEachInputSectionDescription(
     ArrayRef<OutputSection *> OutputSections,
     llvm::function_ref<void(OutputSection *, InputSectionDescription *)> Fn) {
   for (OutputSection *OS : OutputSections) {
     if (!(OS->Flags & SHF_ALLOC) || !(OS->Flags & SHF_EXECINSTR))
       continue;
     for (BaseCommand *BC : OS->SectionCommands)
       if (auto *ISD = dyn_cast<InputSectionDescription>(BC))
         Fn(OS, ISD);
   }
 }
 
 // Thunk Implementation
 //
 // Thunks (sometimes called stubs, veneers or branch islands) are small pieces
 // of code that the linker inserts in between a caller and a callee. The thunks
 // are added at link time rather than compile time as the decision on whether
 // a thunk is needed, such as the caller and callee being out of range, can only
 // be made at link time.
 //
 // It is straightforward to tell given the current state of the program when a
 // thunk is needed for a particular call. The more difficult part is that
 // the thunk needs to be placed in the program such that the caller can reach
 // the thunk and the thunk can reach the callee; furthermore, adding thunks to
 // the program alters addresses, which can mean more thunks etc.
 //
 // In lld we have a synthetic ThunkSection that can hold many Thunks.
 // The decision to have a ThunkSection act as a container means that we can
 // more easily handle the most common case of a single block of contiguous
 // Thunks by inserting just a single ThunkSection.
 //
 // The implementation of Thunks in lld is split across these areas
 // Relocations.cpp : Framework for creating and placing thunks
 // Thunks.cpp : The code generated for each supported thunk
 // Target.cpp : Target specific hooks that the framework uses to decide when
 //              a thunk is used
 // SyntheticSections.cpp : Implementation of ThunkSection
 // Writer.cpp : Iteratively call framework until no more Thunks added
 //
 // Thunk placement requirements:
 // Mips LA25 thunks. These must be placed immediately before the callee section
 // We can assume that the caller is in range of the Thunk. These are modelled
 // by Thunks that return the section they must precede with
 // getTargetInputSection().
 //
 // ARM interworking and range extension thunks. These thunks must be placed
 // within range of the caller. All implemented ARM thunks can always reach the
 // callee as they use an indirect jump via a register that has no range
 // restrictions.
 //
 // Thunk placement algorithm:
 // For Mips LA25 ThunkSections, the placement is explicit: it has to be before
 // getTargetInputSection().
 //
 // For thunks that must be placed within range of the caller there are many
 // possible choices given that the maximum range from the caller is usually
 // much larger than the average InputSection size. Desirable properties include:
 // - Maximize reuse of thunks by multiple callers
 // - Minimize number of ThunkSections to simplify insertion
 // - Handle impact of already added Thunks on addresses
 // - Simple to understand and implement
 //
 // In lld for the first pass, we pre-create one or more ThunkSections per
 // InputSectionDescription at Target specific intervals. A ThunkSection is
 // placed so that the estimated end of the ThunkSection is within range of the
 // start of the InputSectionDescription or the previous ThunkSection. For
 // example:
 // InputSectionDescription
 // Section 0
 // ...
 // Section N
 // ThunkSection 0
 // Section N + 1
 // ...
 // Section N + K
 // Thunk Section 1
 //
 // The intention is that we can add a Thunk to a ThunkSection that is well
 // spaced enough to service a number of callers without having to do a lot
 // of work. An important principle is that it is not an error if a Thunk cannot
 // be placed in a pre-created ThunkSection; when this happens we create a new
 // ThunkSection placed next to the caller. This allows us to handle the vast
 // majority of thunks simply, but also handle rare cases where the branch range
 // is smaller than the target specific spacing.
 //
 // The algorithm is expected to create all the thunks that are needed in a
 // single pass, with a small number of programs needing a second pass due to
 // the insertion of thunks in the first pass increasing the offset between
 // callers and callees that were only just in range.
 //
 // A consequence of allowing new ThunkSections to be created outside of the
 // pre-created ThunkSections is that in rare cases calls to Thunks that were in
 // range in pass K, are out of range in some pass > K due to the insertion of
 // more Thunks in between the caller and callee. When this happens we retarget
 // the relocation back to the original target and create another Thunk.
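
A rough standalone sketch of the first-pass spacing heuristic described above
(hypothetical placeThunkSections helper; the real createInitialThunkSections
further down also bounds the last ThunkSection near the end of the
InputSectionDescription):

#include <cstdint>
#include <cstdio>
#include <vector>

// Given the end offset of each input section (ascending) and a target-specific
// spacing, return the offsets at which thunk sections would be pre-created.
static std::vector<uint32_t>
placeThunkSections(const std::vector<uint32_t> &SectionLimits,
                   uint32_t Spacing) {
  std::vector<uint32_t> Offsets;
  if (SectionLimits.empty())
    return Offsets;
  uint32_t PrevLimit = 0;
  uint32_t UpperBound = Spacing;
  for (uint32_t Limit : SectionLimits) {
    if (Limit > UpperBound) {
      Offsets.push_back(PrevLimit);     // Thunk section before this section.
      UpperBound = PrevLimit + Spacing; // Next window is measured from it.
    }
    PrevLimit = Limit;
  }
  Offsets.push_back(SectionLimits.back()); // Final thunk section at the end.
  return Offsets;
}

int main() {
  // Four sections ending at 4, 9, 15 and 22 MiB, with 8 MiB spacing.
  for (uint32_t Off :
       placeThunkSections({4 << 20, 9 << 20, 15 << 20, 22 << 20}, 8 << 20))
    std::printf("thunk section at offset 0x%x\n", (unsigned)Off);
}
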
 
 // Remove ThunkSections that are empty; this should only be the initial set
 // precreated on pass 0.
 
 // Insert the Thunks for OutputSection OS into their designated place
 // in the Sections vector, and recalculate the InputSection output section
 // offsets.
 // This may invalidate any output section offsets stored outside of InputSection
 void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> OutputSections) {
   forEachInputSectionDescription(
       OutputSections, [&](OutputSection *OS, InputSectionDescription *ISD) {
         if (ISD->ThunkSections.empty())
           return;
 
         // Remove any zero sized precreated Thunks.
         llvm::erase_if(ISD->ThunkSections,
                        [](const std::pair<ThunkSection *, uint32_t> &TS) {
                          return TS.first->getSize() == 0;
                        });
 
         // ISD->ThunkSections contains all created ThunkSections, including
         // those inserted in previous passes. Extract the Thunks created this
         // pass and order them in ascending OutSecOff.
         std::vector<ThunkSection *> NewThunks;
         for (const std::pair<ThunkSection *, uint32_t> TS : ISD->ThunkSections)
           if (TS.second == Pass)
             NewThunks.push_back(TS.first);
         std::stable_sort(NewThunks.begin(), NewThunks.end(),
                          [](const ThunkSection *A, const ThunkSection *B) {
                            return A->OutSecOff < B->OutSecOff;
                          });
 
         // Merge sorted vectors of Thunks and InputSections by OutSecOff
         std::vector<InputSection *> Tmp;
         Tmp.reserve(ISD->Sections.size() + NewThunks.size());
 
         std::merge(ISD->Sections.begin(), ISD->Sections.end(),
                    NewThunks.begin(), NewThunks.end(), std::back_inserter(Tmp),
                    mergeCmp);
 
         ISD->Sections = std::move(Tmp);
       });
 }
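
The std::merge call above relies on both inputs already being sorted by
OutSecOff and on std::merge being stable, taking equal elements from the first
range first; mergeCmp only has to break that tie when a thunk must precede its
target section. A small self-contained illustration of the pattern with a
hypothetical Sec type:

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <vector>

// Simplified stand-in for InputSection/ThunkSection, ordered by OutSecOff.
struct Sec {
  unsigned OutSecOff;
  const char *Name;
};

int main() {
  std::vector<Sec> Sections = {{0, ".text.a"}, {16, ".text.b"}, {48, ".text.c"}};
  std::vector<Sec> NewThunks = {{32, "thunk0"}, {48, "thunk1"}};

  // Both inputs are sorted, so std::merge produces a sorted result. With
  // equal offsets, elements from the first range come out first; the real
  // mergeCmp overrides that so an LA25-style thunk precedes its target.
  std::vector<Sec> Tmp;
  Tmp.reserve(Sections.size() + NewThunks.size());
  std::merge(Sections.begin(), Sections.end(), NewThunks.begin(),
             NewThunks.end(), std::back_inserter(Tmp),
             [](const Sec &A, const Sec &B) { return A.OutSecOff < B.OutSecOff; });
  for (const Sec &S : Tmp)
    std::printf("%u %s\n", S.OutSecOff, S.Name);
}
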
 
 // Find or create a ThunkSection within the InputSectionDescription (ISD) that
 // is in range of Src. An ISD maps to a range of InputSections described by a
 // linker script section pattern such as { .text .text.* }.
 ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *OS, InputSection *IS,
                                            InputSectionDescription *ISD,
                                            uint32_t Type, uint64_t Src) {
   for (std::pair<ThunkSection *, uint32_t> TP : ISD->ThunkSections) {
     ThunkSection *TS = TP.first;
     uint64_t TSBase = OS->Addr + TS->OutSecOff;
     uint64_t TSLimit = TSBase + TS->getSize();
     if (Target->inBranchRange(Type, Src, (Src > TSLimit) ? TSBase : TSLimit))
       return TS;
   }
 
   // No suitable ThunkSection exists. This can happen when there is a branch
   // with lower range than the ThunkSection spacing or when there are too
   // many Thunks. Create a new ThunkSection as close to the InputSection as
   // possible. Error if InputSection is so large we cannot place ThunkSection
   // anywhere in Range.
   uint64_t ThunkSecOff = IS->OutSecOff;
   if (!Target->inBranchRange(Type, Src, OS->Addr + ThunkSecOff)) {
     ThunkSecOff = IS->OutSecOff + IS->getSize();
     if (!Target->inBranchRange(Type, Src, OS->Addr + ThunkSecOff))
       fatal("InputSection too large for range extension thunk " +
             IS->getObjMsg(Src - (OS->Addr + IS->OutSecOff)));
   }
   return addThunkSection(OS, ISD, ThunkSecOff);
 }
 
 // Add a Thunk that needs to be placed in a ThunkSection that immediately
 // precedes its Target.
 ThunkSection *ThunkCreator::getISThunkSec(InputSection *IS) {
   ThunkSection *TS = ThunkedSections.lookup(IS);
   if (TS)
     return TS;
 
   // Find the InputSectionDescription within the target OutputSection (TOS)
   // that contains the InputSection (IS) we need to precede.
   OutputSection *TOS = IS->getParent();
   for (BaseCommand *BC : TOS->SectionCommands) {
     auto *ISD = dyn_cast<InputSectionDescription>(BC);
     if (!ISD || ISD->Sections.empty())
       continue;
 
     InputSection *First = ISD->Sections.front();
     InputSection *Last = ISD->Sections.back();
 
     if (IS->OutSecOff < First->OutSecOff || Last->OutSecOff < IS->OutSecOff)
       continue;
 
     TS = addThunkSection(TOS, ISD, IS->OutSecOff);
     ThunkedSections[IS] = TS;
     return TS;
   }
 
   return nullptr;
 }
 
 // Create one or more ThunkSections per OS that can be used to place Thunks.
 // We attempt to place the ThunkSections using the following desirable
 // properties:
 // - Within range of the maximum number of callers
 // - Minimise the number of ThunkSections
 //
 // We follow a simple but conservative heuristic to place ThunkSections at
 // offsets that are multiples of a Target specific branch range.
 // For an InputSectionDescription that is smaller than the range, a single
 // ThunkSection at the end of the range will do.
 //
 // For an InputSectionDescription that is more than twice the size of the range,
 // we place the last ThunkSection at range bytes from the end of the
 // InputSectionDescription in order to increase the likelihood that the
 // distance from a thunk to its target will be sufficiently small to
 // allow for the creation of a short thunk.
 void ThunkCreator::createInitialThunkSections(
     ArrayRef<OutputSection *> OutputSections) {
   uint32_t ThunkSectionSpacing = Target->getThunkSectionSpacing();
 
   forEachInputSectionDescription(
       OutputSections, [&](OutputSection *OS, InputSectionDescription *ISD) {
         if (ISD->Sections.empty())
           return;
 
         uint32_t ISDBegin = ISD->Sections.front()->OutSecOff;
         uint32_t ISDEnd =
             ISD->Sections.back()->OutSecOff + ISD->Sections.back()->getSize();
         uint32_t LastThunkLowerBound = -1;
         if (ISDEnd - ISDBegin > ThunkSectionSpacing * 2)
           LastThunkLowerBound = ISDEnd - ThunkSectionSpacing;
 
         uint32_t ISLimit;
         uint32_t PrevISLimit = ISDBegin;
         uint32_t ThunkUpperBound = ISDBegin + ThunkSectionSpacing;
 
         for (const InputSection *IS : ISD->Sections) {
           ISLimit = IS->OutSecOff + IS->getSize();
           if (ISLimit > ThunkUpperBound) {
             addThunkSection(OS, ISD, PrevISLimit);
             ThunkUpperBound = PrevISLimit + ThunkSectionSpacing;
           }
           if (ISLimit > LastThunkLowerBound)
             break;
           PrevISLimit = ISLimit;
         }
         addThunkSection(OS, ISD, ISLimit);
       });
 }
 
 ThunkSection *ThunkCreator::addThunkSection(OutputSection *OS,
                                             InputSectionDescription *ISD,
                                             uint64_t Off) {
   auto *TS = make<ThunkSection>(OS, Off);
   ISD->ThunkSections.push_back({TS, Pass});
   return TS;
 }
 
 std::pair<Thunk *, bool> ThunkCreator::getThunk(Symbol &Sym, RelType Type,
                                                 uint64_t Src) {
   std::vector<Thunk *> *ThunkVec = nullptr;
 
   // We use a (section, offset) pair to find the thunk position if possible so
   // that we create only one thunk for aliased symbols or ICFed sections.
   if (auto *D = dyn_cast<Defined>(&Sym))
     if (!D->isInPlt() && D->Section)
       ThunkVec = &ThunkedSymbolsBySection[{D->Section->Repl, D->Value}];
   if (!ThunkVec)
     ThunkVec = &ThunkedSymbols[&Sym];
 
   // Check existing Thunks for Sym to see if they can be reused
   for (Thunk *T : *ThunkVec)
     if (T->isCompatibleWith(Type) &&
         Target->inBranchRange(Type, Src, T->getThunkTargetSym()->getVA()))
       return std::make_pair(T, false);
 
   // No existing compatible Thunk in range, create a new one
   Thunk *T = addThunk(Type, Sym);
   ThunkVec->push_back(T);
   return std::make_pair(T, true);
 }
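
Keying reusable thunks on (section, value) rather than on the Symbol pointer is
what lets aliased symbols, or symbols whose sections were folded by ICF, share
a single thunk. A small standalone sketch of that lookup (hypothetical types;
the real code additionally checks relocation compatibility and branch range
before reusing a thunk):

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

struct Section {};
struct Thunk { uint64_t TargetVA; };

// Thunks indexed by the (section, offset) their target symbol resolves to,
// so aliases of the same address land in the same bucket.
static std::map<std::pair<const Section *, uint64_t>, std::vector<Thunk *>>
    ThunksBySection;

static Thunk *getOrCreateThunk(const Section *Sec, uint64_t Value,
                               uint64_t TargetVA) {
  std::vector<Thunk *> &Vec = ThunksBySection[{Sec, Value}];
  if (!Vec.empty())
    return Vec.front(); // Reuse an existing thunk.
  Vec.push_back(new Thunk{TargetVA}); // Leaked; fine for this toy example.
  return Vec.back();
}

int main() {
  Section Text;
  // Two aliases of the same (section, offset) share one thunk.
  Thunk *A = getOrCreateThunk(&Text, 0x40, 0x1040);
  Thunk *B = getOrCreateThunk(&Text, 0x40, 0x1040);
  std::printf("shared=%d\n", A == B);
}
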
 
 // Return true if the relocation target is an in range Thunk.
 // Return false if the relocation is not to a Thunk. If the relocation target
 // was originally to a Thunk, but is no longer in range we revert the
 // relocation back to its original non-Thunk target.
 bool ThunkCreator::normalizeExistingThunk(Relocation &Rel, uint64_t Src) {
   if (Thunk *T = Thunks.lookup(Rel.Sym)) {
     if (Target->inBranchRange(Rel.Type, Src, Rel.Sym->getVA()))
       return true;
     Rel.Sym = &T->Destination;
     if (Rel.Sym->isInPlt())
       Rel.Expr = toPlt(Rel.Expr);
   }
   return false;
 }
 
 // Process all relocations from the InputSections that have been assigned
 // to InputSectionDescriptions and redirect through Thunks if needed. The
 // function should be called iteratively until it returns false.
 //
 // PreConditions:
 // All InputSections that may need a Thunk are reachable from
 // OutputSectionCommands.
 //
 // All OutputSections have an address and all InputSections have an offset
 // within the OutputSection.
 //
 // The offsets between caller (relocation place) and callee
 // (relocation target) will not be modified outside of createThunks().
 //
 // PostConditions:
 // If return value is true then ThunkSections have been inserted into
 // OutputSections. All relocations that needed a Thunk based on the information
 // available to createThunks() on entry have been redirected to a Thunk. Note
 // that adding Thunks changes offsets between caller and callee so more Thunks
 // may be required.
 //
 // If return value is false then no more Thunks are needed, and createThunks has
 // made no changes. If the target requires range extension thunks, currently
 // ARM, then any future change in offset between caller and callee risks a
 // relocation out of range error.
 bool ThunkCreator::createThunks(ArrayRef<OutputSection *> OutputSections) {
   bool AddressesChanged = false;
 
   if (Pass == 0 && Target->getThunkSectionSpacing())
     createInitialThunkSections(OutputSections);
 
   // With Thunk Size much smaller than branch range we expect to
   // converge quickly; if we get to 10 something has gone wrong.
   if (Pass == 10)
     fatal("thunk creation not converged");
 
   // Create all the Thunks and insert them into synthetic ThunkSections. The
   // ThunkSections are later inserted back into InputSectionDescriptions.
   // We separate the creation of ThunkSections from the insertion of the
   // ThunkSections as ThunkSections are not always inserted into the same
   // InputSectionDescription as the caller.
   forEachInputSectionDescription(
       OutputSections, [&](OutputSection *OS, InputSectionDescription *ISD) {
         for (InputSection *IS : ISD->Sections)
           for (Relocation &Rel : IS->Relocations) {
             uint64_t Src = IS->getVA(Rel.Offset);
 
             // If we are a relocation to an existing Thunk, check if it is
             // still in range. If not then Rel will be altered to point to its
             // original target so another Thunk can be generated.
             if (Pass > 0 && normalizeExistingThunk(Rel, Src))
               continue;
 
             if (!Target->needsThunk(Rel.Expr, Rel.Type, IS->File, Src,
                                     *Rel.Sym))
               continue;
 
             Thunk *T;
             bool IsNew;
             std::tie(T, IsNew) = getThunk(*Rel.Sym, Rel.Type, Src);
 
             if (IsNew) {
               // Find or create a ThunkSection for the new Thunk
               ThunkSection *TS;
               if (auto *TIS = T->getTargetInputSection())
                 TS = getISThunkSec(TIS);
               else
                 TS = getISDThunkSec(OS, IS, ISD, Rel.Type, Src);
               TS->addThunk(T);
               Thunks[T->getThunkTargetSym()] = T;
             }
 
             // Redirect relocation to Thunk, we never go via the PLT to a Thunk
             Rel.Sym = T->getThunkTargetSym();
             Rel.Expr = fromPlt(Rel.Expr);
           }
 
         for (auto &P : ISD->ThunkSections)
           AddressesChanged |= P.first->assignOffsets();
       });
 
   for (auto &P : ThunkedSections)
     AddressesChanged |= P.second->assignOffsets();
 
   // Merge all created synthetic ThunkSections back into OutputSection
   mergeThunks(OutputSections);
   ++Pass;
   return AddressesChanged;
 }
 
 template void elf::scanRelocations<ELF32LE>(InputSectionBase &);
 template void elf::scanRelocations<ELF32BE>(InputSectionBase &);
 template void elf::scanRelocations<ELF64LE>(InputSectionBase &);
 template void elf::scanRelocations<ELF64BE>(InputSectionBase &);
Index: head/contrib/llvm/tools/lld/ELF/Relocations.h
===================================================================
--- head/contrib/llvm/tools/lld/ELF/Relocations.h	(revision 350466)
+++ head/contrib/llvm/tools/lld/ELF/Relocations.h	(revision 350467)
@@ -1,219 +1,213 @@
 //===- Relocations.h -------------------------------------------*- C++ -*-===//
 //
 //                             The LLVM Linker
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 
 #ifndef LLD_ELF_RELOCATIONS_H
 #define LLD_ELF_RELOCATIONS_H
 
 #include "lld/Common/LLVM.h"
 #include "llvm/ADT/DenseMap.h"
 #include <map>
 #include <vector>
 
 namespace lld {
 namespace elf {
 class Symbol;
 class InputSection;
 class InputSectionBase;
 class OutputSection;
 class SectionBase;
 
 // Represents a relocation type, such as R_X86_64_PC32 or R_ARM_THM_CALL.
 typedef uint32_t RelType;
 
 // List of target-independent relocation types. Relocations read
 // from files are converted to these types so that the main code
 // doesn't have to know about architecture-specific details.
 enum RelExpr {
   R_INVALID,
   R_ABS,
   R_ADDEND,
   R_AARCH64_GOT_PAGE_PC,
-  // The expression is used for IFUNC support. Describes PC-relative
-  // address of the memory page of GOT entry. This entry is used for
-  // a redirection to IPLT.
-  R_AARCH64_GOT_PAGE_PC_PLT,
   R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC,
   R_AARCH64_PAGE_PC,
-  R_AARCH64_PLT_PAGE_PC,
   R_AARCH64_TLSDESC_PAGE,
   R_ARM_SBREL,
   R_GOT,
-  // The expression is used for IFUNC support. Evaluates to GOT entry,
-  // containing redirection to the IPLT.
-  R_GOT_PLT,
   R_GOTONLY_PC,
   R_GOTONLY_PC_FROM_END,
   R_GOTREL,
   R_GOTREL_FROM_END,
   R_GOT_FROM_END,
   R_GOT_OFF,
   R_GOT_PC,
   R_HEXAGON_GOT,
   R_HINT,
   R_MIPS_GOTREL,
   R_MIPS_GOT_GP,
   R_MIPS_GOT_GP_PC,
   R_MIPS_GOT_LOCAL_PAGE,
   R_MIPS_GOT_OFF,
   R_MIPS_GOT_OFF32,
   R_MIPS_TLSGD,
   R_MIPS_TLSLD,
   R_NEG_TLS,
   R_NONE,
   R_PC,
   R_PLT,
   R_PLT_PC,
   R_PPC_CALL,
   R_PPC_CALL_PLT,
   R_PPC_TOC,
   R_RELAX_GOT_PC,
   R_RELAX_GOT_PC_NOPIC,
   R_RELAX_TLS_GD_TO_IE,
   R_RELAX_TLS_GD_TO_IE_ABS,
   R_RELAX_TLS_GD_TO_IE_END,
   R_RELAX_TLS_GD_TO_IE_GOT_OFF,
   R_RELAX_TLS_GD_TO_LE,
   R_RELAX_TLS_GD_TO_LE_NEG,
   R_RELAX_TLS_IE_TO_LE,
   R_RELAX_TLS_LD_TO_LE,
   R_RELAX_TLS_LD_TO_LE_ABS,
   R_RISCV_PC_INDIRECT,
   R_SIZE,
   R_TLS,
   R_TLSDESC,
   R_TLSDESC_CALL,
   R_TLSGD_GOT,
   R_TLSGD_GOT_FROM_END,
   R_TLSGD_PC,
   R_TLSIE_HINT,
   R_TLSLD_GOT,
   R_TLSLD_GOT_FROM_END,
   R_TLSLD_GOT_OFF,
   R_TLSLD_HINT,
   R_TLSLD_PC,
 };
 
 // Build a bitmask with one bit set for each RelExpr.
 //
 // Constexpr function arguments can't be used in static asserts, so we
 // use template arguments to build the mask.
 // But function template partial specializations don't exist (needed
 // for base case of the recursion), so we need a dummy struct.
 template <RelExpr... Exprs> struct RelExprMaskBuilder {
   static inline uint64_t build() { return 0; }
 };
 
 // Specialization for recursive case.
 template <RelExpr Head, RelExpr... Tail>
 struct RelExprMaskBuilder<Head, Tail...> {
   static inline uint64_t build() {
     static_assert(0 <= Head && Head < 64,
                   "RelExpr is too large for 64-bit mask!");
     return (uint64_t(1) << Head) | RelExprMaskBuilder<Tail...>::build();
   }
 };
 
 // Return true if `Expr` is one of `Exprs`.
 // There are fewer than 64 RelExpr's, so we can represent any set of
 // RelExpr's as a constant bit mask and test for membership with a
 // couple cheap bitwise operations.
 template <RelExpr... Exprs> bool isRelExprOneOf(RelExpr Expr) {
   assert(0 <= Expr && (int)Expr < 64 &&
          "RelExpr is too large for 64-bit mask!");
   return (uint64_t(1) << Expr) & RelExprMaskBuilder<Exprs...>::build();
 }
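
The same mask-building trick works for any small enum; a self-contained sketch
(toy Color enum standing in for RelExpr) showing how the recursion folds into a
single 64-bit constant and how the membership test becomes one shift and one
AND:

#include <cassert>
#include <cstdint>

enum Color { Red, Green, Blue, Yellow }; // Toy stand-in for RelExpr.

// Base case: an empty pack contributes an empty mask.
template <Color... Cs> struct MaskBuilder {
  static uint64_t build() { return 0; }
};

// Recursive case: set the bit for Head, then fold in the rest of the pack.
template <Color Head, Color... Tail> struct MaskBuilder<Head, Tail...> {
  static uint64_t build() {
    static_assert(0 <= Head && Head < 64, "enumerator too large for mask");
    return (uint64_t(1) << Head) | MaskBuilder<Tail...>::build();
  }
};

// Membership test: one shift, one AND against the precomputed mask.
template <Color... Cs> bool isOneOf(Color C) {
  return (uint64_t(1) << C) & MaskBuilder<Cs...>::build();
}

int main() {
  assert((isOneOf<Red, Blue>(Blue)));
  assert((!isOneOf<Red, Blue>(Green)));
}
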
 
 // Architecture-neutral representation of relocation.
 struct Relocation {
   RelExpr Expr;
   RelType Type;
   uint64_t Offset;
   int64_t Addend;
   Symbol *Sym;
 };
 
 struct RelocationOffsetComparator {
   bool operator()(const Relocation &Lhs, const Relocation &Rhs) {
     return Lhs.Offset < Rhs.Offset;
   }
 
   // For std::lower_bound, std::upper_bound, std::equal_range.
   bool operator()(const Relocation &Rel, uint64_t Val) {
     return Rel.Offset < Val;
   }
 
   bool operator()(uint64_t Val, const Relocation &Rel) {
     return Val < Rel.Offset;
   }
 };
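
The mixed operator() overloads are what allow this comparator to be passed
straight to std::lower_bound or std::equal_range with a plain offset instead of
a full Relocation; a minimal usage sketch with hypothetical data:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Reloc {
  uint64_t Offset;
};

// Comparator usable both for sorting Relocs and for searching by raw offset,
// in the same shape as RelocationOffsetComparator.
struct ByOffset {
  bool operator()(const Reloc &L, const Reloc &R) const {
    return L.Offset < R.Offset;
  }
  bool operator()(const Reloc &R, uint64_t V) const { return R.Offset < V; }
  bool operator()(uint64_t V, const Reloc &R) const { return V < R.Offset; }
};

int main() {
  std::vector<Reloc> Rels = {{8}, {4}, {4}, {16}};
  std::stable_sort(Rels.begin(), Rels.end(), ByOffset{});

  // Find all relocations at offset 4 by binary search.
  auto Range = std::equal_range(Rels.begin(), Rels.end(), uint64_t(4), ByOffset{});
  std::printf("%ld relocation(s) at offset 4\n",
              (long)(Range.second - Range.first));
}
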
 
 template <class ELFT> void scanRelocations(InputSectionBase &);
+
+void addIRelativeRelocs();
 
 class ThunkSection;
 class Thunk;
 struct InputSectionDescription;
 
 class ThunkCreator {
 public:
   // Return true if Thunks have been added to OutputSections
   bool createThunks(ArrayRef<OutputSection *> OutputSections);
 
   // The number of completed passes of createThunks; this permits us
   // to do one-time initialization on Pass 0 and put a limit on the
   // number of times it can be called to prevent infinite loops.
   uint32_t Pass = 0;
 
 private:
   void mergeThunks(ArrayRef<OutputSection *> OutputSections);
 
   ThunkSection *getISDThunkSec(OutputSection *OS, InputSection *IS,
                                InputSectionDescription *ISD, uint32_t Type,
                                uint64_t Src);
 
   ThunkSection *getISThunkSec(InputSection *IS);
 
   void createInitialThunkSections(ArrayRef<OutputSection *> OutputSections);
 
   std::pair<Thunk *, bool> getThunk(Symbol &Sym, RelType Type, uint64_t Src);
 
   ThunkSection *addThunkSection(OutputSection *OS, InputSectionDescription *,
                                 uint64_t Off);
 
   bool normalizeExistingThunk(Relocation &Rel, uint64_t Src);
 
   // Record all the available Thunks for a Symbol
   llvm::DenseMap<std::pair<SectionBase *, uint64_t>, std::vector<Thunk *>>
       ThunkedSymbolsBySection;
   llvm::DenseMap<Symbol *, std::vector<Thunk *>> ThunkedSymbols;
 
   // Find a Thunk from the Thunk's symbol definition; we can use this to find
   // the Thunk from a relocation to the Thunk's symbol definition.
   llvm::DenseMap<Symbol *, Thunk *> Thunks;
 
   // Track InputSections that have an inline ThunkSection placed in front.
   // An inline ThunkSection may have control fall through to the section
   // below, so we need to make sure that there is only one of them.
   // The Mips LA25 Thunk is an example of an inline ThunkSection.
   llvm::DenseMap<InputSection *, ThunkSection *> ThunkedSections;
 };
 
 // Return an int64_t to make sure we get the sign extension out of the way as
 // early as possible.
 template <class ELFT>
 static inline int64_t getAddend(const typename ELFT::Rel &Rel) {
   return 0;
 }
 template <class ELFT>
 static inline int64_t getAddend(const typename ELFT::Rela &Rel) {
   return Rel.r_addend;
 }
 } // namespace elf
 } // namespace lld
 
 #endif
Index: head/contrib/llvm/tools/lld/ELF/Symbols.cpp
===================================================================
--- head/contrib/llvm/tools/lld/ELF/Symbols.cpp	(revision 350466)
+++ head/contrib/llvm/tools/lld/ELF/Symbols.cpp	(revision 350467)
@@ -1,305 +1,309 @@
 //===- Symbols.cpp --------------------------------------------------------===//
 //
 //                             The LLVM Linker
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 
 #include "Symbols.h"
 #include "InputFiles.h"
 #include "InputSection.h"
 #include "OutputSections.h"
 #include "SyntheticSections.h"
 #include "Target.h"
 #include "Writer.h"
 #include "lld/Common/ErrorHandler.h"
 #include "lld/Common/Strings.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/Path.h"
 #include <cstring>
 
 using namespace llvm;
 using namespace llvm::object;
 using namespace llvm::ELF;
 
 using namespace lld;
 using namespace lld::elf;
 
 Defined *ElfSym::Bss;
 Defined *ElfSym::Etext1;
 Defined *ElfSym::Etext2;
 Defined *ElfSym::Edata1;
 Defined *ElfSym::Edata2;
 Defined *ElfSym::End1;
 Defined *ElfSym::End2;
 Defined *ElfSym::GlobalOffsetTable;
 Defined *ElfSym::MipsGp;
 Defined *ElfSym::MipsGpDisp;
 Defined *ElfSym::MipsLocalGp;
 Defined *ElfSym::RelaIpltStart;
 Defined *ElfSym::RelaIpltEnd;
 
 static uint64_t getSymVA(const Symbol &Sym, int64_t &Addend) {
   switch (Sym.kind()) {
   case Symbol::DefinedKind: {
     auto &D = cast<Defined>(Sym);
     SectionBase *IS = D.Section;
 
     // According to the ELF spec, references to a local symbol from outside
     // the group are not allowed. Unfortunately .eh_frame breaks that rule
     // and must be treated specially. For now we just replace the symbol with
     // 0.
     if (IS == &InputSection::Discarded)
       return 0;
 
     // This is an absolute symbol.
     if (!IS)
       return D.Value;
 
     IS = IS->Repl;
 
     uint64_t Offset = D.Value;
 
     // An object in an SHF_MERGE section might be referenced via a
     // section symbol (as a hack for reducing the number of local
     // symbols).
     // Depending on the addend, the reference via a section symbol
     // refers to a different object in the merge section.
     // Since the objects in the merge section are not necessarily
     // contiguous in the output, the addend can thus affect the final
     // VA in a non-linear way.
     // To make this work, we incorporate the addend into the section
     // offset (and zero out the addend for later processing) so that
     // we find the right object in the section.
     if (D.isSection()) {
       Offset += Addend;
       Addend = 0;
     }
 
     // In the typical case, this is actually very simple and boils
     // down to adding together 3 numbers:
     // 1. The address of the output section.
     // 2. The offset of the input section within the output section.
     // 3. The offset within the input section (this addition happens
     //    inside InputSection::getOffset).
     //
     // If you understand the data structures involved with this next
     // line (and how they get built), then you have a pretty good
     // understanding of the linker.
     uint64_t VA = IS->getVA(Offset);
 
     if (D.isTls() && !Config->Relocatable) {
       // Use the address of the TLS segment's first section rather than the
       // segment's address, because segment addresses aren't initialized until
       // after sections are finalized. (e.g. Measuring the size of .rela.dyn
       // for Android relocation packing requires knowing TLS symbol addresses
       // during section finalization.)
       if (!Out::TlsPhdr || !Out::TlsPhdr->FirstSec)
         fatal(toString(D.File) +
               " has an STT_TLS symbol but doesn't have an SHF_TLS section");
       return VA - Out::TlsPhdr->FirstSec->Addr;
     }
     return VA;
   }
   case Symbol::SharedKind:
   case Symbol::UndefinedKind:
     return 0;
   case Symbol::LazyArchiveKind:
   case Symbol::LazyObjectKind:
     assert(Sym.IsUsedInRegularObj && "lazy symbol reached writer");
     return 0;
   case Symbol::PlaceholderKind:
     llvm_unreachable("placeholder symbol reached writer");
   }
   llvm_unreachable("invalid symbol kind");
 }
 
 uint64_t Symbol::getVA(int64_t Addend) const {
   uint64_t OutVA = getSymVA(*this, Addend);
   return OutVA + Addend;
 }
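
For the common case of a defined, non-TLS symbol this really is the three
additions described in getSymVA, plus the addend; a tiny worked example with
made-up addresses:

#include <cstdint>
#include <cstdio>

int main() {
  // Invented layout: .text is placed at 0x201000, the symbol's input section
  // sits 0x340 bytes into .text, and the symbol is 0x10 bytes into that
  // input section.
  uint64_t OutSecAddr = 0x201000;
  uint64_t InSecOutSecOff = 0x340;
  uint64_t OffsetInInSec = 0x10;
  uint64_t Addend = 0;
  std::printf("VA = 0x%llx\n",
              (unsigned long long)(OutSecAddr + InSecOutSecOff +
                                   OffsetInInSec + Addend));
}
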
 
-uint64_t Symbol::getGotVA() const { return In.Got->getVA() + getGotOffset(); }
+uint64_t Symbol::getGotVA() const {
+  if (GotInIgot)
+    return In.IgotPlt->getVA() + getGotPltOffset();
+  return In.Got->getVA() + getGotOffset();
+}
 
 uint64_t Symbol::getGotOffset() const {
   return GotIndex * Target->GotEntrySize;
 }
 
 uint64_t Symbol::getGotPltVA() const {
-  if (this->IsInIgot)
+  if (IsInIplt)
     return In.IgotPlt->getVA() + getGotPltOffset();
   return In.GotPlt->getVA() + getGotPltOffset();
 }
 
 uint64_t Symbol::getGotPltOffset() const {
-  if (IsInIgot)
+  if (IsInIplt)
     return PltIndex * Target->GotPltEntrySize;
   return (PltIndex + Target->GotPltHeaderEntriesNum) * Target->GotPltEntrySize;
 }
 
 uint64_t Symbol::getPPC64LongBranchOffset() const {
   assert(PPC64BranchltIndex != 0xffff);
   return PPC64BranchltIndex * Target->GotPltEntrySize;
 }
 
 uint64_t Symbol::getPltVA() const {
   PltSection *Plt = IsInIplt ? In.Iplt : In.Plt;
   return Plt->getVA() + Plt->HeaderSize + PltIndex * Target->PltEntrySize;
 }
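
Plugging typical x86-64-style constants into the formulas above (8-byte
.got.plt entries, three reserved .got.plt header slots, 16-byte PLT header and
PLT entries) makes the arithmetic concrete; the base addresses below are
invented for illustration:

#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative x86-64-style sizes.
  const uint64_t GotPltEntrySize = 8;
  const uint64_t GotPltHeaderEntriesNum = 3;
  const uint64_t PltHeaderSize = 16;
  const uint64_t PltEntrySize = 16;

  uint64_t PltIndex = 2; // Third PLT entry belongs to this symbol.
  uint64_t PltBase = 0x201020, GotPltBase = 0x202000;

  // Regular (non-Iplt) symbol: skip the reserved .got.plt header slots.
  uint64_t GotPltOff = (PltIndex + GotPltHeaderEntriesNum) * GotPltEntrySize;
  uint64_t PltVA = PltBase + PltHeaderSize + PltIndex * PltEntrySize;

  std::printf(".got.plt slot at 0x%llx, PLT entry at 0x%llx\n",
              (unsigned long long)(GotPltBase + GotPltOff),
              (unsigned long long)PltVA);
}
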
 
 uint64_t Symbol::getPPC64LongBranchTableVA() const {
   assert(PPC64BranchltIndex != 0xffff);
   return In.PPC64LongBranchTarget->getVA() +
          PPC64BranchltIndex * Target->GotPltEntrySize;
 }
 
 uint64_t Symbol::getSize() const {
   if (const auto *DR = dyn_cast<Defined>(this))
     return DR->Size;
   return cast<SharedSymbol>(this)->Size;
 }
 
 OutputSection *Symbol::getOutputSection() const {
   if (auto *S = dyn_cast<Defined>(this)) {
     if (auto *Sec = S->Section)
       return Sec->Repl->getOutputSection();
     return nullptr;
   }
   return nullptr;
 }
 
 // If a symbol name contains '@', the characters after it are
 // a symbol version name. This function parses that.
 void Symbol::parseSymbolVersion() {
   StringRef S = getName();
   size_t Pos = S.find('@');
   if (Pos == 0 || Pos == StringRef::npos)
     return;
   StringRef Verstr = S.substr(Pos + 1);
   if (Verstr.empty())
     return;
 
   // Truncate the symbol name so that it doesn't include the version string.
   NameSize = Pos;
 
   // If this is not in this DSO, it is not a definition.
   if (!isDefined())
     return;
 
   // '@@' in a symbol name means the default version.
   // It is usually the most recent one.
   bool IsDefault = (Verstr[0] == '@');
   if (IsDefault)
     Verstr = Verstr.substr(1);
 
   for (VersionDefinition &Ver : Config->VersionDefinitions) {
     if (Ver.Name != Verstr)
       continue;
 
     if (IsDefault)
       VersionId = Ver.Id;
     else
       VersionId = Ver.Id | VERSYM_HIDDEN;
     return;
   }
 
   // It is an error if the specified version is not defined.
   // Usually version script is not provided when linking executable,
   // but we may still want to override a versioned symbol from DSO,
   // so we do not report error in this case. We also do not error
   // if the symbol has a local version as it won't be in the dynamic
   // symbol table.
   if (Config->Shared && VersionId != VER_NDX_LOCAL)
     error(toString(File) + ": symbol " + S + " has undefined version " +
           Verstr);
 }
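
A stripped-down reimplementation of the '@'/'@@' convention outside of lld's
symbol machinery (hypothetical parseVersionedName helper; it only splits the
string and does not consult version definitions or hidden-version bits):

#include <cstdio>
#include <string>

struct ParsedName {
  std::string Name;    // Symbol name with the version suffix removed.
  std::string Version; // Empty if the symbol is unversioned.
  bool IsDefault;      // True for "name@@version".
};

static ParsedName parseVersionedName(const std::string &S) {
  std::string::size_type Pos = S.find('@');
  if (Pos == 0 || Pos == std::string::npos)
    return {S, "", false};
  std::string Ver = S.substr(Pos + 1);
  bool IsDefault = !Ver.empty() && Ver[0] == '@';
  if (IsDefault)
    Ver = Ver.substr(1);
  return {S.substr(0, Pos), Ver, IsDefault};
}

int main() {
  ParsedName P = parseVersionedName("memcpy@@SOME_VERSION");
  std::printf("name=%s version=%s default=%d\n", P.Name.c_str(),
              P.Version.c_str(), P.IsDefault);
}
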
 
 InputFile *LazyArchive::fetch() { return cast<ArchiveFile>(File)->fetch(Sym); }
 
 MemoryBufferRef LazyArchive::getMemberBuffer() {
   Archive::Child C = CHECK(
       Sym.getMember(), "could not get the member for symbol " + Sym.getName());
 
   return CHECK(C.getMemoryBufferRef(),
                "could not get the buffer for the member defining symbol " +
                    Sym.getName());
 }
 
 uint8_t Symbol::computeBinding() const {
   if (Config->Relocatable)
     return Binding;
   if (Visibility != STV_DEFAULT && Visibility != STV_PROTECTED)
     return STB_LOCAL;
   if (VersionId == VER_NDX_LOCAL && isDefined() && !IsPreemptible)
     return STB_LOCAL;
   if (!Config->GnuUnique && Binding == STB_GNU_UNIQUE)
     return STB_GLOBAL;
   return Binding;
 }
 
 bool Symbol::includeInDynsym() const {
   if (!Config->HasDynSymTab)
     return false;
   if (computeBinding() == STB_LOCAL)
     return false;
   if (!isDefined())
     return true;
   return ExportDynamic;
 }
 
 // Print out a log message for --trace-symbol.
 void elf::printTraceSymbol(Symbol *Sym) {
   std::string S;
   if (Sym->isUndefined())
     S = ": reference to ";
   else if (Sym->isLazy())
     S = ": lazy definition of ";
   else if (Sym->isShared())
     S = ": shared definition of ";
   else if (dyn_cast_or_null<BssSection>(cast<Defined>(Sym)->Section))
     S = ": common definition of ";
   else
     S = ": definition of ";
 
   message(toString(Sym->File) + S + Sym->getName());
 }
 
 void elf::maybeWarnUnorderableSymbol(const Symbol *Sym) {
   if (!Config->WarnSymbolOrdering)
     return;
 
   // If UnresolvedPolicy::Ignore is used, no "undefined symbol" error/warning
   // is emitted. It makes sense to not warn on undefined symbols.
   //
   // Note, ld.bfd --symbol-ordering-file= does not warn on undefined symbols,
   // but we don't have to be compatible here.
   if (Sym->isUndefined() &&
       Config->UnresolvedSymbols == UnresolvedPolicy::Ignore)
     return;
 
   const InputFile *File = Sym->File;
   auto *D = dyn_cast<Defined>(Sym);
 
   auto Warn = [&](StringRef S) { warn(toString(File) + S + Sym->getName()); };
 
   if (Sym->isUndefined())
     Warn(": unable to order undefined symbol: ");
   else if (Sym->isShared())
     Warn(": unable to order shared symbol: ");
   else if (D && !D->Section)
     Warn(": unable to order absolute symbol: ");
   else if (D && isa<OutputSection>(D->Section))
     Warn(": unable to order synthetic symbol: ");
   else if (D && !D->Section->Repl->Live)
     Warn(": unable to order discarded symbol: ");
 }
 
 // Returns a symbol for an error message.
 std::string lld::toString(const Symbol &B) {
   if (Config->Demangle)
     if (Optional<std::string> S = demangleItanium(B.getName()))
       return *S;
   return B.getName();
 }
Index: head/contrib/llvm/tools/lld/ELF/Symbols.h
===================================================================
--- head/contrib/llvm/tools/lld/ELF/Symbols.h	(revision 350466)
+++ head/contrib/llvm/tools/lld/ELF/Symbols.h	(revision 350467)
@@ -1,419 +1,421 @@
 //===- Symbols.h ------------------------------------------------*- C++ -*-===//
 //
 //                             The LLVM Linker
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
 // This file defines various types of Symbols.
 //
 //===----------------------------------------------------------------------===//
 
 #ifndef LLD_ELF_SYMBOLS_H
 #define LLD_ELF_SYMBOLS_H
 
 #include "InputSection.h"
 #include "lld/Common/LLVM.h"
 #include "lld/Common/Strings.h"
 #include "llvm/Object/Archive.h"
 #include "llvm/Object/ELF.h"
 
 namespace lld {
 namespace elf {
 class Symbol;
 class InputFile;
 } // namespace elf
 
 std::string toString(const elf::Symbol &);
 std::string toString(const elf::InputFile *);
 
 namespace elf {
 
 class ArchiveFile;
 class BitcodeFile;
 class BssSection;
 class InputFile;
 class LazyObjFile;
 template <class ELFT> class ObjFile;
 class OutputSection;
 template <class ELFT> class SharedFile;
 
 // This is a StringRef-like container that doesn't run strlen().
 //
 // ELF string tables contain a lot of null-terminated strings. Most of them
 // are not necessary for the linker because they are names of local symbols,
 // and the linker doesn't use local symbol names for name resolution. So, we
 // use this class to represent strings read from string tables.
 struct StringRefZ {
   StringRefZ(const char *S) : Data(S), Size(-1) {}
   StringRefZ(StringRef S) : Data(S.data()), Size(S.size()) {}
 
   const char *Data;
   const uint32_t Size;
 };
 
 // The base class for real symbol classes.
 class Symbol {
 public:
   enum Kind {
     PlaceholderKind,
     DefinedKind,
     SharedKind,
     UndefinedKind,
     LazyArchiveKind,
     LazyObjectKind,
   };
 
   Kind kind() const { return static_cast<Kind>(SymbolKind); }
 
   // The file from which this symbol was created.
   InputFile *File;
 
 protected:
   const char *NameData;
   mutable uint32_t NameSize;
 
 public:
   uint32_t DynsymIndex = 0;
   uint32_t GotIndex = -1;
   uint32_t PltIndex = -1;
 
   uint32_t GlobalDynIndex = -1;
 
   // This field is an index to the symbol's version definition.
   uint32_t VerdefIndex = -1;
 
   // Version definition index.
   uint16_t VersionId;
 
   // An index into the .branch_lt section on PPC64.
   uint16_t PPC64BranchltIndex = -1;
 
   // Symbol binding. This is not overwritten by replaceSymbol to track
   // changes during resolution. In particular:
   //  - An undefined weak is still weak when it resolves to a shared library.
   //  - An undefined weak will not fetch archive members, but we have to
   //    remember it is weak.
   uint8_t Binding;
 
   // The following fields have the same meaning as the ELF symbol attributes.
   uint8_t Type;    // symbol type
   uint8_t StOther; // st_other field value
 
   uint8_t SymbolKind;
 
   // Symbol visibility. This is the computed minimum visibility of all
   // observed non-DSO symbols.
   unsigned Visibility : 2;
 
   // True if the symbol was used for linking and thus needs to be added to the
   // output file's symbol table. This is true for all symbols except for
   // unreferenced DSO symbols and bitcode symbols that are unreferenced except
   // by other bitcode objects.
   unsigned IsUsedInRegularObj : 1;
 
   // If this flag is true and the symbol has protected or default visibility, it
   // will appear in .dynsym. This flag is set by interposable DSO symbols in
   // executables, by most symbols in DSOs and executables built with
   // --export-dynamic, and by dynamic lists.
   unsigned ExportDynamic : 1;
 
   // False if LTO shouldn't inline whatever this symbol points to. If a symbol
   // is overwritten after LTO, LTO shouldn't inline the symbol because it
   // doesn't know the final contents of the symbol.
   unsigned CanInline : 1;
 
   // True if this symbol is specified by --trace-symbol option.
   unsigned Traced : 1;
 
   bool includeInDynsym() const;
   uint8_t computeBinding() const;
   bool isWeak() const { return Binding == llvm::ELF::STB_WEAK; }
 
   bool isUndefined() const { return SymbolKind == UndefinedKind; }
   bool isDefined() const { return SymbolKind == DefinedKind; }
   bool isShared() const { return SymbolKind == SharedKind; }
   bool isLocal() const { return Binding == llvm::ELF::STB_LOCAL; }
 
   bool isLazy() const {
     return SymbolKind == LazyArchiveKind || SymbolKind == LazyObjectKind;
   }
 
   // True if this is an undefined weak symbol. This only works once
   // all input files have been added.
   bool isUndefWeak() const {
     // See comment on lazy symbols for details.
     return isWeak() && (isUndefined() || isLazy());
   }
 
   StringRef getName() const {
     if (NameSize == (uint32_t)-1)
       NameSize = strlen(NameData);
     return {NameData, NameSize};
   }
 
   void setName(StringRef S) {
     NameData = S.data();
     NameSize = S.size();
   }
 
   void parseSymbolVersion();
 
   bool isInGot() const { return GotIndex != -1U; }
   bool isInPlt() const { return PltIndex != -1U; }
   bool isInPPC64Branchlt() const { return PPC64BranchltIndex != 0xffff; }
 
   uint64_t getVA(int64_t Addend = 0) const;
 
   uint64_t getGotOffset() const;
   uint64_t getGotVA() const;
   uint64_t getGotPltOffset() const;
   uint64_t getGotPltVA() const;
   uint64_t getPltVA() const;
   uint64_t getPPC64LongBranchTableVA() const;
   uint64_t getPPC64LongBranchOffset() const;
   uint64_t getSize() const;
   OutputSection *getOutputSection() const;
 
 protected:
   Symbol(Kind K, InputFile *File, StringRefZ Name, uint8_t Binding,
          uint8_t StOther, uint8_t Type)
       : File(File), NameData(Name.Data), NameSize(Name.Size), Binding(Binding),
         Type(Type), StOther(StOther), SymbolKind(K), NeedsPltAddr(false),
-        IsInIplt(false), IsInIgot(false), IsPreemptible(false),
+        IsInIplt(false), GotInIgot(false), IsPreemptible(false),
         Used(!Config->GcSections), NeedsTocRestore(false),
         ScriptDefined(false) {}
 
 public:
   // True if the symbol should point to its PLT entry.
   // For SharedSymbol only.
   unsigned NeedsPltAddr : 1;
 
-  // True if this symbol is in the Iplt sub-section of the Plt.
+  // True if this symbol is in the Iplt sub-section of the Plt and the Igot
+  // sub-section of the .got.plt or .got.
   unsigned IsInIplt : 1;
 
-  // True if this symbol is in the Igot sub-section of the .got.plt or .got.
-  unsigned IsInIgot : 1;
+  // True if this symbol needs a GOT entry and its GOT entry is actually in
+  // Igot. This will be true only for certain non-preemptible ifuncs.
+  unsigned GotInIgot : 1;
 
   // True if this symbol is preemptible at load time.
   unsigned IsPreemptible : 1;
 
   // True if an undefined or shared symbol is used from a live section.
   unsigned Used : 1;
 
   // True if a call to this symbol needs to be followed by a restore of the
   // PPC64 toc pointer.
   unsigned NeedsTocRestore : 1;
 
   // True if this symbol is defined by a linker script.
   unsigned ScriptDefined : 1;
 
   bool isSection() const { return Type == llvm::ELF::STT_SECTION; }
   bool isTls() const { return Type == llvm::ELF::STT_TLS; }
   bool isFunc() const { return Type == llvm::ELF::STT_FUNC; }
   bool isGnuIFunc() const { return Type == llvm::ELF::STT_GNU_IFUNC; }
   bool isObject() const { return Type == llvm::ELF::STT_OBJECT; }
   bool isFile() const { return Type == llvm::ELF::STT_FILE; }
 };
 
 // Represents a symbol that is defined in the current output file.
 class Defined : public Symbol {
 public:
   Defined(InputFile *File, StringRefZ Name, uint8_t Binding, uint8_t StOther,
           uint8_t Type, uint64_t Value, uint64_t Size, SectionBase *Section)
       : Symbol(DefinedKind, File, Name, Binding, StOther, Type), Value(Value),
         Size(Size), Section(Section) {}
 
   static bool classof(const Symbol *S) { return S->isDefined(); }
 
   uint64_t Value;
   uint64_t Size;
   SectionBase *Section;
 };
 
 class Undefined : public Symbol {
 public:
   Undefined(InputFile *File, StringRefZ Name, uint8_t Binding, uint8_t StOther,
             uint8_t Type)
       : Symbol(UndefinedKind, File, Name, Binding, StOther, Type) {}
 
   static bool classof(const Symbol *S) { return S->kind() == UndefinedKind; }
 };
 
 class SharedSymbol : public Symbol {
 public:
   static bool classof(const Symbol *S) { return S->kind() == SharedKind; }
 
   SharedSymbol(InputFile &File, StringRef Name, uint8_t Binding,
                uint8_t StOther, uint8_t Type, uint64_t Value, uint64_t Size,
                uint32_t Alignment, uint32_t VerdefIndex)
       : Symbol(SharedKind, &File, Name, Binding, StOther, Type),
         Alignment(Alignment), Value(Value), Size(Size) {
     this->VerdefIndex = VerdefIndex;
     // GNU ifunc is a mechanism to allow user-supplied functions to
     // resolve PLT slot values at load-time. This is contrary to the
     // regular symbol resolution scheme in which symbols are resolved just
     // by name. Using this hook, you can program how symbols are resolved
     // for your program. For example, you can make "memcpy" resolve to an
     // SSE-enabled version of memcpy only when a machine running the
     // program supports the SSE instruction set.
     //
     // Naturally, such symbols should always be called through their PLT
     // slots. What GNU ifunc symbols point to are resolver functions, and
     // calling them directly doesn't make sense (unless you are writing a
     // loader).
     //
     // For DSO symbols, we always call them through PLT slots anyway.
     // So there's no difference between GNU ifunc and regular function
     // symbols if they are in DSOs. So we can handle GNU_IFUNC as FUNC.
     if (this->Type == llvm::ELF::STT_GNU_IFUNC)
       this->Type = llvm::ELF::STT_FUNC;
   }
 
   template <class ELFT> SharedFile<ELFT> &getFile() const {
     return *cast<SharedFile<ELFT>>(File);
   }
 
   uint32_t Alignment;
 
   uint64_t Value; // st_value
   uint64_t Size;  // st_size
 };
 
 // LazyArchive and LazyObject represent a symbol that is not yet in the link,
 // but we know where to find it if needed. If the resolver finds both Undefined
 // and Lazy for the same name, it will ask the Lazy to load a file.
 //
 // A special complication is the handling of weak undefined symbols. They should
 // not load a file, but we have to remember we have seen both the weak undefined
 // and the lazy. We represent that with a lazy symbol with a weak binding. This
 // means that code looking for undefined symbols normally also has to take lazy
 // symbols into consideration.
 
 // This class represents a symbol defined in an archive file. It is
 // created from an archive file header, and it knows how to load an
 // object file from an archive to replace itself with a defined
 // symbol.
 class LazyArchive : public Symbol {
 public:
   LazyArchive(InputFile &File, uint8_t Type,
               const llvm::object::Archive::Symbol S)
       : Symbol(LazyArchiveKind, &File, S.getName(), llvm::ELF::STB_GLOBAL,
                llvm::ELF::STV_DEFAULT, Type),
         Sym(S) {}
 
   static bool classof(const Symbol *S) { return S->kind() == LazyArchiveKind; }
 
   InputFile *fetch();
   MemoryBufferRef getMemberBuffer();
 
 private:
   const llvm::object::Archive::Symbol Sym;
 };
 
 // LazyObject symbols represent symbols in object files between
 // --start-lib and --end-lib options.
 class LazyObject : public Symbol {
 public:
   LazyObject(InputFile &File, uint8_t Type, StringRef Name)
       : Symbol(LazyObjectKind, &File, Name, llvm::ELF::STB_GLOBAL,
                llvm::ELF::STV_DEFAULT, Type) {}
 
   static bool classof(const Symbol *S) { return S->kind() == LazyObjectKind; }
 };
 
 // Some linker-generated symbols need to be created as
 // Defined symbols.
 struct ElfSym {
   // __bss_start
   static Defined *Bss;
 
   // etext and _etext
   static Defined *Etext1;
   static Defined *Etext2;
 
   // edata and _edata
   static Defined *Edata1;
   static Defined *Edata2;
 
   // end and _end
   static Defined *End1;
   static Defined *End2;
 
   // The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention to
   // be at some offset from the base of the .got section, usually 0 or
   // the end of the .got.
   static Defined *GlobalOffsetTable;
 
   // _gp, _gp_disp and __gnu_local_gp symbols. Only for MIPS.
   static Defined *MipsGp;
   static Defined *MipsGpDisp;
   static Defined *MipsLocalGp;
 
   // __rel{,a}_iplt_{start,end} symbols.
   static Defined *RelaIpltStart;
   static Defined *RelaIpltEnd;
 };
 
 // A buffer class that is large enough to hold any Symbol-derived
 // object. We allocate memory using this class and instantiate a symbol
 // using placement new.
 union SymbolUnion {
   alignas(Defined) char A[sizeof(Defined)];
   alignas(Undefined) char C[sizeof(Undefined)];
   alignas(SharedSymbol) char D[sizeof(SharedSymbol)];
   alignas(LazyArchive) char E[sizeof(LazyArchive)];
   alignas(LazyObject) char F[sizeof(LazyObject)];
 };
 
 void printTraceSymbol(Symbol *Sym);
 
 template <typename T, typename... ArgT>
 void replaceSymbol(Symbol *S, ArgT &&... Arg) {
   using llvm::ELF::STT_TLS;
 
   static_assert(std::is_trivially_destructible<T>(),
                 "Symbol types must be trivially destructible");
   static_assert(sizeof(T) <= sizeof(SymbolUnion), "SymbolUnion too small");
   static_assert(alignof(T) <= alignof(SymbolUnion),
                 "SymbolUnion not aligned enough");
   assert(static_cast<Symbol *>(static_cast<T *>(nullptr)) == nullptr &&
          "Not a Symbol");
 
   Symbol Sym = *S;
 
   new (S) T(std::forward<ArgT>(Arg)...);
 
   S->VersionId = Sym.VersionId;
   S->Visibility = Sym.Visibility;
   S->IsUsedInRegularObj = Sym.IsUsedInRegularObj;
   S->ExportDynamic = Sym.ExportDynamic;
   S->CanInline = Sym.CanInline;
   S->Traced = Sym.Traced;
   S->ScriptDefined = Sym.ScriptDefined;
 
   // Symbols representing thread-local variables must be referenced by
   // TLS-aware relocations, and non-TLS symbols must be referenced by
   // non-TLS relocations, so there's a clear distinction between TLS
   // and non-TLS symbols. It is an error if the same symbol is defined
   // as a TLS symbol in one file and as a non-TLS symbol in another file.
   bool TlsMismatch = (Sym.Type == STT_TLS && S->Type != STT_TLS) ||
                      (Sym.Type != STT_TLS && S->Type == STT_TLS);
 
   if (Sym.SymbolKind != Symbol::PlaceholderKind && TlsMismatch && !Sym.isLazy())
     error("TLS attribute mismatch: " + toString(Sym) + "\n>>> defined in " +
           toString(Sym.File) + "\n>>> defined in " + toString(S->File));
 
   // Print out a log message if --trace-symbol was specified.
   // This is for debugging.
   if (S->Traced)
     printTraceSymbol(S);
 }
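
 // A minimal usage sketch (hypothetical call site, with Sec/Value/Size standing
 // in for values a caller would already have), assuming the Defined constructor
 // used by addSyntheticLocal() in SyntheticSections.cpp:
 //
 //   replaceSymbol<Defined>(S, Sec.File, S->getName(), llvm::ELF::STB_GLOBAL,
 //                          llvm::ELF::STV_DEFAULT, llvm::ELF::STT_FUNC,
 //                          Value, Size, &Sec);
 //
 // The placement new overwrites the SymbolUnion storage in place, and the
 // flags saved above are copied back onto the new symbol.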
 
 void maybeWarnUnorderableSymbol(const Symbol *Sym);
 } // namespace elf
 } // namespace lld
 
 #endif
Index: head/contrib/llvm/tools/lld/ELF/SyntheticSections.cpp
===================================================================
--- head/contrib/llvm/tools/lld/ELF/SyntheticSections.cpp	(revision 350466)
+++ head/contrib/llvm/tools/lld/ELF/SyntheticSections.cpp	(revision 350467)
@@ -1,3233 +1,3230 @@
 //===- SyntheticSections.cpp ----------------------------------------------===//
 //
 //                             The LLVM Linker
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
 // This file contains linker-synthesized sections. Currently,
 // synthetic sections are created as either output sections or input sections,
 // but we are rewriting the code so that all synthetic sections are created as
 // input sections.
 //
 //===----------------------------------------------------------------------===//
 
 #include "SyntheticSections.h"
 #include "Bits.h"
 #include "Config.h"
 #include "InputFiles.h"
 #include "LinkerScript.h"
 #include "OutputSections.h"
 #include "SymbolTable.h"
 #include "Symbols.h"
 #include "Target.h"
 #include "Writer.h"
 #include "lld/Common/ErrorHandler.h"
 #include "lld/Common/Memory.h"
 #include "lld/Common/Strings.h"
 #include "lld/Common/Threads.h"
 #include "lld/Common/Version.h"
 #include "llvm/ADT/SetOperations.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/BinaryFormat/Dwarf.h"
 #include "llvm/DebugInfo/DWARF/DWARFDebugPubTable.h"
 #include "llvm/Object/ELFObjectFile.h"
 #include "llvm/Support/Compression.h"
 #include "llvm/Support/Endian.h"
 #include "llvm/Support/LEB128.h"
 #include "llvm/Support/MD5.h"
 #include "llvm/Support/RandomNumberGenerator.h"
 #include "llvm/Support/SHA1.h"
 #include "llvm/Support/xxhash.h"
 #include <cstdlib>
 #include <thread>
 
 using namespace llvm;
 using namespace llvm::dwarf;
 using namespace llvm::ELF;
 using namespace llvm::object;
 using namespace llvm::support;
 
 using namespace lld;
 using namespace lld::elf;
 
 using llvm::support::endian::read32le;
 using llvm::support::endian::write32le;
 using llvm::support::endian::write64le;
 
 constexpr size_t MergeNoTailSection::NumShards;
 
 // Returns an LLD version string.
 static ArrayRef<uint8_t> getVersion() {
   // Check LLD_VERSION first for ease of testing.
   // You can get consistent output by using the environment variable.
   // This is only for testing.
   StringRef S = getenv("LLD_VERSION");
   if (S.empty())
     S = Saver.save(Twine("Linker: ") + getLLDVersion());
 
   // +1 to include the terminating '\0'.
   return {(const uint8_t *)S.data(), S.size() + 1};
 }
 
 // Creates a .comment section containing LLD version info.
 // With this feature, you can identify LLD-generated binaries easily
 // by "readelf --string-dump .comment <file>".
 // The returned object is a mergeable string section.
 MergeInputSection *elf::createCommentSection() {
   return make<MergeInputSection>(SHF_MERGE | SHF_STRINGS, SHT_PROGBITS, 1,
                                  getVersion(), ".comment");
 }
 
 // .MIPS.abiflags section.
 template <class ELFT>
 MipsAbiFlagsSection<ELFT>::MipsAbiFlagsSection(Elf_Mips_ABIFlags Flags)
     : SyntheticSection(SHF_ALLOC, SHT_MIPS_ABIFLAGS, 8, ".MIPS.abiflags"),
       Flags(Flags) {
   this->Entsize = sizeof(Elf_Mips_ABIFlags);
 }
 
 template <class ELFT> void MipsAbiFlagsSection<ELFT>::writeTo(uint8_t *Buf) {
   memcpy(Buf, &Flags, sizeof(Flags));
 }
 
 template <class ELFT>
 MipsAbiFlagsSection<ELFT> *MipsAbiFlagsSection<ELFT>::create() {
   Elf_Mips_ABIFlags Flags = {};
   bool Create = false;
 
   for (InputSectionBase *Sec : InputSections) {
     if (Sec->Type != SHT_MIPS_ABIFLAGS)
       continue;
     Sec->Live = false;
     Create = true;
 
     std::string Filename = toString(Sec->File);
     const size_t Size = Sec->data().size();
     // Older versions of BFD (such as the default FreeBSD linker) concatenate
     // .MIPS.abiflags sections instead of merging them. To allow for this case
     // (or potential zero padding) we ignore everything after the first
     // Elf_Mips_ABIFlags struct.
     if (Size < sizeof(Elf_Mips_ABIFlags)) {
       error(Filename + ": invalid size of .MIPS.abiflags section: got " +
             Twine(Size) + " instead of " + Twine(sizeof(Elf_Mips_ABIFlags)));
       return nullptr;
     }
     auto *S = reinterpret_cast<const Elf_Mips_ABIFlags *>(Sec->data().data());
     if (S->version != 0) {
       error(Filename + ": unexpected .MIPS.abiflags version " +
             Twine(S->version));
       return nullptr;
     }
 
     // LLD checks ISA compatibility in calcMipsEFlags(). Here we just
     // select the highest number of ISA/Rev/Ext.
     Flags.isa_level = std::max(Flags.isa_level, S->isa_level);
     Flags.isa_rev = std::max(Flags.isa_rev, S->isa_rev);
     Flags.isa_ext = std::max(Flags.isa_ext, S->isa_ext);
     Flags.gpr_size = std::max(Flags.gpr_size, S->gpr_size);
     Flags.cpr1_size = std::max(Flags.cpr1_size, S->cpr1_size);
     Flags.cpr2_size = std::max(Flags.cpr2_size, S->cpr2_size);
     Flags.ases |= S->ases;
     Flags.flags1 |= S->flags1;
     Flags.flags2 |= S->flags2;
     Flags.fp_abi = elf::getMipsFpAbiFlag(Flags.fp_abi, S->fp_abi, Filename);
   }
 
   if (Create)
     return make<MipsAbiFlagsSection<ELFT>>(Flags);
   return nullptr;
 }
 
 // .MIPS.options section.
 template <class ELFT>
 MipsOptionsSection<ELFT>::MipsOptionsSection(Elf_Mips_RegInfo Reginfo)
     : SyntheticSection(SHF_ALLOC, SHT_MIPS_OPTIONS, 8, ".MIPS.options"),
       Reginfo(Reginfo) {
   this->Entsize = sizeof(Elf_Mips_Options) + sizeof(Elf_Mips_RegInfo);
 }
 
 template <class ELFT> void MipsOptionsSection<ELFT>::writeTo(uint8_t *Buf) {
   auto *Options = reinterpret_cast<Elf_Mips_Options *>(Buf);
   Options->kind = ODK_REGINFO;
   Options->size = getSize();
 
   if (!Config->Relocatable)
     Reginfo.ri_gp_value = In.MipsGot->getGp();
   memcpy(Buf + sizeof(Elf_Mips_Options), &Reginfo, sizeof(Reginfo));
 }
 
 template <class ELFT>
 MipsOptionsSection<ELFT> *MipsOptionsSection<ELFT>::create() {
   // N64 ABI only.
   if (!ELFT::Is64Bits)
     return nullptr;
 
   std::vector<InputSectionBase *> Sections;
   for (InputSectionBase *Sec : InputSections)
     if (Sec->Type == SHT_MIPS_OPTIONS)
       Sections.push_back(Sec);
 
   if (Sections.empty())
     return nullptr;
 
   Elf_Mips_RegInfo Reginfo = {};
   for (InputSectionBase *Sec : Sections) {
     Sec->Live = false;
 
     std::string Filename = toString(Sec->File);
     ArrayRef<uint8_t> D = Sec->data();
 
     while (!D.empty()) {
       if (D.size() < sizeof(Elf_Mips_Options)) {
         error(Filename + ": invalid size of .MIPS.options section");
         break;
       }
 
       auto *Opt = reinterpret_cast<const Elf_Mips_Options *>(D.data());
       if (Opt->kind == ODK_REGINFO) {
         Reginfo.ri_gprmask |= Opt->getRegInfo().ri_gprmask;
         Sec->getFile<ELFT>()->MipsGp0 = Opt->getRegInfo().ri_gp_value;
         break;
       }
 
       if (!Opt->size)
         fatal(Filename + ": zero option descriptor size");
       D = D.slice(Opt->size);
     }
   }
 
   return make<MipsOptionsSection<ELFT>>(Reginfo);
 }
 
 // MIPS .reginfo section.
 template <class ELFT>
 MipsReginfoSection<ELFT>::MipsReginfoSection(Elf_Mips_RegInfo Reginfo)
     : SyntheticSection(SHF_ALLOC, SHT_MIPS_REGINFO, 4, ".reginfo"),
       Reginfo(Reginfo) {
   this->Entsize = sizeof(Elf_Mips_RegInfo);
 }
 
 template <class ELFT> void MipsReginfoSection<ELFT>::writeTo(uint8_t *Buf) {
   if (!Config->Relocatable)
     Reginfo.ri_gp_value = In.MipsGot->getGp();
   memcpy(Buf, &Reginfo, sizeof(Reginfo));
 }
 
 template <class ELFT>
 MipsReginfoSection<ELFT> *MipsReginfoSection<ELFT>::create() {
   // Section should be alive for O32 and N32 ABIs only.
   if (ELFT::Is64Bits)
     return nullptr;
 
   std::vector<InputSectionBase *> Sections;
   for (InputSectionBase *Sec : InputSections)
     if (Sec->Type == SHT_MIPS_REGINFO)
       Sections.push_back(Sec);
 
   if (Sections.empty())
     return nullptr;
 
   Elf_Mips_RegInfo Reginfo = {};
   for (InputSectionBase *Sec : Sections) {
     Sec->Live = false;
 
     if (Sec->data().size() != sizeof(Elf_Mips_RegInfo)) {
       error(toString(Sec->File) + ": invalid size of .reginfo section");
       return nullptr;
     }
 
     auto *R = reinterpret_cast<const Elf_Mips_RegInfo *>(Sec->data().data());
     Reginfo.ri_gprmask |= R->ri_gprmask;
     Sec->getFile<ELFT>()->MipsGp0 = R->ri_gp_value;
   }
 
   return make<MipsReginfoSection<ELFT>>(Reginfo);
 }
 
 InputSection *elf::createInterpSection() {
   // StringSaver guarantees that the returned string ends with '\0'.
   StringRef S = Saver.save(Config->DynamicLinker);
   ArrayRef<uint8_t> Contents = {(const uint8_t *)S.data(), S.size() + 1};
 
   auto *Sec = make<InputSection>(nullptr, SHF_ALLOC, SHT_PROGBITS, 1, Contents,
                                  ".interp");
   Sec->Live = true;
   return Sec;
 }
 
 Defined *elf::addSyntheticLocal(StringRef Name, uint8_t Type, uint64_t Value,
                                 uint64_t Size, InputSectionBase &Section) {
   auto *S = make<Defined>(Section.File, Name, STB_LOCAL, STV_DEFAULT, Type,
                           Value, Size, &Section);
   if (In.SymTab)
     In.SymTab->addSymbol(S);
   return S;
 }
 
 static size_t getHashSize() {
   switch (Config->BuildId) {
   case BuildIdKind::Fast:
     return 8;
   case BuildIdKind::Md5:
   case BuildIdKind::Uuid:
     return 16;
   case BuildIdKind::Sha1:
     return 20;
   case BuildIdKind::Hexstring:
     return Config->BuildIdVector.size();
   default:
     llvm_unreachable("unknown BuildIdKind");
   }
 }
 
 BuildIdSection::BuildIdSection()
     : SyntheticSection(SHF_ALLOC, SHT_NOTE, 4, ".note.gnu.build-id"),
       HashSize(getHashSize()) {}
 
 void BuildIdSection::writeTo(uint8_t *Buf) {
   write32(Buf, 4);                      // Name size
   write32(Buf + 4, HashSize);           // Content size
   write32(Buf + 8, NT_GNU_BUILD_ID);    // Type
   memcpy(Buf + 12, "GNU", 4);           // Name string
   HashBuf = Buf + 16;
 }
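
 // For reference, the note produced by writeTo() has this layout (descsz and
 // the hash bytes depend on the --build-id flavor chosen):
 //
 //   +0   namesz = 4
 //   +4   descsz = HashSize
 //   +8   type   = NT_GNU_BUILD_ID
 //   +12  "GNU\0"
 //   +16  hash bytes (filled in later by writeBuildId())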
 
 // Splits one uint8_t array into small chunks of at most ChunkSize bytes.
 static std::vector<ArrayRef<uint8_t>> split(ArrayRef<uint8_t> Arr,
                                             size_t ChunkSize) {
   std::vector<ArrayRef<uint8_t>> Ret;
   while (Arr.size() > ChunkSize) {
     Ret.push_back(Arr.take_front(ChunkSize));
     Arr = Arr.drop_front(ChunkSize);
   }
   if (!Arr.empty())
     Ret.push_back(Arr);
   return Ret;
 }
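
 // For example, splitting a 2.5 MB buffer with ChunkSize = 1 MB yields three
 // chunks of 1 MB, 1 MB and 0.5 MB; an input of exactly ChunkSize yields a
 // single chunk.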
 
 // Computes a hash value of Data using a given hash function.
 // In order to utilize multiple cores, we first split data into 1MB
 // chunks, compute a hash for each chunk, and then compute a hash value
 // of the hash values.
 void BuildIdSection::computeHash(
     llvm::ArrayRef<uint8_t> Data,
     std::function<void(uint8_t *Dest, ArrayRef<uint8_t> Arr)> HashFn) {
   std::vector<ArrayRef<uint8_t>> Chunks = split(Data, 1024 * 1024);
   std::vector<uint8_t> Hashes(Chunks.size() * HashSize);
 
   // Compute hash values.
   parallelForEachN(0, Chunks.size(), [&](size_t I) {
     HashFn(Hashes.data() + I * HashSize, Chunks[I]);
   });
 
   // Write to the final output buffer.
   HashFn(HashBuf, Hashes);
 }
 
 BssSection::BssSection(StringRef Name, uint64_t Size, uint32_t Alignment)
     : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_NOBITS, Alignment, Name) {
   this->Bss = true;
   this->Size = Size;
 }
 
 void BuildIdSection::writeBuildId(ArrayRef<uint8_t> Buf) {
   switch (Config->BuildId) {
   case BuildIdKind::Fast:
     computeHash(Buf, [](uint8_t *Dest, ArrayRef<uint8_t> Arr) {
       write64le(Dest, xxHash64(Arr));
     });
     break;
   case BuildIdKind::Md5:
     computeHash(Buf, [](uint8_t *Dest, ArrayRef<uint8_t> Arr) {
       memcpy(Dest, MD5::hash(Arr).data(), 16);
     });
     break;
   case BuildIdKind::Sha1:
     computeHash(Buf, [](uint8_t *Dest, ArrayRef<uint8_t> Arr) {
       memcpy(Dest, SHA1::hash(Arr).data(), 20);
     });
     break;
   case BuildIdKind::Uuid:
     if (auto EC = getRandomBytes(HashBuf, HashSize))
       error("entropy source failure: " + EC.message());
     break;
   case BuildIdKind::Hexstring:
     memcpy(HashBuf, Config->BuildIdVector.data(), Config->BuildIdVector.size());
     break;
   default:
     llvm_unreachable("unknown BuildIdKind");
   }
 }
 
 EhFrameSection::EhFrameSection()
     : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 1, ".eh_frame") {}
 
 // Search for an existing CIE record or create a new one.
 // CIE records from input object files are uniquified by their contents
 // and where their relocations point to.
 template <class ELFT, class RelTy>
 CieRecord *EhFrameSection::addCie(EhSectionPiece &Cie, ArrayRef<RelTy> Rels) {
   Symbol *Personality = nullptr;
   unsigned FirstRelI = Cie.FirstRelocation;
   if (FirstRelI != (unsigned)-1)
     Personality =
         &Cie.Sec->template getFile<ELFT>()->getRelocTargetSym(Rels[FirstRelI]);
 
   // Search for an existing CIE by CIE contents/relocation target pair.
   CieRecord *&Rec = CieMap[{Cie.data(), Personality}];
 
   // If not found, create a new one.
   if (!Rec) {
     Rec = make<CieRecord>();
     Rec->Cie = &Cie;
     CieRecords.push_back(Rec);
   }
   return Rec;
 }
 
 // There is one FDE per function. Returns true if a given FDE
 // points to a live function.
 template <class ELFT, class RelTy>
 bool EhFrameSection::isFdeLive(EhSectionPiece &Fde, ArrayRef<RelTy> Rels) {
   auto *Sec = cast<EhInputSection>(Fde.Sec);
   unsigned FirstRelI = Fde.FirstRelocation;
 
   // An FDE should point to some function because FDEs are used to describe
   // functions. However, that's not always the case due to an issue with
   // ld.gold and -r: ld.gold may discard only functions and leave their
   // corresponding FDEs, which results in bad .eh_frame sections.
   // To deal with that, we ignore such FDEs.
   if (FirstRelI == (unsigned)-1)
     return false;
 
   const RelTy &Rel = Rels[FirstRelI];
   Symbol &B = Sec->template getFile<ELFT>()->getRelocTargetSym(Rel);
 
   // FDEs for garbage-collected or merged-by-ICF sections are dead.
   if (auto *D = dyn_cast<Defined>(&B))
     if (SectionBase *Sec = D->Section)
       return Sec->Live;
   return false;
 }
 
 // .eh_frame is a sequence of CIE and FDE records. In general, there
 // is one CIE record per input object file, which is followed by
 // a list of FDEs. This function searches for an existing CIE or creates
 // a new one and associates FDEs with that CIE.
 template <class ELFT, class RelTy>
 void EhFrameSection::addSectionAux(EhInputSection *Sec, ArrayRef<RelTy> Rels) {
   OffsetToCie.clear();
   for (EhSectionPiece &Piece : Sec->Pieces) {
     // The empty record is the end marker.
     if (Piece.Size == 4)
       return;
 
     size_t Offset = Piece.InputOff;
     uint32_t ID = read32(Piece.data().data() + 4);
     if (ID == 0) {
       OffsetToCie[Offset] = addCie<ELFT>(Piece, Rels);
       continue;
     }
 
     uint32_t CieOffset = Offset + 4 - ID;
     CieRecord *Rec = OffsetToCie[CieOffset];
     if (!Rec)
       fatal(toString(Sec) + ": invalid CIE reference");
 
     if (!isFdeLive<ELFT>(Piece, Rels))
       continue;
     Rec->Fdes.push_back(&Piece);
     NumFdes++;
   }
 }
 
 template <class ELFT> void EhFrameSection::addSection(InputSectionBase *C) {
   auto *Sec = cast<EhInputSection>(C);
   Sec->Parent = this;
 
   Alignment = std::max(Alignment, Sec->Alignment);
   Sections.push_back(Sec);
 
   for (auto *DS : Sec->DependentSections)
     DependentSections.push_back(DS);
 
   if (Sec->Pieces.empty())
     return;
 
   if (Sec->AreRelocsRela)
     addSectionAux<ELFT>(Sec, Sec->template relas<ELFT>());
   else
     addSectionAux<ELFT>(Sec, Sec->template rels<ELFT>());
 }
 
 static void writeCieFde(uint8_t *Buf, ArrayRef<uint8_t> D) {
   memcpy(Buf, D.data(), D.size());
 
   size_t Aligned = alignTo(D.size(), Config->Wordsize);
 
   // Zero-clear trailing padding if it exists.
   memset(Buf + D.size(), 0, Aligned - D.size());
 
   // Fix the size field. -4 since size does not include the size field itself.
   write32(Buf, Aligned - 4);
 }
 
 void EhFrameSection::finalizeContents() {
   assert(!this->Size); // Not finalized.
   size_t Off = 0;
   for (CieRecord *Rec : CieRecords) {
     Rec->Cie->OutputOff = Off;
     Off += alignTo(Rec->Cie->Size, Config->Wordsize);
 
     for (EhSectionPiece *Fde : Rec->Fdes) {
       Fde->OutputOff = Off;
       Off += alignTo(Fde->Size, Config->Wordsize);
     }
   }
 
   // The LSB standard does not allow a .eh_frame section with zero
   // Call Frame Information records. glibc's unwind-dw2-fde.c
   // classify_object_over_fdes expects a CIE record of length 0 as a
   // terminator. Thus we add one unconditionally.
   Off += 4;
 
   this->Size = Off;
 }
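
 // As an illustration with made-up sizes on a 64-bit target (Wordsize == 8):
 // one CIE of size 20 followed by two FDEs of size 28 gets output offsets
 // 0, 24 and 56 respectively, and the final section size is
 // 24 + 32 + 32 + 4 (terminator) == 92 bytes.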
 
 // Returns data for .eh_frame_hdr. .eh_frame_hdr is a binary search table
 // used to look up the FDE that applies to a given address. This function
 // returns a list of such (address, FDE address) pairs.
 std::vector<EhFrameSection::FdeData> EhFrameSection::getFdeData() const {
   uint8_t *Buf = getParent()->Loc + OutSecOff;
   std::vector<FdeData> Ret;
 
   uint64_t VA = In.EhFrameHdr->getVA();
   for (CieRecord *Rec : CieRecords) {
     uint8_t Enc = getFdeEncoding(Rec->Cie);
     for (EhSectionPiece *Fde : Rec->Fdes) {
       uint64_t Pc = getFdePc(Buf, Fde->OutputOff, Enc);
       uint64_t FdeVA = getParent()->Addr + Fde->OutputOff;
       if (!isInt<32>(Pc - VA))
         fatal(toString(Fde->Sec) + ": PC offset is too large: 0x" +
               Twine::utohexstr(Pc - VA));
       Ret.push_back({uint32_t(Pc - VA), uint32_t(FdeVA - VA)});
     }
   }
 
   // Sort the FDE list by PC and uniquify it. Usually there is only
   // one FDE per PC (i.e. per function), but if ICF merges two functions
   // into one, there can be more than one FDE pointing to the same address.
   auto Less = [](const FdeData &A, const FdeData &B) {
     return A.PcRel < B.PcRel;
   };
   std::stable_sort(Ret.begin(), Ret.end(), Less);
   auto Eq = [](const FdeData &A, const FdeData &B) {
     return A.PcRel == B.PcRel;
   };
   Ret.erase(std::unique(Ret.begin(), Ret.end(), Eq), Ret.end());
 
   return Ret;
 }
 
 static uint64_t readFdeAddr(uint8_t *Buf, int Size) {
   switch (Size) {
   case DW_EH_PE_udata2:
     return read16(Buf);
   case DW_EH_PE_sdata2:
     return (int16_t)read16(Buf);
   case DW_EH_PE_udata4:
     return read32(Buf);
   case DW_EH_PE_sdata4:
     return (int32_t)read32(Buf);
   case DW_EH_PE_udata8:
   case DW_EH_PE_sdata8:
     return read64(Buf);
   case DW_EH_PE_absptr:
     return readUint(Buf);
   }
   fatal("unknown FDE size encoding");
 }
 
 // Returns the VA to which a given FDE (on a mmap'ed buffer) is applied.
 // We need it to create the .eh_frame_hdr section.
 uint64_t EhFrameSection::getFdePc(uint8_t *Buf, size_t FdeOff,
                                   uint8_t Enc) const {
   // The starting address to which this FDE applies is
   // stored at FDE + 8 bytes.
   size_t Off = FdeOff + 8;
   uint64_t Addr = readFdeAddr(Buf + Off, Enc & 0xf);
   if ((Enc & 0x70) == DW_EH_PE_absptr)
     return Addr;
   if ((Enc & 0x70) == DW_EH_PE_pcrel)
     return Addr + getParent()->Addr + Off;
   fatal("unknown FDE size relative encoding");
 }
 
 void EhFrameSection::writeTo(uint8_t *Buf) {
   // Write CIE and FDE records.
   for (CieRecord *Rec : CieRecords) {
     size_t CieOffset = Rec->Cie->OutputOff;
     writeCieFde(Buf + CieOffset, Rec->Cie->data());
 
     for (EhSectionPiece *Fde : Rec->Fdes) {
       size_t Off = Fde->OutputOff;
       writeCieFde(Buf + Off, Fde->data());
 
       // FDE's second word should have the offset to an associated CIE.
       // Write it.
       write32(Buf + Off + 4, Off + 4 - CieOffset);
     }
   }
 
   // Apply relocations. .eh_frame section contents are not contiguous
   // in the output buffer, but relocateAlloc() still works because
   // getOffset() takes care of discontiguous section pieces.
   for (EhInputSection *S : Sections)
     S->relocateAlloc(Buf, nullptr);
 }
 
 GotSection::GotSection()
     : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
                        Target->GotEntrySize, ".got") {
   // PPC64 saves the ElfSym::GlobalOffsetTable .TOC. as the first entry in the
   // .got. If there are no references to .TOC. in the symbol table,
   // ElfSym::GlobalOffsetTable will not be defined and we won't need to save
   // .TOC. in the .got. When it is defined, we increase NumEntries by the number
   // of entries used to emit ElfSym::GlobalOffsetTable.
   if (ElfSym::GlobalOffsetTable && !Target->GotBaseSymInGotPlt)
     NumEntries += Target->GotHeaderEntriesNum;
 }
 
 void GotSection::addEntry(Symbol &Sym) {
   Sym.GotIndex = NumEntries;
   ++NumEntries;
 }
 
 bool GotSection::addDynTlsEntry(Symbol &Sym) {
   if (Sym.GlobalDynIndex != -1U)
     return false;
   Sym.GlobalDynIndex = NumEntries;
   // Global Dynamic TLS entries take two GOT slots.
   NumEntries += 2;
   return true;
 }
 
 // Reserves TLS entries for a TLS module ID and a TLS block offset.
 // In total it takes two GOT slots.
 bool GotSection::addTlsIndex() {
   if (TlsIndexOff != uint32_t(-1))
     return false;
   TlsIndexOff = NumEntries * Config->Wordsize;
   NumEntries += 2;
   return true;
 }
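
 // For example, on a 64-bit target (Wordsize == 8), if NumEntries is 3 when
 // addTlsIndex() is first called, TlsIndexOff becomes 24 and NumEntries grows
 // to 5; a second call is a no-op and returns false.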
 
 uint64_t GotSection::getGlobalDynAddr(const Symbol &B) const {
   return this->getVA() + B.GlobalDynIndex * Config->Wordsize;
 }
 
 uint64_t GotSection::getGlobalDynOffset(const Symbol &B) const {
   return B.GlobalDynIndex * Config->Wordsize;
 }
 
 void GotSection::finalizeContents() {
   Size = NumEntries * Config->Wordsize;
 }
 
 bool GotSection::empty() const {
   // We need to emit a GOT even if it's empty if there's a relocation that is
   // relative to the GOT (such as GOTOFFREL), or if there's a symbol that
   // points to the GOT (i.e. _GLOBAL_OFFSET_TABLE_) that the target defines
   // relative to the .got.
   return NumEntries == 0 && !HasGotOffRel &&
          !(ElfSym::GlobalOffsetTable && !Target->GotBaseSymInGotPlt);
 }
 
 void GotSection::writeTo(uint8_t *Buf) {
   // Buf points to the start of this section's buffer,
   // whereas InputSectionBase::relocateAlloc() expects its argument
   // to point to the start of the output section.
   Target->writeGotHeader(Buf);
   relocateAlloc(Buf - OutSecOff, Buf - OutSecOff + Size);
 }
 
 static uint64_t getMipsPageAddr(uint64_t Addr) {
   return (Addr + 0x8000) & ~0xffff;
 }
 
 static uint64_t getMipsPageCount(uint64_t Size) {
   return (Size + 0xfffe) / 0xffff + 1;
 }
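
 // For example, getMipsPageAddr(0x12345678) == (0x12345678 + 0x8000) & ~0xffff
 // == 0x12340000, i.e. the address is rounded to the nearest 64K page, and
 // getMipsPageCount(0x20000) == (0x20000 + 0xfffe) / 0xffff + 1 == 4; the
 // over-allocation allows for sections that straddle page boundaries.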
 
 MipsGotSection::MipsGotSection()
     : SyntheticSection(SHF_ALLOC | SHF_WRITE | SHF_MIPS_GPREL, SHT_PROGBITS, 16,
                        ".got") {}
 
 void MipsGotSection::addEntry(InputFile &File, Symbol &Sym, int64_t Addend,
                               RelExpr Expr) {
   FileGot &G = getGot(File);
   if (Expr == R_MIPS_GOT_LOCAL_PAGE) {
     if (const OutputSection *OS = Sym.getOutputSection())
       G.PagesMap.insert({OS, {}});
     else
       G.Local16.insert({{nullptr, getMipsPageAddr(Sym.getVA(Addend))}, 0});
   } else if (Sym.isTls())
     G.Tls.insert({&Sym, 0});
   else if (Sym.IsPreemptible && Expr == R_ABS)
     G.Relocs.insert({&Sym, 0});
   else if (Sym.IsPreemptible)
     G.Global.insert({&Sym, 0});
   else if (Expr == R_MIPS_GOT_OFF32)
     G.Local32.insert({{&Sym, Addend}, 0});
   else
     G.Local16.insert({{&Sym, Addend}, 0});
 }
 
 void MipsGotSection::addDynTlsEntry(InputFile &File, Symbol &Sym) {
   getGot(File).DynTlsSymbols.insert({&Sym, 0});
 }
 
 void MipsGotSection::addTlsIndex(InputFile &File) {
   getGot(File).DynTlsSymbols.insert({nullptr, 0});
 }
 
 size_t MipsGotSection::FileGot::getEntriesNum() const {
   return getPageEntriesNum() + Local16.size() + Global.size() + Relocs.size() +
          Tls.size() + DynTlsSymbols.size() * 2;
 }
 
 size_t MipsGotSection::FileGot::getPageEntriesNum() const {
   size_t Num = 0;
   for (const std::pair<const OutputSection *, FileGot::PageBlock> &P : PagesMap)
     Num += P.second.Count;
   return Num;
 }
 
 size_t MipsGotSection::FileGot::getIndexedEntriesNum() const {
   size_t Count = getPageEntriesNum() + Local16.size() + Global.size();
   // If there are relocation-only entries in the GOT, TLS entries
   // are allocated after them. TLS entries should be addressable
   // by a 16-bit index, so count both reloc-only and TLS entries.
   if (!Tls.empty() || !DynTlsSymbols.empty())
     Count += Relocs.size() + Tls.size() + DynTlsSymbols.size() * 2;
   return Count;
 }
 
 MipsGotSection::FileGot &MipsGotSection::getGot(InputFile &F) {
   if (!F.MipsGotIndex.hasValue()) {
     Gots.emplace_back();
     Gots.back().File = &F;
     F.MipsGotIndex = Gots.size() - 1;
   }
   return Gots[*F.MipsGotIndex];
 }
 
 uint64_t MipsGotSection::getPageEntryOffset(const InputFile *F,
                                             const Symbol &Sym,
                                             int64_t Addend) const {
   const FileGot &G = Gots[*F->MipsGotIndex];
   uint64_t Index = 0;
   if (const OutputSection *OutSec = Sym.getOutputSection()) {
     uint64_t SecAddr = getMipsPageAddr(OutSec->Addr);
     uint64_t SymAddr = getMipsPageAddr(Sym.getVA(Addend));
     Index = G.PagesMap.lookup(OutSec).FirstIndex + (SymAddr - SecAddr) / 0xffff;
   } else {
     Index = G.Local16.lookup({nullptr, getMipsPageAddr(Sym.getVA(Addend))});
   }
   return Index * Config->Wordsize;
 }
 
 uint64_t MipsGotSection::getSymEntryOffset(const InputFile *F, const Symbol &S,
                                            int64_t Addend) const {
   const FileGot &G = Gots[*F->MipsGotIndex];
   Symbol *Sym = const_cast<Symbol *>(&S);
   if (Sym->isTls())
     return G.Tls.lookup(Sym) * Config->Wordsize;
   if (Sym->IsPreemptible)
     return G.Global.lookup(Sym) * Config->Wordsize;
   return G.Local16.lookup({Sym, Addend}) * Config->Wordsize;
 }
 
 uint64_t MipsGotSection::getTlsIndexOffset(const InputFile *F) const {
   const FileGot &G = Gots[*F->MipsGotIndex];
   return G.DynTlsSymbols.lookup(nullptr) * Config->Wordsize;
 }
 
 uint64_t MipsGotSection::getGlobalDynOffset(const InputFile *F,
                                             const Symbol &S) const {
   const FileGot &G = Gots[*F->MipsGotIndex];
   Symbol *Sym = const_cast<Symbol *>(&S);
   return G.DynTlsSymbols.lookup(Sym) * Config->Wordsize;
 }
 
 const Symbol *MipsGotSection::getFirstGlobalEntry() const {
   if (Gots.empty())
     return nullptr;
   const FileGot &PrimGot = Gots.front();
   if (!PrimGot.Global.empty())
     return PrimGot.Global.front().first;
   if (!PrimGot.Relocs.empty())
     return PrimGot.Relocs.front().first;
   return nullptr;
 }
 
 unsigned MipsGotSection::getLocalEntriesNum() const {
   if (Gots.empty())
     return HeaderEntriesNum;
   return HeaderEntriesNum + Gots.front().getPageEntriesNum() +
          Gots.front().Local16.size();
 }
 
 bool MipsGotSection::tryMergeGots(FileGot &Dst, FileGot &Src, bool IsPrimary) {
   FileGot Tmp = Dst;
   set_union(Tmp.PagesMap, Src.PagesMap);
   set_union(Tmp.Local16, Src.Local16);
   set_union(Tmp.Global, Src.Global);
   set_union(Tmp.Relocs, Src.Relocs);
   set_union(Tmp.Tls, Src.Tls);
   set_union(Tmp.DynTlsSymbols, Src.DynTlsSymbols);
 
   size_t Count = IsPrimary ? HeaderEntriesNum : 0;
   Count += Tmp.getIndexedEntriesNum();
 
   if (Count * Config->Wordsize > Config->MipsGotSize)
     return false;
 
   std::swap(Tmp, Dst);
   return true;
 }
 
 void MipsGotSection::finalizeContents() { updateAllocSize(); }
 
 bool MipsGotSection::updateAllocSize() {
   Size = HeaderEntriesNum * Config->Wordsize;
   for (const FileGot &G : Gots)
     Size += G.getEntriesNum() * Config->Wordsize;
   return false;
 }
 
 template <class ELFT> void MipsGotSection::build() {
   if (Gots.empty())
     return;
 
   std::vector<FileGot> MergedGots(1);
 
   // For each GOT, move non-preemptible symbols from the `Global` list
   // to the `Local16` list. A preemptible symbol might become non-preemptible
   // if, for example, it gets a related copy relocation.
   for (FileGot &Got : Gots) {
     for (auto &P: Got.Global)
       if (!P.first->IsPreemptible)
         Got.Local16.insert({{P.first, 0}, 0});
     Got.Global.remove_if([&](const std::pair<Symbol *, size_t> &P) {
       return !P.first->IsPreemptible;
     });
   }
 
   // For each GOT, remove the "reloc-only" entry if there is a "global"
   // entry for the same symbol, and append the local entries that are indexed
   // using a 32-bit value to the end of the 16-bit entries.
   for (FileGot &Got : Gots) {
     Got.Relocs.remove_if([&](const std::pair<Symbol *, size_t> &P) {
       return Got.Global.count(P.first);
     });
     set_union(Got.Local16, Got.Local32);
     Got.Local32.clear();
   }
 
   // Evaluate the number of "reloc-only" entries in the resulting GOT.
   // To do that, put all unique "reloc-only" and "global" entries
   // from all GOTs into the future primary GOT.
   FileGot *PrimGot = &MergedGots.front();
   for (FileGot &Got : Gots) {
     set_union(PrimGot->Relocs, Got.Global);
     set_union(PrimGot->Relocs, Got.Relocs);
     Got.Relocs.clear();
   }
 
   // Evaluate the number of "page" entries in each GOT.
   for (FileGot &Got : Gots) {
     for (std::pair<const OutputSection *, FileGot::PageBlock> &P :
          Got.PagesMap) {
       const OutputSection *OS = P.first;
       uint64_t SecSize = 0;
       for (BaseCommand *Cmd : OS->SectionCommands) {
         if (auto *ISD = dyn_cast<InputSectionDescription>(Cmd))
           for (InputSection *IS : ISD->Sections) {
             uint64_t Off = alignTo(SecSize, IS->Alignment);
             SecSize = Off + IS->getSize();
           }
       }
       P.second.Count = getMipsPageCount(SecSize);
     }
   }
 
   // Merge GOTs. Try to join as many GOTs as possible without exceeding the
   // maximum GOT size. First, try to fill the primary GOT because the
   // primary GOT can be accessed in the most efficient way. If that is not
   // possible, try to fill the last GOT in the list, and finally create a
   // new GOT if both attempts fail.
   for (FileGot &SrcGot : Gots) {
     InputFile *File = SrcGot.File;
     if (tryMergeGots(MergedGots.front(), SrcGot, true)) {
       File->MipsGotIndex = 0;
     } else {
       // If this is the first time we failed to merge with the primary GOT,
       // MergedGots.back() will also be the primary GOT. We must make sure not
       // to try to merge again with IsPrimary=false, as otherwise, if the
       // inputs are just right, we could allow the primary GOT to become 1 or 2
       // words too big due to ignoring the header size.
       if (MergedGots.size() == 1 ||
           !tryMergeGots(MergedGots.back(), SrcGot, false)) {
         MergedGots.emplace_back();
         std::swap(MergedGots.back(), SrcGot);
       }
       File->MipsGotIndex = MergedGots.size() - 1;
     }
   }
   std::swap(Gots, MergedGots);
 
   // Reduce the number of "reloc-only" entries in the primary GOT
   // by subtracting the "global" entries that exist in the primary GOT.
   PrimGot = &Gots.front();
   PrimGot->Relocs.remove_if([&](const std::pair<Symbol *, size_t> &P) {
     return PrimGot->Global.count(P.first);
   });
 
   // Calculate indexes for each GOT entry.
   size_t Index = HeaderEntriesNum;
   for (FileGot &Got : Gots) {
     Got.StartIndex = &Got == PrimGot ? 0 : Index;
     for (std::pair<const OutputSection *, FileGot::PageBlock> &P :
          Got.PagesMap) {
       // For each output section referenced by GOT page relocations, calculate
       // and save into PagesMap an upper bound of the MIPS GOT entries required
       // to store page addresses of local symbols. We assume the worst case:
       // each 64kb page of the output section has at least one GOT relocation
       // against it, and we take into account the case when the section
       // crosses page boundaries.
       P.second.FirstIndex = Index;
       Index += P.second.Count;
     }
     for (auto &P: Got.Local16)
       P.second = Index++;
     for (auto &P: Got.Global)
       P.second = Index++;
     for (auto &P: Got.Relocs)
       P.second = Index++;
     for (auto &P: Got.Tls)
       P.second = Index++;
     for (auto &P: Got.DynTlsSymbols) {
       P.second = Index;
       Index += 2;
     }
   }
 
   // Update the Symbol::GotIndex field so this value can be used
   // later in the `sortMipsSymbols` function.
   for (auto &P : PrimGot->Global)
     P.first->GotIndex = P.second;
   for (auto &P : PrimGot->Relocs)
     P.first->GotIndex = P.second;
 
   // Create dynamic relocations.
   for (FileGot &Got : Gots) {
     // Create dynamic relocations for TLS entries.
     for (std::pair<Symbol *, size_t> &P : Got.Tls) {
       Symbol *S = P.first;
       uint64_t Offset = P.second * Config->Wordsize;
       if (S->IsPreemptible)
         In.RelaDyn->addReloc(Target->TlsGotRel, this, Offset, S);
     }
     for (std::pair<Symbol *, size_t> &P : Got.DynTlsSymbols) {
       Symbol *S = P.first;
       uint64_t Offset = P.second * Config->Wordsize;
       if (S == nullptr) {
         if (!Config->Pic)
           continue;
         In.RelaDyn->addReloc(Target->TlsModuleIndexRel, this, Offset, S);
       } else {
         // When building a shared library we still need a dynamic relocation
         // for the module index. Therefore only checking for
         // S->IsPreemptible is not sufficient (this happens e.g. for
         // thread-locals that have been marked as local through a linker script).
         if (!S->IsPreemptible && !Config->Pic)
           continue;
         In.RelaDyn->addReloc(Target->TlsModuleIndexRel, this, Offset, S);
         // However, we can skip writing the TLS offset reloc for
         // non-preemptible symbols since it is known even in shared libraries.
         if (!S->IsPreemptible)
           continue;
         Offset += Config->Wordsize;
         In.RelaDyn->addReloc(Target->TlsOffsetRel, this, Offset, S);
       }
     }
 
     // Do not create dynamic relocations for non-TLS
     // entries in the primary GOT.
     if (&Got == PrimGot)
       continue;
 
     // Dynamic relocations for "global" entries.
     for (const std::pair<Symbol *, size_t> &P : Got.Global) {
       uint64_t Offset = P.second * Config->Wordsize;
       In.RelaDyn->addReloc(Target->RelativeRel, this, Offset, P.first);
     }
     if (!Config->Pic)
       continue;
     // Dynamic relocations for "local" entries in case of PIC.
     for (const std::pair<const OutputSection *, FileGot::PageBlock> &L :
          Got.PagesMap) {
       size_t PageCount = L.second.Count;
       for (size_t PI = 0; PI < PageCount; ++PI) {
         uint64_t Offset = (L.second.FirstIndex + PI) * Config->Wordsize;
         In.RelaDyn->addReloc({Target->RelativeRel, this, Offset, L.first,
                               int64_t(PI * 0x10000)});
       }
     }
     for (const std::pair<GotEntry, size_t> &P : Got.Local16) {
       uint64_t Offset = P.second * Config->Wordsize;
       In.RelaDyn->addReloc({Target->RelativeRel, this, Offset, true,
                             P.first.first, P.first.second});
     }
   }
 }
 
 bool MipsGotSection::empty() const {
   // We add the .got section to the result for dynamic MIPS targets because
   // its address and properties are mentioned in the .dynamic section.
   return Config->Relocatable;
 }
 
 uint64_t MipsGotSection::getGp(const InputFile *F) const {
   // For files without a related GOT, or files that refer to the primary
   // GOT, return the "common" _gp value. For secondary GOTs, calculate
   // individual _gp values.
   if (!F || !F->MipsGotIndex.hasValue() || *F->MipsGotIndex == 0)
     return ElfSym::MipsGp->getVA(0);
   return getVA() + Gots[*F->MipsGotIndex].StartIndex * Config->Wordsize +
          0x7ff0;
 }
 
 void MipsGotSection::writeTo(uint8_t *Buf) {
   // Set the MSB of the second GOT slot. This is not required by any
   // MIPS ABI documentation, though.
   //
   // There is a comment in glibc saying that "The MSB of got[1] of a
   // gnu object is set to identify gnu objects," and in GNU gold it
   // says "the second entry will be used by some runtime loaders".
   // But how this field is being used is unclear.
   //
   // We are not really willing to mimic other linkers' behaviors
   // without understanding why they do that, but because all files
   // generated by GNU tools have this special GOT value, and because
   // we've been doing this for years, it is probably a safe bet to
   // keep doing this for now. We really need to revisit this to see
   // whether we actually have to do it.
   writeUint(Buf + Config->Wordsize, (uint64_t)1 << (Config->Wordsize * 8 - 1));
   for (const FileGot &G : Gots) {
     auto Write = [&](size_t I, const Symbol *S, int64_t A) {
       uint64_t VA = A;
       if (S) {
         VA = S->getVA(A);
         if (S->StOther & STO_MIPS_MICROMIPS)
           VA |= 1;
       }
       writeUint(Buf + I * Config->Wordsize, VA);
     };
     // Write 'page address' entries to the local part of the GOT.
     for (const std::pair<const OutputSection *, FileGot::PageBlock> &L :
          G.PagesMap) {
       size_t PageCount = L.second.Count;
       uint64_t FirstPageAddr = getMipsPageAddr(L.first->Addr);
       for (size_t PI = 0; PI < PageCount; ++PI)
         Write(L.second.FirstIndex + PI, nullptr, FirstPageAddr + PI * 0x10000);
     }
     // Local, global, TLS, and reloc-only entries.
     // If a TLS entry has a corresponding dynamic relocation, leave it
     // initialized to zero. Otherwise, write the adjusted TLS symbol value.
     // To calculate the adjustments, use offsets for thread-local storage.
     // https://www.linux-mips.org/wiki/NPTL
     for (const std::pair<GotEntry, size_t> &P : G.Local16)
       Write(P.second, P.first.first, P.first.second);
     // Write VAs to the primary GOT only. For secondary GOTs, that
     // is done by REL32 dynamic relocations.
     if (&G == &Gots.front())
       for (const std::pair<const Symbol *, size_t> &P : G.Global)
         Write(P.second, P.first, 0);
     for (const std::pair<Symbol *, size_t> &P : G.Relocs)
       Write(P.second, P.first, 0);
     for (const std::pair<Symbol *, size_t> &P : G.Tls)
       Write(P.second, P.first, P.first->IsPreemptible ? 0 : -0x7000);
     for (const std::pair<Symbol *, size_t> &P : G.DynTlsSymbols) {
       if (P.first == nullptr && !Config->Pic)
         Write(P.second, nullptr, 1);
       else if (P.first && !P.first->IsPreemptible) {
         // If we are emitting PIC code with relocations, we mustn't write
         // anything to the GOT here. When using Elf_Rel relocations, the value
         // one would be treated as an addend and would cause crashes at runtime.
         if (!Config->Pic)
           Write(P.second, nullptr, 1);
         Write(P.second + 1, P.first, -0x8000);
       }
     }
   }
 }
 
 // On PowerPC the .plt section is used to hold the table of function addresses
 // instead of the .got.plt, and the type is SHT_NOBITS similar to a .bss
 // section. I don't know why we have a BSS-style type for the section, but it
 // is consistent across both 64-bit PowerPC ABIs as well as the 32-bit PowerPC
 // ABI.
 GotPltSection::GotPltSection()
     : SyntheticSection(SHF_ALLOC | SHF_WRITE,
                        Config->EMachine == EM_PPC64 ? SHT_NOBITS : SHT_PROGBITS,
                        Target->GotPltEntrySize,
                        Config->EMachine == EM_PPC64 ? ".plt" : ".got.plt") {}
 
 void GotPltSection::addEntry(Symbol &Sym) {
   assert(Sym.PltIndex == Entries.size());
   Entries.push_back(&Sym);
 }
 
 size_t GotPltSection::getSize() const {
   return (Target->GotPltHeaderEntriesNum + Entries.size()) *
          Target->GotPltEntrySize;
 }
 
 void GotPltSection::writeTo(uint8_t *Buf) {
   Target->writeGotPltHeader(Buf);
   Buf += Target->GotPltHeaderEntriesNum * Target->GotPltEntrySize;
   for (const Symbol *B : Entries) {
     Target->writeGotPlt(Buf, *B);
     Buf += Config->Wordsize;
   }
 }
 
 bool GotPltSection::empty() const {
   // We need to emit a .got.plt even if it's empty if there's a symbol that
   // references _GLOBAL_OFFSET_TABLE_ and the target defines that symbol
   // relative to the .got.plt section.
   return Entries.empty() &&
          !(ElfSym::GlobalOffsetTable && Target->GotBaseSymInGotPlt);
 }
 
 static StringRef getIgotPltName() {
   // On ARM the IgotPltSection is part of the GotSection.
   if (Config->EMachine == EM_ARM)
     return ".got";
 
   // On PowerPC64 the GotPltSection is renamed to '.plt' so the IgotPltSection
   // needs to be named the same.
   if (Config->EMachine == EM_PPC64)
     return ".plt";
 
   return ".got.plt";
 }
 
 // On PowerPC64 the GotPltSection type is SHT_NOBITS so we have to follow suit
 // with the IgotPltSection.
 IgotPltSection::IgotPltSection()
     : SyntheticSection(SHF_ALLOC | SHF_WRITE,
                        Config->EMachine == EM_PPC64 ? SHT_NOBITS : SHT_PROGBITS,
                        Target->GotPltEntrySize, getIgotPltName()) {}
 
 void IgotPltSection::addEntry(Symbol &Sym) {
-  Sym.IsInIgot = true;
   assert(Sym.PltIndex == Entries.size());
   Entries.push_back(&Sym);
 }
 
 size_t IgotPltSection::getSize() const {
   return Entries.size() * Target->GotPltEntrySize;
 }
 
 void IgotPltSection::writeTo(uint8_t *Buf) {
   for (const Symbol *B : Entries) {
     Target->writeIgotPlt(Buf, *B);
     Buf += Config->Wordsize;
   }
 }
 
 StringTableSection::StringTableSection(StringRef Name, bool Dynamic)
     : SyntheticSection(Dynamic ? (uint64_t)SHF_ALLOC : 0, SHT_STRTAB, 1, Name),
       Dynamic(Dynamic) {
   // ELF string tables start with a NUL byte.
   addString("");
 }
 
 // Adds a string to the string table. If HashIt is true, we hash and check for
 // duplicates. It is optional because the names of global symbols are already
 // unique, and hashing them again has a big cost for little value: uniquing
 // them with some other string that happens to be the same.
 unsigned StringTableSection::addString(StringRef S, bool HashIt) {
   if (HashIt) {
     auto R = StringMap.insert(std::make_pair(S, this->Size));
     if (!R.second)
       return R.first->second;
   }
   unsigned Ret = this->Size;
   this->Size = this->Size + S.size() + 1;
   Strings.push_back(S);
   return Ret;
 }
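
 // For example (a hypothetical call sequence, assuming the default
 // HashIt = true): the table starts with the initial NUL byte at offset 0, so
 //
 //   unsigned A = StrTab.addString("foo");        // returns 1
 //   unsigned B = StrTab.addString("foo");        // returns 1 again (deduped)
 //   unsigned C = StrTab.addString("foo", false); // appends, returns 5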
 
 void StringTableSection::writeTo(uint8_t *Buf) {
   for (StringRef S : Strings) {
     memcpy(Buf, S.data(), S.size());
     Buf[S.size()] = '\0';
     Buf += S.size() + 1;
   }
 }
 
 // Returns the number of version definition entries. Because the first entry
 // is for the version definition itself, it is the number of version
 // definitions plus one. Note that we don't support multiple versions yet.
 static unsigned getVerDefNum() { return Config->VersionDefinitions.size() + 1; }
 
 template <class ELFT>
 DynamicSection<ELFT>::DynamicSection()
     : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_DYNAMIC, Config->Wordsize,
                        ".dynamic") {
   this->Entsize = ELFT::Is64Bits ? 16 : 8;
 
   // The .dynamic section is not writable on MIPS, nor on Fuchsia OS,
   // which passes -z rodynamic.
   // See "Special Section" in Chapter 4 of the following document:
   // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
   if (Config->EMachine == EM_MIPS || Config->ZRodynamic)
     this->Flags = SHF_ALLOC;
 
   // Add strings to .dynstr early so that .dynstr's size will be
   // fixed early.
   for (StringRef S : Config->FilterList)
     addInt(DT_FILTER, In.DynStrTab->addString(S));
   for (StringRef S : Config->AuxiliaryList)
     addInt(DT_AUXILIARY, In.DynStrTab->addString(S));
 
   if (!Config->Rpath.empty())
     addInt(Config->EnableNewDtags ? DT_RUNPATH : DT_RPATH,
            In.DynStrTab->addString(Config->Rpath));
 
   for (InputFile *File : SharedFiles) {
     SharedFile<ELFT> *F = cast<SharedFile<ELFT>>(File);
     if (F->IsNeeded)
       addInt(DT_NEEDED, In.DynStrTab->addString(F->SoName));
   }
   if (!Config->SoName.empty())
     addInt(DT_SONAME, In.DynStrTab->addString(Config->SoName));
 }
 
 template <class ELFT>
 void DynamicSection<ELFT>::add(int32_t Tag, std::function<uint64_t()> Fn) {
   Entries.push_back({Tag, Fn});
 }
 
 template <class ELFT>
 void DynamicSection<ELFT>::addInt(int32_t Tag, uint64_t Val) {
   Entries.push_back({Tag, [=] { return Val; }});
 }
 
 template <class ELFT>
 void DynamicSection<ELFT>::addInSec(int32_t Tag, InputSection *Sec) {
   Entries.push_back({Tag, [=] { return Sec->getVA(0); }});
 }
 
 template <class ELFT>
 void DynamicSection<ELFT>::addInSecRelative(int32_t Tag, InputSection *Sec) {
   size_t TagOffset = Entries.size() * Entsize;
   Entries.push_back(
       {Tag, [=] { return Sec->getVA(0) - (getVA() + TagOffset); }});
 }
 
 template <class ELFT>
 void DynamicSection<ELFT>::addOutSec(int32_t Tag, OutputSection *Sec) {
   Entries.push_back({Tag, [=] { return Sec->Addr; }});
 }
 
 template <class ELFT>
 void DynamicSection<ELFT>::addSize(int32_t Tag, OutputSection *Sec) {
   Entries.push_back({Tag, [=] { return Sec->Size; }});
 }
 
 template <class ELFT>
 void DynamicSection<ELFT>::addSym(int32_t Tag, Symbol *Sym) {
   Entries.push_back({Tag, [=] { return Sym->getVA(); }});
 }
 
 // A linker script may assign the RELA relocation sections to the same
 // output section. When this occurs we cannot just use the OutputSection
 // size. Moreover, the [DT_JMPREL, DT_JMPREL + DT_PLTRELSZ) range is permitted
 // to overlap with the [DT_RELA, DT_RELA + DT_RELASZ) range.
 static uint64_t addPltRelSz() {
   size_t Size = In.RelaPlt->getSize();
   if (In.RelaIplt->getParent() == In.RelaPlt->getParent() &&
       In.RelaIplt->Name == In.RelaPlt->Name)
     Size += In.RelaIplt->getSize();
   return Size;
 }
 
 // Add remaining entries to complete .dynamic contents.
 template <class ELFT> void DynamicSection<ELFT>::finalizeContents() {
   // Set DT_FLAGS and DT_FLAGS_1.
   uint32_t DtFlags = 0;
   uint32_t DtFlags1 = 0;
   if (Config->Bsymbolic)
     DtFlags |= DF_SYMBOLIC;
   if (Config->ZGlobal)
     DtFlags1 |= DF_1_GLOBAL;
   if (Config->ZInitfirst)
     DtFlags1 |= DF_1_INITFIRST;
   if (Config->ZInterpose)
     DtFlags1 |= DF_1_INTERPOSE;
   if (Config->ZNodefaultlib)
     DtFlags1 |= DF_1_NODEFLIB;
   if (Config->ZNodelete)
     DtFlags1 |= DF_1_NODELETE;
   if (Config->ZNodlopen)
     DtFlags1 |= DF_1_NOOPEN;
   if (Config->ZNow) {
     DtFlags |= DF_BIND_NOW;
     DtFlags1 |= DF_1_NOW;
   }
   if (Config->ZOrigin) {
     DtFlags |= DF_ORIGIN;
     DtFlags1 |= DF_1_ORIGIN;
   }
   if (!Config->ZText)
     DtFlags |= DF_TEXTREL;
   if (Config->HasStaticTlsModel)
     DtFlags |= DF_STATIC_TLS;
 
   if (DtFlags)
     addInt(DT_FLAGS, DtFlags);
   if (DtFlags1)
     addInt(DT_FLAGS_1, DtFlags1);
 
   // DT_DEBUG is a pointer to debug information used by debuggers at runtime.
   // We need it for each process, so we don't write it for DSOs. The loader
   // writes the pointer into this entry.
   //
   // DT_DEBUG is the only .dynamic entry that needs to be written to. Some
   // systems (currently only Fuchsia OS) provide other means to give the
   // debugger this information. Such systems may choose to make .dynamic
   // read-only. If the target is such a system (it used -z rodynamic), don't
   // write DT_DEBUG.
   if (!Config->Shared && !Config->Relocatable && !Config->ZRodynamic)
     addInt(DT_DEBUG, 0);
 
   if (OutputSection *Sec = In.DynStrTab->getParent())
     this->Link = Sec->SectionIndex;
 
   if (!In.RelaDyn->empty()) {
     addInSec(In.RelaDyn->DynamicTag, In.RelaDyn);
     addSize(In.RelaDyn->SizeDynamicTag, In.RelaDyn->getParent());
 
     bool IsRela = Config->IsRela;
     addInt(IsRela ? DT_RELAENT : DT_RELENT,
            IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel));
 
     // The MIPS dynamic loader does not support the RELCOUNT tag.
     // The problem is the tight relationship between dynamic
     // relocations and the GOT, so do not emit this tag on MIPS.
     if (Config->EMachine != EM_MIPS) {
       size_t NumRelativeRels = In.RelaDyn->getRelativeRelocCount();
       if (Config->ZCombreloc && NumRelativeRels)
         addInt(IsRela ? DT_RELACOUNT : DT_RELCOUNT, NumRelativeRels);
     }
   }
   if (In.RelrDyn && !In.RelrDyn->Relocs.empty()) {
     addInSec(Config->UseAndroidRelrTags ? DT_ANDROID_RELR : DT_RELR,
              In.RelrDyn);
     addSize(Config->UseAndroidRelrTags ? DT_ANDROID_RELRSZ : DT_RELRSZ,
             In.RelrDyn->getParent());
     addInt(Config->UseAndroidRelrTags ? DT_ANDROID_RELRENT : DT_RELRENT,
            sizeof(Elf_Relr));
   }
   // The .rel[a].plt section usually consists of two parts, containing plt and
   // iplt relocations. It is possible to have only iplt relocations in the
   // output. In that case RelaPlt is empty and has zero offset, the same offset
   // as RelaIplt has. We still want to emit proper dynamic tags for that
   // case, so here we always use RelaPlt as the marker for the beginning of
   // the .rel[a].plt section.
   if (In.RelaPlt->getParent()->Live) {
     addInSec(DT_JMPREL, In.RelaPlt);
     Entries.push_back({DT_PLTRELSZ, addPltRelSz});
     switch (Config->EMachine) {
     case EM_MIPS:
       addInSec(DT_MIPS_PLTGOT, In.GotPlt);
       break;
     case EM_SPARCV9:
       addInSec(DT_PLTGOT, In.Plt);
       break;
     default:
       addInSec(DT_PLTGOT, In.GotPlt);
       break;
     }
     addInt(DT_PLTREL, Config->IsRela ? DT_RELA : DT_REL);
   }
 
   addInSec(DT_SYMTAB, In.DynSymTab);
   addInt(DT_SYMENT, sizeof(Elf_Sym));
   addInSec(DT_STRTAB, In.DynStrTab);
   addInt(DT_STRSZ, In.DynStrTab->getSize());
   if (!Config->ZText)
     addInt(DT_TEXTREL, 0);
   if (In.GnuHashTab)
     addInSec(DT_GNU_HASH, In.GnuHashTab);
   if (In.HashTab)
     addInSec(DT_HASH, In.HashTab);
 
   if (Out::PreinitArray) {
     addOutSec(DT_PREINIT_ARRAY, Out::PreinitArray);
     addSize(DT_PREINIT_ARRAYSZ, Out::PreinitArray);
   }
   if (Out::InitArray) {
     addOutSec(DT_INIT_ARRAY, Out::InitArray);
     addSize(DT_INIT_ARRAYSZ, Out::InitArray);
   }
   if (Out::FiniArray) {
     addOutSec(DT_FINI_ARRAY, Out::FiniArray);
     addSize(DT_FINI_ARRAYSZ, Out::FiniArray);
   }
 
   if (Symbol *B = Symtab->find(Config->Init))
     if (B->isDefined())
       addSym(DT_INIT, B);
   if (Symbol *B = Symtab->find(Config->Fini))
     if (B->isDefined())
       addSym(DT_FINI, B);
 
   bool HasVerNeed = InX<ELFT>::VerNeed->getNeedNum() != 0;
   if (HasVerNeed || In.VerDef)
     addInSec(DT_VERSYM, InX<ELFT>::VerSym);
   if (In.VerDef) {
     addInSec(DT_VERDEF, In.VerDef);
     addInt(DT_VERDEFNUM, getVerDefNum());
   }
   if (HasVerNeed) {
     addInSec(DT_VERNEED, InX<ELFT>::VerNeed);
     addInt(DT_VERNEEDNUM, InX<ELFT>::VerNeed->getNeedNum());
   }
 
   if (Config->EMachine == EM_MIPS) {
     addInt(DT_MIPS_RLD_VERSION, 1);
     addInt(DT_MIPS_FLAGS, RHF_NOTPOT);
     addInt(DT_MIPS_BASE_ADDRESS, Target->getImageBase());
     addInt(DT_MIPS_SYMTABNO, In.DynSymTab->getNumSymbols());
 
     add(DT_MIPS_LOCAL_GOTNO, [] { return In.MipsGot->getLocalEntriesNum(); });
 
     if (const Symbol *B = In.MipsGot->getFirstGlobalEntry())
       addInt(DT_MIPS_GOTSYM, B->DynsymIndex);
     else
       addInt(DT_MIPS_GOTSYM, In.DynSymTab->getNumSymbols());
     addInSec(DT_PLTGOT, In.MipsGot);
     if (In.MipsRldMap) {
       if (!Config->Pie)
         addInSec(DT_MIPS_RLD_MAP, In.MipsRldMap);
       // Store the offset to the .rld_map section
       // relative to the address of the tag.
       addInSecRelative(DT_MIPS_RLD_MAP_REL, In.MipsRldMap);
     }
   }
 
   // The Glink dynamic tag is required by the V2 ABI if the .plt section
   // isn't empty.
   if (Config->EMachine == EM_PPC64 && !In.Plt->empty()) {
     // The Glink tag points to 32 bytes before the first lazy symbol resolution
     // stub, which starts directly after the header.
     Entries.push_back({DT_PPC64_GLINK, [=] {
                          unsigned Offset = Target->PltHeaderSize - 32;
                          return In.Plt->getVA(0) + Offset;
                        }});
   }
 
   addInt(DT_NULL, 0);
 
   getParent()->Link = this->Link;
   this->Size = Entries.size() * this->Entsize;
 }
 
 template <class ELFT> void DynamicSection<ELFT>::writeTo(uint8_t *Buf) {
   auto *P = reinterpret_cast<Elf_Dyn *>(Buf);
 
   for (std::pair<int32_t, std::function<uint64_t()>> &KV : Entries) {
     P->d_tag = KV.first;
     P->d_un.d_val = KV.second();
     ++P;
   }
 }
 
 uint64_t DynamicReloc::getOffset() const {
   return InputSec->getVA(OffsetInSec);
 }
 
 int64_t DynamicReloc::computeAddend() const {
   if (UseSymVA)
     return Sym->getVA(Addend);
   if (!OutputSec)
     return Addend;
   // See the comment in the DynamicReloc ctor.
   return getMipsPageAddr(OutputSec->Addr) + Addend;
 }
 
 uint32_t DynamicReloc::getSymIndex() const {
   if (Sym && !UseSymVA)
     return Sym->DynsymIndex;
   return 0;
 }
 
 RelocationBaseSection::RelocationBaseSection(StringRef Name, uint32_t Type,
                                              int32_t DynamicTag,
                                              int32_t SizeDynamicTag)
     : SyntheticSection(SHF_ALLOC, Type, Config->Wordsize, Name),
       DynamicTag(DynamicTag), SizeDynamicTag(SizeDynamicTag) {}
 
 void RelocationBaseSection::addReloc(RelType DynType, InputSectionBase *IS,
                                      uint64_t OffsetInSec, Symbol *Sym) {
   addReloc({DynType, IS, OffsetInSec, false, Sym, 0});
 }
 
 void RelocationBaseSection::addReloc(RelType DynType,
                                      InputSectionBase *InputSec,
                                      uint64_t OffsetInSec, Symbol *Sym,
                                      int64_t Addend, RelExpr Expr,
                                      RelType Type) {
   // Write the addends to the relocated address if required. We skip
   // it if the written value would be zero.
   if (Config->WriteAddends && (Expr != R_ADDEND || Addend != 0))
     InputSec->Relocations.push_back({Expr, Type, OffsetInSec, Addend, Sym});
   addReloc({DynType, InputSec, OffsetInSec, Expr != R_ADDEND, Sym, Addend});
 }
 
 void RelocationBaseSection::addReloc(const DynamicReloc &Reloc) {
   if (Reloc.Type == Target->RelativeRel)
     ++NumRelativeRelocs;
   Relocs.push_back(Reloc);
 }
 
 void RelocationBaseSection::finalizeContents() {
   // When linking glibc statically, .rel{,a}.plt contains R_*_IRELATIVE
   // relocations due to IFUNC (e.g. strcpy). sh_link will be set to 0 in that
   // case.
   InputSection *SymTab = Config->Relocatable ? In.SymTab : In.DynSymTab;
   if (SymTab && SymTab->getParent())
     getParent()->Link = SymTab->getParent()->SectionIndex;
   else
     getParent()->Link = 0;
 
   if (In.RelaPlt == this)
     getParent()->Info = In.GotPlt->getParent()->SectionIndex;
   if (In.RelaIplt == this)
     getParent()->Info = In.IgotPlt->getParent()->SectionIndex;
 }
 
 RelrBaseSection::RelrBaseSection()
     : SyntheticSection(SHF_ALLOC,
                        Config->UseAndroidRelrTags ? SHT_ANDROID_RELR : SHT_RELR,
                        Config->Wordsize, ".relr.dyn") {}
 
 template <class ELFT>
 static void encodeDynamicReloc(typename ELFT::Rela *P,
                                const DynamicReloc &Rel) {
   if (Config->IsRela)
     P->r_addend = Rel.computeAddend();
   P->r_offset = Rel.getOffset();
   P->setSymbolAndType(Rel.getSymIndex(), Rel.Type, Config->IsMips64EL);
 }
 
 template <class ELFT>
 RelocationSection<ELFT>::RelocationSection(StringRef Name, bool Sort)
     : RelocationBaseSection(Name, Config->IsRela ? SHT_RELA : SHT_REL,
                             Config->IsRela ? DT_RELA : DT_REL,
                             Config->IsRela ? DT_RELASZ : DT_RELSZ),
       Sort(Sort) {
   this->Entsize = Config->IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
 }
 
 static bool compRelocations(const DynamicReloc &A, const DynamicReloc &B) {
   bool AIsRel = A.Type == Target->RelativeRel;
   bool BIsRel = B.Type == Target->RelativeRel;
   if (AIsRel != BIsRel)
     return AIsRel;
   return A.getSymIndex() < B.getSymIndex();
 }
 
 template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *Buf) {
   if (Sort)
     std::stable_sort(Relocs.begin(), Relocs.end(), compRelocations);
 
   for (const DynamicReloc &Rel : Relocs) {
     encodeDynamicReloc<ELFT>(reinterpret_cast<Elf_Rela *>(Buf), Rel);
     Buf += Config->IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
   }
 }
 
 template <class ELFT> unsigned RelocationSection<ELFT>::getRelocOffset() {
   return this->Entsize * Relocs.size();
 }
 
 template <class ELFT>
 AndroidPackedRelocationSection<ELFT>::AndroidPackedRelocationSection(
     StringRef Name)
     : RelocationBaseSection(
           Name, Config->IsRela ? SHT_ANDROID_RELA : SHT_ANDROID_REL,
           Config->IsRela ? DT_ANDROID_RELA : DT_ANDROID_REL,
           Config->IsRela ? DT_ANDROID_RELASZ : DT_ANDROID_RELSZ) {
   this->Entsize = 1;
 }
 
 template <class ELFT>
 bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() {
   // This function computes the contents of an Android-format packed relocation
   // section.
   //
   // This format compresses relocations by using relocation groups to factor out
   // fields that are common between relocations and storing deltas from previous
   // relocations in SLEB128 format (which has a short representation for small
   // numbers). A good example of a relocation type with common fields is
   // R_*_RELATIVE, which is normally used to represent function pointers in
   // vtables. In the REL format, each relative relocation has the same r_info
   // field, and is only different from other relative relocations in terms of
   // the r_offset field. By sorting relocations by offset, grouping them by
   // r_info and representing each relocation with only the delta from the
   // previous offset, each 8-byte relocation can be compressed to as little as 1
   // byte (or less with run-length encoding). This relocation packer was able to
   // reduce the size of the relocation section in an Android Chromium DSO from
   // 2,911,184 bytes to 174,693 bytes, or 6% of the original size.
   //
   // A relocation section consists of a header containing the literal bytes
   // 'APS2' followed by a sequence of SLEB128-encoded integers. The first two
   // elements are the total number of relocations in the section and an initial
   // r_offset value. The remaining elements define a sequence of relocation
   // groups. Each relocation group starts with a header consisting of the
   // following elements:
   //
   // - the number of relocations in the relocation group
   // - flags for the relocation group
   // - (if RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG is set) the r_offset delta
   //   for each relocation in the group.
   // - (if RELOCATION_GROUPED_BY_INFO_FLAG is set) the value of the r_info
   //   field for each relocation in the group.
   // - (if RELOCATION_GROUP_HAS_ADDEND_FLAG and
   //   RELOCATION_GROUPED_BY_ADDEND_FLAG are set) the r_addend delta for
   //   each relocation in the group.
   //
   // Following the relocation group header are descriptions of each of the
   // relocations in the group. They consist of the following elements:
   //
   // - (if RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG is not set) the r_offset
   //   delta for this relocation.
   // - (if RELOCATION_GROUPED_BY_INFO_FLAG is not set) the value of the r_info
   //   field for this relocation.
   // - (if RELOCATION_GROUP_HAS_ADDEND_FLAG is set and
   //   RELOCATION_GROUPED_BY_ADDEND_FLAG is not set) the r_addend delta for
   //   this relocation.
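   //
   // For example (hypothetical numbers): three relative relocations at offsets
   // 0x1000, 0x1008 and 0x1010, with the running offset already at 0xff8, can
   // be described by a single group whose header is the count (3), the flags
   // RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG | RELOCATION_GROUPED_BY_INFO_FLAG,
   // the shared offset delta (8) and the shared r_info (R_*_RELATIVE), with no
   // further per-relocation data.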
 
   size_t OldSize = RelocData.size();
 
   RelocData = {'A', 'P', 'S', '2'};
   raw_svector_ostream OS(RelocData);
   auto Add = [&](int64_t V) { encodeSLEB128(V, OS); };
 
   // The format header includes the number of relocations and the initial
   // offset (we set this to zero because the first relocation group will
   // perform the initial adjustment).
   Add(Relocs.size());
   Add(0);
 
   std::vector<Elf_Rela> Relatives, NonRelatives;
 
   for (const DynamicReloc &Rel : Relocs) {
     Elf_Rela R;
     encodeDynamicReloc<ELFT>(&R, Rel);
 
     if (R.getType(Config->IsMips64EL) == Target->RelativeRel)
       Relatives.push_back(R);
     else
       NonRelatives.push_back(R);
   }
 
   llvm::sort(Relatives, [](const Elf_Rel &A, const Elf_Rel &B) {
     return A.r_offset < B.r_offset;
   });
 
   // Try to find groups of relative relocations which are spaced one word
   // apart from one another. These generally correspond to vtable entries. The
   // format allows these groups to be encoded using a sort of run-length
   // encoding, but each group will cost 7 bytes in addition to the offset from
   // the previous group, so it is only profitable to do this for groups of
   // size 8 or larger.
   std::vector<Elf_Rela> UngroupedRelatives;
   std::vector<std::vector<Elf_Rela>> RelativeGroups;
   for (auto I = Relatives.begin(), E = Relatives.end(); I != E;) {
     std::vector<Elf_Rela> Group;
     do {
       Group.push_back(*I++);
     } while (I != E && (I - 1)->r_offset + Config->Wordsize == I->r_offset);
 
     if (Group.size() < 8)
       UngroupedRelatives.insert(UngroupedRelatives.end(), Group.begin(),
                                 Group.end());
     else
       RelativeGroups.emplace_back(std::move(Group));
   }
 
   unsigned HasAddendIfRela =
       Config->IsRela ? RELOCATION_GROUP_HAS_ADDEND_FLAG : 0;
 
   uint64_t Offset = 0;
   uint64_t Addend = 0;
 
   // Emit the run-length encoding for the groups of adjacent relative
   // relocations. Each group is represented using two groups in the packed
   // format. The first is used to set the current offset to the start of the
   // group (and also encodes the first relocation), and the second encodes the
   // remaining relocations.
   for (std::vector<Elf_Rela> &G : RelativeGroups) {
     // The first relocation in the group.
     Add(1);
     Add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG |
         RELOCATION_GROUPED_BY_INFO_FLAG | HasAddendIfRela);
     Add(G[0].r_offset - Offset);
     Add(Target->RelativeRel);
     if (Config->IsRela) {
       Add(G[0].r_addend - Addend);
       Addend = G[0].r_addend;
     }
 
     // The remaining relocations.
     Add(G.size() - 1);
     Add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG |
         RELOCATION_GROUPED_BY_INFO_FLAG | HasAddendIfRela);
     Add(Config->Wordsize);
     Add(Target->RelativeRel);
     if (Config->IsRela) {
       for (auto I = G.begin() + 1, E = G.end(); I != E; ++I) {
         Add(I->r_addend - Addend);
         Addend = I->r_addend;
       }
     }
 
     Offset = G.back().r_offset;
   }
 
   // Now the ungrouped relatives.
   if (!UngroupedRelatives.empty()) {
     Add(UngroupedRelatives.size());
     Add(RELOCATION_GROUPED_BY_INFO_FLAG | HasAddendIfRela);
     Add(Target->RelativeRel);
     for (Elf_Rela &R : UngroupedRelatives) {
       Add(R.r_offset - Offset);
       Offset = R.r_offset;
       if (Config->IsRela) {
         Add(R.r_addend - Addend);
         Addend = R.r_addend;
       }
     }
   }
 
   // Finally the non-relative relocations.
   llvm::sort(NonRelatives, [](const Elf_Rela &A, const Elf_Rela &B) {
     return A.r_offset < B.r_offset;
   });
   if (!NonRelatives.empty()) {
     Add(NonRelatives.size());
     Add(HasAddendIfRela);
     for (Elf_Rela &R : NonRelatives) {
       Add(R.r_offset - Offset);
       Offset = R.r_offset;
       Add(R.r_info);
       if (Config->IsRela) {
         Add(R.r_addend - Addend);
         Addend = R.r_addend;
       }
     }
   }
 
   // Don't allow the section to shrink; otherwise the size of the section can
   // oscillate infinitely.
   if (RelocData.size() < OldSize)
     RelocData.append(OldSize - RelocData.size(), 0);
 
   // Returns whether the section size changed. We need to keep recomputing both
   // section layout and the contents of this section until the size converges
   // because changing this section's size can affect section layout, which in
   // turn can affect the sizes of the LEB-encoded integers stored in this
   // section.
   return RelocData.size() != OldSize;
 }
 
 template <class ELFT> RelrSection<ELFT>::RelrSection() {
   this->Entsize = Config->Wordsize;
 }
 
 template <class ELFT> bool RelrSection<ELFT>::updateAllocSize() {
   // This function computes the contents of an SHT_RELR packed relocation
   // section.
   //
   // Proposal for adding SHT_RELR sections to generic-abi is here:
   //   https://groups.google.com/forum/#!topic/generic-abi/bX460iggiKg
   //
   // The encoded sequence of Elf64_Relr entries in a SHT_RELR section looks
   // like [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
   //
   // i.e. start with an address, followed by any number of bitmaps. The address
   // entry encodes 1 relocation. The subsequent bitmap entries encode up to 63
   // relocations each, at subsequent offsets following the last address entry.
   //
   // The bitmap entries must have 1 in the least significant bit. The assumption
   // here is that an address cannot have 1 in lsb. Odd addresses are not
   // supported.
   //
   // Excluding the least significant bit in the bitmap, each non-zero bit in
   // the bitmap represents a relocation to be applied to a corresponding machine
   // word that follows the base address word. The second least significant bit
   // represents the machine word immediately following the initial address, and
   // each bit that follows represents the next word, in linear order. As such,
   // a single bitmap can encode up to 31 relocations in a 32-bit object, and
   // 63 relocations in a 64-bit object.
   //
   // This encoding has a couple of interesting properties:
   // 1. Looking at any entry, it is clear whether it's an address or a bitmap:
   //    even means address, odd means bitmap.
   // 2. Just a simple list of addresses is a valid encoding.
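   //
   // For example (hypothetical addresses): on a 64-bit target, relative
   // relocations at 0x10000, 0x10008 and 0x10018 encode as just two words:
   // the address entry 0x10000 followed by the bitmap entry 0xb (binary 1011,
   // where the LSB marks it as a bitmap, bit 1 covers 0x10008 and bit 3 covers
   // 0x10018).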
 
   size_t OldSize = RelrRelocs.size();
   RelrRelocs.clear();
 
   // Same as Config->Wordsize but faster because this is a compile-time
   // constant.
   const size_t Wordsize = sizeof(typename ELFT::uint);
 
   // Number of bits to use for the relocation offsets bitmap.
   // Must be either 63 or 31.
   const size_t NBits = Wordsize * 8 - 1;
 
   // Get offsets for all relative relocations and sort them.
   std::vector<uint64_t> Offsets;
   for (const RelativeReloc &Rel : Relocs)
     Offsets.push_back(Rel.getOffset());
   llvm::sort(Offsets.begin(), Offsets.end());
 
   // For each leading relocation, find following ones that can be folded
   // as a bitmap and fold them.
   for (size_t I = 0, E = Offsets.size(); I < E;) {
     // Add a leading relocation.
     RelrRelocs.push_back(Elf_Relr(Offsets[I]));
     uint64_t Base = Offsets[I] + Wordsize;
     ++I;
 
     // Find foldable relocations to construct bitmaps.
     while (I < E) {
       uint64_t Bitmap = 0;
 
       while (I < E) {
         uint64_t Delta = Offsets[I] - Base;
 
         // If it is too far, it cannot be folded.
         if (Delta >= NBits * Wordsize)
           break;
 
         // If it is not a multiple of wordsize away, it cannot be folded.
         if (Delta % Wordsize)
           break;
 
         // Fold it.
         Bitmap |= 1ULL << (Delta / Wordsize);
         ++I;
       }
 
       if (!Bitmap)
         break;
 
       RelrRelocs.push_back(Elf_Relr((Bitmap << 1) | 1));
       Base += NBits * Wordsize;
     }
   }
 
   return RelrRelocs.size() != OldSize;
 }
 
 SymbolTableBaseSection::SymbolTableBaseSection(StringTableSection &StrTabSec)
     : SyntheticSection(StrTabSec.isDynamic() ? (uint64_t)SHF_ALLOC : 0,
                        StrTabSec.isDynamic() ? SHT_DYNSYM : SHT_SYMTAB,
                        Config->Wordsize,
                        StrTabSec.isDynamic() ? ".dynsym" : ".symtab"),
       StrTabSec(StrTabSec) {}
 
 // Orders symbols according to their positions in the GOT,
 // in compliance with MIPS ABI rules.
 // See "Global Offset Table" in Chapter 5 in the following document
 // for detailed description:
 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
 static bool sortMipsSymbols(const SymbolTableEntry &L,
                             const SymbolTableEntry &R) {
   // Sort entries related to non-local preemptible symbols by GOT indexes.
   // All other entries go to the beginning of a dynsym in arbitrary order.
   if (L.Sym->isInGot() && R.Sym->isInGot())
     return L.Sym->GotIndex < R.Sym->GotIndex;
   if (!L.Sym->isInGot() && !R.Sym->isInGot())
     return false;
   return !L.Sym->isInGot();
 }
 
 void SymbolTableBaseSection::finalizeContents() {
   if (OutputSection *Sec = StrTabSec.getParent())
     getParent()->Link = Sec->SectionIndex;
 
   if (this->Type != SHT_DYNSYM) {
     sortSymTabSymbols();
     return;
   }
 
   // If it is a .dynsym, there should be no local symbols, but we need
   // to do a few things for the dynamic linker.
 
   // Section's Info field has the index of the first non-local symbol.
   // Because the first symbol entry is a null entry, 1 is the first.
   getParent()->Info = 1;
 
   if (In.GnuHashTab) {
     // NB: It also sorts Symbols to meet the GNU hash table requirements.
     In.GnuHashTab->addSymbols(Symbols);
   } else if (Config->EMachine == EM_MIPS) {
     std::stable_sort(Symbols.begin(), Symbols.end(), sortMipsSymbols);
   }
 
   size_t I = 0;
   for (const SymbolTableEntry &S : Symbols)
     S.Sym->DynsymIndex = ++I;
 }
 
 // The ELF spec requires that all local symbols precede global symbols, so we
 // sort symbol entries in this function. (For .dynsym, we don't do that because
 // symbols for dynamic linking are inherently all globals.)
 //
 // Aside from the above, we put local symbols in groups starting with the
 // STT_FILE symbol. That is convenient for identifying where local symbols
 // come from.
 void SymbolTableBaseSection::sortSymTabSymbols() {
   // Move all local symbols before global symbols.
   auto E = std::stable_partition(
       Symbols.begin(), Symbols.end(), [](const SymbolTableEntry &S) {
         return S.Sym->isLocal() || S.Sym->computeBinding() == STB_LOCAL;
       });
   size_t NumLocals = E - Symbols.begin();
   getParent()->Info = NumLocals + 1;
 
   // We want to group the local symbols by file. For that we rebuild the local
   // part of the symbols vector. We do not need to care about the STT_FILE
   // symbols: they are already naturally placed first in each group, because
   // STT_FILE is always the first symbol in an object file and hence precedes
   // all other local symbols we add for that file.
   MapVector<InputFile *, std::vector<SymbolTableEntry>> Arr;
   for (const SymbolTableEntry &S : llvm::make_range(Symbols.begin(), E))
     Arr[S.Sym->File].push_back(S);
 
   auto I = Symbols.begin();
   for (std::pair<InputFile *, std::vector<SymbolTableEntry>> &P : Arr)
     for (SymbolTableEntry &Entry : P.second)
       *I++ = Entry;
 }
 
 void SymbolTableBaseSection::addSymbol(Symbol *B) {
   // Adding a local symbol to a .dynsym is a bug.
   assert(this->Type != SHT_DYNSYM || !B->isLocal());
 
   bool HashIt = B->isLocal();
   Symbols.push_back({B, StrTabSec.addString(B->getName(), HashIt)});
 }
 
 size_t SymbolTableBaseSection::getSymbolIndex(Symbol *Sym) {
   // Initializes symbol lookup tables lazily. This is used only
   // for -r or -emit-relocs.
   llvm::call_once(OnceFlag, [&] {
     SymbolIndexMap.reserve(Symbols.size());
     size_t I = 0;
     for (const SymbolTableEntry &E : Symbols) {
       if (E.Sym->Type == STT_SECTION)
         SectionIndexMap[E.Sym->getOutputSection()] = ++I;
       else
         SymbolIndexMap[E.Sym] = ++I;
     }
   });
 
   // Section symbols are mapped based on their output sections
   // to maintain their semantics.
   if (Sym->Type == STT_SECTION)
     return SectionIndexMap.lookup(Sym->getOutputSection());
   return SymbolIndexMap.lookup(Sym);
 }
 
 template <class ELFT>
 SymbolTableSection<ELFT>::SymbolTableSection(StringTableSection &StrTabSec)
     : SymbolTableBaseSection(StrTabSec) {
   this->Entsize = sizeof(Elf_Sym);
 }
 
 static BssSection *getCommonSec(Symbol *Sym) {
   if (!Config->DefineCommon)
     if (auto *D = dyn_cast<Defined>(Sym))
       return dyn_cast_or_null<BssSection>(D->Section);
   return nullptr;
 }
 
 static uint32_t getSymSectionIndex(Symbol *Sym) {
   if (getCommonSec(Sym))
     return SHN_COMMON;
   if (!isa<Defined>(Sym) || Sym->NeedsPltAddr)
     return SHN_UNDEF;
   if (const OutputSection *OS = Sym->getOutputSection())
     return OS->SectionIndex >= SHN_LORESERVE ? (uint32_t)SHN_XINDEX
                                              : OS->SectionIndex;
   return SHN_ABS;
 }
 
 // Write the internal symbol table contents to the output symbol table.
 template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *Buf) {
   // The first entry is a null entry as per the ELF spec.
   memset(Buf, 0, sizeof(Elf_Sym));
   Buf += sizeof(Elf_Sym);
 
   auto *ESym = reinterpret_cast<Elf_Sym *>(Buf);
 
   for (SymbolTableEntry &Ent : Symbols) {
     Symbol *Sym = Ent.Sym;
 
     // Set st_info and st_other.
     ESym->st_other = 0;
     if (Sym->isLocal()) {
       ESym->setBindingAndType(STB_LOCAL, Sym->Type);
     } else {
       ESym->setBindingAndType(Sym->computeBinding(), Sym->Type);
       ESym->setVisibility(Sym->Visibility);
     }
 
     // The 3 most significant bits of st_other are used by OpenPOWER ABI.
     // See getPPC64GlobalEntryToLocalEntryOffset() for more details.
     if (Config->EMachine == EM_PPC64)
       ESym->st_other |= Sym->StOther & 0xe0;
 
     ESym->st_name = Ent.StrTabOffset;
     ESym->st_shndx = getSymSectionIndex(Ent.Sym);
 
     // Copy the symbol size if it is a defined symbol. st_size is not
     // significant for undefined symbols, so in that case it is up to us
     // whether to copy it. We leave it as zero because, by not setting a value,
     // we get the exact same output for two sets of input files that differ
     // only in undefined symbol sizes in DSOs.
     if (ESym->st_shndx == SHN_UNDEF)
       ESym->st_size = 0;
     else
       ESym->st_size = Sym->getSize();
 
     // st_value is usually the address of a symbol, but it has a
     // special meaning for uninstantiated common symbols (this can
     // occur if -r is given).
     if (BssSection *CommonSec = getCommonSec(Ent.Sym))
       ESym->st_value = CommonSec->Alignment;
     else
       ESym->st_value = Sym->getVA();
 
     ++ESym;
   }
 
   // On MIPS we need to mark a symbol that has a PLT entry and requires
   // pointer equality with the STO_MIPS_PLT flag. That is necessary to help
   // the dynamic linker distinguish such symbols from MIPS lazy-binding stubs.
   // https://sourceware.org/ml/binutils/2008-07/txt00000.txt
   if (Config->EMachine == EM_MIPS) {
     auto *ESym = reinterpret_cast<Elf_Sym *>(Buf);
 
     for (SymbolTableEntry &Ent : Symbols) {
       Symbol *Sym = Ent.Sym;
       if (Sym->isInPlt() && Sym->NeedsPltAddr)
         ESym->st_other |= STO_MIPS_PLT;
       if (isMicroMips()) {
         // Set the STO_MIPS_MICROMIPS flag and the least-significant bit for
         // a defined microMIPS symbol, or for a defined symbol that should
         // point to its PLT entry (in the microMIPS case, PLT entries always
         // contain microMIPS code).
         if (Sym->isDefined() &&
             ((Sym->StOther & STO_MIPS_MICROMIPS) || Sym->NeedsPltAddr)) {
           if (StrTabSec.isDynamic())
             ESym->st_value |= 1;
           ESym->st_other |= STO_MIPS_MICROMIPS;
         }
       }
       if (Config->Relocatable)
         if (auto *D = dyn_cast<Defined>(Sym))
           if (isMipsPIC<ELFT>(D))
             ESym->st_other |= STO_MIPS_PIC;
       ++ESym;
     }
   }
 }
 
 SymtabShndxSection::SymtabShndxSection()
     : SyntheticSection(0, SHT_SYMTAB_SHNDX, 4, ".symtab_shndxr") {
   this->Entsize = 4;
 }
 
 void SymtabShndxSection::writeTo(uint8_t *Buf) {
   // We write an array of 32-bit values, where each value has a 1:1 association
   // with an entry in .symtab. If the corresponding entry contains SHN_XINDEX,
   // we need to write the actual index; otherwise, we write SHN_UNDEF (0).
   Buf += 4; // Ignore .symtab[0] entry.
   for (const SymbolTableEntry &Entry : In.SymTab->getSymbols()) {
     if (getSymSectionIndex(Entry.Sym) == SHN_XINDEX)
       write32(Buf, Entry.Sym->getOutputSection()->SectionIndex);
     Buf += 4;
   }
 }
 
 bool SymtabShndxSection::empty() const {
   // SHT_SYMTAB can hold symbols with section index values up to
   // SHN_LORESERVE. If we need more, we use the SHT_SYMTAB_SHNDX extension
   // section. The problem is that the final section indices are revealed a bit
   // too late, so we do not know them here. For simplicity, we just always
   // create a .symtab_shndxr section when the number of output sections is
   // huge.
   size_t Size = 0;
   for (BaseCommand *Base : Script->SectionCommands)
     if (isa<OutputSection>(Base))
       ++Size;
   return Size < SHN_LORESERVE;
 }
 
 void SymtabShndxSection::finalizeContents() {
   getParent()->Link = In.SymTab->getParent()->SectionIndex;
 }
 
 size_t SymtabShndxSection::getSize() const {
   return In.SymTab->getNumSymbols() * 4;
 }
 
 // .hash and .gnu.hash sections contain on-disk hash tables that map
 // symbol names to their dynamic symbol table indices. Their purpose
 // is to help the dynamic linker resolve symbols quickly. If ELF files
 // don't have them, the dynamic linker has to do linear search on all
 // dynamic symbols, which makes programs slower. Therefore, a .hash
 // section is added to a DSO by default. A .gnu.hash is added if you
 // give the -hash-style=gnu or -hash-style=both option.
 //
 // The Unix semantics of resolving dynamic symbols is somewhat expensive.
 // Each ELF file has a list of DSOs that the ELF file depends on and a
 // list of dynamic symbols that need to be resolved from any of the
 // DSOs. That means resolving all dynamic symbols takes O(m*n) time,
 // where m is the number of DSOs and n is the number of dynamic
 // symbols. For modern large programs, both m and n are large. So
 // making each step faster by using hash tables substantially
 // improves program load time.
 //
 // (Note that this is not the only way to design shared libraries.
 // For instance, Windows DLLs take a different approach. On Windows,
 // each dynamic symbol carries the name of the DLL from which the symbol
 // has to be resolved. That makes the cost of symbol resolution O(n).
 // This disables some hacky techniques you can use on Unix such as
 // LD_PRELOAD, but this is arguably better semantics than the Unix ones.)
 //
 // For historical reasons, we have two different hash tables, .hash
 // and .gnu.hash. They serve the same purpose, and .gnu.hash is a newer
 // and better version of .hash. .hash is just an on-disk hash table, but
 // .gnu.hash has a Bloom filter in addition to a hash table to skip
 // DSOs very quickly. If you are sure that your dynamic linker knows
 // about .gnu.hash, you want to specify -hash-style=gnu. Otherwise, a
 // safe bet is to specify -hash-style=both for backward compatibility.
 GnuHashTableSection::GnuHashTableSection()
     : SyntheticSection(SHF_ALLOC, SHT_GNU_HASH, Config->Wordsize, ".gnu.hash") {
 }
 
 void GnuHashTableSection::finalizeContents() {
   if (OutputSection *Sec = In.DynSymTab->getParent())
     getParent()->Link = Sec->SectionIndex;
 
   // Compute the Bloom filter size in words. We want to allocate 12
   // bits for each symbol. The size must be a power of two.
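   // For example, 10,000 dynamic symbols on a 64-bit target give
   // NumBits = 120,000 and MaskWords = NextPowerOf2(120,000 / 64) = 2048,
   // i.e. a 16 KiB Bloom filter.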
   if (Symbols.empty()) {
     MaskWords = 1;
   } else {
     uint64_t NumBits = Symbols.size() * 12;
     MaskWords = NextPowerOf2(NumBits / (Config->Wordsize * 8));
   }
 
   Size = 16;                            // Header
   Size += Config->Wordsize * MaskWords; // Bloom filter
   Size += NBuckets * 4;                 // Hash buckets
   Size += Symbols.size() * 4;           // Hash values
 }
 
 void GnuHashTableSection::writeTo(uint8_t *Buf) {
   // The output buffer is not guaranteed to be zero-cleared because we pre-
   // fill executable sections with trap instructions. This is a precaution
   // for that case, which happens only when -no-rosegment is given.
   memset(Buf, 0, Size);
 
   // Write a header.
   write32(Buf, NBuckets);
   write32(Buf + 4, In.DynSymTab->getNumSymbols() - Symbols.size());
   write32(Buf + 8, MaskWords);
   write32(Buf + 12, Shift2);
   Buf += 16;
 
   // Write a bloom filter and a hash table.
   writeBloomFilter(Buf);
   Buf += Config->Wordsize * MaskWords;
   writeHashTable(Buf);
 }
 
 // This function writes a Bloom filter that sets two bits per symbol. This
 // Bloom filter alone usually filters out 80% or more of all symbol lookups [1].
 // The dynamic linker uses the hash table only when a symbol is not
 // filtered out by the Bloom filter.
 //
 // [1] Ulrich Drepper (2011), "How To Write Shared Libraries" (Ver. 4.1.2),
 //     p.9, https://www.akkadia.org/drepper/dsohowto.pdf
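 //
 // For each symbol, two bits of one Bloom filter word are set. For example,
 // with C = 64 and (hypothetically) MaskWords = 4 and Shift2 = 26, a symbol
 // whose hash is H lands in word (H / 64) & 3 and sets bits H % 64 and
 // (H >> 26) % 64 of that word.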
 void GnuHashTableSection::writeBloomFilter(uint8_t *Buf) {
   unsigned C = Config->Is64 ? 64 : 32;
   for (const Entry &Sym : Symbols) {
     // When C = 64, we choose a word using bits [6:...] of the hash and set
     // two bits in that word, selected by bits [0:5] and bits [26:31] of the
     // hash.
     size_t I = (Sym.Hash / C) & (MaskWords - 1);
     uint64_t Val = readUint(Buf + I * Config->Wordsize);
     Val |= uint64_t(1) << (Sym.Hash % C);
     Val |= uint64_t(1) << ((Sym.Hash >> Shift2) % C);
     writeUint(Buf + I * Config->Wordsize, Val);
   }
 }
 
 void GnuHashTableSection::writeHashTable(uint8_t *Buf) {
   uint32_t *Buckets = reinterpret_cast<uint32_t *>(Buf);
   uint32_t OldBucket = -1;
   uint32_t *Values = Buckets + NBuckets;
   for (auto I = Symbols.begin(), E = Symbols.end(); I != E; ++I) {
     // Write a hash value. Entries that share the same hash modulo NBuckets
     // form a chain, and the last element of each chain is marked by setting
     // its LSB to 1.
     uint32_t Hash = I->Hash;
     bool IsLastInChain = (I + 1) == E || I->BucketIdx != (I + 1)->BucketIdx;
     Hash = IsLastInChain ? Hash | 1 : Hash & ~1;
     write32(Values++, Hash);
 
     if (I->BucketIdx == OldBucket)
       continue;
     // Write a hash bucket. Hash buckets contain indices in the following hash
     // value table.
     write32(Buckets + I->BucketIdx, I->Sym->DynsymIndex);
     OldBucket = I->BucketIdx;
   }
 }
 
 static uint32_t hashGnu(StringRef Name) {
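   // This is essentially the djb2 string hash (h = h * 33 + c, seeded with
   // 5381); for example, hashGnu("a") is 5381 * 33 + 'a' = 177670.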
   uint32_t H = 5381;
   for (uint8_t C : Name)
     H = (H << 5) + H + C;
   return H;
 }
 
 // Add symbols to this symbol hash table. Note that this function
 // destructively sorts the given vector -- which is needed because
 // the GNU-style hash table imposes sorting requirements.
 void GnuHashTableSection::addSymbols(std::vector<SymbolTableEntry> &V) {
   // We cannot use 'auto' for Mid because GCC 6.1 cannot deduce
   // its type correctly.
   std::vector<SymbolTableEntry>::iterator Mid =
       std::stable_partition(V.begin(), V.end(), [](const SymbolTableEntry &S) {
         return !S.Sym->isDefined();
       });
 
   // We chose load factor 4 for the on-disk hash table. For each hash
   // collision, the dynamic linker will compare a uint32_t hash value.
   // Since the integer comparison is quite fast, we believe we can
   // make the load factor even larger. 4 is just a conservative choice.
   //
   // Note that we don't want to create a zero-sized hash table because
   // the Android loader as of 2018 doesn't like a .gnu.hash containing such
   // a table. In that case, we create a hash table with one unused
   // dummy slot instead.
   NBuckets = std::max<size_t>((V.end() - Mid) / 4, 1);
 
   if (Mid == V.end())
     return;
 
   for (SymbolTableEntry &Ent : llvm::make_range(Mid, V.end())) {
     Symbol *B = Ent.Sym;
     uint32_t Hash = hashGnu(B->getName());
     uint32_t BucketIdx = Hash % NBuckets;
     Symbols.push_back({B, Ent.StrTabOffset, Hash, BucketIdx});
   }
 
   std::stable_sort(
       Symbols.begin(), Symbols.end(),
       [](const Entry &L, const Entry &R) { return L.BucketIdx < R.BucketIdx; });
 
   V.erase(Mid, V.end());
   for (const Entry &Ent : Symbols)
     V.push_back({Ent.Sym, Ent.StrTabOffset});
 }
 
 HashTableSection::HashTableSection()
     : SyntheticSection(SHF_ALLOC, SHT_HASH, 4, ".hash") {
   this->Entsize = 4;
 }
 
 void HashTableSection::finalizeContents() {
   if (OutputSection *Sec = In.DynSymTab->getParent())
     getParent()->Link = Sec->SectionIndex;
 
   unsigned NumEntries = 2;                       // nbucket and nchain.
   NumEntries += In.DynSymTab->getNumSymbols();   // The chain entries.
 
   // Create as many buckets as there are symbols.
   NumEntries += In.DynSymTab->getNumSymbols();
   this->Size = NumEntries * 4;
 }
 
 void HashTableSection::writeTo(uint8_t *Buf) {
   // See comment in GnuHashTableSection::writeTo.
   memset(Buf, 0, Size);
 
   unsigned NumSymbols = In.DynSymTab->getNumSymbols();
 
   uint32_t *P = reinterpret_cast<uint32_t *>(Buf);
   write32(P++, NumSymbols); // nbucket
   write32(P++, NumSymbols); // nchain
 
   uint32_t *Buckets = P;
   uint32_t *Chains = P + NumSymbols;
 
   for (const SymbolTableEntry &S : In.DynSymTab->getSymbols()) {
     Symbol *Sym = S.Sym;
     StringRef Name = Sym->getName();
     unsigned I = Sym->DynsymIndex;
     uint32_t Hash = hashSysV(Name) % NumSymbols;
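     // Insert at the head of this bucket's chain: Chains[I] links to the
     // previous head, and the bucket is updated to point at I, so a lookup
     // walks the chain until it reaches index 0 (the null symbol).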
     Chains[I] = Buckets[Hash];
     write32(Buckets + Hash, I);
   }
 }
 
 // On PowerPC64 the lazy symbol resolvers go into the `global linkage table`
 // in the .glink section, rather than the typical .plt section.
 PltSection::PltSection(bool IsIplt)
     : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16,
                        Config->EMachine == EM_PPC64 ? ".glink" : ".plt"),
       HeaderSize(!IsIplt || Config->ZRetpolineplt ? Target->PltHeaderSize : 0),
       IsIplt(IsIplt) {
   // The PLT needs to be writable on SPARC as the dynamic linker will
   // modify the instructions in the PLT entries.
   if (Config->EMachine == EM_SPARCV9)
     this->Flags |= SHF_WRITE;
 }
 
 void PltSection::writeTo(uint8_t *Buf) {
   // At the beginning of the PLT or a retpoline IPLT, we have code to call the
   // dynamic linker to resolve dynsyms at runtime. Write such code.
   if (HeaderSize > 0)
     Target->writePltHeader(Buf);
   size_t Off = HeaderSize;
   // The IPLT is immediately after the PLT; account for this in RelOff.
   unsigned PltOff = getPltRelocOff();
 
   for (auto &I : Entries) {
     const Symbol *B = I.first;
     unsigned RelOff = I.second + PltOff;
     uint64_t Got = B->getGotPltVA();
     uint64_t Plt = this->getVA() + Off;
     Target->writePlt(Buf + Off, Got, Plt, B->PltIndex, RelOff);
     Off += Target->PltEntrySize;
   }
 }
 
 template <class ELFT> void PltSection::addEntry(Symbol &Sym) {
   Sym.PltIndex = Entries.size();
   RelocationBaseSection *PltRelocSection = In.RelaPlt;
-  if (IsIplt) {
+  if (IsIplt)
     PltRelocSection = In.RelaIplt;
-    Sym.IsInIplt = true;
-  }
   unsigned RelOff =
       static_cast<RelocationSection<ELFT> *>(PltRelocSection)->getRelocOffset();
   Entries.push_back(std::make_pair(&Sym, RelOff));
 }
 
 size_t PltSection::getSize() const {
   return HeaderSize + Entries.size() * Target->PltEntrySize;
 }
 
 // Some architectures add additional symbols to the PLT section. For
 // example, ARM uses mapping symbols to aid disassembly.
 void PltSection::addSymbols() {
   // The PLT may have symbols defined for the header; the IPLT has no header.
   if (!IsIplt)
     Target->addPltHeaderSymbols(*this);
   size_t Off = HeaderSize;
   for (size_t I = 0; I < Entries.size(); ++I) {
     Target->addPltSymbols(*this, Off);
     Off += Target->PltEntrySize;
   }
 }
 
 unsigned PltSection::getPltRelocOff() const {
   return IsIplt ? In.Plt->getSize() : 0;
 }
 
 // The string hash function for .gdb_index.
 static uint32_t computeGdbHash(StringRef S) {
   uint32_t H = 0;
   for (uint8_t C : S)
     H = H * 67 + toLower(C) - 113;
   return H;
 }
 
 GdbIndexSection::GdbIndexSection()
     : SyntheticSection(0, SHT_PROGBITS, 1, ".gdb_index") {}
 
 // Returns the desired size of an on-disk hash table for a .gdb_index section.
 // There's a tradeoff between size and collision rate. We aim for at most 75%
 // utilization.
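 // For example, 3,000 symbols yield NextPowerOf2(3,000 * 4 / 3) = 4096 slots
 // (about 73% full), while small symbol counts fall back to the 1,024-slot
 // minimum.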
 size_t GdbIndexSection::computeSymtabSize() const {
   return std::max<size_t>(NextPowerOf2(Symbols.size() * 4 / 3), 1024);
 }
 
 // Compute the output section size.
 void GdbIndexSection::initOutputSize() {
   Size = sizeof(GdbIndexHeader) + computeSymtabSize() * 8;
 
   for (GdbChunk &Chunk : Chunks)
     Size += Chunk.CompilationUnits.size() * 16 + Chunk.AddressAreas.size() * 20;
 
   // Add the constant pool size if exists.
   if (!Symbols.empty()) {
     GdbSymbol &Sym = Symbols.back();
     Size += Sym.NameOff + Sym.Name.size() + 1;
   }
 }
 
 static std::vector<InputSection *> getDebugInfoSections() {
   std::vector<InputSection *> Ret;
   for (InputSectionBase *S : InputSections)
     if (InputSection *IS = dyn_cast<InputSection>(S))
       if (IS->Name == ".debug_info")
         Ret.push_back(IS);
   return Ret;
 }
 
 static std::vector<GdbIndexSection::CuEntry> readCuList(DWARFContext &Dwarf) {
   std::vector<GdbIndexSection::CuEntry> Ret;
   for (std::unique_ptr<DWARFUnit> &Cu : Dwarf.compile_units())
     Ret.push_back({Cu->getOffset(), Cu->getLength() + 4});
   return Ret;
 }
 
 static std::vector<GdbIndexSection::AddressEntry>
 readAddressAreas(DWARFContext &Dwarf, InputSection *Sec) {
   std::vector<GdbIndexSection::AddressEntry> Ret;
 
   uint32_t CuIdx = 0;
   for (std::unique_ptr<DWARFUnit> &Cu : Dwarf.compile_units()) {
     Expected<DWARFAddressRangesVector> Ranges = Cu->collectAddressRanges();
     if (!Ranges) {
       error(toString(Sec) + ": " + toString(Ranges.takeError()));
       return {};
     }
 
     ArrayRef<InputSectionBase *> Sections = Sec->File->getSections();
     for (DWARFAddressRange &R : *Ranges) {
       InputSectionBase *S = Sections[R.SectionIndex];
       if (!S || S == &InputSection::Discarded || !S->Live)
         continue;
       // Range list with zero size has no effect.
       if (R.LowPC == R.HighPC)
         continue;
       auto *IS = cast<InputSection>(S);
       uint64_t Offset = IS->getOffsetInFile();
       Ret.push_back({IS, R.LowPC - Offset, R.HighPC - Offset, CuIdx});
     }
     ++CuIdx;
   }
 
   return Ret;
 }
 
 template <class ELFT>
 static std::vector<GdbIndexSection::NameAttrEntry>
 readPubNamesAndTypes(const LLDDwarfObj<ELFT> &Obj,
                      const std::vector<GdbIndexSection::CuEntry> &CUs) {
   const DWARFSection &PubNames = Obj.getGnuPubNamesSection();
   const DWARFSection &PubTypes = Obj.getGnuPubTypesSection();
 
   std::vector<GdbIndexSection::NameAttrEntry> Ret;
   for (const DWARFSection *Pub : {&PubNames, &PubTypes}) {
     DWARFDebugPubTable Table(Obj, *Pub, Config->IsLE, true);
     for (const DWARFDebugPubTable::Set &Set : Table.getData()) {
       // The value written into the constant pool is Kind << 24 | CuIndex. As we
       // don't know how many compilation units precede this object to compute
       // CuIndex, we compute (Kind << 24 | CuIndexInThisObject) instead, and add
       // the number of preceding compilation units later.
       uint32_t I =
           lower_bound(CUs, Set.Offset,
                       [](GdbIndexSection::CuEntry CU, uint32_t Offset) {
                         return CU.CuOffset < Offset;
                       }) -
           CUs.begin();
       for (const DWARFDebugPubTable::Entry &Ent : Set.Entries)
         Ret.push_back({{Ent.Name, computeGdbHash(Ent.Name)},
                        (Ent.Descriptor.toBits() << 24) | I});
     }
   }
   return Ret;
 }
 
 // Create a list of symbols from a given list of symbol names and types
 // by uniquifying them by name.
 static std::vector<GdbIndexSection::GdbSymbol>
 createSymbols(ArrayRef<std::vector<GdbIndexSection::NameAttrEntry>> NameAttrs,
               const std::vector<GdbIndexSection::GdbChunk> &Chunks) {
   typedef GdbIndexSection::GdbSymbol GdbSymbol;
   typedef GdbIndexSection::NameAttrEntry NameAttrEntry;
 
   // For each chunk, compute the number of compilation units preceding it.
   uint32_t CuIdx = 0;
   std::vector<uint32_t> CuIdxs(Chunks.size());
   for (uint32_t I = 0, E = Chunks.size(); I != E; ++I) {
     CuIdxs[I] = CuIdx;
     CuIdx += Chunks[I].CompilationUnits.size();
   }
 
   // The number of symbols we will handle in this function is on the order
   // of millions for very large executables, so we use multi-threading to
   // speed it up.
   size_t NumShards = 32;
   size_t Concurrency = 1;
   if (ThreadsEnabled)
     Concurrency =
         std::min<size_t>(PowerOf2Floor(hardware_concurrency()), NumShards);
 
   // A sharded map to uniquify symbols by name.
   std::vector<DenseMap<CachedHashStringRef, size_t>> Map(NumShards);
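   // With NumShards = 32, Shift below is 27, so the top five bits of a name
   // hash select the shard and the low bits of the shard id select the worker
   // thread that owns it.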
   size_t Shift = 32 - countTrailingZeros(NumShards);
 
   // Instantiate GdbSymbols while uniquifying them by name.
   std::vector<std::vector<GdbSymbol>> Symbols(NumShards);
   parallelForEachN(0, Concurrency, [&](size_t ThreadId) {
     uint32_t I = 0;
     for (ArrayRef<NameAttrEntry> Entries : NameAttrs) {
       for (const NameAttrEntry &Ent : Entries) {
         size_t ShardId = Ent.Name.hash() >> Shift;
         if ((ShardId & (Concurrency - 1)) != ThreadId)
           continue;
 
         uint32_t V = Ent.CuIndexAndAttrs + CuIdxs[I];
         size_t &Idx = Map[ShardId][Ent.Name];
         if (Idx) {
           Symbols[ShardId][Idx - 1].CuVector.push_back(V);
           continue;
         }
 
         Idx = Symbols[ShardId].size() + 1;
         Symbols[ShardId].push_back({Ent.Name, {V}, 0, 0});
       }
       ++I;
     }
   });
 
   size_t NumSymbols = 0;
   for (ArrayRef<GdbSymbol> V : Symbols)
     NumSymbols += V.size();
 
   // The return type is a flattened vector, so we'll copy each vector's
   // contents to Ret.
   std::vector<GdbSymbol> Ret;
   Ret.reserve(NumSymbols);
   for (std::vector<GdbSymbol> &Vec : Symbols)
     for (GdbSymbol &Sym : Vec)
       Ret.push_back(std::move(Sym));
 
   // CU vectors and symbol names are adjacent in the output file.
   // We can compute their offsets in the output file now.
   size_t Off = 0;
   for (GdbSymbol &Sym : Ret) {
     Sym.CuVectorOff = Off;
     Off += (Sym.CuVector.size() + 1) * 4;
   }
   for (GdbSymbol &Sym : Ret) {
     Sym.NameOff = Off;
     Off += Sym.Name.size() + 1;
   }
 
   return Ret;
 }
 
 // Returns a newly-created .gdb_index section.
 template <class ELFT> GdbIndexSection *GdbIndexSection::create() {
   std::vector<InputSection *> Sections = getDebugInfoSections();
 
   // .debug_gnu_pub{names,types} are useless in executables.
   // They are present in input object files solely for creating
   // a .gdb_index. So we can remove them from the output.
   for (InputSectionBase *S : InputSections)
     if (S->Name == ".debug_gnu_pubnames" || S->Name == ".debug_gnu_pubtypes")
       S->Live = false;
 
   std::vector<GdbChunk> Chunks(Sections.size());
   std::vector<std::vector<NameAttrEntry>> NameAttrs(Sections.size());
 
   parallelForEachN(0, Sections.size(), [&](size_t I) {
     ObjFile<ELFT> *File = Sections[I]->getFile<ELFT>();
     DWARFContext Dwarf(make_unique<LLDDwarfObj<ELFT>>(File));
 
     Chunks[I].Sec = Sections[I];
     Chunks[I].CompilationUnits = readCuList(Dwarf);
     Chunks[I].AddressAreas = readAddressAreas(Dwarf, Sections[I]);
     NameAttrs[I] = readPubNamesAndTypes<ELFT>(
         static_cast<const LLDDwarfObj<ELFT> &>(Dwarf.getDWARFObj()),
         Chunks[I].CompilationUnits);
   });
 
   auto *Ret = make<GdbIndexSection>();
   Ret->Chunks = std::move(Chunks);
   Ret->Symbols = createSymbols(NameAttrs, Ret->Chunks);
   Ret->initOutputSize();
   return Ret;
 }
 
 void GdbIndexSection::writeTo(uint8_t *Buf) {
   // Write the header.
   auto *Hdr = reinterpret_cast<GdbIndexHeader *>(Buf);
   uint8_t *Start = Buf;
   Hdr->Version = 7;
   Buf += sizeof(*Hdr);
 
   // Write the CU list.
   Hdr->CuListOff = Buf - Start;
   for (GdbChunk &Chunk : Chunks) {
     for (CuEntry &Cu : Chunk.CompilationUnits) {
       write64le(Buf, Chunk.Sec->OutSecOff + Cu.CuOffset);
       write64le(Buf + 8, Cu.CuLength);
       Buf += 16;
     }
   }
 
   // Write the address area.
   Hdr->CuTypesOff = Buf - Start;
   Hdr->AddressAreaOff = Buf - Start;
   uint32_t CuOff = 0;
   for (GdbChunk &Chunk : Chunks) {
     for (AddressEntry &E : Chunk.AddressAreas) {
       uint64_t BaseAddr = E.Section->getVA(0);
       write64le(Buf, BaseAddr + E.LowAddress);
       write64le(Buf + 8, BaseAddr + E.HighAddress);
       write32le(Buf + 16, E.CuIndex + CuOff);
       Buf += 20;
     }
     CuOff += Chunk.CompilationUnits.size();
   }
 
   // Write the on-disk open-addressing hash table containing symbols.
   Hdr->SymtabOff = Buf - Start;
   size_t SymtabSize = computeSymtabSize();
   uint32_t Mask = SymtabSize - 1;
 
   for (GdbSymbol &Sym : Symbols) {
     uint32_t H = Sym.Name.hash();
     uint32_t I = H & Mask;
     uint32_t Step = ((H * 17) & Mask) | 1;
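      // The step is forced to be odd and the table size is a power of two, so
      // this probe sequence visits every slot before repeating.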
 
     while (read32le(Buf + I * 8))
       I = (I + Step) & Mask;
 
     write32le(Buf + I * 8, Sym.NameOff);
     write32le(Buf + I * 8 + 4, Sym.CuVectorOff);
   }
 
   Buf += SymtabSize * 8;
 
   // Write the string pool.
   Hdr->ConstantPoolOff = Buf - Start;
   parallelForEach(Symbols, [&](GdbSymbol &Sym) {
     memcpy(Buf + Sym.NameOff, Sym.Name.data(), Sym.Name.size());
   });
 
   // Write the CU vectors.
   for (GdbSymbol &Sym : Symbols) {
     write32le(Buf, Sym.CuVector.size());
     Buf += 4;
     for (uint32_t Val : Sym.CuVector) {
       write32le(Buf, Val);
       Buf += 4;
     }
   }
 }
 
 bool GdbIndexSection::empty() const { return Chunks.empty(); }
 
 EhFrameHeader::EhFrameHeader()
     : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 4, ".eh_frame_hdr") {}
 
 // .eh_frame_hdr contains a binary search table of pointers to FDEs.
 // Each entry of the search table consists of two values:
 // the starting PC that the FDE covers, and the FDE's address.
 // The table is sorted by PC.
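 //
 // The 12-byte header written below is: a version byte (1), three encoding
 // bytes for the .eh_frame pointer, the FDE count and the table entries, a
 // 4-byte PC-relative offset to .eh_frame, and a 4-byte FDE count; the
 // (initial PC, FDE address) pairs follow.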
 void EhFrameHeader::writeTo(uint8_t *Buf) {
   typedef EhFrameSection::FdeData FdeData;
 
   std::vector<FdeData> Fdes = In.EhFrame->getFdeData();
 
   Buf[0] = 1;
   Buf[1] = DW_EH_PE_pcrel | DW_EH_PE_sdata4;
   Buf[2] = DW_EH_PE_udata4;
   Buf[3] = DW_EH_PE_datarel | DW_EH_PE_sdata4;
   write32(Buf + 4, In.EhFrame->getParent()->Addr - this->getVA() - 4);
   write32(Buf + 8, Fdes.size());
   Buf += 12;
 
   for (FdeData &Fde : Fdes) {
     write32(Buf, Fde.PcRel);
     write32(Buf + 4, Fde.FdeVARel);
     Buf += 8;
   }
 }
 
 size_t EhFrameHeader::getSize() const {
   // .eh_frame_hdr has a 12-byte header followed by a table of 8-byte entries,
   // one per FDE.
   return 12 + In.EhFrame->NumFdes * 8;
 }
 
 bool EhFrameHeader::empty() const { return In.EhFrame->empty(); }
 
 VersionDefinitionSection::VersionDefinitionSection()
     : SyntheticSection(SHF_ALLOC, SHT_GNU_verdef, sizeof(uint32_t),
                        ".gnu.version_d") {}
 
 static StringRef getFileDefName() {
   if (!Config->SoName.empty())
     return Config->SoName;
   return Config->OutputFile;
 }
 
 void VersionDefinitionSection::finalizeContents() {
   FileDefNameOff = In.DynStrTab->addString(getFileDefName());
   for (VersionDefinition &V : Config->VersionDefinitions)
     V.NameOff = In.DynStrTab->addString(V.Name);
 
   if (OutputSection *Sec = In.DynStrTab->getParent())
     getParent()->Link = Sec->SectionIndex;
 
   // sh_info should be set to the number of definitions. This fact is missing
   // from the documentation, but was confirmed by the binutils community:
   // https://sourceware.org/ml/binutils/2014-11/msg00355.html
   getParent()->Info = getVerDefNum();
 }
 
 void VersionDefinitionSection::writeOne(uint8_t *Buf, uint32_t Index,
                                         StringRef Name, size_t NameOff) {
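   // Each entry written here is a 20-byte Elf_Verdef immediately followed by
   // an 8-byte Elf_Verdaux, hence the constants vd_aux = 20 and vd_next = 28
   // (one EntrySize) below.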
   uint16_t Flags = Index == 1 ? VER_FLG_BASE : 0;
 
   // Write a verdef.
   write16(Buf, 1);                  // vd_version
   write16(Buf + 2, Flags);          // vd_flags
   write16(Buf + 4, Index);          // vd_ndx
   write16(Buf + 6, 1);              // vd_cnt
   write32(Buf + 8, hashSysV(Name)); // vd_hash
   write32(Buf + 12, 20);            // vd_aux
   write32(Buf + 16, 28);            // vd_next
 
   // Write a veraux.
   write32(Buf + 20, NameOff); // vda_name
   write32(Buf + 24, 0);       // vda_next
 }
 
 void VersionDefinitionSection::writeTo(uint8_t *Buf) {
   writeOne(Buf, 1, getFileDefName(), FileDefNameOff);
 
   for (VersionDefinition &V : Config->VersionDefinitions) {
     Buf += EntrySize;
     writeOne(Buf, V.Id, V.Name, V.NameOff);
   }
 
   // Need to terminate the last version definition.
   write32(Buf + 16, 0); // vd_next
 }
 
 size_t VersionDefinitionSection::getSize() const {
   return EntrySize * getVerDefNum();
 }
 
 // .gnu.version is a table where each entry is 2 bytes long.
 template <class ELFT>
 VersionTableSection<ELFT>::VersionTableSection()
     : SyntheticSection(SHF_ALLOC, SHT_GNU_versym, sizeof(uint16_t),
                        ".gnu.version") {
   this->Entsize = 2;
 }
 
 template <class ELFT> void VersionTableSection<ELFT>::finalizeContents() {
   // As of June 2016, the GNU docs do not mention that the sh_link field
   // should be set, but the Sun docs do. readelf also relies on this field.
   getParent()->Link = In.DynSymTab->getParent()->SectionIndex;
 }
 
 template <class ELFT> size_t VersionTableSection<ELFT>::getSize() const {
   return (In.DynSymTab->getSymbols().size() + 1) * 2;
 }
 
 template <class ELFT> void VersionTableSection<ELFT>::writeTo(uint8_t *Buf) {
   Buf += 2;
   for (const SymbolTableEntry &S : In.DynSymTab->getSymbols()) {
     write16(Buf, S.Sym->VersionId);
     Buf += 2;
   }
 }
 
 template <class ELFT> bool VersionTableSection<ELFT>::empty() const {
   return !In.VerDef && InX<ELFT>::VerNeed->empty();
 }
 
 template <class ELFT>
 VersionNeedSection<ELFT>::VersionNeedSection()
     : SyntheticSection(SHF_ALLOC, SHT_GNU_verneed, sizeof(uint32_t),
                        ".gnu.version_r") {
   // Identifiers in the verneed section start at 2 because 0 and 1 are reserved
   // for VER_NDX_LOCAL and VER_NDX_GLOBAL.
   // The first identifiers are reserved by the verdef section if it exists.
   NextIndex = getVerDefNum() + 1;
 }
 
 template <class ELFT> void VersionNeedSection<ELFT>::addSymbol(Symbol *SS) {
   auto &File = cast<SharedFile<ELFT>>(*SS->File);
   if (SS->VerdefIndex == VER_NDX_GLOBAL) {
     SS->VersionId = VER_NDX_GLOBAL;
     return;
   }
 
   // If we don't already know that we need an Elf_Verneed for this DSO, prepare
   // to create one by adding it to our needed list and creating a dynstr entry
   // for the soname.
   if (File.VerdefMap.empty())
     Needed.push_back({&File, In.DynStrTab->addString(File.SoName)});
   const typename ELFT::Verdef *Ver = File.Verdefs[SS->VerdefIndex];
   typename SharedFile<ELFT>::NeededVer &NV = File.VerdefMap[Ver];
 
   // If we don't already know that we need an Elf_Vernaux for this Elf_Verdef,
   // prepare to create one by allocating a version identifier and creating a
   // dynstr entry for the version name.
   if (NV.Index == 0) {
     NV.StrTab = In.DynStrTab->addString(File.getStringTable().data() +
                                         Ver->getAux()->vda_name);
     NV.Index = NextIndex++;
   }
   SS->VersionId = NV.Index;
 }
 
 template <class ELFT> void VersionNeedSection<ELFT>::writeTo(uint8_t *Buf) {
   // The Elf_Verneeds need to appear first, followed by the Elf_Vernauxs.
   auto *Verneed = reinterpret_cast<Elf_Verneed *>(Buf);
   auto *Vernaux = reinterpret_cast<Elf_Vernaux *>(Verneed + Needed.size());
 
   for (std::pair<SharedFile<ELFT> *, size_t> &P : Needed) {
     // Create an Elf_Verneed for this DSO.
     Verneed->vn_version = 1;
     Verneed->vn_cnt = P.first->VerdefMap.size();
     Verneed->vn_file = P.second;
     Verneed->vn_aux =
         reinterpret_cast<char *>(Vernaux) - reinterpret_cast<char *>(Verneed);
     Verneed->vn_next = sizeof(Elf_Verneed);
     ++Verneed;
 
     // Create the Elf_Vernauxs for this Elf_Verneed. The loop iterates over
     // VerdefMap, which will only contain references to needed version
     // definitions. Each Elf_Vernaux is based on the information contained in
     // the Elf_Verdef in the source DSO. This loop iterates over a std::map of
     // pointers, but is deterministic because the pointers refer to Elf_Verdef
     // data structures within a single input file.
     for (auto &NV : P.first->VerdefMap) {
       Vernaux->vna_hash = NV.first->vd_hash;
       Vernaux->vna_flags = 0;
       Vernaux->vna_other = NV.second.Index;
       Vernaux->vna_name = NV.second.StrTab;
       Vernaux->vna_next = sizeof(Elf_Vernaux);
       ++Vernaux;
     }
 
     Vernaux[-1].vna_next = 0;
   }
   Verneed[-1].vn_next = 0;
 }
 
 template <class ELFT> void VersionNeedSection<ELFT>::finalizeContents() {
   if (OutputSection *Sec = In.DynStrTab->getParent())
     getParent()->Link = Sec->SectionIndex;
   getParent()->Info = Needed.size();
 }
 
 template <class ELFT> size_t VersionNeedSection<ELFT>::getSize() const {
   unsigned Size = Needed.size() * sizeof(Elf_Verneed);
   for (const std::pair<SharedFile<ELFT> *, size_t> &P : Needed)
     Size += P.first->VerdefMap.size() * sizeof(Elf_Vernaux);
   return Size;
 }
 
 template <class ELFT> bool VersionNeedSection<ELFT>::empty() const {
   return getNeedNum() == 0;
 }
 
 void MergeSyntheticSection::addSection(MergeInputSection *MS) {
   MS->Parent = this;
   Sections.push_back(MS);
 }
 
 MergeTailSection::MergeTailSection(StringRef Name, uint32_t Type,
                                    uint64_t Flags, uint32_t Alignment)
     : MergeSyntheticSection(Name, Type, Flags, Alignment),
       Builder(StringTableBuilder::RAW, Alignment) {}
 
 size_t MergeTailSection::getSize() const { return Builder.getSize(); }
 
 void MergeTailSection::writeTo(uint8_t *Buf) { Builder.write(Buf); }
 
 void MergeTailSection::finalizeContents() {
   // Add all string pieces to the string table builder to create section
   // contents.
   for (MergeInputSection *Sec : Sections)
     for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
       if (Sec->Pieces[I].Live)
         Builder.add(Sec->getData(I));
 
   // Fix the string table content. After this, the contents will never change.
   Builder.finalize();
 
   // finalize() fixed tail-optimized strings, so we can now get
   // offsets of strings. Get an offset for each string and save it
   // to a corresponding StringPiece for easy access.
   for (MergeInputSection *Sec : Sections)
     for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
       if (Sec->Pieces[I].Live)
         Sec->Pieces[I].OutputOff = Builder.getOffset(Sec->getData(I));
 }
 
 void MergeNoTailSection::writeTo(uint8_t *Buf) {
   for (size_t I = 0; I < NumShards; ++I)
     Shards[I].write(Buf + ShardOffsets[I]);
 }
 
 // This function is very hot (i.e. it can take several seconds to finish)
 // because the number of inputs can be on the order of millions. So, we use
 // multi-threading.
 //
 // For any strings S and T, we know S is not mergeable with T if S's hash
 // value is different from T's. If that's the case, we can safely put S and
 // T into different string builders without worrying about merge misses.
 // We do it in parallel.
 void MergeNoTailSection::finalizeContents() {
   // Initializes string table builders.
   for (size_t I = 0; I < NumShards; ++I)
     Shards.emplace_back(StringTableBuilder::RAW, Alignment);
 
   // Concurrency level. Must be a power of 2 to avoid expensive modulo
   // operations in the following tight loop.
   size_t Concurrency = 1;
   if (ThreadsEnabled)
     Concurrency =
         std::min<size_t>(PowerOf2Floor(hardware_concurrency()), NumShards);
 
   // Add section pieces to the builders.
   parallelForEachN(0, Concurrency, [&](size_t ThreadId) {
     for (MergeInputSection *Sec : Sections) {
       for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I) {
         size_t ShardId = getShardId(Sec->Pieces[I].Hash);
         if ((ShardId & (Concurrency - 1)) == ThreadId && Sec->Pieces[I].Live)
           Sec->Pieces[I].OutputOff = Shards[ShardId].add(Sec->getData(I));
       }
     }
   });
 
   // Compute an in-section offset for each shard.
   size_t Off = 0;
   for (size_t I = 0; I < NumShards; ++I) {
     Shards[I].finalizeInOrder();
     if (Shards[I].getSize() > 0)
       Off = alignTo(Off, Alignment);
     ShardOffsets[I] = Off;
     Off += Shards[I].getSize();
   }
   Size = Off;
 
   // So far, section pieces have offsets from the beginning of their shards,
   // but we want offsets from the beginning of the whole section. Fix them.
   parallelForEach(Sections, [&](MergeInputSection *Sec) {
     for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
       if (Sec->Pieces[I].Live)
         Sec->Pieces[I].OutputOff +=
             ShardOffsets[getShardId(Sec->Pieces[I].Hash)];
   });
 }
 
 static MergeSyntheticSection *createMergeSynthetic(StringRef Name,
                                                    uint32_t Type,
                                                    uint64_t Flags,
                                                    uint32_t Alignment) {
   bool ShouldTailMerge = (Flags & SHF_STRINGS) && Config->Optimize >= 2;
   if (ShouldTailMerge)
     return make<MergeTailSection>(Name, Type, Flags, Alignment);
   return make<MergeNoTailSection>(Name, Type, Flags, Alignment);
 }
 
 template <class ELFT> void elf::splitSections() {
   // splitIntoPieces needs to be called on each MergeInputSection
   // before calling finalizeContents().
   parallelForEach(InputSections, [](InputSectionBase *Sec) {
     if (auto *S = dyn_cast<MergeInputSection>(Sec))
       S->splitIntoPieces();
     else if (auto *Eh = dyn_cast<EhInputSection>(Sec))
       Eh->split<ELFT>();
   });
 }
 
 // This function scans over the input sections to create mergeable
 // synthetic sections.
 //
 // It removes MergeInputSections from the input section array and adds
 // new synthetic sections at the location of the first input section
 // that it replaces. It then finalizes each synthetic section in order
 // to compute an output offset for each piece of each input section.
 void elf::mergeSections() {
   std::vector<MergeSyntheticSection *> MergeSections;
   for (InputSectionBase *&S : InputSections) {
     MergeInputSection *MS = dyn_cast<MergeInputSection>(S);
     if (!MS)
       continue;
 
     // We do not want to handle sections that are not alive, so just remove
     // them instead of trying to merge.
     if (!MS->Live) {
       S = nullptr;
       continue;
     }
 
     StringRef OutsecName = getOutputSectionName(MS);
     uint32_t Alignment = std::max<uint32_t>(MS->Alignment, MS->Entsize);
 
     auto I = llvm::find_if(MergeSections, [=](MergeSyntheticSection *Sec) {
       // While we could create a single synthetic section for two different
       // values of Entsize, it is better to take Entsize into consideration.
       //
       // With a single synthetic section no two pieces with different Entsize
       // could be equal, so we may as well have two sections.
       //
       // Using Entsize in here also allows us to propagate it to the synthetic
       // section.
       return Sec->Name == OutsecName && Sec->Flags == MS->Flags &&
              Sec->Entsize == MS->Entsize && Sec->Alignment == Alignment;
     });
     if (I == MergeSections.end()) {
       MergeSyntheticSection *Syn =
           createMergeSynthetic(OutsecName, MS->Type, MS->Flags, Alignment);
       MergeSections.push_back(Syn);
       I = std::prev(MergeSections.end());
       S = Syn;
       Syn->Entsize = MS->Entsize;
     } else {
       S = nullptr;
     }
     (*I)->addSection(MS);
   }
   for (auto *MS : MergeSections)
     MS->finalizeContents();
 
   std::vector<InputSectionBase *> &V = InputSections;
   V.erase(std::remove(V.begin(), V.end(), nullptr), V.end());
 }
 
 MipsRldMapSection::MipsRldMapSection()
     : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, Config->Wordsize,
                        ".rld_map") {}
 
 ARMExidxSentinelSection::ARMExidxSentinelSection()
     : SyntheticSection(SHF_ALLOC | SHF_LINK_ORDER, SHT_ARM_EXIDX,
                        Config->Wordsize, ".ARM.exidx") {}
 
 // Write a terminating sentinel entry to the end of the .ARM.exidx table.
 // This section will have been sorted last in the .ARM.exidx table.
 // This table entry will have the form:
 // | PREL31 upper bound of code that has exception tables | EXIDX_CANTUNWIND |
 // The sentinel must have the PREL31 value of an address higher than any
 // address described by any other table entry.
 void ARMExidxSentinelSection::writeTo(uint8_t *Buf) {
   assert(Highest);
   uint64_t S = Highest->getVA(Highest->getSize());
   uint64_t P = getVA();
   Target->relocateOne(Buf, R_ARM_PREL31, S - P);
   write32le(Buf + 4, 1);
 }
 
 // The sentinel has to be removed if there are no other .ARM.exidx entries.
 bool ARMExidxSentinelSection::empty() const {
   for (InputSection *IS : getInputSections(getParent()))
     if (!isa<ARMExidxSentinelSection>(IS))
       return false;
   return true;
 }
 
 bool ARMExidxSentinelSection::classof(const SectionBase *D) {
   return D->kind() == InputSectionBase::Synthetic && D->Type == SHT_ARM_EXIDX;
 }
 
 ThunkSection::ThunkSection(OutputSection *OS, uint64_t Off)
     : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS,
                        Config->Wordsize, ".text.thunk") {
   this->Parent = OS;
   this->OutSecOff = Off;
 }
 
 void ThunkSection::addThunk(Thunk *T) {
   Thunks.push_back(T);
   T->addSymbols(*this);
 }
 
 void ThunkSection::writeTo(uint8_t *Buf) {
   for (Thunk *T : Thunks)
     T->writeTo(Buf + T->Offset);
 }
 
 InputSection *ThunkSection::getTargetInputSection() const {
   if (Thunks.empty())
     return nullptr;
   const Thunk *T = Thunks.front();
   return T->getTargetInputSection();
 }
 
 bool ThunkSection::assignOffsets() {
   uint64_t Off = 0;
   for (Thunk *T : Thunks) {
     Off = alignTo(Off, T->Alignment);
     T->setOffset(Off);
     uint32_t Size = T->size();
     T->getThunkTargetSym()->Size = Size;
     Off += Size;
   }
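   // Report whether the section size changed; thunk placement may be re-run
   // until the offsets converge.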
   bool Changed = Off != Size;
   Size = Off;
   return Changed;
 }
 
 // If linking position-dependent code then the table will store the addresses
 // directly in the binary so the section has type SHT_PROGBITS. If linking
 // position-independent code the section has type SHT_NOBITS since it will be
 // allocated and filled in by the dynamic linker.
 PPC64LongBranchTargetSection::PPC64LongBranchTargetSection()
     : SyntheticSection(SHF_ALLOC | SHF_WRITE,
                        Config->Pic ? SHT_NOBITS : SHT_PROGBITS, 8,
                        ".branch_lt") {}
 
 void PPC64LongBranchTargetSection::addEntry(Symbol &Sym) {
   assert(Sym.PPC64BranchltIndex == 0xffff);
   Sym.PPC64BranchltIndex = Entries.size();
   Entries.push_back(&Sym);
 }
 
 size_t PPC64LongBranchTargetSection::getSize() const {
   return Entries.size() * 8;
 }
 
 void PPC64LongBranchTargetSection::writeTo(uint8_t *Buf) {
   assert(Target->GotPltEntrySize == 8);
   // If linking non-pic we have the final addresses of the targets and they get
   // written to the table directly. For pic the dynamic linker will allocate
   // the section and fill it in.
   if (Config->Pic)
     return;
 
   for (const Symbol *Sym : Entries) {
     assert(Sym->getVA());
     // Need calls to branch to the local entry-point since a long-branch
     // must be a local-call.
     write64(Buf,
             Sym->getVA() + getPPC64GlobalEntryToLocalEntryOffset(Sym->StOther));
     Buf += Target->GotPltEntrySize;
   }
 }
 
 bool PPC64LongBranchTargetSection::empty() const {
   // `removeUnusedSyntheticSections()` is called before thunk allocation which
   // is too early to determine if this section will be empty or not. We need
   // Finalized to keep the section alive until after thunk creation. Finalized
   // only gets set to true once `finalizeSections()` is called after thunk
   // creation. Because of this, if we don't create any long-branch thunks we end
   // up with an empty .branch_lt section in the binary.
   return Finalized && Entries.empty();
 }
 
 InStruct elf::In;
 
 template GdbIndexSection *GdbIndexSection::create<ELF32LE>();
 template GdbIndexSection *GdbIndexSection::create<ELF32BE>();
 template GdbIndexSection *GdbIndexSection::create<ELF64LE>();
 template GdbIndexSection *GdbIndexSection::create<ELF64BE>();
 
 template void elf::splitSections<ELF32LE>();
 template void elf::splitSections<ELF32BE>();
 template void elf::splitSections<ELF64LE>();
 template void elf::splitSections<ELF64BE>();
 
 template void EhFrameSection::addSection<ELF32LE>(InputSectionBase *);
 template void EhFrameSection::addSection<ELF32BE>(InputSectionBase *);
 template void EhFrameSection::addSection<ELF64LE>(InputSectionBase *);
 template void EhFrameSection::addSection<ELF64BE>(InputSectionBase *);
 
 template void PltSection::addEntry<ELF32LE>(Symbol &Sym);
 template void PltSection::addEntry<ELF32BE>(Symbol &Sym);
 template void PltSection::addEntry<ELF64LE>(Symbol &Sym);
 template void PltSection::addEntry<ELF64BE>(Symbol &Sym);
 
 template void MipsGotSection::build<ELF32LE>();
 template void MipsGotSection::build<ELF32BE>();
 template void MipsGotSection::build<ELF64LE>();
 template void MipsGotSection::build<ELF64BE>();
 
 template class elf::MipsAbiFlagsSection<ELF32LE>;
 template class elf::MipsAbiFlagsSection<ELF32BE>;
 template class elf::MipsAbiFlagsSection<ELF64LE>;
 template class elf::MipsAbiFlagsSection<ELF64BE>;
 
 template class elf::MipsOptionsSection<ELF32LE>;
 template class elf::MipsOptionsSection<ELF32BE>;
 template class elf::MipsOptionsSection<ELF64LE>;
 template class elf::MipsOptionsSection<ELF64BE>;
 
 template class elf::MipsReginfoSection<ELF32LE>;
 template class elf::MipsReginfoSection<ELF32BE>;
 template class elf::MipsReginfoSection<ELF64LE>;
 template class elf::MipsReginfoSection<ELF64BE>;
 
 template class elf::DynamicSection<ELF32LE>;
 template class elf::DynamicSection<ELF32BE>;
 template class elf::DynamicSection<ELF64LE>;
 template class elf::DynamicSection<ELF64BE>;
 
 template class elf::RelocationSection<ELF32LE>;
 template class elf::RelocationSection<ELF32BE>;
 template class elf::RelocationSection<ELF64LE>;
 template class elf::RelocationSection<ELF64BE>;
 
 template class elf::AndroidPackedRelocationSection<ELF32LE>;
 template class elf::AndroidPackedRelocationSection<ELF32BE>;
 template class elf::AndroidPackedRelocationSection<ELF64LE>;
 template class elf::AndroidPackedRelocationSection<ELF64BE>;
 
 template class elf::RelrSection<ELF32LE>;
 template class elf::RelrSection<ELF32BE>;
 template class elf::RelrSection<ELF64LE>;
 template class elf::RelrSection<ELF64BE>;
 
 template class elf::SymbolTableSection<ELF32LE>;
 template class elf::SymbolTableSection<ELF32BE>;
 template class elf::SymbolTableSection<ELF64LE>;
 template class elf::SymbolTableSection<ELF64BE>;
 
 template class elf::VersionTableSection<ELF32LE>;
 template class elf::VersionTableSection<ELF32BE>;
 template class elf::VersionTableSection<ELF64LE>;
 template class elf::VersionTableSection<ELF64BE>;
 
 template class elf::VersionNeedSection<ELF32LE>;
 template class elf::VersionNeedSection<ELF32BE>;
 template class elf::VersionNeedSection<ELF64LE>;
 template class elf::VersionNeedSection<ELF64BE>;
Index: head/contrib/llvm/tools/lld/ELF/Writer.cpp
===================================================================
--- head/contrib/llvm/tools/lld/ELF/Writer.cpp	(revision 350466)
+++ head/contrib/llvm/tools/lld/ELF/Writer.cpp	(revision 350467)
@@ -1,2545 +1,2543 @@
 //===- Writer.cpp ---------------------------------------------------------===//
 //
 //                             The LLVM Linker
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 
 #include "Writer.h"
 #include "AArch64ErrataFix.h"
 #include "CallGraphSort.h"
 #include "Config.h"
 #include "Filesystem.h"
 #include "LinkerScript.h"
 #include "MapFile.h"
 #include "OutputSections.h"
 #include "Relocations.h"
 #include "SymbolTable.h"
 #include "Symbols.h"
 #include "SyntheticSections.h"
 #include "Target.h"
 #include "lld/Common/Memory.h"
 #include "lld/Common/Strings.h"
 #include "lld/Common/Threads.h"
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/StringSwitch.h"
 #include <climits>
 
 using namespace llvm;
 using namespace llvm::ELF;
 using namespace llvm::object;
 using namespace llvm::support;
 using namespace llvm::support::endian;
 
 using namespace lld;
 using namespace lld::elf;
 
 namespace {
 // The writer writes a SymbolTable result to a file.
 template <class ELFT> class Writer {
 public:
   Writer() : Buffer(errorHandler().OutputBuffer) {}
   typedef typename ELFT::Shdr Elf_Shdr;
   typedef typename ELFT::Ehdr Elf_Ehdr;
   typedef typename ELFT::Phdr Elf_Phdr;
 
   void run();
 
 private:
   void copyLocalSymbols();
   void addSectionSymbols();
   void forEachRelSec(llvm::function_ref<void(InputSectionBase &)> Fn);
   void sortSections();
   void resolveShfLinkOrder();
   void maybeAddThunks();
   void sortInputSections();
   void finalizeSections();
   void checkExecuteOnly();
   void setReservedSymbolSections();
 
   std::vector<PhdrEntry *> createPhdrs();
   void removeEmptyPTLoad();
   void addPtArmExid(std::vector<PhdrEntry *> &Phdrs);
   void assignFileOffsets();
   void assignFileOffsetsBinary();
   void setPhdrs();
   void checkSections();
   void fixSectionAlignments();
   void openFile();
   void writeTrapInstr();
   void writeHeader();
   void writeSections();
   void writeSectionsBinary();
   void writeBuildId();
 
   std::unique_ptr<FileOutputBuffer> &Buffer;
 
   void addRelIpltSymbols();
   void addStartEndSymbols();
   void addStartStopSymbols(OutputSection *Sec);
 
   std::vector<PhdrEntry *> Phdrs;
 
   uint64_t FileSize;
   uint64_t SectionHeaderOff;
 };
 } // anonymous namespace
 
 static bool isSectionPrefix(StringRef Prefix, StringRef Name) {
   return Name.startswith(Prefix) || Name == Prefix.drop_back();
 }
 
 StringRef elf::getOutputSectionName(const InputSectionBase *S) {
   if (Config->Relocatable)
     return S->Name;
 
   // This is for --emit-relocs. If .text.foo is emitted as .text.bar, we want
   // to emit .rela.text.foo as .rela.text.bar for consistency (this is not
   // technically required, but not doing it is odd). This code guarantees that.
   if (auto *IS = dyn_cast<InputSection>(S)) {
     if (InputSectionBase *Rel = IS->getRelocatedSection()) {
       OutputSection *Out = Rel->getOutputSection();
       if (S->Type == SHT_RELA)
         return Saver.save(".rela" + Out->Name);
       return Saver.save(".rel" + Out->Name);
     }
   }
 
   // This check is for -z keep-text-section-prefix.  This option separates text
   // sections with prefix ".text.hot", ".text.unlikely", ".text.startup" or
   // ".text.exit".
   // When enabled, this allows identifying the hot code region (.text.hot) in
   // the final binary which can be selectively mapped to huge pages or mlocked,
   // for instance.
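   // For example (illustrative, not part of the original comment): with this
   // option an input section named ".text.hot.foo" keeps the ".text.hot"
   // output name, whereas the generic rule below would fold it into ".text".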
   if (Config->ZKeepTextSectionPrefix)
     for (StringRef V :
          {".text.hot.", ".text.unlikely.", ".text.startup.", ".text.exit."})
       if (isSectionPrefix(V, S->Name))
         return V.drop_back();
 
   for (StringRef V :
        {".text.", ".rodata.", ".data.rel.ro.", ".data.", ".bss.rel.ro.",
         ".bss.", ".init_array.", ".fini_array.", ".ctors.", ".dtors.", ".tbss.",
         ".gcc_except_table.", ".tdata.", ".ARM.exidx.", ".ARM.extab."})
     if (isSectionPrefix(V, S->Name))
       return V.drop_back();
 
   // CommonSection is identified as "COMMON" in linker scripts.
   // By default, it should go to the .bss section.
   if (S->Name == "COMMON")
     return ".bss";
 
   return S->Name;
 }
 
 static bool needsInterpSection() {
   return !SharedFiles.empty() && !Config->DynamicLinker.empty() &&
          Script->needsInterpSection();
 }
 
 template <class ELFT> void elf::writeResult() { Writer<ELFT>().run(); }
 
 template <class ELFT> void Writer<ELFT>::removeEmptyPTLoad() {
   llvm::erase_if(Phdrs, [&](const PhdrEntry *P) {
     if (P->p_type != PT_LOAD)
       return false;
     if (!P->FirstSec)
       return true;
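     // The loadable span runs from the start of the segment's first section to
     // the end of its last one; an empty span means the PT_LOAD can be dropped.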
     uint64_t Size = P->LastSec->Addr + P->LastSec->Size - P->FirstSec->Addr;
     return Size == 0;
   });
 }
 
 template <class ELFT> static void combineEhFrameSections() {
   for (InputSectionBase *&S : InputSections) {
     EhInputSection *ES = dyn_cast<EhInputSection>(S);
     if (!ES || !ES->Live)
       continue;
 
     In.EhFrame->addSection<ELFT>(ES);
     S = nullptr;
   }
 
   std::vector<InputSectionBase *> &V = InputSections;
   V.erase(std::remove(V.begin(), V.end(), nullptr), V.end());
 }
 
 static Defined *addOptionalRegular(StringRef Name, SectionBase *Sec,
                                    uint64_t Val, uint8_t StOther = STV_HIDDEN,
                                    uint8_t Binding = STB_GLOBAL) {
   Symbol *S = Symtab->find(Name);
   if (!S || S->isDefined())
     return nullptr;
   return Symtab->addDefined(Name, StOther, STT_NOTYPE, Val,
                             /*Size=*/0, Binding, Sec,
                             /*File=*/nullptr);
 }
 
 static Defined *addAbsolute(StringRef Name) {
   return Symtab->addDefined(Name, STV_HIDDEN, STT_NOTYPE, 0, 0, STB_GLOBAL,
                             nullptr, nullptr);
 }
 
 // The linker is expected to define some symbols depending on
 // the linking result. This function defines such symbols.
 void elf::addReservedSymbols() {
   if (Config->EMachine == EM_MIPS) {
     // Define _gp for MIPS. st_value of _gp symbol will be updated by Writer
     // so that it points to an absolute address which, by default, is at an
     // offset of 0x7ff0 relative to the GOT.
     // See "Global Data Symbols" in Chapter 6 in the following document:
     // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
     ElfSym::MipsGp = addAbsolute("_gp");
 
     // On the MIPS O32 ABI, _gp_disp is a magic symbol that designates the offset
     // between the start of a function and the 'gp' pointer into the GOT.
     if (Symtab->find("_gp_disp"))
       ElfSym::MipsGpDisp = addAbsolute("_gp_disp");
 
     // __gnu_local_gp is a magic symbol equal to the current value of the 'gp'
     // pointer. This symbol is used in the code generated by the .cpload
     // pseudo-op when the -mno-shared option is used.
     // https://sourceware.org/ml/binutils/2004-12/msg00094.html
     if (Symtab->find("__gnu_local_gp"))
       ElfSym::MipsLocalGp = addAbsolute("__gnu_local_gp");
   }
 
   // The Power Architecture 64-bit v2 ABI defines a TableOfContents (TOC) which
   // combines the typical ELF GOT with the small data sections. It commonly
   // includes .got .toc .sdata .sbss. The .TOC. symbol replaces both
   // _GLOBAL_OFFSET_TABLE_ and _SDA_BASE_ from the 32-bit ABI. It is used to
   // represent the TOC base which is offset by 0x8000 bytes from the start of
   // the .got section.
   // We do not allow _GLOBAL_OFFSET_TABLE_ to be defined by input objects as the
   // correctness of some relocations depends on its value.
   StringRef GotTableSymName =
       (Config->EMachine == EM_PPC64) ? ".TOC." : "_GLOBAL_OFFSET_TABLE_";
   if (Symbol *S = Symtab->find(GotTableSymName)) {
     if (S->isDefined())
       error(toString(S->File) + " cannot redefine linker defined symbol '" +
             GotTableSymName + "'");
     else
       ElfSym::GlobalOffsetTable = Symtab->addDefined(
           GotTableSymName, STV_HIDDEN, STT_NOTYPE, Target->GotBaseSymOff,
           /*Size=*/0, STB_GLOBAL, Out::ElfHeader,
           /*File=*/nullptr);
   }
 
   // __ehdr_start is the location of ELF file headers. Note that we define
   // this symbol unconditionally even when using a linker script, which
   // differs from the behavior of the GNU linker, which only defines this
   // symbol if the ELF headers are in the memory mapped segment.
   addOptionalRegular("__ehdr_start", Out::ElfHeader, 0, STV_HIDDEN);
 
   // __executable_start is not documented, but the expectation of at
   // least the Android libc is that it points to the ELF header.
   addOptionalRegular("__executable_start", Out::ElfHeader, 0, STV_HIDDEN);
 
   // __dso_handle symbol is passed to cxa_finalize as a marker to identify
   // each DSO. The address of the symbol doesn't matter as long as it is
   // different in different DSOs, so we choose the start address of the DSO.
   addOptionalRegular("__dso_handle", Out::ElfHeader, 0, STV_HIDDEN);
 
   // If a linker script does the layout, we do not need to create any
   // standard symbols.
   if (Script->HasSectionsCommand)
     return;
 
   auto Add = [](StringRef S, int64_t Pos) {
     return addOptionalRegular(S, Out::ElfHeader, Pos, STV_DEFAULT);
   };
 
   ElfSym::Bss = Add("__bss_start", 0);
   ElfSym::End1 = Add("end", -1);
   ElfSym::End2 = Add("_end", -1);
   ElfSym::Etext1 = Add("etext", -1);
   ElfSym::Etext2 = Add("_etext", -1);
   ElfSym::Edata1 = Add("edata", -1);
   ElfSym::Edata2 = Add("_edata", -1);
 }
 
 static OutputSection *findSection(StringRef Name) {
   for (BaseCommand *Base : Script->SectionCommands)
     if (auto *Sec = dyn_cast<OutputSection>(Base))
       if (Sec->Name == Name)
         return Sec;
   return nullptr;
 }
 
 // Initialize Out members.
 template <class ELFT> static void createSyntheticSections() {
   // Initialize all pointers with NULL. This is needed because
   // you can call lld::elf::main more than once as a library.
   memset(&Out::First, 0, sizeof(Out));
 
   auto Add = [](InputSectionBase *Sec) { InputSections.push_back(Sec); };
 
   In.DynStrTab = make<StringTableSection>(".dynstr", true);
   In.Dynamic = make<DynamicSection<ELFT>>();
   if (Config->AndroidPackDynRelocs) {
     In.RelaDyn = make<AndroidPackedRelocationSection<ELFT>>(
         Config->IsRela ? ".rela.dyn" : ".rel.dyn");
   } else {
     In.RelaDyn = make<RelocationSection<ELFT>>(
         Config->IsRela ? ".rela.dyn" : ".rel.dyn", Config->ZCombreloc);
   }
   In.ShStrTab = make<StringTableSection>(".shstrtab", false);
 
   Out::ProgramHeaders = make<OutputSection>("", 0, SHF_ALLOC);
   Out::ProgramHeaders->Alignment = Config->Wordsize;
 
   if (needsInterpSection()) {
     In.Interp = createInterpSection();
     Add(In.Interp);
   }
 
   if (Config->Strip != StripPolicy::All) {
     In.StrTab = make<StringTableSection>(".strtab", false);
     In.SymTab = make<SymbolTableSection<ELFT>>(*In.StrTab);
     In.SymTabShndx = make<SymtabShndxSection>();
   }
 
   if (Config->BuildId != BuildIdKind::None) {
     In.BuildId = make<BuildIdSection>();
     Add(In.BuildId);
   }
 
   In.Bss = make<BssSection>(".bss", 0, 1);
   Add(In.Bss);
 
   // If there is a SECTIONS command and a .data.rel.ro section, use the name
   // .data.rel.ro.bss so that we match the .data.rel.ro output section.
   // This makes sure our relro is contiguous.
   bool HasDataRelRo = Script->HasSectionsCommand && findSection(".data.rel.ro");
   In.BssRelRo =
       make<BssSection>(HasDataRelRo ? ".data.rel.ro.bss" : ".bss.rel.ro", 0, 1);
   Add(In.BssRelRo);
 
   // Add MIPS-specific sections.
   if (Config->EMachine == EM_MIPS) {
     if (!Config->Shared && Config->HasDynSymTab) {
       In.MipsRldMap = make<MipsRldMapSection>();
       Add(In.MipsRldMap);
     }
     if (auto *Sec = MipsAbiFlagsSection<ELFT>::create())
       Add(Sec);
     if (auto *Sec = MipsOptionsSection<ELFT>::create())
       Add(Sec);
     if (auto *Sec = MipsReginfoSection<ELFT>::create())
       Add(Sec);
   }
 
   if (Config->HasDynSymTab) {
     In.DynSymTab = make<SymbolTableSection<ELFT>>(*In.DynStrTab);
     Add(In.DynSymTab);
 
     InX<ELFT>::VerSym = make<VersionTableSection<ELFT>>();
     Add(InX<ELFT>::VerSym);
 
     if (!Config->VersionDefinitions.empty()) {
       In.VerDef = make<VersionDefinitionSection>();
       Add(In.VerDef);
     }
 
     InX<ELFT>::VerNeed = make<VersionNeedSection<ELFT>>();
     Add(InX<ELFT>::VerNeed);
 
     if (Config->GnuHash) {
       In.GnuHashTab = make<GnuHashTableSection>();
       Add(In.GnuHashTab);
     }
 
     if (Config->SysvHash) {
       In.HashTab = make<HashTableSection>();
       Add(In.HashTab);
     }
 
     Add(In.Dynamic);
     Add(In.DynStrTab);
     Add(In.RelaDyn);
   }
 
   if (Config->RelrPackDynRelocs) {
     In.RelrDyn = make<RelrSection<ELFT>>();
     Add(In.RelrDyn);
   }
 
   // Add .got. MIPS' .got is so different from the other archs,
   // it has its own class.
   if (Config->EMachine == EM_MIPS) {
     In.MipsGot = make<MipsGotSection>();
     Add(In.MipsGot);
   } else {
     In.Got = make<GotSection>();
     Add(In.Got);
   }
 
   if (Config->EMachine == EM_PPC64) {
     In.PPC64LongBranchTarget = make<PPC64LongBranchTargetSection>();
     Add(In.PPC64LongBranchTarget);
   }
 
   In.GotPlt = make<GotPltSection>();
   Add(In.GotPlt);
   In.IgotPlt = make<IgotPltSection>();
   Add(In.IgotPlt);
 
   if (Config->GdbIndex) {
     In.GdbIndex = GdbIndexSection::create<ELFT>();
     Add(In.GdbIndex);
   }
 
   // We always need to add rel[a].plt to output if it has entries.
   // Even for static linking it can contain R_[*]_IRELATIVE relocations.
   In.RelaPlt = make<RelocationSection<ELFT>>(
       Config->IsRela ? ".rela.plt" : ".rel.plt", false /*Sort*/);
   Add(In.RelaPlt);
 
   // The RelaIplt immediately follows .rel.plt (.rel.dyn for ARM) to ensure
   // that the IRelative relocations are processed last by the dynamic loader.
   // We cannot place the iplt section in .rel.dyn when Android relocation
   // packing is enabled because that would cause a section type mismatch.
   // However, because the Android dynamic loader reads .rel.plt after .rel.dyn,
   // we can get the desired behaviour by placing the iplt section in .rel.plt.
   In.RelaIplt = make<RelocationSection<ELFT>>(
       (Config->EMachine == EM_ARM && !Config->AndroidPackDynRelocs)
           ? ".rel.dyn"
           : In.RelaPlt->Name,
       false /*Sort*/);
   Add(In.RelaIplt);
 
   In.Plt = make<PltSection>(false);
   Add(In.Plt);
   In.Iplt = make<PltSection>(true);
   Add(In.Iplt);
 
   // .note.GNU-stack is always added when we are creating a re-linkable
   // object file. Other linkers use the presence of this marker section to
   // control the executable-ness of the stack area, but that is irrelevant
   // these days. The stack area should always be non-executable by default,
   // so we emit this section unconditionally.
   if (Config->Relocatable)
     Add(make<GnuStackSection>());
 
   if (!Config->Relocatable) {
     if (Config->EhFrameHdr) {
       In.EhFrameHdr = make<EhFrameHeader>();
       Add(In.EhFrameHdr);
     }
     In.EhFrame = make<EhFrameSection>();
     Add(In.EhFrame);
   }
 
   if (In.SymTab)
     Add(In.SymTab);
   if (In.SymTabShndx)
     Add(In.SymTabShndx);
   Add(In.ShStrTab);
   if (In.StrTab)
     Add(In.StrTab);
 
   if (Config->EMachine == EM_ARM && !Config->Relocatable)
     // Add a sentinel to terminate .ARM.exidx. It helps an unwinder
     // to find the exact address range of the last entry.
     Add(make<ARMExidxSentinelSection>());
 }
 
 // The main function of the writer.
 template <class ELFT> void Writer<ELFT>::run() {
   // Create linker-synthesized sections such as .got or .plt.
   // Such sections are of type input section.
   createSyntheticSections<ELFT>();
 
   if (!Config->Relocatable)
     combineEhFrameSections<ELFT>();
 
   // We want to process linker script commands. When SECTIONS command
   // is given we let it create sections.
   Script->processSectionCommands();
 
   // Linker scripts control how input sections are assigned to output sections.
   // Input sections that were not handled by scripts are called "orphans", and
   // they are assigned to output sections by the default rule. Process that.
   Script->addOrphanSections();
 
   if (Config->Discard != DiscardPolicy::All)
     copyLocalSymbols();
 
   if (Config->CopyRelocs)
     addSectionSymbols();
 
   // Now that we have a complete set of output sections, we can complete the
   // section contents. For example, we need to add strings to the string
   // table, and add entries to .got and .plt. finalizeSections does that.
   finalizeSections();
   checkExecuteOnly();
   if (errorCount())
     return;
 
   Script->assignAddresses();
 
   // If -compressed-debug-sections is specified, we need to compress
   // .debug_* sections. Do it right now because it changes the size of
   // output sections.
   for (OutputSection *Sec : OutputSections)
     Sec->maybeCompress<ELFT>();
 
   Script->allocateHeaders(Phdrs);
 
   // Remove empty PT_LOAD to avoid causing the dynamic linker to try to mmap a
   // 0 sized region. This has to be done late since only after assignAddresses
   // we know the size of the sections.
   removeEmptyPTLoad();
 
   if (!Config->OFormatBinary)
     assignFileOffsets();
   else
     assignFileOffsetsBinary();
 
   setPhdrs();
 
   if (Config->Relocatable)
     for (OutputSection *Sec : OutputSections)
       Sec->Addr = 0;
 
   if (Config->CheckSections)
     checkSections();
 
   // It does not make sense to try to open the file if we already have errors.
   if (errorCount())
     return;
   // Write the result down to a file.
   openFile();
   if (errorCount())
     return;
 
   if (!Config->OFormatBinary) {
     writeTrapInstr();
     writeHeader();
     writeSections();
   } else {
     writeSectionsBinary();
   }
 
   // Backfill .note.gnu.build-id section content. This is done at last
   // because the content is usually a hash value of the entire output file.
   writeBuildId();
   if (errorCount())
     return;
 
   // Handle -Map and -cref options.
   writeMapFile();
   writeCrossReferenceTable();
   if (errorCount())
     return;
 
   if (auto E = Buffer->commit())
     error("failed to write to the output file: " + toString(std::move(E)));
 }
 
 static bool shouldKeepInSymtab(SectionBase *Sec, StringRef SymName,
                                const Symbol &B) {
   if (B.isSection())
     return false;
 
   if (Config->Discard == DiscardPolicy::None)
     return true;
 
   // If -emit-reloc is given, all symbols including local ones need to be
   // copied because they may be referenced by relocations.
   if (Config->EmitRelocs)
     return true;
 
   // In ELF assembly .L symbols are normally discarded by the assembler.
   // If the assembler fails to do so, the linker discards them if
   // * --discard-locals is used.
   // * The symbol is in a SHF_MERGE section, which is normally the reason for
   //   the assembler keeping the .L symbol.
   if (!SymName.startswith(".L") && !SymName.empty())
     return true;
 
   if (Config->Discard == DiscardPolicy::Locals)
     return false;
 
   return !Sec || !(Sec->Flags & SHF_MERGE);
 }
 
 static bool includeInSymtab(const Symbol &B) {
   if (!B.isLocal() && !B.IsUsedInRegularObj)
     return false;
 
   if (auto *D = dyn_cast<Defined>(&B)) {
     // Always include absolute symbols.
     SectionBase *Sec = D->Section;
     if (!Sec)
       return true;
     Sec = Sec->Repl;
 
     // Exclude symbols pointing to garbage-collected sections.
     if (isa<InputSectionBase>(Sec) && !Sec->Live)
       return false;
 
     if (auto *S = dyn_cast<MergeInputSection>(Sec))
       if (!S->getSectionPiece(D->Value)->Live)
         return false;
     return true;
   }
   return B.Used;
 }
 
 // Local symbols are not in the linker's symbol table. This function scans
 // each object file's symbol table to copy local symbols to the output.
 template <class ELFT> void Writer<ELFT>::copyLocalSymbols() {
   if (!In.SymTab)
     return;
   for (InputFile *File : ObjectFiles) {
     ObjFile<ELFT> *F = cast<ObjFile<ELFT>>(File);
     for (Symbol *B : F->getLocalSymbols()) {
       if (!B->isLocal())
         fatal(toString(F) +
               ": broken object: getLocalSymbols returns a non-local symbol");
       auto *DR = dyn_cast<Defined>(B);
 
       // No reason to keep a local undefined symbol in the symtab.
       if (!DR)
         continue;
       if (!includeInSymtab(*B))
         continue;
 
       SectionBase *Sec = DR->Section;
       if (!shouldKeepInSymtab(Sec, B->getName(), *B))
         continue;
       In.SymTab->addSymbol(B);
     }
   }
 }
 
 // Create a section symbol for each output section so that we can represent
 // relocations that point to the section. If we know that no relocation is
 // referring to a section (that happens if the section is a synthetic one), we
 // don't create a section symbol for that section.
 template <class ELFT> void Writer<ELFT>::addSectionSymbols() {
   for (BaseCommand *Base : Script->SectionCommands) {
     auto *Sec = dyn_cast<OutputSection>(Base);
     if (!Sec)
       continue;
     auto I = llvm::find_if(Sec->SectionCommands, [](BaseCommand *Base) {
       if (auto *ISD = dyn_cast<InputSectionDescription>(Base))
         return !ISD->Sections.empty();
       return false;
     });
     if (I == Sec->SectionCommands.end())
       continue;
     InputSection *IS = cast<InputSectionDescription>(*I)->Sections[0];
 
     // Relocations do not use REL[A] section symbols.
     if (IS->Type == SHT_REL || IS->Type == SHT_RELA)
       continue;
 
     // Unlike other synthetic sections, mergeable output sections contain data
     // copied from input sections, and there may be a relocation pointing to
     // their contents if -r or -emit-reloc are given.
     if (isa<SyntheticSection>(IS) && !(IS->Flags & SHF_MERGE))
       continue;
 
     auto *Sym =
         make<Defined>(IS->File, "", STB_LOCAL, /*StOther=*/0, STT_SECTION,
                       /*Value=*/0, /*Size=*/0, IS);
     In.SymTab->addSymbol(Sym);
   }
 }
 
 // Today's loaders have a feature to make segments read-only after
 // processing dynamic relocations to enhance security. PT_GNU_RELRO
 // is defined for that.
 //
 // This function returns true if a section needs to be put into a
 // PT_GNU_RELRO segment.
 static bool isRelroSection(const OutputSection *Sec) {
   if (!Config->ZRelro)
     return false;
 
   uint64_t Flags = Sec->Flags;
 
   // Non-allocatable or non-writable sections don't need RELRO because
   // they are not writable or not even mapped to memory in the first place.
   // RELRO is for sections that are essentially read-only but need to
   // be writable only at process startup to allow dynamic linker to
   // apply relocations.
   if (!(Flags & SHF_ALLOC) || !(Flags & SHF_WRITE))
     return false;
 
   // Once initialized, TLS data segments are used as data templates
   // for thread-local storage. For each new thread, the runtime
   // allocates memory for a TLS block and copies the template there. No thread
   // is supposed to use the template directly. Thus, it can be in RELRO.
   if (Flags & SHF_TLS)
     return true;
 
   // .init_array, .preinit_array and .fini_array contain pointers to
   // functions that are executed on process startup or exit. These
   // pointers are set by the static linker, and they are not expected
   // to change at runtime. But if you are an attacker, you could do
   // interesting things by manipulating pointers in .fini_array, for
   // example. So they are put into RELRO.
   uint32_t Type = Sec->Type;
   if (Type == SHT_INIT_ARRAY || Type == SHT_FINI_ARRAY ||
       Type == SHT_PREINIT_ARRAY)
     return true;
 
   // .got contains pointers to external symbols. They are resolved by
   // the dynamic linker when a module is loaded into memory, and after
   // that they are not expected to change. So, it can be in RELRO.
   if (In.Got && Sec == In.Got->getParent())
     return true;
 
   // .toc is a GOT-ish section for PowerPC64. Its contents are accessed through
   // the r2 register, which is reserved for that purpose. Since r2 is used
   // for accessing .got as well, .got and .toc need to be close enough in the
   // virtual address space. Usually, .toc comes just after .got. Since we place
   // .got into RELRO, .toc needs to be placed into RELRO too.
   if (Sec->Name.equals(".toc"))
     return true;
 
   // .got.plt contains pointers to external function symbols. They are
   // by default resolved lazily, so we usually cannot put it into RELRO.
   // However, if "-z now" is given, the lazy symbol resolution is
   // disabled, which enables us to put it into RELRO.
   if (Sec == In.GotPlt->getParent())
     return Config->ZNow;
 
   // .dynamic section contains data for the dynamic linker, and
   // there's no need to write to it at runtime, so it's better to put
   // it into RELRO.
   if (Sec == In.Dynamic->getParent())
     return true;
 
   // Sections with some special names are put into RELRO. This is a
   // bit unfortunate because section names shouldn't be significant in
   // ELF in spirit. But in reality many linker features depend on
   // magic section names.
   StringRef S = Sec->Name;
   return S == ".data.rel.ro" || S == ".bss.rel.ro" || S == ".ctors" ||
          S == ".dtors" || S == ".jcr" || S == ".eh_frame" ||
          S == ".openbsd.randomdata";
 }
 
 // We compute a rank for each section. The rank indicates where the
 // section should be placed in the file.  Instead of using simple
 // numbers (0,1,2...), we use a series of flags. One for each decision
 // point when placing the section.
 // Using flags has two key properties:
 // * It is easy to check if a given branch was taken.
 // * It is easy to see how similar two ranks are (see getRankProximity).
 enum RankFlags {
   RF_NOT_ADDR_SET = 1 << 18,
   RF_NOT_ALLOC = 1 << 17,
   RF_NOT_INTERP = 1 << 16,
   RF_NOT_NOTE = 1 << 15,
   RF_WRITE = 1 << 14,
   RF_EXEC_WRITE = 1 << 13,
   RF_EXEC = 1 << 12,
   RF_RODATA = 1 << 11,
   RF_NON_TLS_BSS = 1 << 10,
   RF_NON_TLS_BSS_RO = 1 << 9,
   RF_NOT_TLS = 1 << 8,
   RF_BSS = 1 << 7,
   RF_PPC_NOT_TOCBSS = 1 << 6,
   RF_PPC_TOCL = 1 << 5,
   RF_PPC_TOC = 1 << 4,
   RF_PPC_GOT = 1 << 3,
   RF_PPC_BRANCH_LT = 1 << 2,
   RF_MIPS_GPREL = 1 << 1,
   RF_MIPS_NOT_GOT = 1 << 0
 };
 
 static unsigned getSectionRank(const OutputSection *Sec) {
   unsigned Rank = 0;
 
   // We want to put sections specified by the -T option first, so we
   // can start assigning VAs from them later.
   if (Config->SectionStartMap.count(Sec->Name))
     return Rank;
   Rank |= RF_NOT_ADDR_SET;
 
   // Allocatable sections go first to reduce the total PT_LOAD size and
   // so debug info doesn't change addresses in actual code.
   if (!(Sec->Flags & SHF_ALLOC))
     return Rank | RF_NOT_ALLOC;
 
   // Put .interp first because some loaders want to see that section
   // on the first page of the executable file when loaded into memory.
   if (Sec->Name == ".interp")
     return Rank;
   Rank |= RF_NOT_INTERP;
 
   // Put .note sections (which make up one PT_NOTE) at the beginning so that
   // they are likely to be included in a core file even if core file size is
   // limited. In particular, we want a .note.gnu.build-id and a .note.tag to be
   // included in a core to match core files with executables.
   if (Sec->Type == SHT_NOTE)
     return Rank;
   Rank |= RF_NOT_NOTE;
 
   // Sort sections based on their access permission in the following
   // order: R, RX, RWX, RW.  This order is based on the following
   // considerations:
   // * Read-only sections come first such that they go in the
   //   PT_LOAD covering the program headers at the start of the file.
   // * Read-only, executable sections come next.
   // * Writable, executable sections follow such that .plt on
   //   architectures where it needs to be writable will be placed
   //   between .text and .data.
   // * Writable sections come last, such that .bss lands at the very
   //   end of the last PT_LOAD.
   bool IsExec = Sec->Flags & SHF_EXECINSTR;
   bool IsWrite = Sec->Flags & SHF_WRITE;
 
   if (IsExec) {
     if (IsWrite)
       Rank |= RF_EXEC_WRITE;
     else
       Rank |= RF_EXEC;
   } else if (IsWrite) {
     Rank |= RF_WRITE;
   } else if (Sec->Type == SHT_PROGBITS) {
      // Make non-executable and non-writable PROGBITS sections (e.g. .rodata
     // .eh_frame) closer to .text. They likely contain PC or GOT relative
     // relocations and there could be relocation overflow if other huge sections
     // (.dynstr .dynsym) were placed in between.
     Rank |= RF_RODATA;
   }
 
   // Sections that tie on the flags above end up in the same PT_LOAD; the
   // remaining flags only decide their order within that segment.
 
   bool IsTls = Sec->Flags & SHF_TLS;
   bool IsNoBits = Sec->Type == SHT_NOBITS;
 
   // The first requirement we have is to put (non-TLS) nobits sections last. The
   // reason is that the only thing the dynamic linker will see about them is a
   // p_memsz that is larger than p_filesz. Seeing that, it zeros the end of the
   // PT_LOAD, so the end has to correspond to the nobits sections.
   bool IsNonTlsNoBits = IsNoBits && !IsTls;
   if (IsNonTlsNoBits)
     Rank |= RF_NON_TLS_BSS;
 
   // We place nobits RelRo sections before plain r/w ones, and non-nobits RelRo
   // sections after r/w ones, so that the RelRo sections are contiguous.
   bool IsRelRo = isRelroSection(Sec);
   if (IsNonTlsNoBits && !IsRelRo)
     Rank |= RF_NON_TLS_BSS_RO;
   if (!IsNonTlsNoBits && IsRelRo)
     Rank |= RF_NON_TLS_BSS_RO;
 
   // The TLS initialization block needs to be a single contiguous block in a R/W
   // PT_LOAD, so stick TLS sections directly before the other RelRo R/W
   // sections. The TLS NOBITS sections are placed here as they don't take up
   // virtual address space in the PT_LOAD.
   if (!IsTls)
     Rank |= RF_NOT_TLS;
 
   // Within the TLS initialization block, the non-nobits sections need to appear
   // first.
   if (IsNoBits)
     Rank |= RF_BSS;
 
   // Some architectures have additional ordering restrictions for sections
   // within the same PT_LOAD.
   if (Config->EMachine == EM_PPC64) {
     // PPC64 has a number of special SHT_PROGBITS+SHF_ALLOC+SHF_WRITE sections
      // that we would like to make sure appear in a specific order to maximize
     // their coverage by a single signed 16-bit offset from the TOC base
     // pointer. Conversely, the special .tocbss section should be first among
     // all SHT_NOBITS sections. This will put it next to the loaded special
     // PPC64 sections (and, thus, within reach of the TOC base pointer).
     StringRef Name = Sec->Name;
     if (Name != ".tocbss")
       Rank |= RF_PPC_NOT_TOCBSS;
 
     if (Name == ".toc1")
       Rank |= RF_PPC_TOCL;
 
     if (Name == ".toc")
       Rank |= RF_PPC_TOC;
 
     if (Name == ".got")
       Rank |= RF_PPC_GOT;
 
     if (Name == ".branch_lt")
       Rank |= RF_PPC_BRANCH_LT;
   }
 
   if (Config->EMachine == EM_MIPS) {
     // All sections with SHF_MIPS_GPREL flag should be grouped together
     // because data in these sections is addressable with a gp relative address.
     if (Sec->Flags & SHF_MIPS_GPREL)
       Rank |= RF_MIPS_GPREL;
 
     if (Sec->Name != ".got")
       Rank |= RF_MIPS_NOT_GOT;
   }
 
   return Rank;
 }
 
 static bool compareSections(const BaseCommand *ACmd, const BaseCommand *BCmd) {
   const OutputSection *A = cast<OutputSection>(ACmd);
   const OutputSection *B = cast<OutputSection>(BCmd);
 
   if (A->SortRank != B->SortRank)
     return A->SortRank < B->SortRank;
 
   if (!(A->SortRank & RF_NOT_ADDR_SET))
     return Config->SectionStartMap.lookup(A->Name) <
            Config->SectionStartMap.lookup(B->Name);
   return false;
 }
 
 void PhdrEntry::add(OutputSection *Sec) {
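   // Track the first and last sections assigned to this segment and widen
   // p_align to the largest member alignment.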
   LastSec = Sec;
   if (!FirstSec)
     FirstSec = Sec;
   p_align = std::max(p_align, Sec->Alignment);
   if (p_type == PT_LOAD)
     Sec->PtLoad = this;
 }
 
 // The beginning and the ending of .rel[a].plt section are marked
 // with __rel[a]_iplt_{start,end} symbols if it is a statically linked
 // executable. The runtime needs these symbols in order to resolve
 // all IRELATIVE relocs on startup. For dynamic executables, we don't
 // need these symbols, since IRELATIVE relocs are resolved through GOT
 // and PLT. For details, see http://www.airs.com/blog/archives/403.
 template <class ELFT> void Writer<ELFT>::addRelIpltSymbols() {
   if (Config->Relocatable || needsInterpSection())
     return;
 
   // By default, __rela_iplt_{start,end} belong to a dummy section 0
   // because .rela.plt might be empty and thus removed from output.
   // We'll override Out::ElfHeader with In.RelaIplt later when we are
   // sure that .rela.plt exists in output.
   ElfSym::RelaIpltStart = addOptionalRegular(
       Config->IsRela ? "__rela_iplt_start" : "__rel_iplt_start",
       Out::ElfHeader, 0, STV_HIDDEN, STB_WEAK);
 
   ElfSym::RelaIpltEnd = addOptionalRegular(
       Config->IsRela ? "__rela_iplt_end" : "__rel_iplt_end",
       Out::ElfHeader, 0, STV_HIDDEN, STB_WEAK);
 }
 
 template <class ELFT>
 void Writer<ELFT>::forEachRelSec(
     llvm::function_ref<void(InputSectionBase &)> Fn) {
   // Scan all relocations. Each relocation goes through a series
   // of tests to determine if it needs special treatment, such as
   // creating GOT, PLT, copy relocations, etc.
   // Note that relocations for non-alloc sections are directly
   // processed by InputSection::relocateNonAlloc.
   for (InputSectionBase *IS : InputSections)
     if (IS->Live && isa<InputSection>(IS) && (IS->Flags & SHF_ALLOC))
       Fn(*IS);
   for (EhInputSection *ES : In.EhFrame->Sections)
     Fn(*ES);
 }
 
 // This function generates assignments for predefined symbols (e.g. _end or
 // _etext) and inserts them into the commands sequence to be processed at the
 // appropriate time. This ensures that the value is going to be correct by the
 // time any references to these symbols are processed and is equivalent to
 // defining these symbols explicitly in the linker script.
 template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() {
   if (ElfSym::GlobalOffsetTable) {
     // The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention usually
     // to the start of the .got or .got.plt section.
     InputSection *GotSection = In.GotPlt;
     if (!Target->GotBaseSymInGotPlt)
       GotSection = In.MipsGot ? cast<InputSection>(In.MipsGot)
                               : cast<InputSection>(In.Got);
     ElfSym::GlobalOffsetTable->Section = GotSection;
   }
 
   // .rela_iplt_{start,end} mark the start and the end of .rela.plt section.
   if (ElfSym::RelaIpltStart && !In.RelaIplt->empty()) {
     ElfSym::RelaIpltStart->Section = In.RelaIplt;
     ElfSym::RelaIpltEnd->Section = In.RelaIplt;
     ElfSym::RelaIpltEnd->Value = In.RelaIplt->getSize();
   }
 
   PhdrEntry *Last = nullptr;
   PhdrEntry *LastRO = nullptr;
 
   for (PhdrEntry *P : Phdrs) {
     if (P->p_type != PT_LOAD)
       continue;
     Last = P;
     if (!(P->p_flags & PF_W))
       LastRO = P;
   }
 
   if (LastRO) {
     // _etext is the first location after the last read-only loadable segment.
     if (ElfSym::Etext1)
       ElfSym::Etext1->Section = LastRO->LastSec;
     if (ElfSym::Etext2)
       ElfSym::Etext2->Section = LastRO->LastSec;
   }
 
   if (Last) {
     // _edata points to the end of the last mapped initialized section.
     OutputSection *Edata = nullptr;
     for (OutputSection *OS : OutputSections) {
       if (OS->Type != SHT_NOBITS)
         Edata = OS;
       if (OS == Last->LastSec)
         break;
     }
 
     if (ElfSym::Edata1)
       ElfSym::Edata1->Section = Edata;
     if (ElfSym::Edata2)
       ElfSym::Edata2->Section = Edata;
 
     // _end is the first location after the uninitialized data region.
     if (ElfSym::End1)
       ElfSym::End1->Section = Last->LastSec;
     if (ElfSym::End2)
       ElfSym::End2->Section = Last->LastSec;
   }
 
   if (ElfSym::Bss)
     ElfSym::Bss->Section = findSection(".bss");
 
   // Set up the MIPS _gp_disp/__gnu_local_gp symbols, which should
   // be equal to the _gp symbol's value.
   if (ElfSym::MipsGp) {
     // Find GP-relative section with the lowest address
     // and use this address to calculate default _gp value.
     for (OutputSection *OS : OutputSections) {
       if (OS->Flags & SHF_MIPS_GPREL) {
         ElfSym::MipsGp->Section = OS;
         ElfSym::MipsGp->Value = 0x7ff0;
         break;
       }
     }
   }
 }
 
 // We want to find how similar two ranks are.
 // The more branches in getSectionRank that match, the more similar they are.
 // Since each branch corresponds to a bit flag, we can just use
 // countLeadingZeros.
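 // For example (illustrative numbers, not from the original comment): ranks
 // that differ only in RF_BSS (bit 7) give countLeadingZeros(A ^ B) == 24 for
 // a 32-bit rank, while ranks that already diverge at RF_NOT_ALLOC (bit 17)
 // give only 14, so a larger result means the sections are more alike.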
 static int getRankProximityAux(OutputSection *A, OutputSection *B) {
   return countLeadingZeros(A->SortRank ^ B->SortRank);
 }
 
 static int getRankProximity(OutputSection *A, BaseCommand *B) {
   if (auto *Sec = dyn_cast<OutputSection>(B))
     return getRankProximityAux(A, Sec);
   return -1;
 }
 
 // When placing orphan sections, we want to place them after symbol assignments
 // so that an orphan after
 //   begin_foo = .;
 //   foo : { *(foo) }
 //   end_foo = .;
 // doesn't break the intended meaning of the begin/end symbols.
 // We don't want to go over sections since findOrphanPos is the
 // one in charge of deciding the order of the sections.
 // We don't want to go over changes to '.', since doing so in
 //  rx_sec : { *(rx_sec) }
 //  . = ALIGN(0x1000);
 //  /* The RW PT_LOAD starts here*/
 //  rw_sec : { *(rw_sec) }
 // would mean that the RW PT_LOAD would become unaligned.
 static bool shouldSkip(BaseCommand *Cmd) {
   if (auto *Assign = dyn_cast<SymbolAssignment>(Cmd))
     return Assign->Name != ".";
   return false;
 }
 
 // We want to place orphan sections so that they share as many
 // characteristics with their neighbors as possible. For example, if
 // both are rw, or both are tls.
 template <typename ELFT>
 static std::vector<BaseCommand *>::iterator
 findOrphanPos(std::vector<BaseCommand *>::iterator B,
               std::vector<BaseCommand *>::iterator E) {
   OutputSection *Sec = cast<OutputSection>(*E);
 
   // Find the first element that has as close a rank as possible.
   auto I = std::max_element(B, E, [=](BaseCommand *A, BaseCommand *B) {
     return getRankProximity(Sec, A) < getRankProximity(Sec, B);
   });
   if (I == E)
     return E;
 
   // Consider all existing sections with the same proximity.
   int Proximity = getRankProximity(Sec, *I);
   for (; I != E; ++I) {
     auto *CurSec = dyn_cast<OutputSection>(*I);
     if (!CurSec)
       continue;
     if (getRankProximity(Sec, CurSec) != Proximity ||
         Sec->SortRank < CurSec->SortRank)
       break;
   }
 
   auto IsOutputSec = [](BaseCommand *Cmd) { return isa<OutputSection>(Cmd); };
   auto J = std::find_if(llvm::make_reverse_iterator(I),
                         llvm::make_reverse_iterator(B), IsOutputSec);
   I = J.base();
 
   // As a special case, if the orphan section is the last section, put
   // it at the very end, past any other commands.
   // This matches bfd's behavior and is convenient when the linker script fully
   // specifies the start of the file, but doesn't care about the end (the non
   // alloc sections for example).
   auto NextSec = std::find_if(I, E, IsOutputSec);
   if (NextSec == E)
     return E;
 
   while (I != E && shouldSkip(*I))
     ++I;
   return I;
 }
 
 // Builds section order for handling --symbol-ordering-file.
 static DenseMap<const InputSectionBase *, int> buildSectionOrder() {
   DenseMap<const InputSectionBase *, int> SectionOrder;
   // Use the rarely used option -call-graph-ordering-file to sort sections.
   if (!Config->CallGraphProfile.empty())
     return computeCallGraphProfileOrder();
 
   if (Config->SymbolOrderingFile.empty())
     return SectionOrder;
 
   struct SymbolOrderEntry {
     int Priority;
     bool Present;
   };
 
   // Build a map from symbols to their priorities. Symbols that didn't
   // appear in the symbol ordering file have the lowest priority 0.
   // All explicitly mentioned symbols have negative (higher) priorities.
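   // For example (illustrative): a three-entry ordering file assigns
   // priorities -3, -2 and -1 in file order, all of which sort before the
   // default priority 0 of unlisted symbols.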
   DenseMap<StringRef, SymbolOrderEntry> SymbolOrder;
   int Priority = -Config->SymbolOrderingFile.size();
   for (StringRef S : Config->SymbolOrderingFile)
     SymbolOrder.insert({S, {Priority++, false}});
 
   // Build a map from sections to their priorities.
   auto AddSym = [&](Symbol &Sym) {
     auto It = SymbolOrder.find(Sym.getName());
     if (It == SymbolOrder.end())
       return;
     SymbolOrderEntry &Ent = It->second;
     Ent.Present = true;
 
     maybeWarnUnorderableSymbol(&Sym);
 
     if (auto *D = dyn_cast<Defined>(&Sym)) {
       if (auto *Sec = dyn_cast_or_null<InputSectionBase>(D->Section)) {
         int &Priority = SectionOrder[cast<InputSectionBase>(Sec->Repl)];
         Priority = std::min(Priority, Ent.Priority);
       }
     }
   };
 
   // We want both global and local symbols. We get the global ones from the
   // symbol table and iterate the object files for the local ones.
   for (Symbol *Sym : Symtab->getSymbols())
     if (!Sym->isLazy())
       AddSym(*Sym);
   for (InputFile *File : ObjectFiles)
     for (Symbol *Sym : File->getSymbols())
       if (Sym->isLocal())
         AddSym(*Sym);
 
   if (Config->WarnSymbolOrdering)
     for (auto OrderEntry : SymbolOrder)
       if (!OrderEntry.second.Present)
         warn("symbol ordering file: no such symbol: " + OrderEntry.first);
 
   return SectionOrder;
 }
 
 // Sorts the sections in ISD according to the provided section order.
 static void
 sortISDBySectionOrder(InputSectionDescription *ISD,
                       const DenseMap<const InputSectionBase *, int> &Order) {
   std::vector<InputSection *> UnorderedSections;
   std::vector<std::pair<InputSection *, int>> OrderedSections;
   uint64_t UnorderedSize = 0;
 
   for (InputSection *IS : ISD->Sections) {
     auto I = Order.find(IS);
     if (I == Order.end()) {
       UnorderedSections.push_back(IS);
       UnorderedSize += IS->getSize();
       continue;
     }
     OrderedSections.push_back({IS, I->second});
   }
   llvm::sort(OrderedSections, [&](std::pair<InputSection *, int> A,
                                   std::pair<InputSection *, int> B) {
     return A.second < B.second;
   });
 
   // Find an insertion point for the ordered section list in the unordered
   // section list. On targets with limited-range branches, this is the mid-point
   // of the unordered section list. This decreases the likelihood that a range
   // extension thunk will be needed to enter or exit the ordered region. If the
   // ordered section list is a list of hot functions, we can generally expect
   // the ordered functions to be called more often than the unordered functions,
   // making it more likely that any particular call will be within range, and
   // therefore reducing the number of thunks required.
   //
   // For example, imagine that you have 8MB of hot code and 32MB of cold code.
   // If the layout is:
   //
   // 8MB hot
   // 32MB cold
   //
   // only the first 8-16MB of the cold code (depending on which hot function it
   // is actually calling) can call the hot code without a range extension thunk.
   // However, if we use this layout:
   //
   // 16MB cold
   // 8MB hot
   // 16MB cold
   //
   // both the last 8-16MB of the first block of cold code and the first 8-16MB
   // of the second block of cold code can call the hot code without a thunk. So
   // we effectively double the amount of code that could potentially call into
   // the hot code without a thunk.
   size_t InsPt = 0;
   if (Target->getThunkSectionSpacing() && !OrderedSections.empty()) {
     uint64_t UnorderedPos = 0;
     for (; InsPt != UnorderedSections.size(); ++InsPt) {
       UnorderedPos += UnorderedSections[InsPt]->getSize();
       if (UnorderedPos > UnorderedSize / 2)
         break;
     }
   }
 
   ISD->Sections.clear();
   for (InputSection *IS : makeArrayRef(UnorderedSections).slice(0, InsPt))
     ISD->Sections.push_back(IS);
   for (std::pair<InputSection *, int> P : OrderedSections)
     ISD->Sections.push_back(P.first);
   for (InputSection *IS : makeArrayRef(UnorderedSections).slice(InsPt))
     ISD->Sections.push_back(IS);
 }
 
 static void sortSection(OutputSection *Sec,
                         const DenseMap<const InputSectionBase *, int> &Order) {
   StringRef Name = Sec->Name;
 
   // Sort input sections by section name suffixes for
   // __attribute__((init_priority(N))).
   if (Name == ".init_array" || Name == ".fini_array") {
     if (!Script->HasSectionsCommand)
       Sec->sortInitFini();
     return;
   }
 
   // Sort input sections by the special rule for .ctors and .dtors.
   if (Name == ".ctors" || Name == ".dtors") {
     if (!Script->HasSectionsCommand)
       Sec->sortCtorsDtors();
     return;
   }
 
   // Never sort these.
   if (Name == ".init" || Name == ".fini")
     return;
 
   // Sort input sections by priority using the list provided
   // by --symbol-ordering-file.
   if (!Order.empty())
     for (BaseCommand *B : Sec->SectionCommands)
       if (auto *ISD = dyn_cast<InputSectionDescription>(B))
         sortISDBySectionOrder(ISD, Order);
 }
 
 // If no layout was provided by linker script, we want to apply default
 // sorting for special input sections. This also handles --symbol-ordering-file.
 template <class ELFT> void Writer<ELFT>::sortInputSections() {
   // Build the order once since it is expensive.
   DenseMap<const InputSectionBase *, int> Order = buildSectionOrder();
   for (BaseCommand *Base : Script->SectionCommands)
     if (auto *Sec = dyn_cast<OutputSection>(Base))
       sortSection(Sec, Order);
 }
 
 template <class ELFT> void Writer<ELFT>::sortSections() {
   Script->adjustSectionsBeforeSorting();
 
   // Don't sort if using -r. It is not necessary and we want to preserve the
   // relative order for SHF_LINK_ORDER sections.
   if (Config->Relocatable)
     return;
 
   sortInputSections();
 
   for (BaseCommand *Base : Script->SectionCommands) {
     auto *OS = dyn_cast<OutputSection>(Base);
     if (!OS)
       continue;
     OS->SortRank = getSectionRank(OS);
 
      // We want to assign rough approximation values to OutSecOff fields
     // to know the relative order of the input sections. We use it for
     // sorting SHF_LINK_ORDER sections. See resolveShfLinkOrder().
     uint64_t I = 0;
     for (InputSection *Sec : getInputSections(OS))
       Sec->OutSecOff = I++;
   }
 
   if (!Script->HasSectionsCommand) {
     // We know that all the OutputSections are contiguous in this case.
     auto IsSection = [](BaseCommand *Base) { return isa<OutputSection>(Base); };
     std::stable_sort(
         llvm::find_if(Script->SectionCommands, IsSection),
         llvm::find_if(llvm::reverse(Script->SectionCommands), IsSection).base(),
         compareSections);
     return;
   }
 
   // Orphan sections are sections present in the input files which are
   // not explicitly placed into the output file by the linker script.
   //
   // The sections in the linker script are already in the correct
   // order. We have to figure out where to insert the orphan
   // sections.
   //
   // The order of the sections in the script is arbitrary and may not agree with
   // compareSections. This means that we cannot easily define a strict weak
   // ordering. To see why, consider a comparison of a section in the script and
   // one not in the script. We have two simple options:
   // * Make them equivalent (a is not less than b, and b is not less than a).
   //   The problem is then that equivalence has to be transitive and we can
   //   have sections a, b and c with only b in a script and a less than c
   //   which breaks this property.
   // * Use compareSectionsNonScript. Given that the script order doesn't have
   //   to match, we can end up with sections a, b, c, d where b and c are in the
   //   script and c is compareSectionsNonScript less than b. In which case d
   //   can be equivalent to c, a to b and d < a. As a concrete example:
   //   .a (rx) # not in script
   //   .b (rx) # in script
   //   .c (ro) # in script
   //   .d (ro) # not in script
   //
   // The way we define an order then is:
   // *  Sort only the orphan sections. They are in the end right now.
   // *  Move each orphan section to its preferred position. We try
   //    to put each section in the last position where it can share
   //    a PT_LOAD.
   //
   // There is some ambiguity as to where exactly a new entry should be
   // inserted, because Commands contains not only output section
   // commands but also other types of commands such as symbol assignment
   // expressions. There's no correct answer here due to the lack of the
   // formal specification of the linker script. We use heuristics to
   // determine whether a new output command should be added before or
   // after other commands. For the details, look at the shouldSkip
   // function.
 
   auto I = Script->SectionCommands.begin();
   auto E = Script->SectionCommands.end();
   auto NonScriptI = std::find_if(I, E, [](BaseCommand *Base) {
     if (auto *Sec = dyn_cast<OutputSection>(Base))
       return Sec->SectionIndex == UINT32_MAX;
     return false;
   });
 
   // Sort the orphan sections.
   std::stable_sort(NonScriptI, E, compareSections);
 
   // As a horrible special case, skip the first . assignment if it is before any
   // section. We do this because it is common to set a load address by starting
   // the script with ". = 0xabcd" and the expectation is that every section is
   // after that.
   auto FirstSectionOrDotAssignment =
       std::find_if(I, E, [](BaseCommand *Cmd) { return !shouldSkip(Cmd); });
   if (FirstSectionOrDotAssignment != E &&
       isa<SymbolAssignment>(**FirstSectionOrDotAssignment))
     ++FirstSectionOrDotAssignment;
   I = FirstSectionOrDotAssignment;
 
   while (NonScriptI != E) {
     auto Pos = findOrphanPos<ELFT>(I, NonScriptI);
     OutputSection *Orphan = cast<OutputSection>(*NonScriptI);
 
     // As an optimization, find all sections with the same sort rank
     // and insert them with one rotate.
     unsigned Rank = Orphan->SortRank;
     auto End = std::find_if(NonScriptI + 1, E, [=](BaseCommand *Cmd) {
       return cast<OutputSection>(Cmd)->SortRank != Rank;
     });
     std::rotate(Pos, NonScriptI, End);
     NonScriptI = End;
   }
 
   Script->adjustSectionsAfterSorting();
 }
 
 static bool compareByFilePosition(InputSection *A, InputSection *B) {
   // A synthetic section, i.e. the sentinel section, should go last.
   if (A->kind() == InputSectionBase::Synthetic ||
       B->kind() == InputSectionBase::Synthetic)
     return A->kind() != InputSectionBase::Synthetic;
 
   InputSection *LA = A->getLinkOrderDep();
   InputSection *LB = B->getLinkOrderDep();
   OutputSection *AOut = LA->getParent();
   OutputSection *BOut = LB->getParent();
 
   if (AOut != BOut)
     return AOut->SectionIndex < BOut->SectionIndex;
   return LA->OutSecOff < LB->OutSecOff;
 }
 
 // This function is used by the --merge-exidx-entries option to detect
 // duplicate .ARM.exidx sections. It is Arm-specific.
 //
 // The .ARM.exidx section is of the form:
 // | PREL31 offset to function | Unwind instructions for function |
 // where the unwind instructions are either a small number of unwind
 // instructions inlined into the table entry, the special CANT_UNWIND value of
 // 0x1 or a PREL31 offset into a .ARM.extab Section that contains unwind
 // instructions.
 //
 // We return true if all the unwind instructions in the .ARM.exidx entries of
 // Cur can be merged into the last entry of Prev.
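 //
 // For illustration only (values not taken from any input): an entry whose
 // Unwind word is 0x80B0B0B0 has bit 31 set and encodes inline unwind opcodes;
 // the value 0x1 is the special EXIDX_CANTUNWIND marker; a word with bit 31
 // clear that is not 0x1 is a PREL31 reference into .ARM.extab and is never
 // treated as a duplicate here.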
 static bool isDuplicateArmExidxSec(InputSection *Prev, InputSection *Cur) {
 
   // References to .ARM.extab sections have bit 31 clear and are not the
   // special EXIDX_CANTUNWIND bit-pattern.
   auto IsExtabRef = [](uint32_t Unwind) {
     return (Unwind & 0x80000000) == 0 && Unwind != 0x1;
   };
 
   struct ExidxEntry {
     ulittle32_t Fn;
     ulittle32_t Unwind;
   };
 
   // Get the last table Entry from the previous .ARM.exidx section.
   const ExidxEntry &PrevEntry = Prev->getDataAs<ExidxEntry>().back();
   if (IsExtabRef(PrevEntry.Unwind))
     return false;
 
   // We consider the unwind instructions of an .ARM.exidx table entry to be a
   // duplicate of the previous entry's unwind instructions if:
   // - Both are the special EXIDX_CANTUNWIND.
   // - Both are the same inline unwind instructions.
   // We do not attempt to follow and check links into .ARM.extab tables as
   // consecutive identical entries are rare and the effort to check that they
   // are identical is high.
 
   for (const ExidxEntry Entry : Cur->getDataAs<ExidxEntry>())
     if (IsExtabRef(Entry.Unwind) || Entry.Unwind != PrevEntry.Unwind)
       return false;
 
   // All table entries in this .ARM.exidx Section can be merged into the
   // previous Section.
   return true;
 }
 
 template <class ELFT> void Writer<ELFT>::resolveShfLinkOrder() {
   for (OutputSection *Sec : OutputSections) {
     if (!(Sec->Flags & SHF_LINK_ORDER))
       continue;
 
     // Link order may be distributed across several InputSectionDescriptions
     // but the sort must consider them all at once.
     std::vector<InputSection **> ScriptSections;
     std::vector<InputSection *> Sections;
     for (BaseCommand *Base : Sec->SectionCommands) {
       if (auto *ISD = dyn_cast<InputSectionDescription>(Base)) {
         for (InputSection *&IS : ISD->Sections) {
           ScriptSections.push_back(&IS);
           Sections.push_back(IS);
         }
       }
     }
     std::stable_sort(Sections.begin(), Sections.end(), compareByFilePosition);
 
     if (!Config->Relocatable && Config->EMachine == EM_ARM &&
         Sec->Type == SHT_ARM_EXIDX) {
 
       if (auto *Sentinel = dyn_cast<ARMExidxSentinelSection>(Sections.back())) {
         assert(Sections.size() >= 2 &&
                "We should create a sentinel section only if there are "
                "alive regular exidx sections.");
 
         // The last executable section is required to fill the sentinel.
         // Remember it here so that we don't have to find it again.
         Sentinel->Highest = Sections[Sections.size() - 2]->getLinkOrderDep();
       }
 
       // The EHABI for the Arm Architecture permits consecutive identical
       // table entries to be merged. We use a simple implementation that
       // removes a .ARM.exidx Input Section if it can be merged into the
       // previous one. This does not require any rewriting of InputSection
       // contents but misses opportunities for fine grained deduplication
       // where only a subset of the InputSection contents can be merged.
       if (Config->MergeArmExidx) {
         size_t Prev = 0;
         // The last one is a sentinel entry which should not be removed.
         for (size_t I = 1; I < Sections.size() - 1; ++I) {
           if (isDuplicateArmExidxSec(Sections[Prev], Sections[I]))
             Sections[I] = nullptr;
           else
             Prev = I;
         }
       }
     }
 
     for (int I = 0, N = Sections.size(); I < N; ++I)
       *ScriptSections[I] = Sections[I];
 
     // Remove the Sections we marked as duplicate earlier.
     for (BaseCommand *Base : Sec->SectionCommands)
       if (auto *ISD = dyn_cast<InputSectionDescription>(Base))
         llvm::erase_if(ISD->Sections, [](InputSection *IS) { return !IS; });
   }
 }
 
 // For most RISC ISAs, we need to generate content that depends on the address
 // of InputSections. For example, some architectures such as AArch64 use small
 // displacements for jump instructions, and it is the linker's responsibility
 // to create range extension thunks for out-of-range branches. As generating
 // this content may itself alter InputSection addresses, we must iterate until
 // we converge on a fixed point.
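 //
 // As a concrete illustration (not specific to any input): an AArch64 BL
 // instruction encodes a signed 26-bit word offset, giving it a reach of about
 // +/-128 MiB; a call whose destination lies farther away needs a range
 // extension thunk inserted by the linker.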
 template <class ELFT> void Writer<ELFT>::maybeAddThunks() {
   if (!Target->NeedsThunks && !Config->AndroidPackDynRelocs &&
       !Config->RelrPackDynRelocs)
     return;
 
   ThunkCreator TC;
   AArch64Err843419Patcher A64P;
 
   for (;;) {
     bool Changed = false;
 
     Script->assignAddresses();
 
     if (Target->NeedsThunks)
       Changed |= TC.createThunks(OutputSections);
 
     if (Config->FixCortexA53Errata843419) {
       if (Changed)
         Script->assignAddresses();
       Changed |= A64P.createFixes();
     }
 
     if (In.MipsGot)
       In.MipsGot->updateAllocSize();
 
     Changed |= In.RelaDyn->updateAllocSize();
 
     if (In.RelrDyn)
       Changed |= In.RelrDyn->updateAllocSize();
 
     if (!Changed)
       return;
   }
 }
 
 static void finalizeSynthetic(SyntheticSection *Sec) {
   if (Sec && !Sec->empty() && Sec->getParent())
     Sec->finalizeContents();
 }
 
 // In order to allow users to manipulate linker-synthesized sections,
 // we had to add synthetic sections to the input section list early,
 // even before we make decisions whether they are needed. This allows
 // users to write scripts like this: ".mygot : { .got }".
 //
 // Doing so has an unintended side effect. If it turns out that we
 // don't need a .got (for example) at all because there's no
 // relocation that needs a .got, we don't want to emit .got.
 //
 // To deal with the above problem, this function is called after
 // scanRelocations is called to remove synthetic sections that turn
 // out to be empty.
 static void removeUnusedSyntheticSections() {
   // All input synthetic sections that can be empty are placed after
   // all regular ones. We iterate over them all and exit at the first
   // non-synthetic one.
   for (InputSectionBase *S : llvm::reverse(InputSections)) {
     SyntheticSection *SS = dyn_cast<SyntheticSection>(S);
     if (!SS)
       return;
     OutputSection *OS = SS->getParent();
     if (!OS || !SS->empty())
       continue;
 
     // If we reach here, then SS is an unused synthetic section and we want to
     // remove it from the corresponding input section description of the output section.
     for (BaseCommand *B : OS->SectionCommands)
       if (auto *ISD = dyn_cast<InputSectionDescription>(B))
         llvm::erase_if(ISD->Sections,
                        [=](InputSection *IS) { return IS == SS; });
   }
 }
 
 // Returns true if a symbol can be replaced at load-time by a symbol
 // with the same name defined in other ELF executable or DSO.
 static bool computeIsPreemptible(const Symbol &B) {
   assert(!B.isLocal());
 
   // Only symbols that appear in dynsym can be preempted.
   if (!B.includeInDynsym())
     return false;
 
   // Only default visibility symbols can be preempted.
   if (B.Visibility != STV_DEFAULT)
     return false;
 
   // At this point copy relocations have not been created yet, so any
   // symbol that is not defined locally is preemptible.
   if (!B.isDefined())
     return true;
 
   // If we have a dynamic list it specifies which local symbols are preemptible.
   if (Config->HasDynamicList)
     return false;
 
   if (!Config->Shared)
     return false;
 
   // -Bsymbolic means that definitions are not preempted.
   if (Config->Bsymbolic || (Config->BsymbolicFunctions && B.isFunc()))
     return false;
   return true;
 }
 
 // Create output section objects and add them to OutputSections.
 template <class ELFT> void Writer<ELFT>::finalizeSections() {
   Out::PreinitArray = findSection(".preinit_array");
   Out::InitArray = findSection(".init_array");
   Out::FiniArray = findSection(".fini_array");
 
   // The linker needs to define SECNAME_start/SECNAME_end symbols (such as
   // __init_array_start) and __start_SECNAME/__stop_SECNAME symbols for some
   // sections, so that the runtime can get the start and end addresses of each
   // section by name. Add such symbols.
   if (!Config->Relocatable) {
     addStartEndSymbols();
     for (BaseCommand *Base : Script->SectionCommands)
       if (auto *Sec = dyn_cast<OutputSection>(Base))
         addStartStopSymbols(Sec);
   }
 
   // Add _DYNAMIC symbol. Unlike GNU gold, our _DYNAMIC symbol has no type.
   // It should be okay as no one seems to care about the type.
   // Even the author of gold doesn't remember why gold behaves that way.
   // https://sourceware.org/ml/binutils/2002-03/msg00360.html
   if (In.Dynamic->Parent)
     Symtab->addDefined("_DYNAMIC", STV_HIDDEN, STT_NOTYPE, 0 /*Value*/,
                        /*Size=*/0, STB_WEAK, In.Dynamic,
                        /*File=*/nullptr);
 
   // Define __rel[a]_iplt_{start,end} symbols if needed.
   addRelIpltSymbols();
 
   // RISC-V's gp can address +/- 2 KiB; set it to .sdata + 0x800 if not defined.
   if (Config->EMachine == EM_RISCV)
     if (!dyn_cast_or_null<Defined>(Symtab->find("__global_pointer$")))
       addOptionalRegular("__global_pointer$", findSection(".sdata"), 0x800);
 
   // This is responsible for splitting up the .eh_frame section into pieces.
   // The relocation scan uses those pieces, so this has to happen before it.
   finalizeSynthetic(In.EhFrame);
 
-  for (Symbol *S : Symtab->getSymbols()) {
-    if (!S->IsPreemptible)
-      S->IsPreemptible = computeIsPreemptible(*S);
-    if (S->isGnuIFunc() && Config->ZIfuncnoplt)
-      S->ExportDynamic = true;
-  }
+  for (Symbol *S : Symtab->getSymbols())
+    S->IsPreemptible |= computeIsPreemptible(*S);
 
   // Scan relocations. This must be done after every symbol is declared so that
   // we can correctly decide if a dynamic relocation is needed.
   if (!Config->Relocatable)
     forEachRelSec(scanRelocations<ELFT>);
+
+  addIRelativeRelocs();
 
   if (In.Plt && !In.Plt->empty())
     In.Plt->addSymbols();
   if (In.Iplt && !In.Iplt->empty())
     In.Iplt->addSymbols();
 
   if (!Config->AllowShlibUndefined) {
     // Error on undefined symbols in a shared object, if all of its DT_NEEDED
     // entries are seen. These cases would otherwise lead to runtime errors
     // reported by the dynamic linker.
     //
     // ld.bfd traces all DT_NEEDED to emulate the logic of the dynamic linker to
     // catch more cases. That is too much for us. Our approach resembles the one
     // used in ld.gold and achieves a good balance: useful, but not too smart.
     for (InputFile *File : SharedFiles) {
       SharedFile<ELFT> *F = cast<SharedFile<ELFT>>(File);
       F->AllNeededIsKnown = llvm::all_of(F->DtNeeded, [&](StringRef Needed) {
         return Symtab->SoNames.count(Needed);
       });
     }
     for (Symbol *Sym : Symtab->getSymbols())
       if (Sym->isUndefined() && !Sym->isWeak())
         if (auto *F = dyn_cast_or_null<SharedFile<ELFT>>(Sym->File))
           if (F->AllNeededIsKnown)
             error(toString(F) + ": undefined reference to " + toString(*Sym));
   }
 
   // Now that we have defined all possible global symbols, including linker-
   // synthesized ones, visit all symbols to give them the finishing touches.
   for (Symbol *Sym : Symtab->getSymbols()) {
     if (!includeInSymtab(*Sym))
       continue;
     if (In.SymTab)
       In.SymTab->addSymbol(Sym);
 
     if (Sym->includeInDynsym()) {
       In.DynSymTab->addSymbol(Sym);
       if (auto *File = dyn_cast_or_null<SharedFile<ELFT>>(Sym->File))
         if (File->IsNeeded && !Sym->isUndefined())
           InX<ELFT>::VerNeed->addSymbol(Sym);
     }
   }
 
   // Do not proceed if there was an undefined symbol.
   if (errorCount())
     return;
 
   if (In.MipsGot)
     In.MipsGot->build<ELFT>();
 
   removeUnusedSyntheticSections();
 
   sortSections();
 
   // Now that we have the final list, create a list of all the
   // OutputSections for convenience.
   for (BaseCommand *Base : Script->SectionCommands)
     if (auto *Sec = dyn_cast<OutputSection>(Base))
       OutputSections.push_back(Sec);
 
   // Prefer command line supplied address over other constraints.
   for (OutputSection *Sec : OutputSections) {
     auto I = Config->SectionStartMap.find(Sec->Name);
     if (I != Config->SectionStartMap.end())
       Sec->AddrExpr = [=] { return I->second; };
   }
 
   // This is a bit of a hack. A value of 0 means undef, so we set it
   // to 1 to make __ehdr_start defined. The section number is not
   // particularly relevant.
   Out::ElfHeader->SectionIndex = 1;
 
   for (size_t I = 0, E = OutputSections.size(); I != E; ++I) {
     OutputSection *Sec = OutputSections[I];
     Sec->SectionIndex = I + 1;
     Sec->ShName = In.ShStrTab->addString(Sec->Name);
   }
 
   // Binary and relocatable output does not have PHDRS.
   // The headers have to be created before finalization because creating them
   // can influence the image base, and the dynamic section on MIPS includes the
   // image base.
   if (!Config->Relocatable && !Config->OFormatBinary) {
     Phdrs = Script->hasPhdrsCommands() ? Script->createPhdrs() : createPhdrs();
     addPtArmExid(Phdrs);
     Out::ProgramHeaders->Size = sizeof(Elf_Phdr) * Phdrs.size();
 
     // Find the TLS segment. This happens before the section layout loop so that
     // Android relocation packing can look up TLS symbol addresses.
     for (PhdrEntry *P : Phdrs)
       if (P->p_type == PT_TLS)
         Out::TlsPhdr = P;
   }
 
   // Some symbols are defined in terms of program headers. Now that we
   // have the headers, we can find out which sections they point to.
   setReservedSymbolSections();
 
   // The dynamic section must be the last one in this list and the dynamic
   // symbol table section (DynSymTab) must be the first one.
   finalizeSynthetic(In.DynSymTab);
   finalizeSynthetic(In.Bss);
   finalizeSynthetic(In.BssRelRo);
   finalizeSynthetic(In.GnuHashTab);
   finalizeSynthetic(In.HashTab);
   finalizeSynthetic(In.SymTabShndx);
   finalizeSynthetic(In.ShStrTab);
   finalizeSynthetic(In.StrTab);
   finalizeSynthetic(In.VerDef);
   finalizeSynthetic(In.DynStrTab);
   finalizeSynthetic(In.Got);
   finalizeSynthetic(In.MipsGot);
   finalizeSynthetic(In.IgotPlt);
   finalizeSynthetic(In.GotPlt);
   finalizeSynthetic(In.RelaDyn);
   finalizeSynthetic(In.RelrDyn);
   finalizeSynthetic(In.RelaIplt);
   finalizeSynthetic(In.RelaPlt);
   finalizeSynthetic(In.Plt);
   finalizeSynthetic(In.Iplt);
   finalizeSynthetic(In.EhFrameHdr);
   finalizeSynthetic(InX<ELFT>::VerSym);
   finalizeSynthetic(InX<ELFT>::VerNeed);
   finalizeSynthetic(In.Dynamic);
 
   if (!Script->HasSectionsCommand && !Config->Relocatable)
     fixSectionAlignments();
 
   // During link-order processing, .ARM.exidx sections may be deduplicated;
   // this needs to be resolved before any other address-dependent operation.
   resolveShfLinkOrder();
 
   // Jump instructions in many ISAs have small displacements, and therefore they
   // cannot jump to arbitrary addresses in memory. For example, the RISC-V JAL
   // instruction can target only +/-1 MiB from PC. It is the linker's
   // responsibility to create and insert small pieces of code between sections
   // to extend the ranges if jump targets are out of range. Such code pieces are
   // called "thunks".
   //
   // We add thunks at this stage. We couldn't do this before this point because
   // this is the earliest point where we know sizes of sections and their
   // layouts (that are needed to determine if jump targets are in range).
   maybeAddThunks();
 
   // maybeAddThunks may have added local symbols to the static symbol table.
   finalizeSynthetic(In.SymTab);
   finalizeSynthetic(In.PPC64LongBranchTarget);
 
   // Fill other section headers. The dynamic table is finalized
   // at the end because some tags like RELSZ depend on result
   // of finalizing other sections.
   for (OutputSection *Sec : OutputSections)
     Sec->finalize<ELFT>();
 }
 
 // Ensure data sections are not mixed with executable sections when
 // -execute-only is used. -execute-only is a feature to make pages executable
 // but not readable, and the feature is currently supported only on AArch64.
 template <class ELFT> void Writer<ELFT>::checkExecuteOnly() {
   if (!Config->ExecuteOnly)
     return;
 
   for (OutputSection *OS : OutputSections)
     if (OS->Flags & SHF_EXECINSTR)
       for (InputSection *IS : getInputSections(OS))
         if (!(IS->Flags & SHF_EXECINSTR))
           error("cannot place " + toString(IS) + " into " + toString(OS->Name) +
                 ": -execute-only does not support intermingling data and code");
 }
 
 // The linker is expected to define SECNAME_start and SECNAME_end
 // symbols for a few sections. This function defines them.
 template <class ELFT> void Writer<ELFT>::addStartEndSymbols() {
   // If a section does not exist, there's ambiguity as to how we
   // define _start and _end symbols for an init/fini section. Since
   // the loader assumes that the symbols are always defined, we need to
   // always define them. But what value? The loader iterates over all
   // pointers between _start and _end to run global ctors/dtors, so if
   // the section is empty, their symbol values don't actually matter
   // as long as _start and _end point to the same location.
   //
   // That said, we don't want to set the symbols to 0 (which is
   // probably the simplest value) because that could cause some
   // program to fail to link due to relocation overflow, if their
   // program text is above 2 GiB. We use the address of the .text
   // section instead to prevent that failure.
   //
   // In a rare situation, the .text section may not exist. If that's the
   // case, use the image base address as a last resort.
   OutputSection *Default = findSection(".text");
   if (!Default)
     Default = Out::ElfHeader;
 
   auto Define = [=](StringRef Start, StringRef End, OutputSection *OS) {
     if (OS) {
       addOptionalRegular(Start, OS, 0);
       addOptionalRegular(End, OS, -1);
     } else {
       addOptionalRegular(Start, Default, 0);
       addOptionalRegular(End, Default, 0);
     }
   };
 
   Define("__preinit_array_start", "__preinit_array_end", Out::PreinitArray);
   Define("__init_array_start", "__init_array_end", Out::InitArray);
   Define("__fini_array_start", "__fini_array_end", Out::FiniArray);
 
   if (OutputSection *Sec = findSection(".ARM.exidx"))
     Define("__exidx_start", "__exidx_end", Sec);
 }
 
 // If a section name is valid as a C identifier (which is rare because of
 // the leading '.'), linkers are expected to define __start_<secname> and
 // __stop_<secname> symbols. They are at the beginning and end of the section,
 // respectively. This is not required by the ELF standard, but GNU ld and
 // gold provide the feature, and it is used by many programs.
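 //
 // For example, for a hypothetical section named "mydata" (a valid C
 // identifier), the linker defines __start_mydata and __stop_mydata so that a
 // program can locate the section's contents at run time.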
 template <class ELFT>
 void Writer<ELFT>::addStartStopSymbols(OutputSection *Sec) {
   StringRef S = Sec->Name;
   if (!isValidCIdentifier(S))
     return;
   addOptionalRegular(Saver.save("__start_" + S), Sec, 0, STV_PROTECTED);
   addOptionalRegular(Saver.save("__stop_" + S), Sec, -1, STV_PROTECTED);
 }
 
 static bool needsPtLoad(OutputSection *Sec) {
   if (!(Sec->Flags & SHF_ALLOC) || Sec->Noload)
     return false;
 
   // Don't allocate VA space for TLS NOBITS sections. The PT_TLS PHDR is
   // responsible for allocating space for them, not the PT_LOAD that
   // contains the TLS initialization image.
   if ((Sec->Flags & SHF_TLS) && Sec->Type == SHT_NOBITS)
     return false;
   return true;
 }
 
 // Linker scripts are responsible for aligning addresses. Unfortunately, most
 // linker scripts are designed for creating two PT_LOADs only, one RX and one
 // RW. This means that there is no alignment in the RO to RX transition and we
 // cannot create a PT_LOAD there.
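 //
 // For example, when a single RO+RX segment is requested (SingleRoRx), a
 // read-only, non-writable section also gets PF_X so that it can share a
 // PT_LOAD with executable sections.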
 static uint64_t computeFlags(uint64_t Flags) {
   if (Config->Omagic)
     return PF_R | PF_W | PF_X;
   if (Config->ExecuteOnly && (Flags & PF_X))
     return Flags & ~PF_R;
   if (Config->SingleRoRx && !(Flags & PF_W))
     return Flags | PF_X;
   return Flags;
 }
 
 // Decide which program headers to create and which sections to include in each
 // one.
 template <class ELFT> std::vector<PhdrEntry *> Writer<ELFT>::createPhdrs() {
   std::vector<PhdrEntry *> Ret;
   auto AddHdr = [&](unsigned Type, unsigned Flags) -> PhdrEntry * {
     Ret.push_back(make<PhdrEntry>(Type, Flags));
     return Ret.back();
   };
 
   // The first phdr entry is PT_PHDR which describes the program header itself.
   AddHdr(PT_PHDR, PF_R)->add(Out::ProgramHeaders);
 
   // PT_INTERP must be the second entry if it exists.
   if (OutputSection *Cmd = findSection(".interp"))
     AddHdr(PT_INTERP, Cmd->getPhdrFlags())->add(Cmd);
 
   // Add the first PT_LOAD segment for regular output sections.
   uint64_t Flags = computeFlags(PF_R);
   PhdrEntry *Load = AddHdr(PT_LOAD, Flags);
 
   // Add the headers. We will remove them if they don't fit.
   Load->add(Out::ElfHeader);
   Load->add(Out::ProgramHeaders);
 
   for (OutputSection *Sec : OutputSections) {
     if (!(Sec->Flags & SHF_ALLOC))
       break;
     if (!needsPtLoad(Sec))
       continue;
 
     // Segments are contiguous memory regions that have the same attributes
     // (e.g. executable or writable). There is one phdr for each segment.
     // Therefore, we need to create a new phdr when the next section has
     // different flags or is loaded at a discontiguous address or memory
     // region using AT or AT> linker script command, respectively. At the same
     // time, we don't want to create a separate load segment for the headers,
     // even if the first output section has an AT or AT> attribute.
     uint64_t NewFlags = computeFlags(Sec->getPhdrFlags());
     if (((Sec->LMAExpr ||
           (Sec->LMARegion && (Sec->LMARegion != Load->FirstSec->LMARegion))) &&
          Load->LastSec != Out::ProgramHeaders) ||
         Sec->MemRegion != Load->FirstSec->MemRegion || Flags != NewFlags) {
 
       Load = AddHdr(PT_LOAD, NewFlags);
       Flags = NewFlags;
     }
 
     Load->add(Sec);
   }
 
   // Add a TLS segment if any.
   PhdrEntry *TlsHdr = make<PhdrEntry>(PT_TLS, PF_R);
   for (OutputSection *Sec : OutputSections)
     if (Sec->Flags & SHF_TLS)
       TlsHdr->add(Sec);
   if (TlsHdr->FirstSec)
     Ret.push_back(TlsHdr);
 
   // Add an entry for .dynamic.
   if (OutputSection *Sec = In.Dynamic->getParent())
     AddHdr(PT_DYNAMIC, Sec->getPhdrFlags())->add(Sec);
 
   // PT_GNU_RELRO includes all sections that should be marked as
   // read-only by the dynamic linker after processing relocations.
   // Current dynamic loaders only support one PT_GNU_RELRO PHDR, so give
   // an error message if more than one PT_GNU_RELRO PHDR would be required.
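   //
   // Typical relro candidates are .data.rel.ro, .dynamic and .got; .got.plt
   // usually joins them only when -z now is in effect. They must form one
   // contiguous run of sections for the single PT_GNU_RELRO to cover them.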
   PhdrEntry *RelRo = make<PhdrEntry>(PT_GNU_RELRO, PF_R);
   bool InRelroPhdr = false;
   bool IsRelroFinished = false;
   for (OutputSection *Sec : OutputSections) {
     if (!needsPtLoad(Sec))
       continue;
     if (isRelroSection(Sec)) {
       InRelroPhdr = true;
       if (!IsRelroFinished)
         RelRo->add(Sec);
       else
         error("section: " + Sec->Name + " is not contiguous with other relro" +
               " sections");
     } else if (InRelroPhdr) {
       InRelroPhdr = false;
       IsRelroFinished = true;
     }
   }
   if (RelRo->FirstSec)
     Ret.push_back(RelRo);
 
   // PT_GNU_EH_FRAME is a special segment pointing to .eh_frame_hdr.
   if (!In.EhFrame->empty() && In.EhFrameHdr && In.EhFrame->getParent() &&
       In.EhFrameHdr->getParent())
     AddHdr(PT_GNU_EH_FRAME, In.EhFrameHdr->getParent()->getPhdrFlags())
         ->add(In.EhFrameHdr->getParent());
 
   // PT_OPENBSD_RANDOMIZE is an OpenBSD-specific feature that makes
   // the dynamic linker fill the segment with random data.
   if (OutputSection *Cmd = findSection(".openbsd.randomdata"))
     AddHdr(PT_OPENBSD_RANDOMIZE, Cmd->getPhdrFlags())->add(Cmd);
 
   // PT_GNU_STACK is a special section to tell the loader to make the
   // pages for the stack non-executable. If you really want an executable
   // stack, you can pass -z execstack, but that's not recommended for
   // security reasons.
   unsigned Perm = PF_R | PF_W;
   if (Config->ZExecstack)
     Perm |= PF_X;
   AddHdr(PT_GNU_STACK, Perm)->p_memsz = Config->ZStackSize;
 
   // PT_OPENBSD_WXNEEDED is an OpenBSD-specific header marking that the
   // executable is expected to perform W^X violations, such as calling
   // mprotect(2) or mmap(2) with PROT_WRITE | PROT_EXEC, which is prohibited
   // by default on OpenBSD.
   if (Config->ZWxneeded)
     AddHdr(PT_OPENBSD_WXNEEDED, PF_X);
 
   // Create one PT_NOTE per group of contiguous .note sections.
   PhdrEntry *Note = nullptr;
   for (OutputSection *Sec : OutputSections) {
     if (Sec->Type == SHT_NOTE && (Sec->Flags & SHF_ALLOC)) {
       if (!Note || Sec->LMAExpr)
         Note = AddHdr(PT_NOTE, PF_R);
       Note->add(Sec);
     } else {
       Note = nullptr;
     }
   }
   return Ret;
 }
 
 template <class ELFT>
 void Writer<ELFT>::addPtArmExid(std::vector<PhdrEntry *> &Phdrs) {
   if (Config->EMachine != EM_ARM)
     return;
   auto I = llvm::find_if(OutputSections, [](OutputSection *Cmd) {
     return Cmd->Type == SHT_ARM_EXIDX;
   });
   if (I == OutputSections.end())
     return;
 
   // PT_ARM_EXIDX is the ARM EHABI equivalent of PT_GNU_EH_FRAME.
   PhdrEntry *ARMExidx = make<PhdrEntry>(PT_ARM_EXIDX, PF_R);
   ARMExidx->add(*I);
   Phdrs.push_back(ARMExidx);
 }
 
 // The first section of each PT_LOAD, the first section in PT_GNU_RELRO and the
 // first section after PT_GNU_RELRO have to be page aligned so that the dynamic
 // linker can set the permissions.
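 //
 // Illustrative numbers: with a maximum page size of 0x1000, a PT_LOAD whose
 // first section would otherwise start at 0x2010 has its address rounded up to
 // 0x3000.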
 template <class ELFT> void Writer<ELFT>::fixSectionAlignments() {
   auto PageAlign = [](OutputSection *Cmd) {
     if (Cmd && !Cmd->AddrExpr)
       Cmd->AddrExpr = [=] {
         return alignTo(Script->getDot(), Config->MaxPageSize);
       };
   };
 
   for (const PhdrEntry *P : Phdrs)
     if (P->p_type == PT_LOAD && P->FirstSec)
       PageAlign(P->FirstSec);
 
   for (const PhdrEntry *P : Phdrs) {
     if (P->p_type != PT_GNU_RELRO)
       continue;
 
     if (P->FirstSec)
       PageAlign(P->FirstSec);
 
     // Find the first section after PT_GNU_RELRO. If it is in a PT_LOAD we
     // have to align it to a page.
     auto End = OutputSections.end();
     auto I = std::find(OutputSections.begin(), End, P->LastSec);
     if (I == End || (I + 1) == End)
       continue;
 
     OutputSection *Cmd = (*(I + 1));
     if (needsPtLoad(Cmd))
       PageAlign(Cmd);
   }
 }
 
 // Compute an in-file position for a given section. The file offset must be
 // congruent to its virtual address modulo the page size, so that the loader can
 // load executables without any address adjustment.
 static uint64_t computeFileOffset(OutputSection *OS, uint64_t Off) {
   // File offsets are not significant for .bss sections. By convention, we keep
   // section offsets monotonically increasing rather than setting to zero.
   if (OS->Type == SHT_NOBITS)
     return Off;
 
   // If the section is not in a PT_LOAD, we just have to align it.
   if (!OS->PtLoad)
     return alignTo(Off, OS->Alignment);
 
   // The first section in a PT_LOAD has to have congruent offset and address
   // modulo the page size.
   OutputSection *First = OS->PtLoad->FirstSec;
   if (OS == First) {
     uint64_t Alignment = std::max<uint64_t>(OS->Alignment, Config->MaxPageSize);
     return alignTo(Off, Alignment, OS->Addr);
   }
 
   // If two sections share the same PT_LOAD the file offset is calculated
   // using this formula: Off2 = Off1 + (VA2 - VA1).
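   //
   // Illustrative numbers: if the first section is at VA 0x201000 with file
   // offset 0x1000 and this section is at VA 0x201240, its file offset becomes
   // 0x1000 + (0x201240 - 0x201000) = 0x1240.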
   return First->Offset + OS->Addr - First->Addr;
 }
 
 // Set an in-file position for a given section and return the end position of
 // the section.
 static uint64_t setFileOffset(OutputSection *OS, uint64_t Off) {
   Off = computeFileOffset(OS, Off);
   OS->Offset = Off;
 
   if (OS->Type == SHT_NOBITS)
     return Off;
   return Off + OS->Size;
 }
 
 template <class ELFT> void Writer<ELFT>::assignFileOffsetsBinary() {
   uint64_t Off = 0;
   for (OutputSection *Sec : OutputSections)
     if (Sec->Flags & SHF_ALLOC)
       Off = setFileOffset(Sec, Off);
   FileSize = alignTo(Off, Config->Wordsize);
 }
 
 static std::string rangeToString(uint64_t Addr, uint64_t Len) {
   return "[0x" + utohexstr(Addr) + ", 0x" + utohexstr(Addr + Len - 1) + "]";
 }
 
 // Assign file offsets to output sections.
 template <class ELFT> void Writer<ELFT>::assignFileOffsets() {
   uint64_t Off = 0;
   Off = setFileOffset(Out::ElfHeader, Off);
   Off = setFileOffset(Out::ProgramHeaders, Off);
 
   PhdrEntry *LastRX = nullptr;
   for (PhdrEntry *P : Phdrs)
     if (P->p_type == PT_LOAD && (P->p_flags & PF_X))
       LastRX = P;
 
   for (OutputSection *Sec : OutputSections) {
     Off = setFileOffset(Sec, Off);
     if (Script->HasSectionsCommand)
       continue;
 
     // If this is the last section of the last executable segment and that
     // segment is the last loadable segment, align the offset of the
     // following section to avoid loading non-segment parts of the file.
     if (LastRX && LastRX->LastSec == Sec)
       Off = alignTo(Off, Target->PageSize);
   }
 
   SectionHeaderOff = alignTo(Off, Config->Wordsize);
   FileSize = SectionHeaderOff + (OutputSections.size() + 1) * sizeof(Elf_Shdr);
 
   // Our logic assumes that sections have rising VAs within the same segment.
   // With linker scripts it is possible to violate this rule and get file
   // offset overlaps or overflows. That should never happen with a valid script
   // that does not move the location counter backwards, and most scripts do
   // not. Unfortunately, there are applications in the wild, for example the
   // Linux kernel, which control segment distribution explicitly and move the
   // counter backwards, so we have to allow doing that to support linking them.
   // We perform non-critical checks for overlaps in checkSectionOverlap(), but
   // here we want to prevent file size overflows because they would crash the
   // linker.
   for (OutputSection *Sec : OutputSections) {
     if (Sec->Type == SHT_NOBITS)
       continue;
     if ((Sec->Offset > FileSize) || (Sec->Offset + Sec->Size > FileSize))
       error("unable to place section " + Sec->Name + " at file offset " +
             rangeToString(Sec->Offset, Sec->Size) +
             "; check your linker script for overflows");
   }
 }
 
 // Finalize the program headers. We call this function after we assign
 // file offsets and VAs to all sections.
 template <class ELFT> void Writer<ELFT>::setPhdrs() {
   for (PhdrEntry *P : Phdrs) {
     OutputSection *First = P->FirstSec;
     OutputSection *Last = P->LastSec;
 
     if (First) {
       P->p_filesz = Last->Offset - First->Offset;
       if (Last->Type != SHT_NOBITS)
         P->p_filesz += Last->Size;
 
       P->p_memsz = Last->Addr + Last->Size - First->Addr;
       P->p_offset = First->Offset;
       P->p_vaddr = First->Addr;
 
       if (!P->HasLMA)
         P->p_paddr = First->getLMA();
     }
 
     if (P->p_type == PT_LOAD) {
       P->p_align = std::max<uint64_t>(P->p_align, Config->MaxPageSize);
     } else if (P->p_type == PT_GNU_RELRO) {
       P->p_align = 1;
       // The glibc dynamic loader rounds the size down, so we need to round up
       // to protect the last page. This is a no-op on FreeBSD which always
       // rounds up.
       P->p_memsz = alignTo(P->p_memsz, Target->PageSize);
     }
 
     if (P->p_type == PT_TLS && P->p_memsz) {
       // The TLS pointer goes after PT_TLS for variant 2 targets. At least glibc
       // will align it, so round up the size to make sure the offsets are
       // correct.
       P->p_memsz = alignTo(P->p_memsz, P->p_align);
     }
   }
 }
 
 // A helper struct for checkSectionOverlap.
 namespace {
 struct SectionOffset {
   OutputSection *Sec;
   uint64_t Offset;
 };
 } // namespace
 
 // Check whether sections overlap for a specific address range (file offsets,
 // load and virtual addresses).
 static void checkOverlap(StringRef Name, std::vector<SectionOffset> &Sections,
                          bool IsVirtualAddr) {
   llvm::sort(Sections, [=](const SectionOffset &A, const SectionOffset &B) {
     return A.Offset < B.Offset;
   });
 
   // Finding an overlap is easy once the vector is sorted by start position.
   // If an element starts before the end of the previous element, they overlap.
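   //
   // Made-up example: a section occupying [0x1000, 0x1FFF] overlaps a section
   // starting at 0x1800, but not one starting at 0x2000.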
   for (size_t I = 1, End = Sections.size(); I < End; ++I) {
     SectionOffset A = Sections[I - 1];
     SectionOffset B = Sections[I];
     if (B.Offset >= A.Offset + A.Sec->Size)
       continue;
 
     // If both sections are in OVERLAY we allow the overlapping of virtual
     // addresses, because it is what OVERLAY was designed for.
     if (IsVirtualAddr && A.Sec->InOverlay && B.Sec->InOverlay)
       continue;
 
     errorOrWarn("section " + A.Sec->Name + " " + Name +
                 " range overlaps with " + B.Sec->Name + "\n>>> " + A.Sec->Name +
                 " range is " + rangeToString(A.Offset, A.Sec->Size) + "\n>>> " +
                 B.Sec->Name + " range is " +
                 rangeToString(B.Offset, B.Sec->Size));
   }
 }
 
 // Check for overlapping sections and address overflows.
 //
 // In this function we check that none of the output sections have overlapping
 // file offsets. For SHF_ALLOC sections we also check that the load address
 // ranges and the virtual address ranges don't overlap.
 template <class ELFT> void Writer<ELFT>::checkSections() {
   // First, check that each section's VA fits in the available address space
   // for the target.
   for (OutputSection *OS : OutputSections)
     if ((OS->Addr + OS->Size < OS->Addr) ||
         (!ELFT::Is64Bits && OS->Addr + OS->Size > UINT32_MAX))
       errorOrWarn("section " + OS->Name + " at 0x" + utohexstr(OS->Addr) +
                   " of size 0x" + utohexstr(OS->Size) +
                   " exceeds available address space");
 
   // Check for overlapping file offsets. In this case we need to skip any
   // section marked as SHT_NOBITS. These sections don't actually occupy space in
   // the file, so Sec->Offset + Sec->Size can overlap with others. If --oformat
   // binary is specified, only SHF_ALLOC sections are added to the output
   // file, so we skip any non-allocated sections in that case.
   std::vector<SectionOffset> FileOffs;
   for (OutputSection *Sec : OutputSections)
     if (Sec->Size > 0 && Sec->Type != SHT_NOBITS &&
         (!Config->OFormatBinary || (Sec->Flags & SHF_ALLOC)))
       FileOffs.push_back({Sec, Sec->Offset});
   checkOverlap("file", FileOffs, false);
 
   // When linking with -r there is no need to check for overlapping virtual/load
   // addresses since those addresses will only be assigned when the final
   // executable/shared object is created.
   if (Config->Relocatable)
     return;
 
   // Checking for overlapping virtual and load addresses only needs to take
   // into account SHF_ALLOC sections since others will not be loaded.
   // Furthermore, we also need to skip SHF_TLS sections since these will be
   // mapped to other addresses at runtime and can therefore have overlapping
   // ranges in the file.
   std::vector<SectionOffset> VMAs;
   for (OutputSection *Sec : OutputSections)
     if (Sec->Size > 0 && (Sec->Flags & SHF_ALLOC) && !(Sec->Flags & SHF_TLS))
       VMAs.push_back({Sec, Sec->Addr});
   checkOverlap("virtual address", VMAs, true);
 
   // Finally, check that the load addresses don't overlap. This will usually be
   // the same as the virtual addresses but can be different when using a linker
   // script with AT().
   std::vector<SectionOffset> LMAs;
   for (OutputSection *Sec : OutputSections)
     if (Sec->Size > 0 && (Sec->Flags & SHF_ALLOC) && !(Sec->Flags & SHF_TLS))
       LMAs.push_back({Sec, Sec->getLMA()});
   checkOverlap("load address", LMAs, false);
 }
 
 // The entry point address is chosen in the following order of preference.
 //
 // 1. the '-e' entry command-line option;
 // 2. the ENTRY(symbol) command in a linker control script;
 // 3. the value of the symbol _start, if present;
 // 4. the number represented by the entry symbol, if it is a number;
 // 5. the address of the first byte of the .text section, if present;
 // 6. the address 0.
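 //
 // Illustrative case: with "-e 0x10000" and no symbol literally named
 // "0x10000", case 4 applies and the entry address becomes 0x10000.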
 static uint64_t getEntryAddr() {
   // Case 1, 2 or 3
   if (Symbol *B = Symtab->find(Config->Entry))
     return B->getVA();
 
   // Case 4
   uint64_t Addr;
   if (to_integer(Config->Entry, Addr))
     return Addr;
 
   // Case 5
   if (OutputSection *Sec = findSection(".text")) {
     if (Config->WarnMissingEntry)
       warn("cannot find entry symbol " + Config->Entry + "; defaulting to 0x" +
            utohexstr(Sec->Addr));
     return Sec->Addr;
   }
 
   // Case 6
   if (Config->WarnMissingEntry)
     warn("cannot find entry symbol " + Config->Entry +
          "; not setting start address");
   return 0;
 }
 
 static uint16_t getELFType() {
   if (Config->Pic)
     return ET_DYN;
   if (Config->Relocatable)
     return ET_REL;
   return ET_EXEC;
 }
 
 static uint8_t getAbiVersion() {
   // MIPS non-PIC executable gets ABI version 1.
   if (Config->EMachine == EM_MIPS && getELFType() == ET_EXEC &&
       (Config->EFlags & (EF_MIPS_PIC | EF_MIPS_CPIC)) == EF_MIPS_CPIC)
     return 1;
   return 0;
 }
 
 template <class ELFT> void Writer<ELFT>::writeHeader() {
   uint8_t *Buf = Buffer->getBufferStart();
 
   // For executable segments, the trap instructions are written before writing
   // the header. Setting the ELF header bytes to zero ensures that any unused
   // bytes in the header are zero-cleared, instead of containing trap
   // instructions.
   memset(Buf, 0, sizeof(Elf_Ehdr));
   memcpy(Buf, "\177ELF", 4);
 
   // Write the ELF header.
   auto *EHdr = reinterpret_cast<Elf_Ehdr *>(Buf);
   EHdr->e_ident[EI_CLASS] = Config->Is64 ? ELFCLASS64 : ELFCLASS32;
   EHdr->e_ident[EI_DATA] = Config->IsLE ? ELFDATA2LSB : ELFDATA2MSB;
   EHdr->e_ident[EI_VERSION] = EV_CURRENT;
   EHdr->e_ident[EI_OSABI] = Config->OSABI;
   EHdr->e_ident[EI_ABIVERSION] = getAbiVersion();
   EHdr->e_type = getELFType();
   EHdr->e_machine = Config->EMachine;
   EHdr->e_version = EV_CURRENT;
   EHdr->e_entry = getEntryAddr();
   EHdr->e_shoff = SectionHeaderOff;
   EHdr->e_flags = Config->EFlags;
   EHdr->e_ehsize = sizeof(Elf_Ehdr);
   EHdr->e_phnum = Phdrs.size();
   EHdr->e_shentsize = sizeof(Elf_Shdr);
 
   if (!Config->Relocatable) {
     EHdr->e_phoff = sizeof(Elf_Ehdr);
     EHdr->e_phentsize = sizeof(Elf_Phdr);
   }
 
   // Write the program header table.
   auto *HBuf = reinterpret_cast<Elf_Phdr *>(Buf + EHdr->e_phoff);
   for (PhdrEntry *P : Phdrs) {
     HBuf->p_type = P->p_type;
     HBuf->p_flags = P->p_flags;
     HBuf->p_offset = P->p_offset;
     HBuf->p_vaddr = P->p_vaddr;
     HBuf->p_paddr = P->p_paddr;
     HBuf->p_filesz = P->p_filesz;
     HBuf->p_memsz = P->p_memsz;
     HBuf->p_align = P->p_align;
     ++HBuf;
   }
 
   // Write the section header table.
   //
   // The ELF header can only store numbers up to SHN_LORESERVE in the e_shnum
   // and e_shstrndx fields. When the value of one of these fields exceeds
   // SHN_LORESERVE ELF requires us to put sentinel values in the ELF header and
   // use fields in the section header at index 0 to store
   // the value. The sentinel values and fields are:
   // e_shnum = 0, SHdrs[0].sh_size = number of sections.
   // e_shstrndx = SHN_XINDEX, SHdrs[0].sh_link = .shstrtab section index.
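   //
   // Illustrative count: with 70,000 output sections, Num is 70,001, which is
   // greater than SHN_LORESERVE (0xff00), so e_shnum stays 0 and
   // SHdrs[0].sh_size holds 70,001.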
   auto *SHdrs = reinterpret_cast<Elf_Shdr *>(Buf + EHdr->e_shoff);
   size_t Num = OutputSections.size() + 1;
   if (Num >= SHN_LORESERVE)
     SHdrs->sh_size = Num;
   else
     EHdr->e_shnum = Num;
 
   uint32_t StrTabIndex = In.ShStrTab->getParent()->SectionIndex;
   if (StrTabIndex >= SHN_LORESERVE) {
     SHdrs->sh_link = StrTabIndex;
     EHdr->e_shstrndx = SHN_XINDEX;
   } else {
     EHdr->e_shstrndx = StrTabIndex;
   }
 
   for (OutputSection *Sec : OutputSections)
     Sec->writeHeaderTo<ELFT>(++SHdrs);
 }
 
 // Open a result file.
 template <class ELFT> void Writer<ELFT>::openFile() {
   uint64_t MaxSize = Config->Is64 ? INT64_MAX : UINT32_MAX;
   if (MaxSize < FileSize) {
     error("output file too large: " + Twine(FileSize) + " bytes");
     return;
   }
 
   unlinkAsync(Config->OutputFile);
   unsigned Flags = 0;
   if (!Config->Relocatable)
     Flags = FileOutputBuffer::F_executable;
   Expected<std::unique_ptr<FileOutputBuffer>> BufferOrErr =
       FileOutputBuffer::create(Config->OutputFile, FileSize, Flags);
 
   if (!BufferOrErr)
     error("failed to open " + Config->OutputFile + ": " +
           llvm::toString(BufferOrErr.takeError()));
   else
     Buffer = std::move(*BufferOrErr);
 }
 
 template <class ELFT> void Writer<ELFT>::writeSectionsBinary() {
   uint8_t *Buf = Buffer->getBufferStart();
   for (OutputSection *Sec : OutputSections)
     if (Sec->Flags & SHF_ALLOC)
       Sec->writeTo<ELFT>(Buf + Sec->Offset);
 }
 
 static void fillTrap(uint8_t *I, uint8_t *End) {
   for (; I + 4 <= End; I += 4)
     memcpy(I, &Target->TrapInstr, 4);
 }
 
 // Fill the last page of executable segments with trap instructions
 // instead of leaving them as zero. Even though it is not required by any
 // standard, it is in general a good thing to do for security reasons.
 //
 // We'll leave other pages in segments as-is because the rest will be
 // overwritten by output sections.
 template <class ELFT> void Writer<ELFT>::writeTrapInstr() {
   if (Script->HasSectionsCommand)
     return;
 
   // Fill the last page.
   uint8_t *Buf = Buffer->getBufferStart();
   for (PhdrEntry *P : Phdrs)
     if (P->p_type == PT_LOAD && (P->p_flags & PF_X))
       fillTrap(Buf + alignDown(P->p_offset + P->p_filesz, Target->PageSize),
                Buf + alignTo(P->p_offset + P->p_filesz, Target->PageSize));
 
   // Round up the file size of the last segment to the page boundary iff it is
   // an executable segment to ensure that other tools don't accidentally
   // trim the instruction padding (e.g. when stripping the file).
   PhdrEntry *Last = nullptr;
   for (PhdrEntry *P : Phdrs)
     if (P->p_type == PT_LOAD)
       Last = P;
 
   if (Last && (Last->p_flags & PF_X))
     Last->p_memsz = Last->p_filesz = alignTo(Last->p_filesz, Target->PageSize);
 }
 
 // Write section contents to a mmap'ed file.
 template <class ELFT> void Writer<ELFT>::writeSections() {
   uint8_t *Buf = Buffer->getBufferStart();
 
   OutputSection *EhFrameHdr = nullptr;
   if (In.EhFrameHdr && !In.EhFrameHdr->empty())
     EhFrameHdr = In.EhFrameHdr->getParent();
 
   // In -r or --emit-relocs mode, write the relocation sections first, as on
   // Elf_Rel targets we might find out that we need to modify the relocated
   // section while doing so.
   for (OutputSection *Sec : OutputSections)
     if (Sec->Type == SHT_REL || Sec->Type == SHT_RELA)
       Sec->writeTo<ELFT>(Buf + Sec->Offset);
 
   for (OutputSection *Sec : OutputSections)
     if (Sec != EhFrameHdr && Sec->Type != SHT_REL && Sec->Type != SHT_RELA)
       Sec->writeTo<ELFT>(Buf + Sec->Offset);
 
   // The .eh_frame_hdr depends on .eh_frame section contents, therefore
   // it should be written after .eh_frame is written.
   if (EhFrameHdr)
     EhFrameHdr->writeTo<ELFT>(Buf + EhFrameHdr->Offset);
 }
 
 template <class ELFT> void Writer<ELFT>::writeBuildId() {
   if (!In.BuildId || !In.BuildId->getParent())
     return;
 
   // Compute a hash of all sections of the output file.
   uint8_t *Start = Buffer->getBufferStart();
   uint8_t *End = Start + FileSize;
   In.BuildId->writeBuildId({Start, End});
 }
 
 template void elf::writeResult<ELF32LE>();
 template void elf::writeResult<ELF32BE>();
 template void elf::writeResult<ELF64LE>();
 template void elf::writeResult<ELF64BE>();