Index: projects/clang1000-import/contrib/llvm-project/llvm/include/llvm/MC/MCFixupKindInfo.h
===================================================================
--- projects/clang1000-import/contrib/llvm-project/llvm/include/llvm/MC/MCFixupKindInfo.h	(revision 358395)
+++ projects/clang1000-import/contrib/llvm-project/llvm/include/llvm/MC/MCFixupKindInfo.h	(revision 358396)
@@ -1,45 +1,50 @@
 //===-- llvm/MC/MCFixupKindInfo.h - Fixup Descriptors -----------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//

 #ifndef LLVM_MC_MCFIXUPKINDINFO_H
 #define LLVM_MC_MCFIXUPKINDINFO_H

 namespace llvm {

 /// Target independent information on a fixup kind.
 struct MCFixupKindInfo {
   enum FixupKindFlags {
     /// Is this fixup kind PC relative? This is used by the assembler backend
     /// to evaluate fixup values in a target independent manner when possible.
     FKF_IsPCRel = (1 << 0),

     /// Should this fixup kind force a 4-byte aligned effective PC value?
     FKF_IsAlignedDownTo32Bits = (1 << 1),

     /// Should this fixup be evaluated in a target dependent manner?
-    FKF_IsTarget = (1 << 2)
+    FKF_IsTarget = (1 << 2),
+
+    /// This fixup kind should be resolved if defined.
+    /// FIXME: This is a workaround because we don't support certain ARM
+    /// relocation types. This flag should eventually be removed.
+    FKF_Constant = (1 << 3),
   };

   /// A target specific name for the fixup kind. The names will be unique for
   /// distinct kinds on any given target.
   const char *Name;

   /// The bit offset to write the relocation into.
   unsigned TargetOffset;

   /// The number of bits written by this fixup. The bits are assumed to be
   /// contiguous.
   unsigned TargetSize;

   /// Flags describing additional information on this fixup kind.
   unsigned Flags;
 };

 } // End llvm namespace

 #endif
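The new flag rides in the existing Flags word of MCFixupKindInfo, so a backend
opts in simply by OR-ing it into a kind's table entry. A minimal sketch of such
an entry, assuming a made-up kind name (the real entries are in the
ARMAsmBackend.cpp hunk below):

    // Aggregate-initializes MCFixupKindInfo{Name, TargetOffset, TargetSize,
    // Flags} as declared above; "fixup_example_pcrel" is hypothetical.
    const MCFixupKindInfo ExampleInfo = {
        "fixup_example_pcrel", /*TargetOffset=*/0, /*TargetSize=*/32,
        MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant};
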
Index: projects/clang1000-import/contrib/llvm-project/llvm/lib/MC/MCAssembler.cpp
===================================================================
--- projects/clang1000-import/contrib/llvm-project/llvm/lib/MC/MCAssembler.cpp	(revision 358395)
+++ projects/clang1000-import/contrib/llvm-project/llvm/lib/MC/MCAssembler.cpp	(revision 358396)
@@ -1,1208 +1,1210 @@
 //===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//

 #include "llvm/MC/MCAssembler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/MC/MCAsmBackend.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCAsmLayout.h"
 #include "llvm/MC/MCCodeEmitter.h"
 #include "llvm/MC/MCCodeView.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCDwarf.h"
 #include "llvm/MC/MCExpr.h"
 #include "llvm/MC/MCFixup.h"
 #include "llvm/MC/MCFixupKindInfo.h"
 #include "llvm/MC/MCFragment.h"
 #include "llvm/MC/MCInst.h"
 #include "llvm/MC/MCObjectWriter.h"
 #include "llvm/MC/MCSection.h"
 #include "llvm/MC/MCSectionELF.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/MC/MCValue.h"
 #include "llvm/Support/Alignment.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/LEB128.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cassert>
 #include <cstdint>
 #include <cstring>
 #include <tuple>
 #include <utility>

 using namespace llvm;

 #define DEBUG_TYPE "assembler"

 namespace {
 namespace stats {

 STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
 STATISTIC(EmittedRelaxableFragments,
           "Number of emitted assembler fragments - relaxable");
 STATISTIC(EmittedDataFragments,
           "Number of emitted assembler fragments - data");
 STATISTIC(EmittedCompactEncodedInstFragments,
           "Number of emitted assembler fragments - compact encoded inst");
 STATISTIC(EmittedAlignFragments,
           "Number of emitted assembler fragments - align");
 STATISTIC(EmittedFillFragments,
           "Number of emitted assembler fragments - fill");
 STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
 STATISTIC(evaluateFixup, "Number of evaluated fixups");
 STATISTIC(FragmentLayouts, "Number of fragment layouts");
 STATISTIC(ObjectBytes, "Number of emitted object file bytes");
 STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
 STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

 } // end namespace stats
 } // end anonymous namespace

 // FIXME FIXME FIXME: There are a number of places in this file where we
 // convert what is a 64-bit assembler value used for computation into a value
 // in the object file, which may truncate it. We should detect that truncation
 // where invalid and report errors back.
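// A hedged illustration of the truncation hazard described in the FIXME above
// (the helper is hypothetical, not part of LLVM): a 64-bit assembly-time
// value written into a narrower object-file field silently drops the high
// bits unless it is range-checked first.
//
//   bool fitsInBytes(uint64_t Value, unsigned NumBytes) {
//     return NumBytes >= 8 || (Value >> (NumBytes * 8)) == 0;
//   }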
 /* *** */

 MCAssembler::MCAssembler(MCContext &Context,
                          std::unique_ptr<MCAsmBackend> Backend,
                          std::unique_ptr<MCCodeEmitter> Emitter,
                          std::unique_ptr<MCObjectWriter> Writer)
     : Context(Context), Backend(std::move(Backend)),
       Emitter(std::move(Emitter)), Writer(std::move(Writer)),
       BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false),
       IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) {
   VersionInfo.Major = 0; // Major version == 0 for "none specified"
 }

 MCAssembler::~MCAssembler() = default;

 void MCAssembler::reset() {
   Sections.clear();
   Symbols.clear();
   IndirectSymbols.clear();
   DataRegions.clear();
   LinkerOptions.clear();
   FileNames.clear();
   ThumbFuncs.clear();
   BundleAlignSize = 0;
   RelaxAll = false;
   SubsectionsViaSymbols = false;
   IncrementalLinkerCompatible = false;
   ELFHeaderEFlags = 0;
   LOHContainer.reset();
   VersionInfo.Major = 0;
   VersionInfo.SDKVersion = VersionTuple();

   // reset objects owned by us
   if (getBackendPtr())
     getBackendPtr()->reset();
   if (getEmitterPtr())
     getEmitterPtr()->reset();
   if (getWriterPtr())
     getWriterPtr()->reset();
   getLOHContainer().reset();
 }

 bool MCAssembler::registerSection(MCSection &Section) {
   if (Section.isRegistered())
     return false;
   Sections.push_back(&Section);
   Section.setIsRegistered(true);
   return true;
 }

 bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
   if (ThumbFuncs.count(Symbol))
     return true;

   if (!Symbol->isVariable())
     return false;

   const MCExpr *Expr = Symbol->getVariableValue();

   MCValue V;
   if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
     return false;

   if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
     return false;

   const MCSymbolRefExpr *Ref = V.getSymA();
   if (!Ref)
     return false;

   if (Ref->getKind() != MCSymbolRefExpr::VK_None)
     return false;

   const MCSymbol &Sym = Ref->getSymbol();
   if (!isThumbFunc(&Sym))
     return false;

   ThumbFuncs.insert(Symbol); // Cache it.
   return true;
 }

 bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
   // Non-temporary labels should always be visible to the linker.
   if (!Symbol.isTemporary())
     return true;

   if (Symbol.isUsedInReloc())
     return true;

   return false;
 }

 const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
   // Linker visible symbols define atoms.
   if (isSymbolLinkerVisible(S))
     return &S;

   // Absolute and undefined symbols have no defining atom.
   if (!S.isInSection())
     return nullptr;

   // Non-linker visible symbols in sections which can't be atomized have no
   // defining atom.
   if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
           *S.getFragment()->getParent()))
     return nullptr;

   // Otherwise, return the atom for the containing fragment.
   return S.getFragment()->getAtom();
 }
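// A hedged restatement of getAtom's decision ladder as a free function
// (illustrative only; each boolean stands in for the predicate above):
//
//   const MCSymbol *atomFor(bool LinkerVisible, bool InSection,
//                           bool Atomizable, const MCSymbol *Self,
//                           const MCSymbol *FragmentAtom) {
//     if (LinkerVisible) return Self;               // own atom
//     if (!InSection || !Atomizable) return nullptr; // no defining atom
//     return FragmentAtom;                           // inherit from fragment
//   }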
 bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
                                 const MCFragment *DF, MCValue &Target,
                                 uint64_t &Value, bool &WasForced) const {
   ++stats::evaluateFixup;

   // FIXME: This code has some duplication with recordRelocation. We should
   // probably merge the two into a single callback that tries to evaluate a
   // fixup and records a relocation if one is needed.

   // On error claim to have completely evaluated the fixup, to prevent any
   // further processing from being done.
   const MCExpr *Expr = Fixup.getValue();
   MCContext &Ctx = getContext();
   Value = 0;
   WasForced = false;
   if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
     Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
     return true;
   }
   if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
     if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
       Ctx.reportError(Fixup.getLoc(),
                       "unsupported subtraction of qualified symbol");
       return true;
     }
   }

   assert(getBackendPtr() && "Expected assembler backend");
   bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                   MCFixupKindInfo::FKF_IsTarget;

   if (IsTarget)
     return getBackend().evaluateTargetFixup(*this, Layout, Fixup, DF, Target,
                                             Value, WasForced);

+  unsigned FixupFlags = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
   bool IsPCRel = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsPCRel;

   bool IsResolved = false;
   if (IsPCRel) {
     if (Target.getSymB()) {
       IsResolved = false;
     } else if (!Target.getSymA()) {
       IsResolved = false;
     } else {
       const MCSymbolRefExpr *A = Target.getSymA();
       const MCSymbol &SA = A->getSymbol();
       if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
         IsResolved = false;
       } else if (auto *Writer = getWriterPtr()) {
-        IsResolved = Writer->isSymbolRefDifferenceFullyResolvedImpl(
-            *this, SA, *DF, false, true);
+        IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) ||
+                     Writer->isSymbolRefDifferenceFullyResolvedImpl(
+                         *this, SA, *DF, false, true);
       }
     }
   } else {
     IsResolved = Target.isAbsolute();
   }

   Value = Target.getConstant();

   if (const MCSymbolRefExpr *A = Target.getSymA()) {
     const MCSymbol &Sym = A->getSymbol();
     if (Sym.isDefined())
       Value += Layout.getSymbolOffset(Sym);
   }
   if (const MCSymbolRefExpr *B = Target.getSymB()) {
     const MCSymbol &Sym = B->getSymbol();
     if (Sym.isDefined())
       Value -= Layout.getSymbolOffset(Sym);
   }

   bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
                        MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
   assert((ShouldAlignPC ? IsPCRel : true) &&
          "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

   if (IsPCRel) {
     uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();

     // A number of ARM fixups in Thumb mode require that the effective PC
     // address be determined as the 32-bit aligned version of the actual
     // offset.
     if (ShouldAlignPC)
       Offset &= ~0x3;
     Value -= Offset;
   }

   // Let the backend force a relocation if needed.
   if (IsResolved && getBackend().shouldForceRelocation(*this, Fixup, Target)) {
     IsResolved = false;
     WasForced = true;
   }

   return IsResolved;
 }
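// A hedged distillation of the PC-relative branch of evaluateFixup above
// (free function for illustration only, not LLVM API; each boolean stands
// for the corresponding predicate in the real code):
//
//   bool resolvesAtAssemblyTime(bool IsPCRel, bool HasSymB, bool HasSymA,
//                               bool QualifiedOrUndef, bool IsConstantKind,
//                               bool WriterSaysResolved, bool IsAbsolute) {
//     if (!IsPCRel)
//       return IsAbsolute;
//     if (HasSymB || !HasSymA || QualifiedOrUndef)
//       return false;
//     // New in this revision: FKF_Constant short-circuits the writer query.
//     return IsConstantKind || WriterSaysResolved;
//   }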
 uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                           const MCFragment &F) const {
   assert(getBackendPtr() && "Requires assembler backend");
   switch (F.getKind()) {
   case MCFragment::FT_Data:
     return cast<MCDataFragment>(F).getContents().size();
   case MCFragment::FT_Relaxable:
     return cast<MCRelaxableFragment>(F).getContents().size();
   case MCFragment::FT_CompactEncodedInst:
     return cast<MCCompactEncodedInstFragment>(F).getContents().size();
   case MCFragment::FT_Fill: {
     auto &FF = cast<MCFillFragment>(F);
     int64_t NumValues = 0;
     if (!FF.getNumValues().evaluateAsAbsolute(NumValues, Layout)) {
       getContext().reportError(FF.getLoc(),
                                "expected assembly-time absolute expression");
       return 0;
     }
     int64_t Size = NumValues * FF.getValueSize();
     if (Size < 0) {
       getContext().reportError(FF.getLoc(), "invalid number of bytes");
       return 0;
     }
     return Size;
   }

   case MCFragment::FT_LEB:
     return cast<MCLEBFragment>(F).getContents().size();

   case MCFragment::FT_BoundaryAlign:
     return cast<MCBoundaryAlignFragment>(F).getSize();

   case MCFragment::FT_SymbolId:
     return 4;

   case MCFragment::FT_Align: {
     const MCAlignFragment &AF = cast<MCAlignFragment>(F);
     unsigned Offset = Layout.getFragmentOffset(&AF);
     unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment()));

     // Insert extra Nops for code alignment if the target defines the
     // shouldInsertExtraNopBytesForCodeAlign target hook.
     if (AF.getParent()->UseCodeAlign() && AF.hasEmitNops() &&
         getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
       return Size;

     // If we are padding with nops, force the padding to be larger than the
     // minimum nop size.
     if (Size > 0 && AF.hasEmitNops()) {
       while (Size % getBackend().getMinimumNopSize())
         Size += AF.getAlignment();
     }
     if (Size > AF.getMaxBytesToEmit())
       return 0;
     return Size;
   }

   case MCFragment::FT_Org: {
     const MCOrgFragment &OF = cast<MCOrgFragment>(F);
     MCValue Value;
     if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
       getContext().reportError(OF.getLoc(),
                                "expected assembly-time absolute expression");
       return 0;
     }

     uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
     int64_t TargetLocation = Value.getConstant();
     if (const MCSymbolRefExpr *A = Value.getSymA()) {
       uint64_t Val;
       if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
         getContext().reportError(OF.getLoc(), "expected absolute expression");
         return 0;
       }
       TargetLocation += Val;
     }
     int64_t Size = TargetLocation - FragmentOffset;
     if (Size < 0 || Size >= 0x40000000) {
       getContext().reportError(
           OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                            "' (at offset '" + Twine(FragmentOffset) + "')");
       return 0;
     }
     return Size;
   }

   case MCFragment::FT_Dwarf:
     return cast<MCDwarfLineAddrFragment>(F).getContents().size();
   case MCFragment::FT_DwarfFrame:
     return cast<MCDwarfCallFrameFragment>(F).getContents().size();
   case MCFragment::FT_CVInlineLines:
     return cast<MCCVInlineLineTableFragment>(F).getContents().size();
   case MCFragment::FT_CVDefRange:
     return cast<MCCVDefRangeFragment>(F).getContents().size();
   case MCFragment::FT_Dummy:
     llvm_unreachable("Should not have been added");
   }

   llvm_unreachable("invalid fragment kind");
 }

 void MCAsmLayout::layoutFragment(MCFragment *F) {
   MCFragment *Prev = F->getPrevNode();

   // We should never try to recompute something which is valid.
   assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
   // We should never try to compute the fragment layout if its predecessor
   // isn't valid.
   assert((!Prev || isFragmentValid(Prev)) &&
          "Attempt to compute fragment before its predecessor!");

   ++stats::FragmentLayouts;

   // Compute fragment offset and size.
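   // (Hedged worked example for the bundling logic later in this function:
   // with BundleAlignSize = 16, a 6-byte instruction fragment whose natural
   // offset is 12 would straddle the boundary at 16, so computeBundlePadding
   // returns 4 and F->Offset is moved to 16; the padding bytes themselves
   // are emitted later by writeFragmentPadding.)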
if (Prev) F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev); else F->Offset = 0; LastValidFragment[F->getParent()] = F; // If bundling is enabled and this fragment has instructions in it, it has to // obey the bundling restrictions. With padding, we'll have: // // // BundlePadding // ||| // ------------------------------------- // Prev |##########| F | // ------------------------------------- // ^ // | // F->Offset // // The fragment's offset will point to after the padding, and its computed // size won't include the padding. // // When the -mc-relax-all flag is used, we optimize bundling by writting the // padding directly into fragments when the instructions are emitted inside // the streamer. When the fragment is larger than the bundle size, we need to // ensure that it's bundle aligned. This means that if we end up with // multiple fragments, we must emit bundle padding between fragments. // // ".align N" is an example of a directive that introduces multiple // fragments. We could add a special case to handle ".align N" by emitting // within-fragment padding (which would produce less padding when N is less // than the bundle size), but for now we don't. // if (Assembler.isBundlingEnabled() && F->hasInstructions()) { assert(isa(F) && "Only MCEncodedFragment implementations have instructions"); MCEncodedFragment *EF = cast(F); uint64_t FSize = Assembler.computeFragmentSize(*this, *EF); if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize()) report_fatal_error("Fragment can't be larger than a bundle size"); uint64_t RequiredBundlePadding = computeBundlePadding(Assembler, EF, EF->Offset, FSize); if (RequiredBundlePadding > UINT8_MAX) report_fatal_error("Padding cannot exceed 255 bytes"); EF->setBundlePadding(static_cast(RequiredBundlePadding)); EF->Offset += RequiredBundlePadding; } } void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) { bool New = !Symbol.isRegistered(); if (Created) *Created = New; if (New) { Symbol.setIsRegistered(true); Symbols.push_back(&Symbol); } } void MCAssembler::writeFragmentPadding(raw_ostream &OS, const MCEncodedFragment &EF, uint64_t FSize) const { assert(getBackendPtr() && "Expected assembler backend"); // Should NOP padding be written out before this fragment? unsigned BundlePadding = EF.getBundlePadding(); if (BundlePadding > 0) { assert(isBundlingEnabled() && "Writing bundle padding with disabled bundling"); assert(EF.hasInstructions() && "Writing bundle padding for a fragment without instructions"); unsigned TotalLength = BundlePadding + static_cast(FSize); if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) { // If the padding itself crosses a bundle boundary, it must be emitted // in 2 pieces, since even nop instructions must not cross boundaries. // v--------------v <- BundleAlignSize // v---------v <- BundlePadding // ---------------------------- // | Prev |####|####| F | // ---------------------------- // ^-------------------^ <- TotalLength unsigned DistanceToBoundary = TotalLength - getBundleAlignSize(); if (!getBackend().writeNopData(OS, DistanceToBoundary)) report_fatal_error("unable to write NOP sequence of " + Twine(DistanceToBoundary) + " bytes"); BundlePadding -= DistanceToBoundary; } if (!getBackend().writeNopData(OS, BundlePadding)) report_fatal_error("unable to write NOP sequence of " + Twine(BundlePadding) + " bytes"); } } /// Write the fragment \p F to the output file. 
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFragment &F) { // FIXME: Embed in fragments instead? uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F); support::endianness Endian = Asm.getBackend().Endian; if (const MCEncodedFragment *EF = dyn_cast(&F)) Asm.writeFragmentPadding(OS, *EF, FragmentSize); // This variable (and its dummy usage) is to participate in the assert at // the end of the function. uint64_t Start = OS.tell(); (void) Start; ++stats::EmittedFragments; switch (F.getKind()) { case MCFragment::FT_Align: { ++stats::EmittedAlignFragments; const MCAlignFragment &AF = cast(F); assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!"); uint64_t Count = FragmentSize / AF.getValueSize(); // FIXME: This error shouldn't actually occur (the front end should emit // multiple .align directives to enforce the semantics it wants), but is // severe enough that we want to report it. How to handle this? if (Count * AF.getValueSize() != FragmentSize) report_fatal_error("undefined .align directive, value size '" + Twine(AF.getValueSize()) + "' is not a divisor of padding size '" + Twine(FragmentSize) + "'"); // See if we are aligning with nops, and if so do that first to try to fill // the Count bytes. Then if that did not fill any bytes or there are any // bytes left to fill use the Value and ValueSize to fill the rest. // If we are aligning with nops, ask that target to emit the right data. if (AF.hasEmitNops()) { if (!Asm.getBackend().writeNopData(OS, Count)) report_fatal_error("unable to write nop sequence of " + Twine(Count) + " bytes"); break; } // Otherwise, write out in multiples of the value size. for (uint64_t i = 0; i != Count; ++i) { switch (AF.getValueSize()) { default: llvm_unreachable("Invalid size!"); case 1: OS << char(AF.getValue()); break; case 2: support::endian::write(OS, AF.getValue(), Endian); break; case 4: support::endian::write(OS, AF.getValue(), Endian); break; case 8: support::endian::write(OS, AF.getValue(), Endian); break; } } break; } case MCFragment::FT_Data: ++stats::EmittedDataFragments; OS << cast(F).getContents(); break; case MCFragment::FT_Relaxable: ++stats::EmittedRelaxableFragments; OS << cast(F).getContents(); break; case MCFragment::FT_CompactEncodedInst: ++stats::EmittedCompactEncodedInstFragments; OS << cast(F).getContents(); break; case MCFragment::FT_Fill: { ++stats::EmittedFillFragments; const MCFillFragment &FF = cast(F); uint64_t V = FF.getValue(); unsigned VSize = FF.getValueSize(); const unsigned MaxChunkSize = 16; char Data[MaxChunkSize]; assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size"); // Duplicate V into Data as byte vector to reduce number of // writes done. As such, do endian conversion here. for (unsigned I = 0; I != VSize; ++I) { unsigned index = Endian == support::little ? I : (VSize - I - 1); Data[I] = uint8_t(V >> (index * 8)); } for (unsigned I = VSize; I < MaxChunkSize; ++I) Data[I] = Data[I - VSize]; // Set to largest multiple of VSize in Data. const unsigned NumPerChunk = MaxChunkSize / VSize; // Set ChunkSize to largest multiple of VSize in Data const unsigned ChunkSize = VSize * NumPerChunk; // Do copies by chunk. StringRef Ref(Data, ChunkSize); for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I) OS << Ref; // do remainder if needed. 
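// (Hedged worked example of the chunked fill above: VSize = 2 with
// big-endian V = 0x1234 replicates Data = 12 34 12 34 ... across all 16
// bytes, so ChunkSize = 16; a 100-byte fragment is then written as six
// whole 16-byte chunks plus the 4-byte remainder handled below.)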
unsigned TrailingCount = FragmentSize % ChunkSize; if (TrailingCount) OS.write(Data, TrailingCount); break; } case MCFragment::FT_LEB: { const MCLEBFragment &LF = cast(F); OS << LF.getContents(); break; } case MCFragment::FT_BoundaryAlign: { if (!Asm.getBackend().writeNopData(OS, FragmentSize)) report_fatal_error("unable to write nop sequence of " + Twine(FragmentSize) + " bytes"); break; } case MCFragment::FT_SymbolId: { const MCSymbolIdFragment &SF = cast(F); support::endian::write(OS, SF.getSymbol()->getIndex(), Endian); break; } case MCFragment::FT_Org: { ++stats::EmittedOrgFragments; const MCOrgFragment &OF = cast(F); for (uint64_t i = 0, e = FragmentSize; i != e; ++i) OS << char(OF.getValue()); break; } case MCFragment::FT_Dwarf: { const MCDwarfLineAddrFragment &OF = cast(F); OS << OF.getContents(); break; } case MCFragment::FT_DwarfFrame: { const MCDwarfCallFrameFragment &CF = cast(F); OS << CF.getContents(); break; } case MCFragment::FT_CVInlineLines: { const auto &OF = cast(F); OS << OF.getContents(); break; } case MCFragment::FT_CVDefRange: { const auto &DRF = cast(F); OS << DRF.getContents(); break; } case MCFragment::FT_Dummy: llvm_unreachable("Should not have been added"); } assert(OS.tell() - Start == FragmentSize && "The stream should advance by fragment size"); } void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec, const MCAsmLayout &Layout) const { assert(getBackendPtr() && "Expected assembler backend"); // Ignore virtual sections. if (Sec->isVirtualSection()) { assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!"); // Check that contents are only things legal inside a virtual section. for (const MCFragment &F : *Sec) { switch (F.getKind()) { default: llvm_unreachable("Invalid fragment in virtual section!"); case MCFragment::FT_Data: { // Check that we aren't trying to write a non-zero contents (or fixups) // into a virtual section. This is to support clients which use standard // directives to fill the contents of virtual sections. const MCDataFragment &DF = cast(F); if (DF.fixup_begin() != DF.fixup_end()) report_fatal_error("cannot have fixups in virtual section!"); for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i) if (DF.getContents()[i]) { if (auto *ELFSec = dyn_cast(Sec)) report_fatal_error("non-zero initializer found in section '" + ELFSec->getSectionName() + "'"); else report_fatal_error("non-zero initializer found in virtual section"); } break; } case MCFragment::FT_Align: // Check that we aren't trying to write a non-zero value into a virtual // section. assert((cast(F).getValueSize() == 0 || cast(F).getValue() == 0) && "Invalid align in virtual section!"); break; case MCFragment::FT_Fill: assert((cast(F).getValue() == 0) && "Invalid fill in virtual section!"); break; } } return; } uint64_t Start = OS.tell(); (void)Start; for (const MCFragment &F : *Sec) writeFragment(OS, *this, Layout, F); assert(OS.tell() - Start == Layout.getSectionAddressSize(Sec)); } std::tuple MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F, const MCFixup &Fixup) { // Evaluate the fixup. MCValue Target; uint64_t FixedValue; bool WasForced; bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue, WasForced); if (!IsResolved) { // The fixup was unresolved, we need a relocation. Inform the object // writer of the relocation, and give it an opportunity to adjust the // fixup value if need be. 
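// (Hedged example: for a fixup over "a - b" on a backend that reports
// requiresDiffExpressionRelocations(), e.g. because linker relaxation can
// move either symbol, the branch below emits two relocations instead of
// one: an "add" against a, which carries the constant addend, and a "sub"
// against b.)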
if (Target.getSymA() && Target.getSymB() && getBackend().requiresDiffExpressionRelocations()) { // The fixup represents the difference between two symbols, which the // backend has indicated must be resolved at link time. Split up the fixup // into two relocations, one for the add, and one for the sub, and emit // both of these. The constant will be associated with the add half of the // expression. MCFixup FixupAdd = MCFixup::createAddFor(Fixup); MCValue TargetAdd = MCValue::get(Target.getSymA(), nullptr, Target.getConstant()); getWriter().recordRelocation(*this, Layout, &F, FixupAdd, TargetAdd, FixedValue); MCFixup FixupSub = MCFixup::createSubFor(Fixup); MCValue TargetSub = MCValue::get(Target.getSymB()); getWriter().recordRelocation(*this, Layout, &F, FixupSub, TargetSub, FixedValue); } else { getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue); } } return std::make_tuple(Target, FixedValue, IsResolved); } void MCAssembler::layout(MCAsmLayout &Layout) { assert(getBackendPtr() && "Expected assembler backend"); DEBUG_WITH_TYPE("mc-dump", { errs() << "assembler backend - pre-layout\n--\n"; dump(); }); // Create dummy fragments and assign section ordinals. unsigned SectionIndex = 0; for (MCSection &Sec : *this) { // Create dummy fragments to eliminate any empty sections, this simplifies // layout. if (Sec.getFragmentList().empty()) new MCDataFragment(&Sec); Sec.setOrdinal(SectionIndex++); } // Assign layout order indices to sections and fragments. for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) { MCSection *Sec = Layout.getSectionOrder()[i]; Sec->setLayoutOrder(i); unsigned FragmentIndex = 0; for (MCFragment &Frag : *Sec) Frag.setLayoutOrder(FragmentIndex++); } // Layout until everything fits. while (layoutOnce(Layout)) if (getContext().hadError()) return; DEBUG_WITH_TYPE("mc-dump", { errs() << "assembler backend - post-relaxation\n--\n"; dump(); }); // Finalize the layout, including fragment lowering. finishLayout(Layout); DEBUG_WITH_TYPE("mc-dump", { errs() << "assembler backend - final-layout\n--\n"; dump(); }); // Allow the object writer a chance to perform post-layout binding (for // example, to set the index fields in the symbol data). getWriter().executePostLayoutBinding(*this, Layout); // Evaluate and apply the fixups, generating relocation entries as necessary. for (MCSection &Sec : *this) { for (MCFragment &Frag : Sec) { // Data and relaxable fragments both have fixups. So only process // those here. // FIXME: Is there a better way to do this? MCEncodedFragmentWithFixups // being templated makes this tricky. 
if (isa(&Frag) && isa(&Frag)) continue; if (!isa(&Frag) && !isa(&Frag) && !isa(&Frag)) continue; ArrayRef Fixups; MutableArrayRef Contents; const MCSubtargetInfo *STI = nullptr; if (auto *FragWithFixups = dyn_cast(&Frag)) { Fixups = FragWithFixups->getFixups(); Contents = FragWithFixups->getContents(); STI = FragWithFixups->getSubtargetInfo(); assert(!FragWithFixups->hasInstructions() || STI != nullptr); } else if (auto *FragWithFixups = dyn_cast(&Frag)) { Fixups = FragWithFixups->getFixups(); Contents = FragWithFixups->getContents(); STI = FragWithFixups->getSubtargetInfo(); assert(!FragWithFixups->hasInstructions() || STI != nullptr); } else if (auto *FragWithFixups = dyn_cast(&Frag)) { Fixups = FragWithFixups->getFixups(); Contents = FragWithFixups->getContents(); } else if (auto *FragWithFixups = dyn_cast(&Frag)) { Fixups = FragWithFixups->getFixups(); Contents = FragWithFixups->getContents(); } else if (auto *AF = dyn_cast(&Frag)) { // Insert fixup type for code alignment if the target define // shouldInsertFixupForCodeAlign target hook. if (Sec.UseCodeAlign() && AF->hasEmitNops()) { getBackend().shouldInsertFixupForCodeAlign(*this, Layout, *AF); } continue; } else if (auto *FragWithFixups = dyn_cast(&Frag)) { Fixups = FragWithFixups->getFixups(); Contents = FragWithFixups->getContents(); } else llvm_unreachable("Unknown fragment with fixups!"); for (const MCFixup &Fixup : Fixups) { uint64_t FixedValue; bool IsResolved; MCValue Target; std::tie(Target, FixedValue, IsResolved) = handleFixup(Layout, Frag, Fixup); getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue, IsResolved, STI); } } } } void MCAssembler::Finish() { // Create the layout object. MCAsmLayout Layout(*this); layout(Layout); // Write the object file. stats::ObjectBytes += getWriter().writeObject(*this, Layout); } bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup, const MCRelaxableFragment *DF, const MCAsmLayout &Layout) const { assert(getBackendPtr() && "Expected assembler backend"); MCValue Target; uint64_t Value; bool WasForced; bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value, WasForced); if (Target.getSymA() && Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 && Fixup.getKind() == FK_Data_1) return false; return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF, Layout, WasForced); } bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F, const MCAsmLayout &Layout) const { assert(getBackendPtr() && "Expected assembler backend"); // If this inst doesn't ever need relaxation, ignore it. This occurs when we // are intentionally pushing out inst fragments, or because we relaxed a // previous instruction to one that doesn't need relaxation. if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo())) return false; for (const MCFixup &Fixup : F->getFixups()) if (fixupNeedsRelaxation(Fixup, F, Layout)) return true; return false; } bool MCAssembler::relaxInstruction(MCAsmLayout &Layout, MCRelaxableFragment &F) { assert(getEmitterPtr() && "Expected CodeEmitter defined for relaxInstruction"); if (!fragmentNeedsRelaxation(&F, Layout)) return false; ++stats::RelaxedInstructions; // FIXME-PERF: We could immediately lower out instructions if we can tell // they are fully resolved, to avoid retesting on later passes. // Relax the fragment. MCInst Relaxed; getBackend().relaxInstruction(F.getInst(), *F.getSubtargetInfo(), Relaxed); // Encode the new instruction. // // FIXME-PERF: If it matters, we could let the target do this. 
It can // probably do so more efficiently in many cases. SmallVector Fixups; SmallString<256> Code; raw_svector_ostream VecOS(Code); getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, *F.getSubtargetInfo()); // Update the fragment. F.setInst(Relaxed); F.getContents() = Code; F.getFixups() = Fixups; return true; } bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) { uint64_t OldSize = LF.getContents().size(); int64_t Value; bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout); if (!Abs) report_fatal_error("sleb128 and uleb128 expressions must be absolute"); SmallString<8> &Data = LF.getContents(); Data.clear(); raw_svector_ostream OSE(Data); // The compiler can generate EH table assembly that is impossible to assemble // without either adding padding to an LEB fragment or adding extra padding // to a later alignment fragment. To accommodate such tables, relaxation can // only increase an LEB fragment size here, not decrease it. See PR35809. if (LF.isSigned()) encodeSLEB128(Value, OSE, OldSize); else encodeULEB128(Value, OSE, OldSize); return OldSize != LF.getContents().size(); } /// Check if the branch crosses the boundary. /// /// \param StartAddr start address of the fused/unfused branch. /// \param Size size of the fused/unfused branch. /// \param BoundaryAlignment alignment requirement of the branch. /// \returns true if the branch cross the boundary. static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size, Align BoundaryAlignment) { uint64_t EndAddr = StartAddr + Size; return (StartAddr >> Log2(BoundaryAlignment)) != ((EndAddr - 1) >> Log2(BoundaryAlignment)); } /// Check if the branch is against the boundary. /// /// \param StartAddr start address of the fused/unfused branch. /// \param Size size of the fused/unfused branch. /// \param BoundaryAlignment alignment requirement of the branch. /// \returns true if the branch is against the boundary. static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size, Align BoundaryAlignment) { uint64_t EndAddr = StartAddr + Size; return (EndAddr & (BoundaryAlignment.value() - 1)) == 0; } /// Check if the branch needs padding. /// /// \param StartAddr start address of the fused/unfused branch. /// \param Size size of the fused/unfused branch. /// \param BoundaryAlignment alignment requirement of the branch. /// \returns true if the branch needs padding. static bool needPadding(uint64_t StartAddr, uint64_t Size, Align BoundaryAlignment) { return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) || isAgainstBoundary(StartAddr, Size, BoundaryAlignment); } bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout, MCBoundaryAlignFragment &BF) { // The MCBoundaryAlignFragment that doesn't emit NOP should not be relaxed. if (!BF.canEmitNops()) return false; uint64_t AlignedOffset = Layout.getFragmentOffset(BF.getNextNode()); uint64_t AlignedSize = 0; const MCFragment *F = BF.getNextNode(); // If the branch is unfused, it is emitted into one fragment, otherwise it is // emitted into two fragments at most, the next MCBoundaryAlignFragment(if // exists) also marks the end of the branch. for (auto i = 0, N = BF.isFused() ? 2 : 1; i != N && !isa(F); ++i, F = F->getNextNode()) { AlignedSize += computeFragmentSize(Layout, *F); } uint64_t OldSize = BF.getSize(); AlignedOffset -= OldSize; Align BoundaryAlignment = BF.getAlignment(); uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment) ? 
offsetToAlignment(AlignedOffset, BoundaryAlignment) : 0U; if (NewSize == OldSize) return false; BF.setSize(NewSize); Layout.invalidateFragmentsFrom(&BF); return true; } bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout, MCDwarfLineAddrFragment &DF) { MCContext &Context = Layout.getAssembler().getContext(); uint64_t OldSize = DF.getContents().size(); int64_t AddrDelta; bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout); assert(Abs && "We created a line delta with an invalid expression"); (void)Abs; int64_t LineDelta; LineDelta = DF.getLineDelta(); SmallVectorImpl &Data = DF.getContents(); Data.clear(); raw_svector_ostream OSE(Data); DF.getFixups().clear(); if (!getBackend().requiresDiffExpressionRelocations()) { MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta, AddrDelta, OSE); } else { uint32_t Offset; uint32_t Size; bool SetDelta = MCDwarfLineAddr::FixedEncode(Context, getDWARFLinetableParams(), LineDelta, AddrDelta, OSE, &Offset, &Size); // Add Fixups for address delta or new address. const MCExpr *FixupExpr; if (SetDelta) { FixupExpr = &DF.getAddrDelta(); } else { const MCBinaryExpr *ABE = cast(&DF.getAddrDelta()); FixupExpr = ABE->getLHS(); } DF.getFixups().push_back( MCFixup::create(Offset, FixupExpr, MCFixup::getKindForSize(Size, false /*isPCRel*/))); } return OldSize != Data.size(); } bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout, MCDwarfCallFrameFragment &DF) { MCContext &Context = Layout.getAssembler().getContext(); uint64_t OldSize = DF.getContents().size(); int64_t AddrDelta; bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout); assert(Abs && "We created call frame with an invalid expression"); (void) Abs; SmallVectorImpl &Data = DF.getContents(); Data.clear(); raw_svector_ostream OSE(Data); DF.getFixups().clear(); if (getBackend().requiresDiffExpressionRelocations()) { uint32_t Offset; uint32_t Size; MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE, &Offset, &Size); if (Size) { DF.getFixups().push_back(MCFixup::create( Offset, &DF.getAddrDelta(), MCFixup::getKindForSizeInBits(Size /*In bits.*/, false /*isPCRel*/))); } } else { MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE); } return OldSize != Data.size(); } bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout, MCCVInlineLineTableFragment &F) { unsigned OldSize = F.getContents().size(); getContext().getCVContext().encodeInlineLineTable(Layout, F); return OldSize != F.getContents().size(); } bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout, MCCVDefRangeFragment &F) { unsigned OldSize = F.getContents().size(); getContext().getCVContext().encodeDefRange(Layout, F); return OldSize != F.getContents().size(); } bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) { // Holds the first fragment which needed relaxing during this layout. It will // remain NULL if none were relaxed. // When a fragment is relaxed, all the fragments following it should get // invalidated because their offset is going to change. MCFragment *FirstRelaxedFragment = nullptr; // Attempt to relax all the fragments in the section. for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) { // Check if this is a fragment that needs relaxation. 
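// (Hedged worked example for the FT_LEB case below: a .uleb128 of a
// forward distance that settles at 130 needs two bytes, 0x82 0x01; if an
// earlier pass reserved only one byte, relaxLEB grows the fragment, every
// later offset in the section is invalidated, and layout runs again. Per
// PR35809, the fragment is never shrunk back.)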
bool RelaxedFrag = false; switch(I->getKind()) { default: break; case MCFragment::FT_Relaxable: assert(!getRelaxAll() && "Did not expect a MCRelaxableFragment in RelaxAll mode"); RelaxedFrag = relaxInstruction(Layout, *cast(I)); break; case MCFragment::FT_Dwarf: RelaxedFrag = relaxDwarfLineAddr(Layout, *cast(I)); break; case MCFragment::FT_DwarfFrame: RelaxedFrag = relaxDwarfCallFrameFragment(Layout, *cast(I)); break; case MCFragment::FT_LEB: RelaxedFrag = relaxLEB(Layout, *cast(I)); break; case MCFragment::FT_BoundaryAlign: RelaxedFrag = relaxBoundaryAlign(Layout, *cast(I)); break; case MCFragment::FT_CVInlineLines: RelaxedFrag = relaxCVInlineLineTable(Layout, *cast(I)); break; case MCFragment::FT_CVDefRange: RelaxedFrag = relaxCVDefRange(Layout, *cast(I)); break; } if (RelaxedFrag && !FirstRelaxedFragment) FirstRelaxedFragment = &*I; } if (FirstRelaxedFragment) { Layout.invalidateFragmentsFrom(FirstRelaxedFragment); return true; } return false; } bool MCAssembler::layoutOnce(MCAsmLayout &Layout) { ++stats::RelaxationSteps; bool WasRelaxed = false; for (iterator it = begin(), ie = end(); it != ie; ++it) { MCSection &Sec = *it; while (layoutSectionOnce(Layout, Sec)) WasRelaxed = true; } return WasRelaxed; } void MCAssembler::finishLayout(MCAsmLayout &Layout) { assert(getBackendPtr() && "Expected assembler backend"); // The layout is done. Mark every fragment as valid. for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) { MCSection &Section = *Layout.getSectionOrder()[i]; Layout.getFragmentOffset(&*Section.getFragmentList().rbegin()); computeFragmentSize(Layout, *Section.getFragmentList().rbegin()); } getBackend().finishLayout(*this, Layout); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) LLVM_DUMP_METHOD void MCAssembler::dump() const{ raw_ostream &OS = errs(); OS << "dump(); } OS << "],\n"; OS << " Symbols:["; for (const_symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) { if (it != symbol_begin()) OS << ",\n "; OS << "("; it->dump(); OS << ", Index:" << it->getIndex() << ", "; OS << ")"; } OS << "]>\n"; } #endif Index: projects/clang1000-import/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp =================================================================== --- projects/clang1000-import/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp (revision 358395) +++ projects/clang1000-import/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp (revision 358396) @@ -1,1344 +1,1342 @@ //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//

 #include "MCTargetDesc/ARMAsmBackend.h"
 #include "MCTargetDesc/ARMAddressingModes.h"
 #include "MCTargetDesc/ARMAsmBackendDarwin.h"
 #include "MCTargetDesc/ARMAsmBackendELF.h"
 #include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
 #include "MCTargetDesc/ARMFixupKinds.h"
 #include "MCTargetDesc/ARMMCTargetDesc.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/BinaryFormat/ELF.h"
 #include "llvm/BinaryFormat/MachO.h"
 #include "llvm/MC/MCAsmBackend.h"
 #include "llvm/MC/MCAssembler.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCDirectives.h"
 #include "llvm/MC/MCELFObjectWriter.h"
 #include "llvm/MC/MCExpr.h"
 #include "llvm/MC/MCFixupKindInfo.h"
 #include "llvm/MC/MCObjectWriter.h"
 #include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/MC/MCSectionELF.h"
 #include "llvm/MC/MCSectionMachO.h"
 #include "llvm/MC/MCSubtargetInfo.h"
 #include "llvm/MC/MCValue.h"
 #include "llvm/MC/MCAsmLayout.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/EndianStream.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/Format.h"
 #include "llvm/Support/TargetParser.h"
 #include "llvm/Support/raw_ostream.h"

 using namespace llvm;

 namespace {

 class ARMELFObjectWriter : public MCELFObjectTargetWriter {
 public:
   ARMELFObjectWriter(uint8_t OSABI)
       : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                 /*HasRelocationAddend*/ false) {}
 };

 } // end anonymous namespace

 Optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
   if (STI.getTargetTriple().isOSBinFormatELF() && Name == "R_ARM_NONE")
     return FK_NONE;
   return MCAsmBackend::getFixupKind(Name);
 }

 const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
+  unsigned IsPCRelConstant =
+      MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant;
   const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
       // This table *must* be in the order that the fixup_* kinds are defined in
       // ARMFixupKinds.h.
       //
       // Name                      Offset (bits) Size (bits)     Flags
-      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+      {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
       {"fixup_t2_ldst_pcrel_12", 0, 32,
-       MCFixupKindInfo::FKF_IsPCRel |
-           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
-      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
-      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+      {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
+      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
       {"fixup_t2_pcrel_10", 0, 32,
        MCFixupKindInfo::FKF_IsPCRel |
            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_t2_pcrel_9", 0, 32,
-       MCFixupKindInfo::FKF_IsPCRel |
-           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
       {"fixup_thumb_adr_pcrel_10", 0, 8,
-       MCFixupKindInfo::FKF_IsPCRel |
-           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
-      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
       {"fixup_t2_adr_pcrel_12", 0, 32,
-       MCFixupKindInfo::FKF_IsPCRel |
-           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
       {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_arm_thumb_blx", 0, 32,
        MCFixupKindInfo::FKF_IsPCRel |
            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_arm_thumb_cp", 0, 8,
        MCFixupKindInfo::FKF_IsPCRel |
            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
       {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
       // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12,
       // 16 - 19.
       {"fixup_arm_movt_hi16", 0, 20, 0},
       {"fixup_arm_movw_lo16", 0, 20, 0},
       {"fixup_t2_movt_hi16", 0, 20, 0},
       {"fixup_t2_movw_lo16", 0, 20, 0},
       {"fixup_arm_mod_imm", 0, 12, 0},
       {"fixup_t2_so_imm", 0, 26, 0},
       {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_bfcsel_else_target", 0, 32, 0},
       {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
       {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
   const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
       // This table *must* be in the order that the fixup_* kinds are defined in
       // ARMFixupKinds.h.
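       // (Hedged observation: only the little-endian table above gains
       // FKF_Constant here; the big-endian entries below keep plain
       // FKF_IsPCRel for the same fixup kinds.)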
// // Name Offset (bits) Size (bits) Flags {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_t2_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_t2_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_t2_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, {"fixup_thumb_adr_pcrel_10", 8, 8, MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_t2_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_arm_thumb_cp", 8, 8, MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel}, // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16 // - 19. {"fixup_arm_movt_hi16", 12, 20, 0}, {"fixup_arm_movw_lo16", 12, 20, 0}, {"fixup_t2_movt_hi16", 12, 20, 0}, {"fixup_t2_movw_lo16", 12, 20, 0}, {"fixup_arm_mod_imm", 20, 12, 0}, {"fixup_t2_so_imm", 26, 6, 0}, {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_bfcsel_else_target", 0, 32, 0}, {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel} }; if (Kind < FirstTargetFixupKind) return MCAsmBackend::getFixupKindInfo(Kind); assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && "Invalid kind!"); return (Endian == support::little ? InfosLE : InfosBE)[Kind - FirstTargetFixupKind]; } void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) { switch (Flag) { default: break; case MCAF_Code16: setIsThumb(true); break; case MCAF_Code32: setIsThumb(false); break; } } unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op, const MCSubtargetInfo &STI) const { bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2]; bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps]; switch (Op) { default: return Op; case ARM::tBcc: return HasThumb2 ? (unsigned)ARM::t2Bcc : Op; case ARM::tLDRpci: return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op; case ARM::tADR: return HasThumb2 ? (unsigned)ARM::t2ADR : Op; case ARM::tB: return HasV8MBaselineOps ? 
(unsigned)ARM::t2B : Op; case ARM::tCBZ: return ARM::tHINT; case ARM::tCBNZ: return ARM::tHINT; } } bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst, const MCSubtargetInfo &STI) const { if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode()) return true; return false; } static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) { int64_t Offset = int64_t(Value) - 4; if (Offset < Min || Offset > Max) return "out of range pc-relative fixup value"; return nullptr; } const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup, uint64_t Value) const { switch (Fixup.getTargetKind()) { case ARM::fixup_arm_thumb_br: { // Relaxing tB to t2B. tB has a signed 12-bit displacement with the // low bit being an implied zero. There's an implied +4 offset for the // branch, so we adjust the other way here to determine what's // encodable. // // Relax if the value is too big for a (signed) i8. int64_t Offset = int64_t(Value) - 4; if (Offset > 2046 || Offset < -2048) return "out of range pc-relative fixup value"; break; } case ARM::fixup_arm_thumb_bcc: { // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the // low bit being an implied zero. There's an implied +4 offset for the // branch, so we adjust the other way here to determine what's // encodable. // // Relax if the value is too big for a (signed) i8. int64_t Offset = int64_t(Value) - 4; if (Offset > 254 || Offset < -256) return "out of range pc-relative fixup value"; break; } case ARM::fixup_thumb_adr_pcrel_10: case ARM::fixup_arm_thumb_cp: { // If the immediate is negative, greater than 1020, or not a multiple // of four, the wide version of the instruction must be used. int64_t Offset = int64_t(Value) - 4; if (Offset & 3) return "misaligned pc-relative fixup value"; else if (Offset > 1020 || Offset < 0) return "out of range pc-relative fixup value"; break; } case ARM::fixup_arm_thumb_cb: { // If we have a Thumb CBZ or CBNZ instruction and its target is the next // instruction it is actually out of range for the instruction. // It will be changed to a NOP. int64_t Offset = (Value & ~1); if (Offset == 2) return "will be converted to nop"; break; } case ARM::fixup_bf_branch: return checkPCRelOffset(Value, 0, 30); case ARM::fixup_bf_target: return checkPCRelOffset(Value, -0x10000, +0xfffe); case ARM::fixup_bfl_target: return checkPCRelOffset(Value, -0x40000, +0x3fffe); case ARM::fixup_bfc_target: return checkPCRelOffset(Value, -0x1000, +0xffe); case ARM::fixup_wls: return checkPCRelOffset(Value, 0, +0xffe); case ARM::fixup_le: // The offset field in the LE and LETP instructions is an 11-bit // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is // interpreted as a negative offset from the value read from pc, // i.e. from instruction_address+4. // // So an LE instruction can in principle address the instruction // immediately after itself, or (not very usefully) the address // half way through the 4-byte LE. 
return checkPCRelOffset(Value, -0xffe, 0); case ARM::fixup_bfcsel_else_target: { if (Value != 2 && Value != 4) return "out of range label-relative fixup value"; break; } default: llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!"); } return nullptr; } bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, const MCRelaxableFragment *DF, const MCAsmLayout &Layout) const { return reasonForFixupRelaxation(Fixup, Value); } void ARMAsmBackend::relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, MCInst &Res) const { unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI); // Sanity check w/ diagnostic if we get here w/ a bogus instruction. if (RelaxedOp == Inst.getOpcode()) { SmallString<256> Tmp; raw_svector_ostream OS(Tmp); Inst.dump_pretty(OS); OS << "\n"; report_fatal_error("unexpected instruction to relax: " + OS.str()); } // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we // have to change the operands too. if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) && RelaxedOp == ARM::tHINT) { Res.setOpcode(RelaxedOp); Res.addOperand(MCOperand::createImm(0)); Res.addOperand(MCOperand::createImm(14)); Res.addOperand(MCOperand::createReg(0)); return; } // The rest of instructions we're relaxing have the same operands. // We just need to update to the proper opcode. Res = Inst; Res.setOpcode(RelaxedOp); } bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const { const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8 const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP const uint32_t ARMv4_NopEncoding = 0xe1a00000; // using MOV r0,r0 const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP if (isThumb()) { const uint16_t nopEncoding = hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding; uint64_t NumNops = Count / 2; for (uint64_t i = 0; i != NumNops; ++i) support::endian::write(OS, nopEncoding, Endian); if (Count & 1) OS << '\0'; return true; } // ARM mode const uint32_t nopEncoding = hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding; uint64_t NumNops = Count / 4; for (uint64_t i = 0; i != NumNops; ++i) support::endian::write(OS, nopEncoding, Endian); // FIXME: should this function return false when unable to write exactly // 'Count' bytes with NOP encodings? switch (Count % 4) { default: break; // No leftover bytes to write case 1: OS << '\0'; break; case 2: OS.write("\0\0", 2); break; case 3: OS.write("\0\0\xa0", 3); break; } return true; } static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) { if (IsLittleEndian) { // Note that the halfwords are stored high first and low second in thumb; // so we need to swap the fixup value here to map properly. uint32_t Swapped = (Value & 0xFFFF0000) >> 16; Swapped |= (Value & 0x0000FFFF) << 16; return Swapped; } else return Value; } static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf, bool IsLittleEndian) { uint32_t Value; if (IsLittleEndian) { Value = (SecondHalf & 0xFFFF) << 16; Value |= (FirstHalf & 0xFFFF); } else { Value = (SecondHalf & 0xFFFF); Value |= (FirstHalf & 0xFFFF) << 16; } return Value; } unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, uint64_t Value, bool IsResolved, MCContext &Ctx, const MCSubtargetInfo* STI) const { unsigned Kind = Fixup.getKind(); // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT // and .word relocations they put the Thumb bit into the addend if possible. 
// Other relocation types don't want this bit though (branches couldn't encode // it if it *was* present, and no other relocations exist) and it can // interfere with checking valid expressions. if (const MCSymbolRefExpr *A = Target.getSymA()) { if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) && A->getSymbol().isExternal() && (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 || Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 || Kind == ARM::fixup_t2_movt_hi16)) Value |= 1; } switch (Kind) { default: Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type"); return 0; case FK_NONE: case FK_Data_1: case FK_Data_2: case FK_Data_4: return Value; case FK_SecRel_2: return Value; case FK_SecRel_4: return Value; case ARM::fixup_arm_movt_hi16: assert(STI != nullptr); if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) Value >>= 16; LLVM_FALLTHROUGH; case ARM::fixup_arm_movw_lo16: { unsigned Hi4 = (Value & 0xF000) >> 12; unsigned Lo12 = Value & 0x0FFF; // inst{19-16} = Hi4; // inst{11-0} = Lo12; Value = (Hi4 << 16) | (Lo12); return Value; } case ARM::fixup_t2_movt_hi16: assert(STI != nullptr); if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) Value >>= 16; LLVM_FALLTHROUGH; case ARM::fixup_t2_movw_lo16: { unsigned Hi4 = (Value & 0xF000) >> 12; unsigned i = (Value & 0x800) >> 11; unsigned Mid3 = (Value & 0x700) >> 8; unsigned Lo8 = Value & 0x0FF; // inst{19-16} = Hi4; // inst{26} = i; // inst{14-12} = Mid3; // inst{7-0} = Lo8; Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8); return swapHalfWords(Value, Endian == support::little); } case ARM::fixup_arm_ldst_pcrel_12: // ARM PC-relative values are offset by 8. Value -= 4; LLVM_FALLTHROUGH; case ARM::fixup_t2_ldst_pcrel_12: { // Offset by 4, adjusted by two due to the half-word ordering of thumb. Value -= 4; bool isAdd = true; if ((int64_t)Value < 0) { Value = -Value; isAdd = false; } if (Value >= 4096) { Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); return 0; } Value |= isAdd << 23; // Same addressing mode as fixup_arm_pcrel_10, // but with 16-bit halfwords swapped. if (Kind == ARM::fixup_t2_ldst_pcrel_12) return swapHalfWords(Value, Endian == support::little); return Value; } case ARM::fixup_arm_adr_pcrel_12: { // ARM PC-relative values are offset by 8. Value -= 8; unsigned opc = 4; // bits {24-21}. Default to add: 0b0100 if ((int64_t)Value < 0) { Value = -Value; opc = 2; // 0b0010 } if (ARM_AM::getSOImmVal(Value) == -1) { Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); return 0; } // Encode the immediate and shift the opcode into place. return ARM_AM::getSOImmVal(Value) | (opc << 21); } case ARM::fixup_t2_adr_pcrel_12: { Value -= 4; unsigned opc = 0; if ((int64_t)Value < 0) { Value = -Value; opc = 5; } uint32_t out = (opc << 21); out |= (Value & 0x800) << 15; out |= (Value & 0x700) << 4; out |= (Value & 0x0FF); return swapHalfWords(out, Endian == support::little); } case ARM::fixup_arm_condbranch: case ARM::fixup_arm_uncondbranch: case ARM::fixup_arm_uncondbl: case ARM::fixup_arm_condbl: case ARM::fixup_arm_blx: // These values don't encode the low two bits since they're always zero. // Offset by 8 just as above. 
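// (Hedged worked example for the branch cases below: an ARM-mode branch
// whose computed Value is 0x28 encodes 0xffffff & ((0x28 - 8) >> 2) = 0x8
// in the 24-bit immediate field; the low two bits are implicit zeroes and
// the -8 reflects ARM's PC-ahead-by-8 pipeline convention.)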
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8.
    Value -= 4;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_ldst_pcrel_12: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Value >= 4096) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (ARM_AM::getSOImmVal(Value) == -1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, Endian == support::little);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        return 0;
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    if (!isInt<25>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    if (!isInt<21>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_arm_thumb_bl: {
    if (!isInt<25>(Value - 4) ||
        (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
         !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
         !STI->getFeatureBits()[ARM::HasV6MOps] &&
         !isInt<23>(Value - 4))) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
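  // Worked example for the BL bit scrambling above (illustrative only): a
  // forward branch with Value - 4 == 0x100000 gives offset == 0x80000, so
  //   signBit = 0, I1 = 0, I2 = 0  =>  J1 = 1, J2 = 1
  //   imm10 = 0x100, imm11 = 0
  //   FirstHalf = 0x0100, SecondHalf = 0x2800
  // and joinHalfWords() produces 0x28000100 on a little-endian target.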
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    if (Value % 4 != 0) {
      Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
      return 0;
    }

    uint32_t offset = (Value - 4) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // CB instructions can only branch to offsets in [4, 126] in multiples of 2
    // so ensure that the raw value LSB is zero and it lies in [2, 130].
    // An offset of 2 will be relaxed to a NOP.
    if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
        !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
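  // Worked example for the split-immediate form above (illustrative only):
  // an adjusted Value of 0xAB splits into nibbles 0xA and 0xB, giving
  //   (0xAB & 0xf) | ((0xAB & 0xf0) << 4) == 0xA0B
  // with the U (add) bit then set at bit 23.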
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    Value = ARM_AM::getSOImmVal(Value);
    if (Value >> 12) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  case ARM::fixup_t2_so_imm: {
    Value = ARM_AM::getT2SOImmVal(Value);
    if ((int64_t)Value < 0) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    // Value will contain a 12-bit value broken up into a 4-bit shift in bits
    // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
    // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
    // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
    // half-word.
    uint64_t EncValue = 0;
    EncValue |= (Value & 0x800) << 15;
    EncValue |= (Value & 0x700) << 4;
    EncValue |= (Value & 0xff);
    return swapHalfWords(EncValue, Endian == support::little);
  }
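  // Worked example for fixup_arm_mod_imm above (illustrative only): the ARM
  // modified immediate is an 8-bit value rotated right by an even amount, so
  // Value == 0x3FC (0xFF rotated right by 30) encodes as
  //   getSOImmVal(0x3FC) == (15 << 8) | 0xFF == 0xFFF
  // i.e. rotate field 15 and immediate 0xFF, which fits in 12 bits.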
  case ARM::fixup_bf_branch: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = 0;
    uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800
                            : Kind == ARM::fixup_bfl_target ? 0x3f800
                                                            : 0x800);
    out |= (((Value - 4) >> 1) & 0x1) << 11;
    out |= (((Value - 4) >> 1) & 0x7fe);
    out |= (((Value - 4) >> 1) & HighBitMask) << 5;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_bfcsel_else_target: {
    // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch targeted
    // and the instruction after that same branch.
    Value = Target.getConstant();

    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = ((Value >> 2) & 1) << 17;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_wls:
  case ARM::fixup_le: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint64_t real_value = Value - 4;
    uint32_t out = 0;
    if (Kind == ARM::fixup_le)
      real_value = -real_value;
    out |= ((real_value >> 1) & 0x1) << 11;
    out |= ((real_value >> 1) & 0x7fe);
    return swapHalfWords(out, Endian == support::little);
  }
  }
}

bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                          const MCFixup &Fixup,
                                          const MCValue &Target) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  const unsigned FixupKind = Fixup.getKind();
  if (FixupKind == FK_NONE)
    return true;
  if (FixupKind == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal())
      return true;
  }
  // Create relocations for unconditional branches to function symbols with
  // different execution mode in ELF binaries.
  if (Sym && Sym->isELF()) {
    unsigned Type = cast<MCSymbolELF>(Sym)->getType();
    if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
      if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
        return true;
      if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
                                    FixupKind == ARM::fixup_arm_thumb_bl ||
                                    FixupKind == ARM::fixup_t2_condbranch ||
                                    FixupKind == ARM::fixup_t2_uncondbranch))
        return true;
    }
  }

  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
            FixupKind == ARM::fixup_arm_blx ||
            FixupKind == ARM::fixup_arm_uncondbl ||
            FixupKind == ARM::fixup_arm_condbl))
    return true;
  return false;
}
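// Example (illustrative only): for a Thumb call to an external symbol,
//     bl  external_func     @ fixup_arm_thumb_bl
// shouldForceRelocation() returns true, so the fixup is never resolved at
// assembly time and a relocation (e.g. R_ARM_THM_CALL on ELF) is emitted,
// letting the linker pick BL or BLX based on the callee's thumb-ness.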
/// getFixupKindNumBytes - The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_NONE:
    return 0;

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_mod_imm:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_NONE:
    return 0;

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    // Instruction size is 4 bytes.
    return 4;
  }
}
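// A minimal sketch of the byte-patching loop inside applyFixup() below,
// specialized to little-endian (illustrative only; the free-standing helper
// and its name patchValueLE are not part of this file). Each byte of the
// already-adjusted fixup value is OR'd into the fragment data at the fixup
// offset, least significant byte first.
LLVM_ATTRIBUTE_UNUSED
static void patchValueLE(MutableArrayRef<char> Data, unsigned Offset,
                         unsigned NumBytes, uint64_t Value) {
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
}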
void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved,
                               const MCSubtargetInfo *STI) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  if (!Value)
    return; // Doesn't change encoding.

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeBytes;
  if (Endian == support::big) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
    assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK = 0x0F000000,
  UNWIND_ARM_MODE_FRAME = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D = 0x02000000,
  UNWIND_ARM_MODE_DWARF = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace
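// Worked example (illustrative only): a standard r7 frame with an 8-byte
// var-args stack adjust that pushes r4-r6 encodes as
//   UNWIND_ARM_MODE_FRAME | 0x00800000 /* adjust 8 */ |
//   UNWIND_ARM_FRAME_FIRST_PUSH_R4 | UNWIND_ARM_FRAME_FIRST_PUSH_R5 |
//   UNWIND_ARM_FRAME_FIRST_PUSH_R6  == 0x01800007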
DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "CFI directive not compatiable with comact " "unwind encoding, opcode=" << Inst.getOperation() << "\n"); return CU::UNWIND_ARM_MODE_DWARF; break; } } // If no frame set up, return no unwind info. if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0)) return 0; // Verify standard frame (lr/r7) was used. if (CFARegister != ARM::R7) { DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is " << CFARegister << " instead of r7\n"); return CU::UNWIND_ARM_MODE_DWARF; } int StackAdjust = CFARegisterOffset - 8; if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) { DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "LR not saved as standard frame, StackAdjust=" << StackAdjust << ", CFARegisterOffset=" << CFARegisterOffset << ", lr save at offset=" << RegOffsets[14] << "\n"); return CU::UNWIND_ARM_MODE_DWARF; } if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) { DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "r7 not saved as standard frame\n"); return CU::UNWIND_ARM_MODE_DWARF; } uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME; // If var-args are used, there may be a stack adjust required. switch (StackAdjust) { case 0: break; case 4: CompactUnwindEncoding |= 0x00400000; break; case 8: CompactUnwindEncoding |= 0x00800000; break; case 12: CompactUnwindEncoding |= 0x00C00000; break; default: DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << ".cfi_def_cfa stack adjust (" << StackAdjust << ") out of range\n"); return CU::UNWIND_ARM_MODE_DWARF; } // If r6 is saved, it must be right below r7. static struct { unsigned Reg; unsigned Encoding; } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6}, {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5}, {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4}, {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12}, {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11}, {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10}, {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9}, {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}}; int CurOffset = -8 - StackAdjust; for (auto CSReg : GPRCSRegs) { auto Offset = RegOffsets.find(CSReg.Reg); if (Offset == RegOffsets.end()) continue; int RegOffset = Offset->second; if (RegOffset != CurOffset - 4) { DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at " << RegOffset << " but only supported at " << CurOffset << "\n"); return CU::UNWIND_ARM_MODE_DWARF; } CompactUnwindEncoding |= CSReg.Encoding; CurOffset -= 4; } // If no floats saved, we are done. if (FloatRegCount == 0) return CompactUnwindEncoding; // Switch mode to include D register saving. CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK; CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D; // FIXME: supporting more than 4 saved D-registers compactly would be trivial, // but needs coordination with the linker and libunwind. if (FloatRegCount > 4) { DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "unsupported number of D registers saved (" << FloatRegCount << ")\n"); return CU::UNWIND_ARM_MODE_DWARF; } // Floating point registers must either be saved sequentially, or we defer to // DWARF. No gaps allowed here so check that each saved d-register is // precisely where it should be. 
  static unsigned FPRCSRegs[] = {ARM::D8, ARM::D10, ARM::D12, ARM::D14};
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
  ARM::ArchKind AK = ARM::parseArch(Arch);
  switch (AK) {
  default:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::ArchKind::ARMV4T:
    return MachO::CPU_SUBTYPE_ARM_V4T;
  case ARM::ArchKind::ARMV5T:
  case ARM::ArchKind::ARMV5TE:
  case ARM::ArchKind::ARMV5TEJ:
    return MachO::CPU_SUBTYPE_ARM_V5;
  case ARM::ArchKind::ARMV6:
  case ARM::ArchKind::ARMV6K:
    return MachO::CPU_SUBTYPE_ARM_V6;
  case ARM::ArchKind::ARMV7A:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::ArchKind::ARMV7S:
    return MachO::CPU_SUBTYPE_ARM_V7S;
  case ARM::ArchKind::ARMV7K:
    return MachO::CPU_SUBTYPE_ARM_V7K;
  case ARM::ArchKind::ARMV6M:
    return MachO::CPU_SUBTYPE_ARM_V6M;
  case ARM::ArchKind::ARMV7M:
    return MachO::CPU_SUBTYPE_ARM_V7M;
  case ARM::ArchKind::ARMV7EM:
    return MachO::CPU_SUBTYPE_ARM_V7EM;
  }
}

static MCAsmBackend *createARMAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
                                         const MCTargetOptions &Options,
                                         support::endianness Endian) {
  const Triple &TheTriple = STI.getTargetTriple();
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO: {
    MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
    return new ARMAsmBackendDarwin(T, STI, MRI, CS);
  }
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, STI);
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, STI, OSABI, Endian);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::little);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::big);
}
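// For reference (illustrative sketch; the actual registration lives in
// ARMMCTargetDesc.cpp, not in this file): the two factories above are hooked
// into the target registry roughly like so.
//
//   for (Target *T : {&getTheARMLETarget(), &getTheThumbLETarget()})
//     TargetRegistry::RegisterMCAsmBackend(*T, createARMLEAsmBackend);
//   for (Target *T : {&getTheARMBETarget(), &getTheThumbBETarget()})
//     TargetRegistry::RegisterMCAsmBackend(*T, createARMBEAsmBackend);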