//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedOrgFragments,
          "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

}
}

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.

/* *** */

MCAsmLayout::MCAsmLayout(MCAssembler &Asm)
  : Assembler(Asm), LastValidFragment()
{
  // Compute the section layout order. Virtual sections must go last.
  for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
    if (!it->isVirtualSection())
      SectionOrder.push_back(&*it);
  for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
    if (it->isVirtualSection())
      SectionOrder.push_back(&*it);
}
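
// A fragment's offset is "valid" once it has been computed and nothing before
// it in its section has changed since. LastValidFragment tracks, per section,
// the last fragment whose offset is still up to date.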
bool MCAsmLayout::isFragmentValid(const MCFragment *F) const {
  const MCSection *Sec = F->getParent();
  const MCFragment *LastValid = LastValidFragment.lookup(Sec);
  if (!LastValid)
    return false;
  assert(LastValid->getParent() == Sec);
  return F->getLayoutOrder() <= LastValid->getLayoutOrder();
}

void MCAsmLayout::invalidateFragmentsFrom(MCFragment *F) {
  // If this fragment wasn't already valid, we don't need to do anything.
  if (!isFragmentValid(F))
    return;

  // Otherwise, reset the last valid fragment to the previous fragment
  // (if this is the first fragment, it will be NULL).
  LastValidFragment[F->getParent()] = F->getPrevNode();
}

void MCAsmLayout::ensureValid(const MCFragment *F) const {
  MCSection *Sec = F->getParent();
  MCFragment *Cur = LastValidFragment[Sec];
  if (!Cur)
    Cur = Sec->begin();
  else
    Cur = Cur->getNextNode();

  // Advance the layout position until the fragment is valid.
  while (!isFragmentValid(F)) {
    assert(Cur && "Layout bookkeeping error");
    const_cast<MCAsmLayout *>(this)->layoutFragment(Cur);
    Cur = Cur->getNextNode();
  }
}

uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const {
  ensureValid(F);
  assert(F->Offset != ~UINT64_C(0) && "Address not set!");
  return F->Offset;
}

// Simple getSymbolOffset helper for the non-variable case.
static bool getLabelOffset(const MCAsmLayout &Layout, const MCSymbol &S,
                           bool ReportError, uint64_t &Val) {
  if (!S.getFragment()) {
    if (ReportError)
      report_fatal_error("unable to evaluate offset to undefined symbol '" +
                         S.getName() + "'");
    return false;
  }
  Val = Layout.getFragmentOffset(S.getFragment()) + S.getOffset();
  return true;
}
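
// For variable symbols, compute the offset from the evaluated relocatable
// value: constant + offset(SymA) - offset(SymB).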
static bool getSymbolOffsetImpl(const MCAsmLayout &Layout, const MCSymbol &S,
                                bool ReportError, uint64_t &Val) {
  if (!S.isVariable())
    return getLabelOffset(Layout, S, ReportError, Val);

  // If SD is a variable, evaluate it.
  MCValue Target;
  if (!S.getVariableValue()->evaluateAsRelocatable(Target, &Layout, nullptr))
    report_fatal_error("unable to evaluate offset for variable '" +
                       S.getName() + "'");

  uint64_t Offset = Target.getConstant();

  const MCSymbolRefExpr *A = Target.getSymA();
  if (A) {
    uint64_t ValA;
    if (!getLabelOffset(Layout, A->getSymbol(), ReportError, ValA))
      return false;
    Offset += ValA;
  }

  const MCSymbolRefExpr *B = Target.getSymB();
  if (B) {
    uint64_t ValB;
    if (!getLabelOffset(Layout, B->getSymbol(), ReportError, ValB))
      return false;
    Offset -= ValB;
  }

  Val = Offset;
  return true;
}

bool MCAsmLayout::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
  return getSymbolOffsetImpl(*this, S, false, Val);
}

uint64_t MCAsmLayout::getSymbolOffset(const MCSymbol &S) const {
  uint64_t Val;
  getSymbolOffsetImpl(*this, S, true, Val);
  return Val;
}
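
// Resolve a (possibly variable) symbol down to the underlying symbol it is
// based on, or nullptr if it evaluates to a plain constant.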
const MCSymbol *MCAsmLayout::getBaseSymbol(const MCSymbol &Symbol) const {
  if (!Symbol.isVariable())
    return &Symbol;

  const MCExpr *Expr = Symbol.getVariableValue();
  MCValue Value;
  if (!Expr->evaluateAsValue(Value, *this))
    llvm_unreachable("Invalid Expression");

  const MCSymbolRefExpr *RefB = Value.getSymB();
  if (RefB)
    Assembler.getContext().reportFatalError(
        SMLoc(), Twine("symbol '") + RefB->getSymbol().getName() +
                     "' could not be evaluated in a subtraction expression");

  const MCSymbolRefExpr *A = Value.getSymA();
  if (!A)
    return nullptr;

  const MCSymbol &ASym = A->getSymbol();
  const MCAssembler &Asm = getAssembler();
  if (ASym.isCommon()) {
    // FIXME: we should probably add a SMLoc to MCExpr.
    Asm.getContext().reportFatalError(SMLoc(),
                                      "Common symbol " + ASym.getName() +
                                          " cannot be used in assignment expr");
  }

  return &ASym;
}

uint64_t MCAsmLayout::getSectionAddressSize(const MCSection *Sec) const {
  // The size is the last fragment's end offset.
  const MCFragment &F = Sec->getFragmentList().back();
  return getFragmentOffset(&F) + getAssembler().computeFragmentSize(*this, F);
}

uint64_t MCAsmLayout::getSectionFileSize(const MCSection *Sec) const {
  // Virtual sections have no file size.
  if (Sec->isVirtualSection())
    return 0;

  // Otherwise, the file size is the same as the address space size.
  return getSectionAddressSize(Sec);
}
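
// Compute how many padding bytes must be inserted before fragment F (placed
// at offset FOffset with size FSize) to satisfy the bundling restrictions
// described below.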
uint64_t llvm::computeBundlePadding(const MCAssembler &Assembler,
                                    const MCFragment *F,
                                    uint64_t FOffset, uint64_t FSize) {
  uint64_t BundleSize = Assembler.getBundleAlignSize();
  assert(BundleSize > 0 &&
         "computeBundlePadding should only be called if bundling is enabled");
  uint64_t BundleMask = BundleSize - 1;
  uint64_t OffsetInBundle = FOffset & BundleMask;
  uint64_t EndOfFragment = OffsetInBundle + FSize;

  // There are two kinds of bundling restrictions:
  //
  // 1) For alignToBundleEnd(), add padding to ensure that the fragment will
  //    *end* on a bundle boundary.
  // 2) Otherwise, check if the fragment would cross a bundle boundary. If it
  //    would, add padding until the end of the bundle so that the fragment
  //    will start in a new one.
  if (F->alignToBundleEnd()) {
    // Three possibilities here:
    //
    // A) The fragment just happens to end at a bundle boundary, so we're good.
    // B) The fragment ends before the current bundle boundary: pad it just
    //    enough to reach the boundary.
    // C) The fragment ends after the current bundle boundary: pad it until it
    //    reaches the end of the next bundle boundary.
    //
    // Note: this code could be made shorter with some modulo trickery, but it's
    // intentionally kept in its more explicit form for simplicity.
    if (EndOfFragment == BundleSize)
      return 0;
    else if (EndOfFragment < BundleSize)
      return BundleSize - EndOfFragment;
    else { // EndOfFragment > BundleSize
      return 2 * BundleSize - EndOfFragment;
    }
  } else if (OffsetInBundle > 0 && EndOfFragment > BundleSize)
    return BundleSize - OffsetInBundle;
  else
    return 0;
}

/* *** */

void ilist_node_traits<MCFragment>::deleteNode(MCFragment *V) {
  V->destroy();
}

MCFragment::MCFragment() : Kind(FragmentType(~0)), HasInstructions(false),
                           AlignToBundleEnd(false), BundlePadding(0) {
}

MCFragment::~MCFragment() { }

MCFragment::MCFragment(FragmentType Kind, bool HasInstructions,
                       uint8_t BundlePadding, MCSection *Parent)
    : Kind(Kind), HasInstructions(HasInstructions), AlignToBundleEnd(false),
      BundlePadding(BundlePadding), Parent(Parent), Atom(nullptr),
      Offset(~UINT64_C(0)) {
  if (Parent)
    Parent->getFragmentList().push_back(this);
}

void MCFragment::destroy() {
  // First check if we are the sentinel.
  if (Kind == FragmentType(~0)) {
    delete this;
    return;
  }

  switch (Kind) {
    case FT_Align:
      delete cast<MCAlignFragment>(this);
      return;
    case FT_Data:
      delete cast<MCDataFragment>(this);
      return;
    case FT_CompactEncodedInst:
      delete cast<MCCompactEncodedInstFragment>(this);
      return;
    case FT_Fill:
      delete cast<MCFillFragment>(this);
      return;
    case FT_Relaxable:
      delete cast<MCRelaxableFragment>(this);
      return;
    case FT_Org:
      delete cast<MCOrgFragment>(this);
      return;
    case FT_Dwarf:
      delete cast<MCDwarfLineAddrFragment>(this);
      return;
    case FT_DwarfFrame:
      delete cast<MCDwarfCallFrameFragment>(this);
      return;
    case FT_LEB:
      delete cast<MCLEBFragment>(this);
      return;
    case FT_SafeSEH:
      delete cast<MCSafeSEHFragment>(this);
      return;
  }
}

/* *** */

MCAssembler::MCAssembler(MCContext &Context_, MCAsmBackend &Backend_,
                         MCCodeEmitter &Emitter_, MCObjectWriter &Writer_,
                         raw_ostream &OS_)
    : Context(Context_), Backend(Backend_), Emitter(Emitter_), Writer(Writer_),
      OS(OS_), BundleAlignSize(0), RelaxAll(false),
      SubsectionsViaSymbols(false), ELFHeaderEFlags(0) {
  VersionMinInfo.Major = 0; // Major version == 0 for "none specified"
}

MCAssembler::~MCAssembler() {
}

void MCAssembler::reset() {
  Sections.clear();
  Symbols.clear();
  IndirectSymbols.clear();
  DataRegions.clear();
  LinkerOptions.clear();
  FileNames.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;
  RelaxAll = false;
  SubsectionsViaSymbols = false;
  ELFHeaderEFlags = 0;
  LOHContainer.reset();
  VersionMinInfo.Major = 0;

  // reset objects owned by us
  getBackend().reset();
  getEmitter().reset();
  getWriter().reset();
  getLOHContainer().reset();
}
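
// A symbol is a Thumb function if it is in the ThumbFuncs set, or if it is a
// plain (VK_None) alias of a symbol that is; such aliases are cached in the
// set as they are discovered.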
bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  // FIXME: It looks like gas supports some cases of the form "foo + 2". It
  // is not clear if that is a bug or a feature.
  const MCExpr *Expr = Symbol->getVariableValue();
  const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Expr);
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
  // Non-temporary labels should always be visible to the linker.
  if (!Symbol.isTemporary())
    return true;

  // Absolute temporary labels are never visible.
  if (!Symbol.isInSection())
    return false;

  if (Symbol.isUsedInReloc())
    return true;

  return false;
}

const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
  // Linker visible symbols define atoms.
  if (isSymbolLinkerVisible(S))
    return &S;

  // Absolute and undefined symbols have no defining atom.
  if (!S.getFragment())
    return nullptr;

  // Non-linker visible symbols in sections which can't be atomized have no
  // defining atom.
  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
          *S.getFragment()->getParent()))
    return nullptr;

  // Otherwise, return the atom for the containing fragment.
  return S.getFragment()->getAtom();
}
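
// Evaluate a fixup to a relocatable expression and a fixed value, and return
// whether the value is fully resolved (i.e. no relocation is needed). The
// backend gets a final chance to adjust the value via processFixupValue().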
bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
                                const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.
  const MCExpr *Expr = Fixup.getValue();
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup))
    getContext().reportFatalError(Fixup.getLoc(),
                                  "expected relocatable expression");

  bool IsPCRel = Backend.getFixupKindInfo(
    Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else {
        IsResolved = getWriter().isSymbolRefDifferenceFullyResolvedImpl(
            *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += Layout.getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= Layout.getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual offset.
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend adjust the fixup value if necessary, including whether
  // we need a relocation.
  Backend.processFixupValue(*this, Layout, Fixup, DF, Target, Value,
                            IsResolved);

  return IsResolved;
}
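
// Compute the on-disk size of a fragment under the current layout. Align and
// org fragments depend on their own offset, so this may trigger layout of the
// preceding fragments in the section.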
uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill:
    return cast<MCFillFragment>(F).getSize();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_SafeSEH:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = OffsetToAlignment(Offset, AF.getAlignment());
    // If we are padding with nops, force the padding to be a multiple of the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    int64_t TargetLocation;
    if (!OF.getOffset().evaluateAsAbsolute(TargetLocation, Layout))
      report_fatal_error("expected assembly-time absolute expression");

    // FIXME: We need a way to communicate this error.
    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000)
      report_fatal_error("invalid .org offset '" + Twine(TargetLocation) +
                         "' (at offset '" + Twine(FragmentOffset) + "')");
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  }

  llvm_unreachable("invalid fragment kind");
}
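
// Set the offset of F, assuming its predecessor has already been laid out.
// When bundling is enabled, this also computes and applies any bundle padding
// required before the fragment.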
void MCAsmLayout::layoutFragment(MCFragment *F) {
  MCFragment *Prev = F->getPrevNode();

  // We should never try to recompute something which is valid.
  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
  // We should never try to compute the fragment layout if its predecessor
  // isn't valid.
  assert((!Prev || isFragmentValid(Prev)) &&
         "Attempt to compute fragment before its predecessor!");

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  if (Prev)
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
  else
    F->Offset = 0;
  LastValidFragment[F->getParent()] = F;

  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                  F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
  // padding directly into fragments when the instructions are emitted inside
  // the streamer. When the fragment is larger than the bundle size, we need to
  // ensure that it's bundle aligned. This means that if we end up with
  // multiple fragments, we must emit bundle padding between fragments.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
    assert(isa<MCEncodedFragment>(F) &&
           "Only MCEncodedFragment implementations have instructions");
    uint64_t FSize = Assembler.computeFragmentSize(*this, *F);

    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
      report_fatal_error("Fragment can't be larger than a bundle size");

    uint64_t RequiredBundlePadding = computeBundlePadding(Assembler, F,
                                                          F->Offset, FSize);
    if (RequiredBundlePadding > UINT8_MAX)
      report_fatal_error("Padding cannot exceed 255 bytes");
    F->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
    F->Offset += RequiredBundlePadding;
  }
}

void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
  bool New = !Symbol.isRegistered();
  if (Created)
    *Created = New;
  if (New) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
}
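
// Write out any bundle NOP padding that must precede fragment F (of size
// FSize). If the padding itself would cross a bundle boundary, it is split
// into two NOP sequences so that no NOP crosses a boundary.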
void MCAssembler::writeFragmentPadding(const MCFragment &F, uint64_t FSize,
                                       MCObjectWriter *OW) const {
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = F.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(F.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    if (F.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //
      //        v--------------v   <- BundleAlignSize
      //   v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(DistanceToBoundary, OW))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(BundlePadding, OW))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}

/// \brief Write the fragment \p F to the output file.
static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout,
                          const MCFragment &F) {
  MCObjectWriter *OW = &Asm.getWriter();

  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  Asm.writeFragmentPadding(F, FragmentSize, OW);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OW->getStream().tell();
  (void) Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // If we are aligning with nops, ask the target to emit the right data for
    // the full Count bytes and we're done.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(Count, OW))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->write8 (uint8_t (AF.getValue())); break;
      case 2: OW->write16(uint16_t(AF.getValue())); break;
      case 4: OW->write32(uint32_t(AF.getValue())); break;
      case 8: OW->write64(uint64_t(AF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OW->writeBytes(cast<MCDataFragment>(F).getContents());
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OW->writeBytes(cast<MCRelaxableFragment>(F).getContents());
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OW->writeBytes(cast<MCCompactEncodedInstFragment>(F).getContents());
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    assert(FF.getValueSize() && "Invalid virtual fill in concrete fragment!");
    for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) {
      switch (FF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->write8 (uint8_t (FF.getValue())); break;
      case 2: OW->write16(uint16_t(FF.getValue())); break;
      case 4: OW->write32(uint32_t(FF.getValue())); break;
      case 8: OW->write64(uint64_t(FF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OW->writeBytes(LF.getContents());
    break;
  }

  case MCFragment::FT_SafeSEH: {
    const MCSafeSEHFragment &SF = cast<MCSafeSEHFragment>(F);
    OW->write32(SF.getSymbol()->getIndex());
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OW->write8(uint8_t(OF.getValue()));

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OW->writeBytes(OF.getContents());
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OW->writeBytes(CF.getContents());
    break;
  }
  }

  assert(OW->getStream().tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}

void MCAssembler::writeSectionData(const MCSection *Sec,
                                   const MCAsmLayout &Layout) const {
  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (MCSection::const_iterator it = Sec->begin(), ie = Sec->end();
         it != ie; ++it) {
      switch (it->getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write a non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use standard
        // directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(*it);
        assert(DF.fixup_begin() == DF.fixup_end() &&
               "Cannot have fixups in virtual section!");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            if (auto *ELFSec = dyn_cast<const MCSectionELF>(Sec))
              report_fatal_error("non-zero initializer found in section '" +
                                 ELFSec->getSectionName() + "'");
            else
              report_fatal_error("non-zero initializer found in virtual section");
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(it)->getValueSize() == 0 ||
                cast<MCAlignFragment>(it)->getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(it)->getValueSize() == 0 ||
                cast<MCFillFragment>(it)->getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      }
    }

    return;
  }

  uint64_t Start = getWriter().getStream().tell();
  (void)Start;

  for (MCSection::const_iterator it = Sec->begin(), ie = Sec->end();
       it != ie; ++it)
    writeFragment(*this, Layout, *it);

  assert(getWriter().getStream().tell() - Start ==
         Layout.getSectionAddressSize(Sec));
}
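
// Evaluate a single fixup. If it cannot be fully resolved, hand it to the
// object writer as a relocation. Returns the fixed-up value and whether the
// fixup is PC-relative.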
std::pair<uint64_t, bool> MCAssembler::handleFixup(const MCAsmLayout &Layout,
                                                   MCFragment &F,
                                                   const MCFixup &Fixup) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool IsPCRel = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
                 MCFixupKindInfo::FKF_IsPCRel;
  if (!evaluateFixup(Layout, Fixup, &F, Target, FixedValue)) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, IsPCRel,
                                 FixedValue);
  }
  return std::make_pair(FixedValue, IsPCRel);
}
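
// Top-level driver: assign section and fragment orderings, run layout and
// relaxation to a fixed point, apply fixups (recording relocations where
// needed), and finally emit the object file through the writer.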
void MCAssembler::Finish() {
  DEBUG_WITH_TYPE("mc-dump", {
      llvm::errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Create the layout object.
  MCAsmLayout Layout(*this);

  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
    // Create dummy fragments to eliminate any empty sections, this simplifies
    // layout.
    if (it->getFragmentList().empty())
      new MCDataFragment(&*it);

    it->setOrdinal(SectionIndex++);
  }

  // Assign layout order indices to sections and fragments.
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSection *Sec = Layout.getSectionOrder()[i];
    Sec->setLayoutOrder(i);

    unsigned FragmentIndex = 0;
    for (MCSection::iterator iFrag = Sec->begin(), iFragEnd = Sec->end();
         iFrag != iFragEnd; ++iFrag)
      iFrag->setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits.
  while (layoutOnce(Layout))
    continue;

  DEBUG_WITH_TYPE("mc-dump", {
      llvm::errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  finishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      llvm::errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  uint64_t StartOffset = OS.tell();

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this, Layout);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
    for (MCSection::iterator it2 = it->begin(), ie2 = it->end(); it2 != ie2;
         ++it2) {
      MCEncodedFragment *F = dyn_cast<MCEncodedFragment>(it2);
      // Data and relaxable fragments both have fixups. So only process
      // those here.
      // FIXME: Is there a better way to do this? MCEncodedFragmentWithFixups
      // being templated makes this tricky.
      if (!F || isa<MCCompactEncodedInstFragment>(F))
        continue;
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      if (auto *FragWithFixups = dyn_cast<MCDataFragment>(F)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else if (auto *FragWithFixups = dyn_cast<MCRelaxableFragment>(F)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else
        llvm_unreachable("Unknown fragment with fixups!");
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsPCRel;
        std::tie(FixedValue, IsPCRel) = handleFixup(Layout, *F, Fixup);
        getBackend().applyFixup(Fixup, Contents.data(),
                                Contents.size(), FixedValue, IsPCRel);
      }
    }
  }

  // Write the object file.
  getWriter().writeObject(*this, Layout);

  stats::ObjectBytes += OS.tell() - StartOffset;
}
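
// Ask the backend whether this fixup, given its current (possibly unresolved)
// value, forces the containing instruction to be relaxed.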
bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF,
                                       const MCAsmLayout &Layout) const {
  MCValue Target;
  uint64_t Value;
  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value);
  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
                                                   Layout);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
                                          const MCAsmLayout &Layout) const {
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst()))
    return false;

  for (MCRelaxableFragment::const_fixup_iterator it = F->fixup_begin(),
       ie = F->fixup_end(); it != ie; ++it)
    if (fixupNeedsRelaxation(*it, F, Layout))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
                                   MCRelaxableFragment &F) {
  if (!fragmentNeedsRelaxation(&F, Layout))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.
  MCInst Relaxed;
  getBackend().relaxInstruction(F.getInst(), Relaxed);

  // Encode the new instruction.
  //
  // FIXME-PERF: If it matters, we could let the target do this. It can
  // probably do so more efficiently in many cases.
  SmallVector<MCFixup, 4> Fixups;
  SmallString<256> Code;
  raw_svector_ostream VecOS(Code);
  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, F.getSubtargetInfo());
  VecOS.flush();

  // Update the fragment.
  F.setInst(Relaxed);
  F.getContents() = Code;
  F.getFixups() = Fixups;

  return true;
}
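
// Re-encode an LEB fragment with its now-known value; returns true if the
// encoded size changed, which triggers another layout pass. The DWARF line
// and call frame fragments below follow the same pattern.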
bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
  uint64_t OldSize = LF.getContents().size();
  int64_t Value;
  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout);
  if (!Abs)
    report_fatal_error("sleb128 and uleb128 expressions must be absolute");
  SmallString<8> &Data = LF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  if (LF.isSigned())
    encodeSLEB128(Value, OSE);
  else
    encodeULEB128(Value, OSE);
  OSE.flush();
  return OldSize != LF.getContents().size();
}

bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
                                     MCDwarfLineAddrFragment &DF) {
  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created a line delta with an invalid expression");
  (void) Abs;
  int64_t LineDelta;
  LineDelta = DF.getLineDelta();
  SmallString<8> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  MCDwarfLineAddr::Encode(Context, LineDelta, AddrDelta, OSE);
  OSE.flush();
  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
                                              MCDwarfCallFrameFragment &DF) {
  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created call frame with an invalid expression");
  (void) Abs;
  SmallString<8> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE);
  OSE.flush();
  return OldSize != Data.size();
}

bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) {
  // Holds the first fragment which needed relaxing during this layout. It will
  // remain NULL if none were relaxed.
  // When a fragment is relaxed, all the fragments following it should get
  // invalidated because their offset is going to change.
  MCFragment *FirstRelaxedFragment = nullptr;

  // Attempt to relax all the fragments in the section.
  for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
    // Check if this is a fragment that needs relaxation.
    bool RelaxedFrag = false;
    switch (I->getKind()) {
    default:
      break;
    case MCFragment::FT_Relaxable:
      assert(!getRelaxAll() &&
             "Did not expect a MCRelaxableFragment in RelaxAll mode");
      RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I));
      break;
    case MCFragment::FT_Dwarf:
      RelaxedFrag = relaxDwarfLineAddr(Layout,
                                       *cast<MCDwarfLineAddrFragment>(I));
      break;
    case MCFragment::FT_DwarfFrame:
      RelaxedFrag =
        relaxDwarfCallFrameFragment(Layout,
                                    *cast<MCDwarfCallFrameFragment>(I));
      break;
    case MCFragment::FT_LEB:
      RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I));
      break;
    }
    if (RelaxedFrag && !FirstRelaxedFragment)
      FirstRelaxedFragment = I;
  }
  if (FirstRelaxedFragment) {
    Layout.invalidateFragmentsFrom(FirstRelaxedFragment);
    return true;
  }
  return false;
}
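
// Perform one relaxation pass over every section. Returns true if anything
// changed; Finish() keeps calling this until a fixed point is reached.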
bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
  ++stats::RelaxationSteps;

  bool WasRelaxed = false;
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    MCSection &Sec = *it;
    while (layoutSectionOnce(Layout, Sec))
      WasRelaxed = true;
  }

  return WasRelaxed;
}

void MCAssembler::finishLayout(MCAsmLayout &Layout) {
  // The layout is done. Mark every fragment as valid.
  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
    Layout.getFragmentOffset(&*Layout.getSectionOrder()[i]->rbegin());
  }
}

// Debugging methods

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS, const MCFixup &AF) {
  OS << "<MCFixup" << " Offset:" << AF.getOffset()
     << " Value:" << *AF.getValue()
     << " Kind:" << AF.getKind() << ">";
  return OS;
}

}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCFragment::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<";
  switch (getKind()) {
  case MCFragment::FT_Align: OS << "MCAlignFragment"; break;
  case MCFragment::FT_Data: OS << "MCDataFragment"; break;
  case MCFragment::FT_CompactEncodedInst:
    OS << "MCCompactEncodedInstFragment"; break;
  case MCFragment::FT_Fill: OS << "MCFillFragment"; break;
  case MCFragment::FT_Relaxable: OS << "MCRelaxableFragment"; break;
  case MCFragment::FT_Org: OS << "MCOrgFragment"; break;
  case MCFragment::FT_Dwarf: OS << "MCDwarfFragment"; break;
  case MCFragment::FT_DwarfFrame: OS << "MCDwarfCallFrameFragment"; break;
  case MCFragment::FT_LEB: OS << "MCLEBFragment"; break;
  case MCFragment::FT_SafeSEH: OS << "MCSafeSEHFragment"; break;
  }

  OS << "<MCFragment " << (void*) this << " LayoutOrder:" << LayoutOrder
     << " Offset:" << Offset
     << " HasInstructions:" << hasInstructions()
     << " BundlePadding:" << static_cast<unsigned>(getBundlePadding()) << ">";

  switch (getKind()) {
  case MCFragment::FT_Align: {
    const MCAlignFragment *AF = cast<MCAlignFragment>(this);
    if (AF->hasEmitNops())
      OS << " (emit nops)";
    OS << "\n ";
    OS << " Alignment:" << AF->getAlignment()
       << " Value:" << AF->getValue() << " ValueSize:" << AF->getValueSize()
       << " MaxBytesToEmit:" << AF->getMaxBytesToEmit() << ">";
    break;
  }
  case MCFragment::FT_Data: {
    const MCDataFragment *DF = cast<MCDataFragment>(this);
    OS << "\n ";
    OS << " Contents:[";
    const SmallVectorImpl<char> &Contents = DF->getContents();
    for (unsigned i = 0, e = Contents.size(); i != e; ++i) {
      if (i) OS << ",";
      OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF);
    }
    OS << "] (" << Contents.size() << " bytes)";

    if (DF->fixup_begin() != DF->fixup_end()) {
      OS << ",\n ";
      OS << " Fixups:[";
      for (MCDataFragment::const_fixup_iterator it = DF->fixup_begin(),
             ie = DF->fixup_end(); it != ie; ++it) {
        if (it != DF->fixup_begin()) OS << ",\n ";
        OS << *it;
      }
      OS << "]";
    }
    break;
  }
  case MCFragment::FT_CompactEncodedInst: {
    const MCCompactEncodedInstFragment *CEIF =
      cast<MCCompactEncodedInstFragment>(this);
    OS << "\n ";
    OS << " Contents:[";
    const SmallVectorImpl<char> &Contents = CEIF->getContents();
    for (unsigned i = 0, e = Contents.size(); i != e; ++i) {
      if (i) OS << ",";
      OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF);
    }
    OS << "] (" << Contents.size() << " bytes)";
    break;
  }
  case MCFragment::FT_Fill: {
    const MCFillFragment *FF = cast<MCFillFragment>(this);
    OS << " Value:" << FF->getValue() << " ValueSize:" << FF->getValueSize()
       << " Size:" << FF->getSize();
    break;
  }
  case MCFragment::FT_Relaxable: {
    const MCRelaxableFragment *F = cast<MCRelaxableFragment>(this);
    OS << "\n ";
    OS << " Inst:";
    F->getInst().dump_pretty(OS);
    break;
  }
  case MCFragment::FT_Org: {
    const MCOrgFragment *OF = cast<MCOrgFragment>(this);
    OS << "\n ";
    OS << " Offset:" << OF->getOffset() << " Value:" << OF->getValue();
    break;
  }
  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment *OF = cast<MCDwarfLineAddrFragment>(this);
    OS << "\n ";
    OS << " AddrDelta:" << OF->getAddrDelta()
       << " LineDelta:" << OF->getLineDelta();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment *CF = cast<MCDwarfCallFrameFragment>(this);
    OS << "\n ";
    OS << " AddrDelta:" << CF->getAddrDelta();
    break;
  }
  case MCFragment::FT_LEB: {
    const MCLEBFragment *LF = cast<MCLEBFragment>(this);
    OS << "\n ";
    OS << " Value:" << LF->getValue() << " Signed:" << LF->isSigned();
    break;
  }
  case MCFragment::FT_SafeSEH: {
    const MCSafeSEHFragment *F = cast<MCSafeSEHFragment>(this);
    OS << "\n ";
    OS << " Sym:" << F->getSymbol();
    break;
  }
  }
  OS << ">";
}

void MCAssembler::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCAssembler\n";
  OS << " Sections:[\n ";
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    if (it != begin()) OS << ",\n ";
    it->dump();
  }
  OS << "],\n";
  OS << " Symbols:[";

  for (symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) {
    if (it != symbol_begin()) OS << ",\n ";
    OS << "(";
    it->dump();
    OS << ", Index:" << it->getIndex() << ", ";
    OS << ")";
  }
  OS << "]>\n";
}
#endif
  1104. #endif