//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "dyld"

namespace llvm {

class RuntimeDyldMachOAArch64
    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
public:

  typedef uint64_t TargetPtrT;

  RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
                          RuntimeDyld::SymbolResolver &Resolver)
      : RuntimeDyldMachOCRTPBase(MM, Resolver) {}

  unsigned getMaxStubSize() override { return 8; }

  unsigned getStubAlignment() override { return 8; }
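
  // Note: the only stubs this target creates are GOT entries (see
  // processGOTRelocation below), i.e. single 8-byte pointers, which is why
  // both the maximum stub size and the stub alignment are 8.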

  /// Extract the addend encoded in the instruction / memory location.
  int64_t decodeAddend(const RelocationEntry &RE) const {
    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    unsigned NumBytes = 1 << RE.Size;
    int64_t Addend = 0;
    // Verify that the relocation has the correct size and alignment.
    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
      else
        Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      // Verify that the relocation points to the expected branch instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Get the 26 bit addend encoded in the branch instruction and sign-extend
      // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
      // (<< 2).
      Addend = (*p & 0x03FFFFFF) << 2;
      Addend = SignExtend64(Addend, 28);
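      // For example, 0x14000002 (b #8) has imm26 = 2 and therefore decodes to
      // Addend = 2 << 2 = 8.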
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Get the 21 bit addend encoded in the adrp instruction and sign-extend
      // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
      // therefore implicit (<< 12).
      Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
      Addend = SignExtend64(Addend, 21);
      Addend <<= 12;
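      // ADRP layout: op|immlo(30:29)|10000|immhi(23:5)|Rd(4:0); the encoded
      // page offset is SignExtend(immhi:immlo) << 12, which is what the three
      // statements above reconstruct (mirroring encodeAddend below).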
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      (void)p;
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Get the 12 bit addend encoded in the instruction.
      Addend = (*p & 0x003FFC00) >> 10;

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        if (ImplicitShift == 0) {
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000)
            ImplicitShift = 4;
        }
      }
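      // The imm12 field of LDR/STR is scaled by the access size: the size
      // field (bits 31:30) selects 1/2/4/8-byte accesses, and 0x04800000
      // (V = 1, opc<1> = 1) marks a 128-bit vector access, i.e. a shift of 4.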
      // Compensate for implicit shift.
      Addend <<= ImplicitShift;
      break;
    }
    }
    return Addend;
  }

  /// Encode the addend into the instruction / memory location.
  void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
                    MachO::RelocationInfoType RelType, int64_t Addend) const {
    // Verify that the relocation has the correct size and alignment.
    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
      else
        *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      // Verify that the relocation points to the expected branch instruction.
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Verify addend value.
      assert((Addend & 0x3) == 0 && "Branch target is not aligned");
      assert(isInt<28>(Addend) && "Branch target is out of range.");

      // Encode the addend as 26 bit immediate in the branch instruction.
      *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Check that the addend fits into 21 bits (+ 12 lower bits).
      assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
      assert(isInt<33>(Addend) && "Invalid page reloc value.");

      // Encode the addend into the instruction.
      uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
      uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
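      // Addend bits 13:12 move to immlo (bits 30:29) and addend bits 32:14
      // move to immhi (bits 23:5), the inverse of the decode above.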
      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
      (void)p;
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction and verify alignment.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        switch (ImplicitShift) {
        case 0:
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000) {
            ImplicitShift = 4;
            assert(((Addend & 0xF) == 0) &&
                   "128-bit LDR/STR not 16-byte aligned.");
          }
          break;
        case 1:
          assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
          break;
        case 2:
          assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
          break;
        case 3:
          assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
          break;
        }
      }
      // Compensate for implicit shift.
      Addend >>= ImplicitShift;
      assert(isUInt<12>(Addend) && "Addend cannot be encoded.");

      // Encode the addend into the instruction.
      *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
      break;
    }
    }
  }

  relocation_iterator
  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
                       const ObjectFile &BaseObjT,
                       ObjSectionToIDMap &ObjSectionToID,
                       StubMap &Stubs) override {
    const MachOObjectFile &Obj =
        static_cast<const MachOObjectFile &>(BaseObjT);
    MachO::any_relocation_info RelInfo =
        Obj.getRelocation(RelI->getRawDataRefImpl());

    assert(!Obj.isRelocationScattered(RelInfo) &&
           "Scattered relocations not supported on AArch64.");

    // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
    // addend for the following relocation. If found: (1) store the associated
    // addend, (2) consume the next relocation, and (3) use the stored addend to
    // override the addend.
    int64_t ExplicitAddend = 0;
    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
      assert(!Obj.getPlainRelocationExternal(RelInfo));
      assert(!Obj.getAnyRelocationPCRel(RelInfo));
      assert(Obj.getAnyRelocationLength(RelInfo) == 2);
      int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit addend to 64 bits.
      ExplicitAddend = SignExtend64(RawAddend, 24);
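      // The addend travels in the relocation's 24-bit r_symbolnum field
      // (an ADDEND entry names no symbol), hence the 24-bit sign extension.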
      ++RelI;
      RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
    }

    RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
    RE.Addend = decodeAddend(RE);
    RelocationValueRef Value(
        getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));

    assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "
           "ARM64_RELOC_ADDEND and embedded addend in the instruction.");
    if (ExplicitAddend) {
      RE.Addend = ExplicitAddend;
      Value.Offset = ExplicitAddend;
    }

    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
    if (!IsExtern && RE.IsPCRel)
      makeValueAddendPCRel(Value, RelI, 1 << RE.Size);

    RE.Addend = Value.Offset;

    if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
        RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
      processGOTRelocation(RE, Value, Stubs);
    else {
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }

    return ++RelI;
  }

  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
    DEBUG(dumpRelocationToResolve(RE, Value));

    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    MachO::RelocationInfoType RelType =
        static_cast<MachO::RelocationInfoType>(RE.RelType);

    switch (RelType) {
    default:
      llvm_unreachable("Invalid relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
      // encodeAddend performs an endian-safe write of the appropriate width;
      // the target memory location may be unaligned.
      if (RE.Size < 2)
        llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
      encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
      break;
    }
    case MachO::ARM64_RELOC_BRANCH26: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
      // Check if branch is in range.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      int64_t PCRelVal = Value - FinalAddress + RE.Addend;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
      // Adjust for PC-relative relocation and offset.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
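      // Masking with -4096 clears the low 12 bits, so the value below is
      // page(target + addend) - page(fixup), exactly what ADRP encodes.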
      int64_t PCRelVal =
          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
    case MachO::ARM64_RELOC_PAGEOFF12: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF12 not supported");
      // Add the offset from the symbol.
      Value += RE.Addend;
      // Mask out the page address and only use the lower 12 bits.
      Value &= 0xFFF;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
      break;
    }
    case MachO::ARM64_RELOC_SUBTRACTOR:
    case MachO::ARM64_RELOC_POINTER_TO_GOT:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
      llvm_unreachable("Relocation type not yet implemented!");
    case MachO::ARM64_RELOC_ADDEND:
      llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
                       "processRelocationRef!");
    }
  }

  void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
                       const SectionRef &Section) {}

private:
  void processGOTRelocation(const RelocationEntry &RE,
                            RelocationValueRef &Value, StubMap &Stubs) {
    assert(RE.Size == 2);
    SectionEntry &Section = Sections[RE.SectionID];
    StubMap::const_iterator i = Stubs.find(Value);
    int64_t Offset;
    if (i != Stubs.end())
      Offset = static_cast<int64_t>(i->second);
    else {
      // FIXME: There must be a better way to do this than to check and fix the
      // alignment every time!!!
      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      uintptr_t StubAddress =
          (BaseAddress + Section.StubOffset + StubAlignment - 1) &
          -StubAlignment;
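      // (x + a - 1) & -a is the usual round-up-to-multiple idiom for a
      // power-of-two alignment a: -a is a mask with the low log2(a) bits
      // cleared.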
      unsigned StubOffset = StubAddress - BaseAddress;
      Stubs[Value] = StubOffset;
      assert(((StubAddress % getStubAlignment()) == 0) &&
             "GOT entry not aligned");
      RelocationEntry GOTRE(RE.SectionID, StubOffset,
                            MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
                            /*IsPCRel=*/false, /*Size=*/3);
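      // Size is stored as log2 of the byte count, so Size=3 makes the GOT
      // entry a 1 << 3 = 8-byte pointer slot.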
      if (Value.SymbolName)
        addRelocationForSymbol(GOTRE, Value.SymbolName);
      else
        addRelocationForSection(GOTRE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
      Offset = static_cast<int64_t>(StubOffset);
    }
    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
                             RE.IsPCRel, RE.Size);
    addRelocationForSection(TargetRE, RE.SectionID);
  }
};
}

#undef DEBUG_TYPE

#endif