1 //===- ARM64.cpp ----------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
#include "Arch/ARM64Common.h"
#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "mach-o/compact_unwind_encoding.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MathExtras.h"

#include <array>
22 
23 using namespace llvm;
24 using namespace llvm::MachO;
25 using namespace llvm::support::endian;
26 using namespace lld;
27 using namespace lld::macho;
28 
namespace {

// Target info for vanilla arm64 (as opposed to arm64_32). Logic shared with
// other arm64 flavors lives in ARM64Common; this subclass supplies the
// concrete stub/thunk instruction sequences and the linker-optimization-hint
// (LOH) rewriting pass.
struct ARM64 : ARM64Common {
  ARM64();
  // Writes the 3-instruction lazy-binding stub for the given symbol at buf.
  void writeStub(uint8_t *buf, const Symbol &) const override;
  // Writes the stub-helper header that branches to dyld_stub_binder.
  void writeStubHelperHeader(uint8_t *buf) const override;
  // Writes one per-symbol stub-helper entry; entryAddr is the entry's VA.
  void writeStubHelperEntry(uint8_t *buf, const Symbol &,
                            uint64_t entryAddr) const override;
  // Fills `thunk` with a branch-range-extension sequence targeting funcSym.
  void populateThunk(InputSection *thunk, Symbol *funcSym) override;
  // Applies LC_LINKER_OPTIMIZATION_HINT rewrites for one input section.
  void applyOptimizationHints(uint8_t *, const ConcatInputSection *,
                              ArrayRef<uint64_t>) const override;
};

} // namespace
43 
44 // Random notes on reloc types:
45 // ADDEND always pairs with BRANCH26, PAGE21, or PAGEOFF12
46 // POINTER_TO_GOT: ld64 supports a 4-byte pc-relative form as well as an 8-byte
47 // absolute version of this relocation. The semantics of the absolute relocation
48 // are weird -- it results in the value of the GOT slot being written, instead
49 // of the address. Let's not support it unless we find a real-world use case.
// Attribute table for the arm64 relocation types, indexed by relocation type
// value (ARM64_RELOC_UNSIGNED == 0 ... ARM64_RELOC_ADDEND == 10). Used to
// validate relocations read from input files.
static constexpr std::array<RelocAttrs, 11> relocAttrsArray{{
#define B(x) RelocAttrBits::x
    {"UNSIGNED",
     B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4) | B(BYTE8)},
    {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
    {"BRANCH26", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
    {"PAGE21", B(PCREL) | B(EXTERN) | B(BYTE4)},
    {"PAGEOFF12", B(ABSOLUTE) | B(EXTERN) | B(BYTE4)},
    {"GOT_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(GOT) | B(BYTE4)},
    {"GOT_LOAD_PAGEOFF12",
     B(ABSOLUTE) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
    {"POINTER_TO_GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
    {"TLVP_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(TLV) | B(BYTE4)},
    {"TLVP_LOAD_PAGEOFF12",
     B(ABSOLUTE) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
    {"ADDEND", B(ADDEND)},
#undef B
}};
68 
// Lazy-binding stub: load the symbol's lazy pointer and jump through it.
// The adrp/ldr immediates are zero here; they are fixed up when the stub is
// written (see ::writeStub in ARM64Common.h).
static constexpr uint32_t stubCode[] = {
    0x90000010, // 00: adrp  x16, __la_symbol_ptr@page
    0xf9400210, // 04: ldr   x16, [x16, __la_symbol_ptr@pageoff]
    0xd61f0200, // 08: br    x16
};

// Emits the stub for `sym` at buf8, delegating to the shared LP64 helper.
void ARM64::writeStub(uint8_t *buf8, const Symbol &sym) const {
  ::writeStub<LP64>(buf8, stubCode, sym);
}
78 
// Stub-helper header: materialize the address of _dyld_private, spill
// x16/x17, then branch to dyld_stub_binder. Immediates are fixed up by
// ::writeStubHelperHeader in ARM64Common.h.
static constexpr uint32_t stubHelperHeaderCode[] = {
    0x90000011, // 00: adrp  x17, _dyld_private@page
    0x91000231, // 04: add   x17, x17, _dyld_private@pageoff
    0xa9bf47f0, // 08: stp   x16, x17, [sp, #-16]!
    0x90000010, // 0c: adrp  x16, dyld_stub_binder@page
    0xf9400210, // 10: ldr   x16, [x16, dyld_stub_binder@pageoff]
    0xd61f0200, // 14: br    x16
};

// Emits the stub-helper header at buf8 via the shared LP64 helper.
void ARM64::writeStubHelperHeader(uint8_t *buf8) const {
  ::writeStubHelperHeader<LP64>(buf8, stubHelperHeaderCode);
}
91 
// Per-symbol stub-helper entry: load a 32-bit value from the trailing slot
// (l0) into w16, then branch to the stub-helper header. The slot contents and
// branch displacement are filled in by ::writeStubHelperEntry.
static constexpr uint32_t stubHelperEntryCode[] = {
    0x18000050, // 00: ldr  w16, l0
    0x14000000, // 04: b    stubHelperHeader
    0x00000000, // 08: l0: .long 0
};

// Emits the stub-helper entry for `sym` at buf8. entryVA is this entry's own
// virtual address (presumably used to compute the branch displacement — see
// ::writeStubHelperEntry in ARM64Common.h).
void ARM64::writeStubHelperEntry(uint8_t *buf8, const Symbol &sym,
                                 uint64_t entryVA) const {
  ::writeStubHelperEntry(buf8, stubHelperEntryCode, sym, entryVA);
}
102 
// A thunk is the relaxed variation of stubCode. We don't need the
// extra indirection through a lazy pointer because the target address
// is known at link time.
static constexpr uint32_t thunkCode[] = {
    0x90000010, // 00: adrp  x16, <thunk.ptr>@page
    0x91000210, // 04: add   x16, x16, <thunk.ptr>@pageoff
    0xd61f0200, // 08: br    x16
};
111 
112 void ARM64::populateThunk(InputSection *thunk, Symbol *funcSym) {
113   thunk->align = 4;
114   thunk->data = {reinterpret_cast<const uint8_t *>(thunkCode),
115                  sizeof(thunkCode)};
116   thunk->relocs.push_back({/*type=*/ARM64_RELOC_PAGEOFF12,
117                            /*pcrel=*/false, /*length=*/2,
118                            /*offset=*/4, /*addend=*/0,
119                            /*referent=*/funcSym});
120   thunk->relocs.push_back({/*type=*/ARM64_RELOC_PAGE21,
121                            /*pcrel=*/true, /*length=*/2,
122                            /*offset=*/0, /*addend=*/0,
123                            /*referent=*/funcSym});
124 }
125 
// Initializes the arm64-specific target parameters consumed by the generic
// linker machinery (CPU identity, stub/thunk sizes, branch ranges, reloc
// attribute table).
ARM64::ARM64() : ARM64Common(LP64()) {
  cpuType = CPU_TYPE_ARM64;
  cpuSubtype = CPU_SUBTYPE_ARM64_ALL;

  stubSize = sizeof(stubCode);
  thunkSize = sizeof(thunkCode);

  // Branch immediate is two's complement 26 bits, which is implicitly
  // multiplied by 4 (since all functions are 4-aligned): the branch range
  // is -4*(2**(26-1))..4*(2**(26-1) - 1), i.e. -128 MiB..+128 MiB - 4.
  backwardBranchRange = 128 * 1024 * 1024;
  forwardBranchRange = backwardBranchRange - 4;

  modeDwarfEncoding = UNWIND_ARM64_MODE_DWARF;
  subtractorRelocType = ARM64_RELOC_SUBTRACTOR;
  unsignedRelocType = ARM64_RELOC_UNSIGNED;

  stubHelperHeaderSize = sizeof(stubHelperHeaderCode);
  stubHelperEntrySize = sizeof(stubHelperEntryCode);

  relocAttrs = {relocAttrsArray.data(), relocAttrsArray.size()};
}
148 
namespace {
// Decoded ADRP instruction; only the destination register is needed by the
// LOH passes.
struct Adrp {
  uint32_t destRegister;
};

// Decoded ADD (immediate) instruction.
struct Add {
  uint8_t destRegister;
  uint8_t srcRegister;
  uint32_t addend; // unshifted 12-bit immediate
};

// How a load extends its value into the destination register. Values match
// the opc field of the sign-extending load encodings.
enum ExtendType { ZeroExtend = 1, Sign64 = 2, Sign32 = 3 };

// Decoded LDR-immediate-class instruction.
struct Ldr {
  uint8_t destRegister;
  uint8_t baseRegister;
  // Log2 of the access size in bytes (e.g. 3 for a 64-bit load, 4 for a
  // 128-bit SIMD&FP load).
  uint8_t p2Size;
  bool isFloat; // SIMD&FP register file rather than general-purpose
  ExtendType extendType;
  // Byte offset from the base register (already descaled from the encoded
  // 12-bit immediate).
  int64_t offset;
};

// A relocation paired with the virtual address it resolved to.
struct PerformedReloc {
  const Reloc &rel;
  uint64_t referentVA;
};

// Per-section state for applying Linker Optimization Hints. Each apply*
// method rewrites the hinted instruction sequence in `buf` in place when the
// transformation is safe, and leaves it untouched otherwise.
class OptimizationHintContext {
public:
  OptimizationHintContext(uint8_t *buf, const ConcatInputSection *isec,
                          ArrayRef<uint64_t> relocTargets)
      : buf(buf), isec(isec), relocTargets(relocTargets),
        relocIt(isec->relocs.rbegin()) {}

  void applyAdrpAdd(const OptimizationHint &);
  void applyAdrpAdrp(const OptimizationHint &);
  void applyAdrpLdr(const OptimizationHint &);
  void applyAdrpLdrGot(const OptimizationHint &);
  void applyAdrpLdrGotLdr(const OptimizationHint &);

private:
  uint8_t *buf;                    // output buffer for this section's bytes
  const ConcatInputSection *isec;  // section whose hints are being applied
  ArrayRef<uint64_t> relocTargets; // resolved VA per reloc, parallel to relocs
  // Cursor into isec->relocs (which are sorted by decreasing offset); see
  // findPrimaryReloc for how it amortizes lookups.
  std::vector<Reloc>::const_reverse_iterator relocIt;

  // Maps a relocation (which must live in isec->relocs) to its resolved VA.
  uint64_t getRelocTarget(const Reloc &);

  // Finds the reloc at a hint's first address, advancing the cursor.
  Optional<PerformedReloc> findPrimaryReloc(uint64_t offset);
  // Finds the reloc at a hint's second/third address via binary search.
  Optional<PerformedReloc> findReloc(uint64_t offset);
};
} // namespace
201 
202 static bool parseAdrp(uint32_t insn, Adrp &adrp) {
203   if ((insn & 0x9f000000) != 0x90000000)
204     return false;
205   adrp.destRegister = insn & 0x1f;
206   return true;
207 }
208 
209 static bool parseAdd(uint32_t insn, Add &add) {
210   if ((insn & 0xffc00000) != 0x91000000)
211     return false;
212   add.destRegister = insn & 0x1f;
213   add.srcRegister = (insn >> 5) & 0x1f;
214   add.addend = (insn >> 10) & 0xfff;
215   return true;
216 }
217 
// Decodes `insn` into `ldr` if it is one of the supported unsigned-offset
// load forms (integer, sign-extending, or SIMD&FP); returns false otherwise.
static bool parseLdr(uint32_t insn, Ldr &ldr) {
  ldr.destRegister = insn & 0x1f;
  ldr.baseRegister = (insn >> 5) & 0x1f;
  uint8_t size = insn >> 30;      // size field: log2 of access size in bytes
  uint8_t opc = (insn >> 22) & 3; // opc field: distinguishes load variants

  if ((insn & 0x3fc00000) == 0x39400000) {
    // LDR (immediate), LDRB (immediate), LDRH (immediate)
    ldr.p2Size = size;
    ldr.extendType = ZeroExtend;
    ldr.isFloat = false;
  } else if ((insn & 0x3f800000) == 0x39800000) {
    // LDRSB (immediate), LDRSH (immediate), LDRSW (immediate)
    ldr.p2Size = size;
    // opc directly encodes the extension kind (see ExtendType values).
    ldr.extendType = static_cast<ExtendType>(opc);
    ldr.isFloat = false;
  } else if ((insn & 0x3f400000) == 0x3d400000) {
    // LDR (immediate, SIMD&FP)
    ldr.extendType = ZeroExtend;
    ldr.isFloat = true;
    if (opc == 1)
      ldr.p2Size = size;
    else if (size == 0 && opc == 3)
      // 128-bit load: encoded as size == 0 with opc == 3.
      ldr.p2Size = 4;
    else
      return false;
  } else {
    return false;
  }
  // The encoded 12-bit immediate is scaled by the access size.
  ldr.offset = ((insn >> 10) & 0xfff) << ldr.p2Size;
  return true;
}
250 
251 static bool isValidAdrOffset(int32_t delta) { return isInt<21>(delta); }
252 
253 static void writeAdr(void *loc, uint32_t dest, int32_t delta) {
254   assert(isValidAdrOffset(delta));
255   uint32_t opcode = 0x10000000;
256   uint32_t immHi = (delta & 0x001ffffc) << 3;
257   uint32_t immLo = (delta & 0x00000003) << 29;
258   write32le(loc, opcode | immHi | immLo | dest);
259 }
260 
261 static void writeNop(void *loc) { write32le(loc, 0xd503201f); }
262 
// A literal (PC-relative) ldr encodes a signed 19-bit offset in units of
// 4 bytes, and only exists for 32-bit and larger accesses.
static bool isLiteralLdrEligible(const Ldr &ldr) {
  return ldr.p2Size > 1 && isShiftedInt<19, 2>(ldr.offset);
}
266 
267 static void writeLiteralLdr(void *loc, const Ldr &ldr) {
268   assert(isLiteralLdrEligible(ldr));
269   uint32_t imm19 = (ldr.offset / 4 & maskTrailingOnes<uint32_t>(19)) << 5;
270   uint32_t opcode;
271   switch (ldr.p2Size) {
272   case 2:
273     if (ldr.isFloat)
274       opcode = 0x1c000000;
275     else
276       opcode = ldr.extendType == Sign64 ? 0x98000000 : 0x18000000;
277     break;
278   case 3:
279     opcode = ldr.isFloat ? 0x5c000000 : 0x58000000;
280     break;
281   case 4:
282     opcode = 0x9c000000;
283     break;
284   default:
285     llvm_unreachable("Invalid literal ldr size");
286   }
287   write32le(loc, opcode | imm19 | ldr.destRegister);
288 }
289 
290 static bool isImmediateLdrEligible(const Ldr &ldr) {
291   // Note: We deviate from ld64's behavior, which converts to immediate loads
292   // only if ldr.offset < 4096, even though the offset is divided by the load's
293   // size in the 12-bit immediate operand. Only the unsigned offset variant is
294   // supported.
295 
296   uint32_t size = 1 << ldr.p2Size;
297   return ldr.offset >= 0 && (ldr.offset % size) == 0 &&
298          isUInt<12>(ldr.offset >> ldr.p2Size);
299 }
300 
301 static void writeImmediateLdr(void *loc, const Ldr &ldr) {
302   assert(isImmediateLdrEligible(ldr));
303   uint32_t opcode = 0x39000000;
304   if (ldr.isFloat) {
305     opcode |= 0x04000000;
306     assert(ldr.extendType == ZeroExtend);
307   }
308   opcode |= ldr.destRegister;
309   opcode |= ldr.baseRegister << 5;
310   uint8_t size, opc;
311   if (ldr.p2Size == 4) {
312     size = 0;
313     opc = 3;
314   } else {
315     opc = ldr.extendType;
316     size = ldr.p2Size;
317   }
318   uint32_t immBits = ldr.offset >> ldr.p2Size;
319   write32le(loc, opcode | (immBits << 10) | (opc << 22) | (size << 30));
320 }
321 
// Returns the precomputed target VA for `reloc`. The reloc must be an element
// of isec->relocs: its index is recovered from its address within the vector,
// and relocTargets is parallel to that vector.
uint64_t OptimizationHintContext::getRelocTarget(const Reloc &reloc) {
  size_t relocIdx = &reloc - isec->relocs.data();
  return relocTargets[relocIdx];
}
326 
// Optimization hints are sorted in a monotonically increasing order by their
// first address as are relocations (albeit in decreasing order), so if we keep
// a pointer around to the last found relocation, we don't have to do a full
// binary search every time.
Optional<PerformedReloc>
OptimizationHintContext::findPrimaryReloc(uint64_t offset) {
  const auto end = isec->relocs.rend();
  // Advance the cursor past relocations below `offset`; because hints arrive
  // in increasing offset order, the cursor never needs to move backwards.
  while (relocIt != end && relocIt->offset < offset)
    ++relocIt;
  if (relocIt == end || relocIt->offset != offset)
    return None;
  return PerformedReloc{*relocIt, getRelocTarget(*relocIt)};
}
340 
341 // The second and third addresses of optimization hints have no such
342 // monotonicity as the first, so we search the entire range of relocations.
343 Optional<PerformedReloc> OptimizationHintContext::findReloc(uint64_t offset) {
344   // Optimization hints often apply to successive relocations, so we check for
345   // that first before doing a full binary search.
346   auto end = isec->relocs.rend();
347   if (relocIt < end - 1 && (relocIt + 1)->offset == offset)
348     return PerformedReloc{*(relocIt + 1), getRelocTarget(*(relocIt + 1))};
349 
350   auto reloc = lower_bound(isec->relocs, offset,
351                            [](const Reloc &reloc, uint64_t offset) {
352                              return offset < reloc.offset;
353                            });
354 
355   if (reloc == isec->relocs.end() || reloc->offset != offset)
356     return None;
357   return PerformedReloc{*reloc, getRelocTarget(*reloc)};
358 }
359 
// Transforms a pair of adrp+add instructions into an adr instruction if the
// target is within the +/- 1 MiB range allowed by the adr's 21 bit signed
// immediate offset.
//
//   adrp xN, _foo@PAGE
//   add  xM, xN, _foo@PAGEOFF
// ->
//   adr  xM, _foo
//   nop
void OptimizationHintContext::applyAdrpAdd(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  Add add;
  if (!parseAdd(ins2, add))
    return;
  // The add must consume the page address produced by the adrp.
  if (adrp.destRegister != add.srcRegister)
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  // Both instructions must resolve to the same target address.
  if (rel1->referentVA != rel2->referentVA)
    return;
  // Displacement from the adrp instruction itself to the target.
  int64_t delta = rel1->referentVA - rel1->rel.offset - isec->getVA();
  if (!isValidAdrOffset(delta))
    return;

  writeAdr(buf + hint.offset0, add.destRegister, delta);
  writeNop(buf + hint.offset0 + hint.delta[0]);
}
394 
// Transforms two adrp instructions into a single adrp if their referent
// addresses are located on the same 4096 byte page.
//
//   adrp xN, _foo@PAGE
//   adrp xN, _bar@PAGE
// ->
//   adrp xN, _foo@PAGE
//   nop
void OptimizationHintContext::applyAdrpAdrp(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Adrp adrp1, adrp2;
  if (!parseAdrp(ins1, adrp1) || !parseAdrp(ins2, adrp2))
    return;
  // The second adrp can only be dropped if it would have written the same
  // register the first one wrote.
  if (adrp1.destRegister != adrp2.destRegister)
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  // Both targets must lie on the same 4 KiB page for the first adrp's result
  // to serve both uses.
  if ((rel1->referentVA & ~0xfffULL) != (rel2->referentVA & ~0xfffULL))
    return;

  writeNop(buf + hint.offset0 + hint.delta[0]);
}
421 
// Transforms a pair of adrp+ldr (immediate) instructions into an ldr (literal)
// load from a PC-relative address if it is 4-byte aligned and within +/- 1 MiB,
// as ldr can encode a signed 19-bit offset that gets multiplied by 4.
//
//   adrp xN, _foo@PAGE
//   ldr  xM, [xN, _foo@PAGEOFF]
// ->
//   nop
//   ldr  xM, _foo
void OptimizationHintContext::applyAdrpLdr(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  Ldr ldr;
  if (!parseLdr(ins2, ldr))
    return;
  // The ldr must address off of the page register set up by the adrp.
  if (adrp.destRegister != ldr.baseRegister)
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  // The ldr's immediate must equal the target's page offset; otherwise the
  // pair doesn't compute the target address we think it does.
  if (ldr.offset != static_cast<int64_t>(rel1->referentVA & 0xfff))
    return;
  // Re-express the load as a displacement from the ldr instruction itself.
  ldr.offset = rel1->referentVA - rel2->rel.offset - isec->getVA();
  if (!isLiteralLdrEligible(ldr))
    return;

  writeNop(buf + hint.offset0);
  writeLiteralLdr(buf + hint.offset0 + hint.delta[0], ldr);
}
456 
457 // GOT loads are emitted by the compiler as a pair of adrp and ldr instructions,
458 // but they may be changed to adrp+add by relaxGotLoad(). This hint performs
459 // the AdrpLdr or AdrpAdd transformation depending on whether it was relaxed.
460 void OptimizationHintContext::applyAdrpLdrGot(const OptimizationHint &hint) {
461   uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
462   Add add;
463   Ldr ldr;
464   if (parseAdd(ins2, add))
465     applyAdrpAdd(hint);
466   else if (parseLdr(ins2, ldr))
467     applyAdrpLdr(hint);
468 }
469 
// Relaxes a GOT-indirect load.
// If the referenced symbol is external and its GOT entry is within +/- 1 MiB,
// the GOT entry can be loaded with a single literal ldr instruction.
// If the referenced symbol is local, its address may be loaded directly if it's
// close enough, or with an adr(p) + ldr pair if it's not.
void OptimizationHintContext::applyAdrpLdrGotLdr(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  uint32_t ins3 = read32le(buf + hint.offset0 + hint.delta[1]);
  Ldr ldr3;
  if (!parseLdr(ins3, ldr3))
    return;
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Ldr ldr2;
  Add add2;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;

  // The middle instruction tells us whether relaxGotLoad() already rewrote
  // the GOT load (adrp+add, local symbol) or not (adrp+ldr, GOT entry).
  if (parseAdd(ins2, add2)) {
    // adrp x0, _foo@PAGE
    // add  x1, x0, _foo@PAGEOFF
    // ldr  x2, [x1, #off]

    if (adrp.destRegister != add2.srcRegister)
      return;
    if (add2.destRegister != ldr3.baseRegister)
      return;

    // Load from the target address directly.
    //   nop
    //   nop
    //   ldr x2, [_foo + #off]
    uint64_t rel3VA = hint.offset0 + hint.delta[1] + isec->getVA();
    Ldr literalLdr = ldr3;
    literalLdr.offset += rel1->referentVA - rel3VA;
    if (isLiteralLdrEligible(literalLdr)) {
      writeNop(buf + hint.offset0);
      writeNop(buf + hint.offset0 + hint.delta[0]);
      writeLiteralLdr(buf + hint.offset0 + hint.delta[1], literalLdr);
      return;
    }

    // Load the target address into a register and load from there indirectly.
    //   adr x1, _foo
    //   nop
    //   ldr x2, [x1, #off]
    int64_t adrOffset = rel1->referentVA - rel1->rel.offset - isec->getVA();
    if (isValidAdrOffset(adrOffset)) {
      writeAdr(buf + hint.offset0, ldr3.baseRegister, adrOffset);
      writeNop(buf + hint.offset0 + hint.delta[0]);
      return;
    }

    // Move the target's page offset into the ldr's immediate offset.
    //   adrp x0, _foo@PAGE
    //   nop
    //   ldr x2, [x0, _foo@PAGEOFF + #off]
    Ldr immediateLdr = ldr3;
    immediateLdr.baseRegister = adrp.destRegister;
    immediateLdr.offset += add2.addend;
    if (isImmediateLdrEligible(immediateLdr)) {
      writeNop(buf + hint.offset0 + hint.delta[0]);
      writeImmediateLdr(buf + hint.offset0 + hint.delta[1], immediateLdr);
      return;
    }
  } else if (parseLdr(ins2, ldr2)) {
    // adrp x1, _foo@GOTPAGE
    // ldr  x2, [x1, _foo@GOTPAGEOFF]
    // ldr  x3, [x2, #off]
    if (ldr2.baseRegister != adrp.destRegister)
      return;
    if (ldr3.baseRegister != ldr2.destRegister)
      return;
    // Loads from the GOT must be pointer sized.
    if (ldr2.p2Size != 3 || ldr2.isFloat)
      return;

    // Load the GOT entry's address directly.
    //   nop
    //   ldr x2, _foo@GOTPAGE + _foo@GOTPAGEOFF
    //   ldr x3, [x2, #off]
    Ldr literalLdr = ldr2;
    literalLdr.offset = rel1->referentVA - rel2->rel.offset - isec->getVA();
    if (isLiteralLdrEligible(literalLdr)) {
      writeNop(buf + hint.offset0);
      writeLiteralLdr(buf + hint.offset0 + hint.delta[0], literalLdr);
    }
  }
}
564 
// Applies all of `isec`'s linker optimization hints to the output buffer.
// relocTargets holds the already-resolved VA for each reloc in isec->relocs
// (same order), so the hint passes don't need to re-resolve symbols.
void ARM64::applyOptimizationHints(uint8_t *buf, const ConcatInputSection *isec,
                                   ArrayRef<uint64_t> relocTargets) const {
  assert(isec);
  assert(relocTargets.size() == isec->relocs.size());

  // Note: Some of these optimizations might not be valid when shared regions
  // are in use. Will need to revisit this if splitSegInfo is added.

  // First pass: every hint type except ADRP_ADRP.
  OptimizationHintContext ctx1(buf, isec, relocTargets);
  for (const OptimizationHint &hint : isec->optimizationHints) {
    switch (hint.type) {
    case LOH_ARM64_ADRP_ADRP:
      // This is done in another pass because the other optimization hints
      // might cause its targets to be turned into NOPs.
      break;
    case LOH_ARM64_ADRP_LDR:
      ctx1.applyAdrpLdr(hint);
      break;
    case LOH_ARM64_ADRP_ADD_LDR:
      // TODO: Implement this
      break;
    case LOH_ARM64_ADRP_LDR_GOT_LDR:
      ctx1.applyAdrpLdrGotLdr(hint);
      break;
    case LOH_ARM64_ADRP_ADD_STR:
    case LOH_ARM64_ADRP_LDR_GOT_STR:
      // TODO: Implement these
      break;
    case LOH_ARM64_ADRP_ADD:
      ctx1.applyAdrpAdd(hint);
      break;
    case LOH_ARM64_ADRP_LDR_GOT:
      ctx1.applyAdrpLdrGot(hint);
      break;
    }
  }

  // Second pass: ADRP_ADRP, with a fresh reloc cursor since we iterate the
  // hints from the beginning again.
  OptimizationHintContext ctx2(buf, isec, relocTargets);
  for (const OptimizationHint &hint : isec->optimizationHints)
    if (hint.type == LOH_ARM64_ADRP_ADRP)
      ctx2.applyAdrpAdrp(hint);
}
607 
// Returns the lazily-constructed singleton TargetInfo for arm64.
TargetInfo *macho::createARM64TargetInfo() {
  static ARM64 t;
  return &t;
}
612