//===- ARM64.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Arch/ARM64Common.h"
#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "mach-o/compact_unwind_encoding.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
using namespace llvm::MachO;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

namespace {

struct ARM64 : ARM64Common {
  ARM64();
  void writeStub(uint8_t *buf, const Symbol &) const override;
  void writeStubHelperHeader(uint8_t *buf) const override;
  void writeStubHelperEntry(uint8_t *buf, const Symbol &,
                            uint64_t entryAddr) const override;
  const RelocAttrs &getRelocAttrs(uint8_t type) const override;
  void populateThunk(InputSection *thunk, Symbol *funcSym) override;
  void applyOptimizationHints(uint8_t *, const ConcatInputSection *,
                              ArrayRef<uint64_t>) const override;
};

} // namespace

// Random notes on reloc types:
// ADDEND always pairs with BRANCH26, PAGE21, or PAGEOFF12.
// POINTER_TO_GOT: ld64 supports a 4-byte pc-relative form as well as an 8-byte
// absolute version of this relocation. The semantics of the absolute relocation
// are weird -- it results in the value of the GOT slot being written, instead
// of the address. Let's not support it unless we find a real-world use case.
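// For example, a reference to _foo+8 is encoded as an ADDEND relocation
// carrying the value 8, immediately followed by the BRANCH26, PAGE21, or
// PAGEOFF12 relocation that names _foo.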

const RelocAttrs &ARM64::getRelocAttrs(uint8_t type) const {
  static const std::array<RelocAttrs, 11> relocAttrsArray{{
#define B(x) RelocAttrBits::x
      {"UNSIGNED",
       B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4) | B(BYTE8)},
      {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
      {"BRANCH26", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
      {"PAGE21", B(PCREL) | B(EXTERN) | B(BYTE4)},
      {"PAGEOFF12", B(ABSOLUTE) | B(EXTERN) | B(BYTE4)},
      {"GOT_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(GOT) | B(BYTE4)},
      {"GOT_LOAD_PAGEOFF12",
       B(ABSOLUTE) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
      {"POINTER_TO_GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
      {"TLVP_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(TLV) | B(BYTE4)},
      {"TLVP_LOAD_PAGEOFF12",
       B(ABSOLUTE) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
      {"ADDEND", B(ADDEND)},
#undef B
  }};
  assert(type < relocAttrsArray.size() && "invalid relocation type");
  if (type >= relocAttrsArray.size())
    return invalidRelocAttrs;
  return relocAttrsArray[type];
}

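// The stub loads the branch target out of the lazy symbol pointer and jumps
// there. The pointer initially refers to the symbol's stub helper entry; dyld
// overwrites it with the resolved address on the first call.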
static constexpr uint32_t stubCode[] = {
    0x90000010, // 00: adrp  x16, __la_symbol_ptr@page
    0xf9400210, // 04: ldr   x16, [x16, __la_symbol_ptr@pageoff]
    0xd61f0200, // 08: br    x16
};

void ARM64::writeStub(uint8_t *buf8, const Symbol &sym) const {
  ::writeStub<LP64>(buf8, stubCode, sym);
}

static constexpr uint32_t stubHelperHeaderCode[] = {
    0x90000011, // 00: adrp  x17, _dyld_private@page
    0x91000231, // 04: add   x17, x17, _dyld_private@pageoff
    0xa9bf47f0, // 08: stp   x16, x17, [sp, #-16]!
    0x90000010, // 0c: adrp  x16, dyld_stub_binder@page
    0xf9400210, // 10: ldr   x16, [x16, dyld_stub_binder@pageoff]
    0xd61f0200, // 14: br    x16
};

void ARM64::writeStubHelperHeader(uint8_t *buf8) const {
  ::writeStubHelperHeader<LP64>(buf8, stubHelperHeaderCode);
}

static constexpr uint32_t stubHelperEntryCode[] = {
    0x18000050, // 00: ldr  w16, l0
    0x14000000, // 04: b    stubHelperHeader
    0x00000000, // 08: l0: .long 0
};

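// l0 above is patched at link time with the symbol's offset into the lazy
// binding info; the entry loads it into w16 and branches to the common stub
// helper header, which in turn hands it to dyld_stub_binder.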
void ARM64::writeStubHelperEntry(uint8_t *buf8, const Symbol &sym,
                                 uint64_t entryVA) const {
  ::writeStubHelperEntry(buf8, stubHelperEntryCode, sym, entryVA);
}

// A thunk is a relaxed variant of stubCode. We don't need the extra
// indirection through a lazy pointer because the target address is known at
// link time.
static constexpr uint32_t thunkCode[] = {
    0x90000010, // 00: adrp  x16, <thunk.ptr>@page
    0x91000210, // 04: add   x16, x16, <thunk.ptr>@pageoff
    0xd61f0200, // 08: br    x16
};

void ARM64::populateThunk(InputSection *thunk, Symbol *funcSym) {
  thunk->align = 4;
  thunk->data = {reinterpret_cast<const uint8_t *>(thunkCode),
                 sizeof(thunkCode)};
  thunk->relocs.push_back({/*type=*/ARM64_RELOC_PAGEOFF12,
                           /*pcrel=*/false, /*length=*/2,
                           /*offset=*/4, /*addend=*/0,
                           /*referent=*/funcSym});
  thunk->relocs.push_back({/*type=*/ARM64_RELOC_PAGE21,
                           /*pcrel=*/true, /*length=*/2,
                           /*offset=*/0, /*addend=*/0,
                           /*referent=*/funcSym});
}

ARM64::ARM64() : ARM64Common(LP64()) {
  cpuType = CPU_TYPE_ARM64;
  cpuSubtype = CPU_SUBTYPE_ARM64_ALL;

  stubSize = sizeof(stubCode);
  thunkSize = sizeof(thunkCode);

  // The branch immediate is a two's complement 26-bit value, implicitly
  // multiplied by 4 (since all instructions are 4-byte aligned). The branch
  // range is thus -4*(2**25) .. 4*(2**25 - 1), i.e. 128 MiB backward and
  // 128 MiB minus 4 bytes forward.
  backwardBranchRange = 128 * 1024 * 1024;
  forwardBranchRange = backwardBranchRange - 4;

  modeDwarfEncoding = UNWIND_ARM64_MODE_DWARF;
  subtractorRelocType = ARM64_RELOC_SUBTRACTOR;
  unsignedRelocType = ARM64_RELOC_UNSIGNED;

  stubHelperHeaderSize = sizeof(stubHelperHeaderCode);
  stubHelperEntrySize = sizeof(stubHelperEntryCode);
}

namespace {
struct Adrp {
  uint32_t destRegister;
};

struct Add {
  uint8_t destRegister;
  uint8_t srcRegister;
  uint32_t addend;
};

enum ExtendType { ZeroExtend = 1, Sign64 = 2, Sign32 = 3 };

struct Ldr {
  uint8_t destRegister;
  uint8_t baseRegister;
  uint8_t p2Size;
  bool isFloat;
  ExtendType extendType;
  int64_t offset;
};

struct PerformedReloc {
  const Reloc &rel;
  uint64_t referentVA;
};

class OptimizationHintContext {
public:
  OptimizationHintContext(uint8_t *buf, const ConcatInputSection *isec,
                          ArrayRef<uint64_t> relocTargets)
      : buf(buf), isec(isec), relocTargets(relocTargets),
        relocIt(isec->relocs.rbegin()) {}

  void applyAdrpAdd(const OptimizationHint &);
  void applyAdrpAdrp(const OptimizationHint &);
  void applyAdrpLdr(const OptimizationHint &);
  void applyAdrpLdrGot(const OptimizationHint &);
  void applyAdrpLdrGotLdr(const OptimizationHint &);

private:
  uint8_t *buf;
  const ConcatInputSection *isec;
  ArrayRef<uint64_t> relocTargets;
  std::vector<Reloc>::const_reverse_iterator relocIt;

  uint64_t getRelocTarget(const Reloc &);

  Optional<PerformedReloc> findPrimaryReloc(uint64_t offset);
  Optional<PerformedReloc> findReloc(uint64_t offset);
};
} // namespace

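// Matches ADRP: bit 31 (op) is set and bits 28:24 are 0b10000. The immediate
// fields are ignored here; callers only need the destination register.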
static bool parseAdrp(uint32_t insn, Adrp &adrp) {
  if ((insn & 0x9f000000) != 0x90000000)
    return false;
  adrp.destRegister = insn & 0x1f;
  return true;
}

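// Matches a 64-bit ADD (immediate) with no shift: sf = 1, bits 30:23 =
// 0b00100010, sh = 0.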
static bool parseAdd(uint32_t insn, Add &add) {
  if ((insn & 0xffc00000) != 0x91000000)
    return false;
  add.destRegister = insn & 0x1f;
  add.srcRegister = (insn >> 5) & 0x1f;
  add.addend = (insn >> 10) & 0xfff;
  return true;
}

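// Matches the load-register (unsigned immediate) encodings and extracts the
// byte offset, which the instruction stores scaled down by the access size.
// Stores and pre-/post-indexed loads fail the opcode checks.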
static bool parseLdr(uint32_t insn, Ldr &ldr) {
  ldr.destRegister = insn & 0x1f;
  ldr.baseRegister = (insn >> 5) & 0x1f;
  uint8_t size = insn >> 30;
  uint8_t opc = (insn >> 22) & 3;

  if ((insn & 0x3fc00000) == 0x39400000) {
    // LDR (immediate), LDRB (immediate), LDRH (immediate)
    ldr.p2Size = size;
    ldr.extendType = ZeroExtend;
    ldr.isFloat = false;
  } else if ((insn & 0x3f800000) == 0x39800000) {
    // LDRSB (immediate), LDRSH (immediate), LDRSW (immediate)
    ldr.p2Size = size;
    ldr.extendType = static_cast<ExtendType>(opc);
    ldr.isFloat = false;
  } else if ((insn & 0x3f400000) == 0x3d400000) {
    // LDR (immediate, SIMD&FP)
    ldr.extendType = ZeroExtend;
    ldr.isFloat = true;
    if (opc == 1)
      ldr.p2Size = size;
    else if (size == 0 && opc == 3)
      ldr.p2Size = 4;
    else
      return false;
  } else {
    return false;
  }
  ldr.offset = ((insn >> 10) & 0xfff) << ldr.p2Size;
  return true;
}

static bool isValidAdrOffset(int32_t delta) { return isInt<21>(delta); }

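// ADR encoding: the low two bits of the delta go in immlo (bits 30:29), the
// remaining 19 bits in immhi (bits 23:5), with the destination register in
// bits 4:0.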
static void writeAdr(void *loc, uint32_t dest, int32_t delta) {
  assert(isValidAdrOffset(delta));
  uint32_t opcode = 0x10000000;
  uint32_t immHi = (delta & 0x001ffffc) << 3;
  uint32_t immLo = (delta & 0x00000003) << 29;
  write32le(loc, opcode | immHi | immLo | dest);
}

static void writeNop(void *loc) { write32le(loc, 0xd503201f); }

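// LDR (literal) takes a signed 19-bit word offset, i.e. the target must be
// 4-byte aligned and within +/- 1 MiB of the load. Byte and halfword loads
// have no literal form, hence the p2Size > 1 requirement.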
static bool isLiteralLdrEligible(const Ldr &ldr) {
  return ldr.p2Size > 1 && isShiftedInt<19, 2>(ldr.offset);
}

static void writeLiteralLdr(void *loc, const Ldr &ldr) {
  assert(isLiteralLdrEligible(ldr));
  uint32_t imm19 = (ldr.offset / 4 & maskTrailingOnes<uint32_t>(19)) << 5;
  uint32_t opcode;
  switch (ldr.p2Size) {
  case 2:
    if (ldr.isFloat)
      opcode = 0x1c000000;
    else
      opcode = ldr.extendType == Sign64 ? 0x98000000 : 0x18000000;
    break;
  case 3:
    opcode = ldr.isFloat ? 0x5c000000 : 0x58000000;
    break;
  case 4:
    opcode = 0x9c000000;
    break;
  default:
    llvm_unreachable("Invalid literal ldr size");
  }
  write32le(loc, opcode | imm19 | ldr.destRegister);
}

static bool isImmediateLdrEligible(const Ldr &ldr) {
  // Note: We deviate from ld64, which converts to an immediate load only if
  // ldr.offset < 4096, even though the 12-bit immediate operand holds the
  // offset divided by the load's size, so larger offsets are encodable. Only
  // the unsigned-offset variant is supported.
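  // For example, an 8-byte load can encode byte offsets up to 4095 * 8.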

  uint32_t size = 1 << ldr.p2Size;
  return ldr.offset >= 0 && (ldr.offset % size) == 0 &&
         isUInt<12>(ldr.offset >> ldr.p2Size);
}

static void writeImmediateLdr(void *loc, const Ldr &ldr) {
  assert(isImmediateLdrEligible(ldr));
  uint32_t opcode = 0x39000000;
  if (ldr.isFloat) {
    opcode |= 0x04000000;
    assert(ldr.extendType == ZeroExtend);
  }
  opcode |= ldr.destRegister;
  opcode |= ldr.baseRegister << 5;
  uint8_t size, opc;
  if (ldr.p2Size == 4) {
    size = 0;
    opc = 3;
  } else {
    opc = ldr.extendType;
    size = ldr.p2Size;
  }
  uint32_t immBits = ldr.offset >> ldr.p2Size;
  write32le(loc, opcode | (immBits << 10) | (opc << 22) | (size << 30));
}

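// relocTargets is built parallel to isec->relocs, so the Reloc's index within
// its section's reloc array recovers the precomputed target address.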
uint64_t OptimizationHintContext::getRelocTarget(const Reloc &reloc) {
  size_t relocIdx = &reloc - isec->relocs.data();
  return relocTargets[relocIdx];
}

// Optimization hints are sorted in monotonically increasing order by their
// first address, and relocations are sorted in decreasing order, so if we
// keep a cursor pointing at the last-found relocation, we don't have to do a
// full binary search every time.
Optional<PerformedReloc>
OptimizationHintContext::findPrimaryReloc(uint64_t offset) {
  const auto end = isec->relocs.rend();
  while (relocIt != end && relocIt->offset < offset)
    ++relocIt;
  if (relocIt == end || relocIt->offset != offset)
    return None;
  return PerformedReloc{*relocIt, getRelocTarget(*relocIt)};
}

// Unlike the first address, the second and third addresses of an optimization
// hint are not monotonic across hints, so we may have to search the entire
// range of relocations.
Optional<PerformedReloc> OptimizationHintContext::findReloc(uint64_t offset) {
  // Optimization hints often apply to successive relocations, so we check for
  // that first before doing a full binary search.
  auto end = isec->relocs.rend();
  if (relocIt < end - 1 && (relocIt + 1)->offset == offset)
    return PerformedReloc{*(relocIt + 1), getRelocTarget(*(relocIt + 1))};

  auto reloc = lower_bound(isec->relocs, offset,
                           [](const Reloc &reloc, uint64_t offset) {
                             return offset < reloc.offset;
                           });

  if (reloc == isec->relocs.end() || reloc->offset != offset)
    return None;
  return PerformedReloc{*reloc, getRelocTarget(*reloc)};
}

// Transforms a pair of adrp+add instructions into an adr instruction if the
// target is within the +/- 1 MiB range allowed by the adr's 21-bit signed
// immediate offset.
//
//   adrp xN, _foo@PAGE
//   add  xM, xN, _foo@PAGEOFF
// ->
//   adr  xM, _foo
//   nop
void OptimizationHintContext::applyAdrpAdd(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  Add add;
  if (!parseAdd(ins2, add))
    return;
  if (adrp.destRegister != add.srcRegister)
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  if (rel1->referentVA != rel2->referentVA)
    return;
  int64_t delta = rel1->referentVA - rel1->rel.offset - isec->getVA();
  if (!isValidAdrOffset(delta))
    return;

  writeAdr(buf + hint.offset0, add.destRegister, delta);
  writeNop(buf + hint.offset0 + hint.delta[0]);
}

// Transforms two adrp instructions into a single adrp if their referent
// addresses are located on the same 4096-byte page.
//
//   adrp xN, _foo@PAGE
//   adrp xN, _bar@PAGE
// ->
//   adrp xN, _foo@PAGE
//   nop
void OptimizationHintContext::applyAdrpAdrp(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Adrp adrp1, adrp2;
  if (!parseAdrp(ins1, adrp1) || !parseAdrp(ins2, adrp2))
    return;
  if (adrp1.destRegister != adrp2.destRegister)
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  if ((rel1->referentVA & ~0xfffULL) != (rel2->referentVA & ~0xfffULL))
    return;

  writeNop(buf + hint.offset0 + hint.delta[0]);
}

// Transforms a pair of adrp+ldr (immediate) instructions into an ldr (literal)
// load from a PC-relative address if it is 4-byte aligned and within +/- 1 MiB,
// as ldr can encode a signed 19-bit offset that gets multiplied by 4.
//
//   adrp xN, _foo@PAGE
//   ldr  xM, [xN, _foo@PAGEOFF]
// ->
//   nop
//   ldr  xM, _foo
void OptimizationHintContext::applyAdrpLdr(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  Ldr ldr;
  if (!parseLdr(ins2, ldr))
    return;
  if (adrp.destRegister != ldr.baseRegister)
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  if (ldr.offset != (rel1->referentVA & 0xfff))
    return;
  ldr.offset = rel1->referentVA - rel2->rel.offset - isec->getVA();
  if (!isLiteralLdrEligible(ldr))
    return;

  writeNop(buf + hint.offset0);
  writeLiteralLdr(buf + hint.offset0 + hint.delta[0], ldr);
}

// GOT loads are emitted by the compiler as a pair of adrp and ldr instructions,
// but they may be changed to adrp+add by relaxGotLoad(). This hint performs
// the AdrpLdr or AdrpAdd transformation depending on whether it was relaxed.
void OptimizationHintContext::applyAdrpLdrGot(const OptimizationHint &hint) {
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Add add;
  Ldr ldr;
  if (parseAdd(ins2, add))
    applyAdrpAdd(hint);
  else if (parseLdr(ins2, ldr))
    applyAdrpLdr(hint);
}

// Relaxes a GOT-indirect load.
// If the referenced symbol is external and its GOT entry is within +/- 1 MiB,
// the GOT entry can be loaded with a single literal ldr instruction.
// If the referenced symbol is local, its address may be loaded directly if it's
// close enough, or with an adr(p) + ldr pair if it's not.
void OptimizationHintContext::applyAdrpLdrGotLdr(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  uint32_t ins3 = read32le(buf + hint.offset0 + hint.delta[1]);
  Ldr ldr3;
  if (!parseLdr(ins3, ldr3))
    return;
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Ldr ldr2;
  Add add2;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;

  if (parseAdd(ins2, add2)) {
    // adrp x0, _foo@PAGE
    // add  x1, x0, _foo@PAGEOFF
    // ldr  x2, [x1, #off]

    if (adrp.destRegister != add2.srcRegister)
      return;
    if (add2.destRegister != ldr3.baseRegister)
      return;

    // Load from the target address directly.
    //   nop
    //   nop
    //   ldr x2, [_foo + #off]
    uint64_t rel3VA = hint.offset0 + hint.delta[1] + isec->getVA();
    Ldr literalLdr = ldr3;
    literalLdr.offset += rel1->referentVA - rel3VA;
    if (isLiteralLdrEligible(literalLdr)) {
      writeNop(buf + hint.offset0);
      writeNop(buf + hint.offset0 + hint.delta[0]);
      writeLiteralLdr(buf + hint.offset0 + hint.delta[1], literalLdr);
      return;
    }

    // Load the target address into a register and load from there indirectly.
    //   adr x1, _foo
    //   nop
    //   ldr x2, [x1, #off]
    int64_t adrOffset = rel1->referentVA - rel1->rel.offset - isec->getVA();
    if (isValidAdrOffset(adrOffset)) {
      writeAdr(buf + hint.offset0, ldr3.baseRegister, adrOffset);
      writeNop(buf + hint.offset0 + hint.delta[0]);
      return;
    }

    // Move the target's page offset into the ldr's immediate offset.
    //   adrp x0, _foo@PAGE
    //   nop
    //   ldr x2, [x0, _foo@PAGEOFF + #off]
    Ldr immediateLdr = ldr3;
    immediateLdr.baseRegister = adrp.destRegister;
    immediateLdr.offset += add2.addend;
    if (isImmediateLdrEligible(immediateLdr)) {
      writeNop(buf + hint.offset0 + hint.delta[0]);
      writeImmediateLdr(buf + hint.offset0 + hint.delta[1], immediateLdr);
      return;
    }
  } else if (parseLdr(ins2, ldr2)) {
    // adrp x1, _foo@GOTPAGE
    // ldr  x2, [x1, _foo@GOTPAGEOFF]
    // ldr  x3, [x2, #off]
    if (ldr2.baseRegister != adrp.destRegister)
      return;
    if (ldr3.baseRegister != ldr2.destRegister)
      return;
    // Loads from the GOT must be pointer sized.
    if (ldr2.p2Size != 3 || ldr2.isFloat)
      return;

    // Load the GOT entry's address directly.
    //   nop
    //   ldr x2, _foo@GOTPAGE + _foo@GOTPAGEOFF
    //   ldr x3, [x2, #off]
    Ldr literalLdr = ldr2;
    literalLdr.offset = rel1->referentVA - rel2->rel.offset - isec->getVA();
    if (isLiteralLdrEligible(literalLdr)) {
      writeNop(buf + hint.offset0);
      writeLiteralLdr(buf + hint.offset0 + hint.delta[0], literalLdr);
    }
  }
}

void ARM64::applyOptimizationHints(uint8_t *buf, const ConcatInputSection *isec,
                                   ArrayRef<uint64_t> relocTargets) const {
  assert(isec);
  assert(relocTargets.size() == isec->relocs.size());

  // Note: Some of these optimizations might not be valid when shared regions
  // are in use. Will need to revisit this if splitSegInfo is added.

  OptimizationHintContext ctx1(buf, isec, relocTargets);
  for (const OptimizationHint &hint : isec->optimizationHints) {
    switch (hint.type) {
    case LOH_ARM64_ADRP_ADRP:
      // This is done in another pass because the other optimization hints
      // might cause its targets to be turned into NOPs.
      break;
    case LOH_ARM64_ADRP_LDR:
      ctx1.applyAdrpLdr(hint);
      break;
    case LOH_ARM64_ADRP_ADD_LDR:
      // TODO: Implement this
      break;
    case LOH_ARM64_ADRP_LDR_GOT_LDR:
      ctx1.applyAdrpLdrGotLdr(hint);
      break;
    case LOH_ARM64_ADRP_ADD_STR:
    case LOH_ARM64_ADRP_LDR_GOT_STR:
      // TODO: Implement these
      break;
    case LOH_ARM64_ADRP_ADD:
      ctx1.applyAdrpAdd(hint);
      break;
    case LOH_ARM64_ADRP_LDR_GOT:
      ctx1.applyAdrpLdrGot(hint);
      break;
    }
  }

  OptimizationHintContext ctx2(buf, isec, relocTargets);
  for (const OptimizationHint &hint : isec->optimizationHints)
    if (hint.type == LOH_ARM64_ADRP_ADRP)
      ctx2.applyAdrpAdrp(hint);
}

TargetInfo *macho::createARM64TargetInfo() {
  static ARM64 t;
  return &t;
}