1 //===- lib/FileFormat/MachO/ArchHandler_arm64.cpp -------------------------===//
2 //
3 // The LLVM Linker
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9
#include "ArchHandler.h"
#include "Atoms.h"
#include "MachONormalizedFileBinaryUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include <memory>
19
20 using namespace llvm::MachO;
21 using namespace lld::mach_o::normalized;
22
23 namespace lld {
24 namespace mach_o {
25
26 using llvm::support::ulittle32_t;
27 using llvm::support::ulittle64_t;
28
29 using llvm::support::little32_t;
30 using llvm::support::little64_t;
31
/// Mach-O arm64 (AArch64) concretization of ArchHandler.
///
/// Responsibilities visible in this file:
///  - map normalized mach-o relocations to/from lld Reference kinds
///    (getReferenceInfo / getPairReferenceInfo / appendSectionRelocations),
///  - apply fixups when emitting atom content (generateAtomContent),
///  - describe stubs, GOT access patterns, and unwind-reference kinds to
///    the respective passes.
class ArchHandler_arm64 : public ArchHandler {
public:
  ArchHandler_arm64() = default;
  ~ArchHandler_arm64() override = default;

  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }

  Reference::KindArch kindArch() override {
    return Reference::KindArch::AArch64;
  }

  /// Used by GOTPass to locate GOT References
  bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
    if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
      return false;
    assert(ref.kindArch() == Reference::KindArch::AArch64);
    switch (ref.kindValue()) {
    case gotPage21:
    case gotOffset12:
      // adrp/ldr GOT accesses can be rewritten to address the target
      // directly when the GOT slot is optimized away.
      canBypassGOT = true;
      return true;
    case delta32ToGOT:
    case unwindCIEToPersonalityFunction:
    case imageOffsetGot:
      // These kinds encode the location of the GOT slot itself, so the
      // slot must exist.
      canBypassGOT = false;
      return true;
    default:
      return false;
    }
  }

  /// Used by GOTPass to update GOT References.
  void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
    // If GOT slot was instantiated, transform:
    //   gotPage21/gotOffset12 -> page21/offset12scale8
    // If GOT slot optimized away, transform:
    //   gotPage21/gotOffset12 -> page21/addOffset12
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref->kindArch() == Reference::KindArch::AArch64);
    switch (ref->kindValue()) {
    case gotPage21:
      const_cast<Reference *>(ref)->setKindValue(page21);
      break;
    case gotOffset12:
      const_cast<Reference *>(ref)->setKindValue(targetNowGOT ?
                                                 offset12scale8 : addOffset12);
      break;
    case delta32ToGOT:
      const_cast<Reference *>(ref)->setKindValue(delta32);
      break;
    case imageOffsetGot:
      const_cast<Reference *>(ref)->setKindValue(imageOffset);
      break;
    default:
      llvm_unreachable("Not a GOT reference");
    }
  }

  const StubInfo &stubInfo() override { return _sStubInfo; }

  bool isCallSite(const Reference &) override;
  bool isNonCallBranch(const Reference &) override {
    return false;
  }

  bool isPointer(const Reference &) override;
  bool isPairedReloc(const normalized::Relocation &) override;

  bool needsCompactUnwind() override {
    return true;
  }
  Reference::KindValue imageOffsetKind() override {
    return imageOffset;
  }
  Reference::KindValue imageOffsetKindIndirect() override {
    return imageOffsetGot;
  }

  Reference::KindValue unwindRefToPersonalityFunctionKind() override {
    return unwindCIEToPersonalityFunction;
  }

  Reference::KindValue unwindRefToCIEKind() override {
    return negDelta32;
  }

  Reference::KindValue unwindRefToFunctionKind() override {
    return unwindFDEToFunction;
  }

  Reference::KindValue unwindRefToEhFrameKind() override {
    return unwindInfoToEhFrame;
  }

  Reference::KindValue pointerKind() override {
    return pointer64;
  }

  Reference::KindValue lazyImmediateLocationKind() override {
    return lazyImmediateLocation;
  }

  // Compact-unwind encoding used when a function's unwind info must be
  // expressed via DWARF in __eh_frame (UNWIND_ARM64_MODE_DWARF).
  uint32_t dwarfCompactUnwindType() override {
    return 0x03000000;
  }

  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
                               const DefinedAtom *inAtom,
                               uint32_t offsetInAtom,
                               uint64_t fixupAddress, bool isBig,
                               FindAtomBySectionAndAddress atomFromAddress,
                               FindAtomBySymbolIndex atomFromSymbolIndex,
                               Reference::KindValue *kind,
                               const lld::Atom **target,
                               Reference::Addend *addend) override;
  llvm::Error
      getPairReferenceInfo(const normalized::Relocation &reloc1,
                           const normalized::Relocation &reloc2,
                           const DefinedAtom *inAtom,
                           uint32_t offsetInAtom,
                           uint64_t fixupAddress, bool isBig, bool scatterable,
                           FindAtomBySectionAndAddress atomFromAddress,
                           FindAtomBySymbolIndex atomFromSymbolIndex,
                           Reference::KindValue *kind,
                           const lld::Atom **target,
                           Reference::Addend *addend) override;

  // C-string atoms keep a local symbol in -r output so later links can
  // re-associate relocations with them.
  bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
    return (atom->contentType() == DefinedAtom::typeCString);
  }

  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
                           FindAddressForAtom findAddress,
                           FindAddressForAtom findSectionAddress,
                           uint64_t imageBaseAddress,
                           llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;

  void appendSectionRelocations(const DefinedAtom &atom,
                                uint64_t atomSectionOffset,
                                const Reference &ref,
                                FindSymbolIndexForAtom symbolIndexForAtom,
                                FindSectionIndexForAtom sectionIndexForAtom,
                                FindAddressForAtom addressForAtom,
                                normalized::Relocations &relocs) override;

private:
  static const Registry::KindStrings _sKindStrings[];
  static const StubInfo _sStubInfo;

  /// Reference kinds understood by this handler.  The first group mirrors
  /// relocations found in mach-o .o files; the second group is synthesized
  /// by passes (GOT, stubs, compact unwind).
  enum Arm64Kind : Reference::KindValue {
    invalid,               /// for error condition

    // Kinds found in mach-o .o files:
    branch26,              /// ex: bl   _foo
    page21,                /// ex: adrp x1, _foo@PAGE
    offset12,              /// ex: ldrb w0, [x1, _foo@PAGEOFF]
    offset12scale2,        /// ex: ldrs w0, [x1, _foo@PAGEOFF]
    offset12scale4,        /// ex: ldr  w0, [x1, _foo@PAGEOFF]
    offset12scale8,        /// ex: ldr  x0, [x1, _foo@PAGEOFF]
    offset12scale16,       /// ex: ldr  q0, [x1, _foo@PAGEOFF]
    gotPage21,             /// ex: adrp x1, _foo@GOTPAGE
    gotOffset12,           /// ex: ldr  w0, [x1, _foo@GOTPAGEOFF]
    tlvPage21,             /// ex: adrp x1, _foo@TLVPAGE
    tlvOffset12,           /// ex: ldr  w0, [x1, _foo@TLVPAGEOFF]

    pointer64,             /// ex: .quad _foo
    delta64,               /// ex: .quad _foo - .
    delta32,               /// ex: .long _foo - .
    negDelta32,            /// ex: .long . - _foo
    pointer64ToGOT,        /// ex: .quad _foo@GOT
    delta32ToGOT,          /// ex: .long _foo@GOT - .

    // Kinds introduced by Passes:
    addOffset12,           /// Location contains LDR to change into ADD.
    lazyPointer,           /// Location contains a lazy pointer.
    lazyImmediateLocation, /// Location contains immediate value used in stub.
    imageOffset,           /// Location contains offset of atom in final image
    imageOffsetGot,        /// Location contains offset of GOT entry for atom in
                           /// final image (typically personality function).
    unwindCIEToPersonalityFunction,   /// Nearly delta32ToGOT, but cannot be
                           /// rematerialized in relocatable object
                           /// (yay for implicit contracts!).
    unwindFDEToFunction,   /// Nearly delta64, but cannot be rematerialized in
                           /// relocatable object (yay for implicit contracts!).
    unwindInfoToEhFrame,   /// Fix low 24 bits of compact unwind encoding to
                           /// refer to __eh_frame entry.
  };

  // Apply a fixup to content destined for a final linked image.
  void applyFixupFinal(const Reference &ref, uint8_t *location,
                       uint64_t fixupAddress, uint64_t targetAddress,
                       uint64_t inAtomAddress, uint64_t imageBaseAddress,
                       FindAddressForAtom findSectionAddress);

  // Apply a fixup to content destined for a relocatable (-r) object.
  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
                             uint64_t fixupAddress, uint64_t targetAddress,
                             uint64_t inAtomAddress, bool targetUnnamed);

  // Utility functions for inspecting/updating instructions.
  static uint32_t setDisplacementInBranch26(uint32_t instr, int32_t disp);
  static uint32_t setDisplacementInADRP(uint32_t instr, int64_t disp);
  static Arm64Kind offset12KindFromInstruction(uint32_t instr);
  static uint32_t setImm12(uint32_t instr, uint32_t offset);
};
235
// Name table for the Arm64Kind enum, used by the Registry to print and
// parse reference kinds (e.g. in YAML round-tripping and diagnostics).
// Order mirrors the enum declaration above.
const Registry::KindStrings ArchHandler_arm64::_sKindStrings[] = {
  LLD_KIND_STRING_ENTRY(invalid),
  LLD_KIND_STRING_ENTRY(branch26),
  LLD_KIND_STRING_ENTRY(page21),
  LLD_KIND_STRING_ENTRY(offset12),
  LLD_KIND_STRING_ENTRY(offset12scale2),
  LLD_KIND_STRING_ENTRY(offset12scale4),
  LLD_KIND_STRING_ENTRY(offset12scale8),
  LLD_KIND_STRING_ENTRY(offset12scale16),
  LLD_KIND_STRING_ENTRY(gotPage21),
  LLD_KIND_STRING_ENTRY(gotOffset12),
  LLD_KIND_STRING_ENTRY(tlvPage21),
  LLD_KIND_STRING_ENTRY(tlvOffset12),
  LLD_KIND_STRING_ENTRY(pointer64),
  LLD_KIND_STRING_ENTRY(delta64),
  LLD_KIND_STRING_ENTRY(delta32),
  LLD_KIND_STRING_ENTRY(negDelta32),
  LLD_KIND_STRING_ENTRY(pointer64ToGOT),
  LLD_KIND_STRING_ENTRY(delta32ToGOT),

  LLD_KIND_STRING_ENTRY(addOffset12),
  LLD_KIND_STRING_ENTRY(lazyPointer),
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
  LLD_KIND_STRING_ENTRY(imageOffset),
  LLD_KIND_STRING_ENTRY(imageOffsetGot),
  LLD_KIND_STRING_ENTRY(unwindCIEToPersonalityFunction),
  LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
  LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),

  LLD_KIND_STRING_END
};
267
// Static description of how to synthesize lazy-binding stubs for arm64.
// Instruction byte sequences are little-endian A64 encodings; the reference
// descriptors that follow each sequence tell the stubs pass which fixups to
// attach and at what offset within the sequence.
const ArchHandler::StubInfo ArchHandler_arm64::_sStubInfo = {
  "dyld_stub_binder",

  // Lazy pointer references
  { Reference::KindArch::AArch64, pointer64, 0, 0 },
  { Reference::KindArch::AArch64, lazyPointer, 0, 0 },

  // GOT pointer to dyld_stub_binder
  { Reference::KindArch::AArch64, pointer64, 0, 0 },

  // arm64 code alignment 2^1
  1,

  // Stub size and code
  12,
  { 0x10, 0x00, 0x00, 0x90,   // ADRP X16, lazy_pointer@page
    0x10, 0x02, 0x40, 0xF9,   // LDR  X16, [X16, lazy_pointer@pageoff]
    0x00, 0x02, 0x1F, 0xD6 }, // BR   X16
  { Reference::KindArch::AArch64, page21, 0, 0 },
  { true,         offset12scale8, 4, 0 },

  // Stub Helper size and code
  12,
  { 0x50, 0x00, 0x00, 0x18,   // LDR  W16, L0
    0x00, 0x00, 0x00, 0x14,   // B    helperhelper  (branch26 fixup below)
    0x00, 0x00, 0x00, 0x00 }, // L0: .long 0       (lazyImmediateLocation)
  { Reference::KindArch::AArch64, lazyImmediateLocation, 8, 0 },
  { Reference::KindArch::AArch64, branch26, 4, 0 },

  // Stub helper image cache content type
  DefinedAtom::typeGOT,

  // Stub Helper-Common size and code
  24,
  // Stub helper alignment
  2,
  { 0x11, 0x00, 0x00, 0x90,   // ADRP X17, dyld_ImageLoaderCache@page
    0x31, 0x02, 0x00, 0x91,   // ADD  X17, X17, dyld_ImageLoaderCache@pageoff
    0xF0, 0x47, 0xBF, 0xA9,   // STP  X16/X17, [SP, #-16]!
    0x10, 0x00, 0x00, 0x90,   // ADRP X16, _fast_lazy_bind@page
    0x10, 0x02, 0x40, 0xF9,   // LDR  X16, [X16,_fast_lazy_bind@pageoff]
    0x00, 0x02, 0x1F, 0xD6 }, // BR   X16
  { Reference::KindArch::AArch64, page21, 0, 0 },
  { true,         offset12, 4, 0 },
  { Reference::KindArch::AArch64, page21, 12, 0 },
  { true,         offset12scale8, 16, 0 }
};
315
isCallSite(const Reference & ref)316 bool ArchHandler_arm64::isCallSite(const Reference &ref) {
317 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
318 return false;
319 assert(ref.kindArch() == Reference::KindArch::AArch64);
320 return (ref.kindValue() == branch26);
321 }
322
isPointer(const Reference & ref)323 bool ArchHandler_arm64::isPointer(const Reference &ref) {
324 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
325 return false;
326 assert(ref.kindArch() == Reference::KindArch::AArch64);
327 Reference::KindValue kind = ref.kindValue();
328 return (kind == pointer64);
329 }
330
isPairedReloc(const Relocation & r)331 bool ArchHandler_arm64::isPairedReloc(const Relocation &r) {
332 return ((r.type == ARM64_RELOC_ADDEND) || (r.type == ARM64_RELOC_SUBTRACTOR));
333 }
334
setDisplacementInBranch26(uint32_t instr,int32_t displacement)335 uint32_t ArchHandler_arm64::setDisplacementInBranch26(uint32_t instr,
336 int32_t displacement) {
337 assert((displacement <= 134217727) && (displacement > (-134217728)) &&
338 "arm64 branch out of range");
339 return (instr & 0xFC000000) | ((uint32_t)(displacement >> 2) & 0x03FFFFFF);
340 }
341
setDisplacementInADRP(uint32_t instruction,int64_t displacement)342 uint32_t ArchHandler_arm64::setDisplacementInADRP(uint32_t instruction,
343 int64_t displacement) {
344 assert((displacement <= 0x100000000LL) && (displacement > (-0x100000000LL)) &&
345 "arm64 ADRP out of range");
346 assert(((instruction & 0x9F000000) == 0x90000000) &&
347 "reloc not on ADRP instruction");
348 uint32_t immhi = (displacement >> 9) & (0x00FFFFE0);
349 uint32_t immlo = (displacement << 17) & (0x60000000);
350 return (instruction & 0x9F00001F) | immlo | immhi;
351 }
352
353 ArchHandler_arm64::Arm64Kind
offset12KindFromInstruction(uint32_t instruction)354 ArchHandler_arm64::offset12KindFromInstruction(uint32_t instruction) {
355 if (instruction & 0x08000000) {
356 switch ((instruction >> 30) & 0x3) {
357 case 0:
358 if ((instruction & 0x04800000) == 0x04800000)
359 return offset12scale16;
360 return offset12;
361 case 1:
362 return offset12scale2;
363 case 2:
364 return offset12scale4;
365 case 3:
366 return offset12scale8;
367 }
368 }
369 return offset12;
370 }
371
setImm12(uint32_t instruction,uint32_t offset)372 uint32_t ArchHandler_arm64::setImm12(uint32_t instruction, uint32_t offset) {
373 assert(((offset & 0xFFFFF000) == 0) && "imm12 offset out of range");
374 uint32_t imm12 = offset << 10;
375 return (instruction & 0xFFC003FF) | imm12;
376 }
377
/// Decode a single (non-paired) arm64 relocation from a .o file into a
/// Reference kind, target atom, and addend.  Returns an error for any
/// relocation pattern this handler does not recognize.
llvm::Error ArchHandler_arm64::getReferenceInfo(
    const Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom,
    uint64_t fixupAddress, bool isBig,
    FindAtomBySectionAndAddress atomFromAddress,
    FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
    const lld::Atom **target, Reference::Addend *addend) {
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  switch (relocPattern(reloc)) {
  case ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4:
    // ex: bl _foo
    *kind = branch26;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = 0;
    return llvm::Error::success();
  case ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4:
    // ex: adrp x1, _foo@PAGE
    *kind = page21;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = 0;
    return llvm::Error::success();
  case ARM64_RELOC_PAGEOFF12 | rExtern | rLength4:
    // ex: ldr x0, [x1, _foo@PAGEOFF]
    // The scale is not encoded in the relocation; recover it from the
    // instruction's size bits.
    *kind = offset12KindFromInstruction(*(const little32_t *)fixupContent);
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = 0;
    return llvm::Error::success();
  case ARM64_RELOC_GOT_LOAD_PAGE21 | rPcRel | rExtern | rLength4:
    // ex: adrp x1, _foo@GOTPAGE
    *kind = gotPage21;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = 0;
    return llvm::Error::success();
  case ARM64_RELOC_GOT_LOAD_PAGEOFF12 | rExtern | rLength4:
    // ex: ldr x0, [x1, _foo@GOTPAGEOFF]
    *kind = gotOffset12;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = 0;
    return llvm::Error::success();
  case ARM64_RELOC_TLVP_LOAD_PAGE21 | rPcRel | rExtern | rLength4:
    // ex: adrp x1, _foo@TLVPAGE
    *kind = tlvPage21;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = 0;
    return llvm::Error::success();
  case ARM64_RELOC_TLVP_LOAD_PAGEOFF12 | rExtern | rLength4:
    // ex: ldr x0, [x1, _foo@TLVPAGEOFF]
    *kind = tlvOffset12;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = 0;
    return llvm::Error::success();
  case ARM64_RELOC_UNSIGNED | rExtern | rLength8:
    // ex: .quad _foo + N
    // The addend is stored in the fixup location itself.
    *kind = pointer64;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = *(const little64_t *)fixupContent;
    return llvm::Error::success();
  case ARM64_RELOC_UNSIGNED | rLength8:
    // ex: .quad Lfoo + N
    // Non-extern: reloc.symbol is a section index; resolve via address.
    *kind = pointer64;
    return atomFromAddress(reloc.symbol, *(const little64_t *)fixupContent,
                           target, addend);
  case ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8:
    // ex: .quad _foo@GOT
    *kind = pointer64ToGOT;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = 0;
    return llvm::Error::success();
  case ARM64_RELOC_POINTER_TO_GOT | rPcRel | rExtern | rLength4:
    // ex: .long _foo@GOT - .

    // If we are in an .eh_frame section, then the kind of the relocation should
    // not be delta32ToGOT.  It may instead be unwindCIEToPersonalityFunction.
    if (inAtom->contentType() == DefinedAtom::typeCFI)
      *kind = unwindCIEToPersonalityFunction;
    else
      *kind = delta32ToGOT;

    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = 0;
    return llvm::Error::success();
  default:
    return llvm::make_error<GenericError>("unsupported arm64 relocation type");
  }
}
472
/// Decode a two-relocation pair (ADDEND+reloc, or SUBTRACTOR+UNSIGNED)
/// into a single Reference.  For ADDEND pairs the addend travels in
/// reloc1.symbol; for SUBTRACTOR pairs it is read from the fixup bytes.
llvm::Error ArchHandler_arm64::getPairReferenceInfo(
    const normalized::Relocation &reloc1, const normalized::Relocation &reloc2,
    const DefinedAtom *inAtom, uint32_t offsetInAtom, uint64_t fixupAddress,
    bool swap, bool scatterable, FindAtomBySectionAndAddress atomFromAddress,
    FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
    const lld::Atom **target, Reference::Addend *addend) {
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  switch (relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
  case ((ARM64_RELOC_ADDEND                                 | rLength4) << 16 |
         ARM64_RELOC_BRANCH26           | rPcRel | rExtern  | rLength4):
    // ex: bl _foo+8
    *kind = branch26;
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    // ADDEND relocations carry the addend in their symbol field.
    *addend = reloc1.symbol;
    return llvm::Error::success();
  case ((ARM64_RELOC_ADDEND                                 | rLength4) << 16 |
         ARM64_RELOC_PAGE21             | rPcRel | rExtern  | rLength4):
    // ex: adrp x1, _foo@PAGE
    *kind = page21;
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    *addend = reloc1.symbol;
    return llvm::Error::success();
  case ((ARM64_RELOC_ADDEND                                 | rLength4) << 16 |
         ARM64_RELOC_PAGEOFF12                   | rExtern  | rLength4): {
    // ex: ldr w0, [x1, _foo@PAGEOFF]
    uint32_t cont32 = (int32_t)*(const little32_t *)fixupContent;
    *kind = offset12KindFromInstruction(cont32);
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    *addend = reloc1.symbol;
    return llvm::Error::success();
  }
  case ((ARM64_RELOC_SUBTRACTOR                  | rExtern  | rLength8) << 16 |
         ARM64_RELOC_UNSIGNED                    | rExtern  | rLength8):
    // ex: .quad _foo - .
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;

    // If we are in an .eh_frame section, then the kind of the relocation should
    // not be delta64.  It may instead be unwindFDEToFunction.
    if (inAtom->contentType() == DefinedAtom::typeCFI)
      *kind = unwindFDEToFunction;
    else
      *kind = delta64;

    // The offsets of the 2 relocations must match
    if (reloc1.offset != reloc2.offset)
      return llvm::make_error<GenericError>(
                                    "paired relocs must have the same offset");
    *addend = (int64_t)*(const little64_t *)fixupContent + offsetInAtom;
    return llvm::Error::success();
  case ((ARM64_RELOC_SUBTRACTOR                  | rExtern  | rLength4) << 16 |
         ARM64_RELOC_UNSIGNED                    | rExtern  | rLength4):
    // ex: .long _foo - .
    *kind = delta32;
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    *addend = (int32_t)*(const little32_t *)fixupContent + offsetInAtom;
    return llvm::Error::success();
  default:
    return llvm::make_error<GenericError>("unsupported arm64 relocation pair");
  }
}
538
/// Copy an atom's raw bytes into the output buffer and apply all of its
/// fixups, dispatching to the relocatable (-r) or final-image flavor.
void ArchHandler_arm64::generateAtomContent(
    const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
    FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
    llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
  // Copy raw bytes.
  std::copy(atom.rawContent().begin(), atom.rawContent().end(),
            atomContentBuffer.begin());
  // Apply fix-ups.
#ifndef NDEBUG
  if (atom.begin() != atom.end()) {
    // NOTE(review): the "address" field formats &atom — the atom's host
    // pointer, not its image address; confirm this is intentional.
    DEBUG_WITH_TYPE("atom-content", llvm::dbgs()
                    << "Applying fixups to atom:\n"
                    << "   address="
                    << llvm::format("    0x%09lX", &atom)
                    << ", file=#"
                    << atom.file().ordinal()
                    << ", atom=#"
                    << atom.ordinal()
                    << ", name="
                    << atom.name()
                    << ", type="
                    << atom.contentType()
                    << "\n");
  }
#endif
  for (const Reference *ref : atom) {
    uint32_t offset = ref->offsetInAtom();
    const Atom *target = ref->target();
    bool targetUnnamed = target->name().empty();
    uint64_t targetAddress = 0;
    // Only defined atoms have addresses; undefined/shared targets keep 0.
    if (isa<DefinedAtom>(target))
      targetAddress = findAddress(*target);
    uint64_t atomAddress = findAddress(atom);
    uint64_t fixupAddress = atomAddress + offset;
    if (relocatable) {
      applyFixupRelocatable(*ref, &atomContentBuffer[offset], fixupAddress,
                            targetAddress, atomAddress, targetUnnamed);
    } else {
      applyFixupFinal(*ref, &atomContentBuffer[offset], fixupAddress,
                      targetAddress, atomAddress, imageBaseAddress,
                      findSectionAddress);
    }
  }
}
583
applyFixupFinal(const Reference & ref,uint8_t * loc,uint64_t fixupAddress,uint64_t targetAddress,uint64_t inAtomAddress,uint64_t imageBaseAddress,FindAddressForAtom findSectionAddress)584 void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *loc,
585 uint64_t fixupAddress,
586 uint64_t targetAddress,
587 uint64_t inAtomAddress,
588 uint64_t imageBaseAddress,
589 FindAddressForAtom findSectionAddress) {
590 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
591 return;
592 assert(ref.kindArch() == Reference::KindArch::AArch64);
593 ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
594 ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
595 int32_t displacement;
596 uint32_t instruction;
597 uint32_t value32;
598 uint32_t value64;
599 switch (static_cast<Arm64Kind>(ref.kindValue())) {
600 case branch26:
601 displacement = (targetAddress - fixupAddress) + ref.addend();
602 *loc32 = setDisplacementInBranch26(*loc32, displacement);
603 return;
604 case page21:
605 case gotPage21:
606 case tlvPage21:
607 displacement =
608 ((targetAddress + ref.addend()) & (-4096)) - (fixupAddress & (-4096));
609 *loc32 = setDisplacementInADRP(*loc32, displacement);
610 return;
611 case offset12:
612 case gotOffset12:
613 case tlvOffset12:
614 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
615 *loc32 = setImm12(*loc32, displacement);
616 return;
617 case offset12scale2:
618 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
619 assert(((displacement & 0x1) == 0) &&
620 "scaled imm12 not accessing 2-byte aligneds");
621 *loc32 = setImm12(*loc32, displacement >> 1);
622 return;
623 case offset12scale4:
624 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
625 assert(((displacement & 0x3) == 0) &&
626 "scaled imm12 not accessing 4-byte aligned");
627 *loc32 = setImm12(*loc32, displacement >> 2);
628 return;
629 case offset12scale8:
630 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
631 assert(((displacement & 0x7) == 0) &&
632 "scaled imm12 not accessing 8-byte aligned");
633 *loc32 = setImm12(*loc32, displacement >> 3);
634 return;
635 case offset12scale16:
636 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
637 assert(((displacement & 0xF) == 0) &&
638 "scaled imm12 not accessing 16-byte aligned");
639 *loc32 = setImm12(*loc32, displacement >> 4);
640 return;
641 case addOffset12:
642 instruction = *loc32;
643 assert(((instruction & 0xFFC00000) == 0xF9400000) &&
644 "GOT reloc is not an LDR instruction");
645 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
646 value32 = 0x91000000 | (instruction & 0x000003FF);
647 instruction = setImm12(value32, displacement);
648 *loc32 = instruction;
649 return;
650 case pointer64:
651 case pointer64ToGOT:
652 *loc64 = targetAddress + ref.addend();
653 return;
654 case delta64:
655 case unwindFDEToFunction:
656 *loc64 = (targetAddress - fixupAddress) + ref.addend();
657 return;
658 case delta32:
659 case delta32ToGOT:
660 case unwindCIEToPersonalityFunction:
661 *loc32 = (targetAddress - fixupAddress) + ref.addend();
662 return;
663 case negDelta32:
664 *loc32 = fixupAddress - targetAddress + ref.addend();
665 return;
666 case lazyPointer:
667 // Do nothing
668 return;
669 case lazyImmediateLocation:
670 *loc32 = ref.addend();
671 return;
672 case imageOffset:
673 *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
674 return;
675 case imageOffsetGot:
676 llvm_unreachable("imageOffsetGot should have been changed to imageOffset");
677 break;
678 case unwindInfoToEhFrame:
679 value64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
680 assert(value64 < 0xffffffU && "offset in __eh_frame too large");
681 *loc32 = (*loc32 & 0xff000000U) | value64;
682 return;
683 case invalid:
684 // Fall into llvm_unreachable().
685 break;
686 }
687 llvm_unreachable("invalid arm64 Reference Kind");
688 }
689
/// Write a fixup into content destined for a relocatable (-r) object.
/// Addends stay in the instruction/data bytes; the actual addresses are
/// resolved by a later link, so most fields are written as zero or as
/// atom-relative values that pair with the emitted section relocations.
void ArchHandler_arm64::applyFixupRelocatable(const Reference &ref,
                                              uint8_t *loc,
                                              uint64_t fixupAddress,
                                              uint64_t targetAddress,
                                              uint64_t inAtomAddress,
                                              bool targetUnnamed) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::AArch64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
  case branch26:
    // Zero displacement; an ADDEND reloc carries any addend.
    *loc32 = setDisplacementInBranch26(*loc32, 0);
    return;
  case page21:
  case gotPage21:
  case tlvPage21:
    *loc32 = setDisplacementInADRP(*loc32, 0);
    return;
  case offset12:
  case offset12scale2:
  case offset12scale4:
  case offset12scale8:
  case offset12scale16:
  case gotOffset12:
  case tlvOffset12:
    *loc32 = setImm12(*loc32, 0);
    return;
  case pointer64:
    // Unnamed targets are addressed section-relatively, so the in-place
    // value must include the target's address.
    if (targetUnnamed)
      *loc64 = targetAddress + ref.addend();
    else
      *loc64 = ref.addend();
    return;
  case delta64:
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case unwindFDEToFunction:
    // We don't emit unwindFDEToFunction in -r mode as they are implicitly
    // generated from the data in the __eh_frame section.  So here we need
    // to use the targetAddress so that we can generate the full relocation
    // when we parse again later.
    *loc64 = targetAddress - fixupAddress;
    return;
  case delta32:
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case negDelta32:
    // We don't emit negDelta32 in -r mode as they are implicitly
    // generated from the data in the __eh_frame section.  So here we need
    // to use the targetAddress so that we can generate the full relocation
    // when we parse again later.
    *loc32 = fixupAddress - targetAddress + ref.addend();
    return;
  case pointer64ToGOT:
    *loc64 = 0;
    return;
  case delta32ToGOT:
    *loc32 = inAtomAddress - fixupAddress;
    return;
  case unwindCIEToPersonalityFunction:
    // We don't emit unwindCIEToPersonalityFunction in -r mode as they are
    // implicitly generated from the data in the __eh_frame section.  So here we
    // need to use the targetAddress so that we can generate the full relocation
    // when we parse again later.
    *loc32 = targetAddress - fixupAddress;
    return;
  case addOffset12:
    llvm_unreachable("lazy reference kind implies GOT pass was run");
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
  case imageOffset:
  case imageOffsetGot:
  case unwindInfoToEhFrame:
    llvm_unreachable("fixup implies __unwind_info");
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown arm64 Reference Kind");
}
774
/// Emit the normalized section relocation(s) describing one Reference when
/// writing a relocatable (-r) object.  This is the inverse of
/// getReferenceInfo/getPairReferenceInfo: branch26/page21/pageoff12 kinds
/// with a non-zero addend become an ADDEND+reloc pair, and the delta kinds
/// become SUBTRACTOR+UNSIGNED pairs.
void ArchHandler_arm64::appendSectionRelocations(
    const DefinedAtom &atom, uint64_t atomSectionOffset, const Reference &ref,
    FindSymbolIndexForAtom symbolIndexForAtom,
    FindSectionIndexForAtom sectionIndexForAtom,
    FindAddressForAtom addressForAtom, normalized::Relocations &relocs) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::AArch64);
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
  case branch26:
    if (ref.addend()) {
      // ADDEND relocations carry the addend in their symbol field and
      // must immediately precede the relocation they modify.
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
                  ARM64_RELOC_ADDEND | rLength4);
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
     } else {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
    }
    return;
  case page21:
    if (ref.addend()) {
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
                  ARM64_RELOC_ADDEND | rLength4);
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
     } else {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
    }
    return;
  case offset12:
  case offset12scale2:
  case offset12scale4:
  case offset12scale8:
  case offset12scale16:
    // All scales collapse to PAGEOFF12; the reader recovers the scale
    // from the instruction bytes.
    if (ref.addend()) {
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
                  ARM64_RELOC_ADDEND | rLength4);
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGEOFF12  | rExtern | rLength4);
     } else {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGEOFF12 | rExtern | rLength4);
    }
    return;
  case gotPage21:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_GOT_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
    return;
  case gotOffset12:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_GOT_LOAD_PAGEOFF12 | rExtern | rLength4);
    return;
  case tlvPage21:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_TLVP_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
    return;
  case tlvOffset12:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_TLVP_LOAD_PAGEOFF12 | rExtern | rLength4);
    return;
  case pointer64:
    // Unnamed targets get a non-extern, section-relative relocation.
    if (ref.target()->name().empty())
      appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_UNSIGNED           | rLength8);
    else
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_UNSIGNED | rExtern | rLength8);
    return;
  case delta64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength8);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_UNSIGNED  | rExtern | rLength8);
    return;
  case delta32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_UNSIGNED   | rExtern | rLength4 );
    return;
  case pointer64ToGOT:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8);
    return;
  case delta32ToGOT:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_POINTER_TO_GOT | rPcRel | rExtern | rLength4);
    return;
  case addOffset12:
    llvm_unreachable("lazy reference kind implies GOT pass was run");
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
  case imageOffset:
  case imageOffsetGot:
    llvm_unreachable("deltas from mach_header can only be in final images");
  case unwindCIEToPersonalityFunction:
  case unwindFDEToFunction:
  case unwindInfoToEhFrame:
  case negDelta32:
    // Do nothing.  These are regenerated from __eh_frame/__unwind_info
    // when the -r output is parsed again.
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown arm64 Reference Kind");
}
892
create_arm64()893 std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_arm64() {
894 return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_arm64());
895 }
896
897 } // namespace mach_o
898 } // namespace lld
899