1 //===- lib/FileFormat/MachO/ArchHandler_x86_64.cpp ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
#include "ArchHandler.h"
#include "Atoms.h"
#include "MachONormalizedFileBinaryUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include <memory>
17
18 using namespace llvm::MachO;
19 using namespace lld::mach_o::normalized;
20
21 namespace lld {
22 namespace mach_o {
23
24 using llvm::support::ulittle32_t;
25 using llvm::support::ulittle64_t;
26
27 using llvm::support::little32_t;
28 using llvm::support::little64_t;
29
/// ArchHandler concrete subclass for the x86_64 architecture.  Knows how to
/// parse mach-o relocations into References, apply fixups to atom content,
/// and emit section relocations for relocatable output.
class ArchHandler_x86_64 : public ArchHandler {
public:
  ArchHandler_x86_64() = default;
  ~ArchHandler_x86_64() override = default;

  /// Table of kind-value <-> name mappings used by the Registry.
  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }

  Reference::KindArch kindArch() override {
    return Reference::KindArch::x86_64;
  }

  /// Used by GOTPass to locate GOT References
  bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
    if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
      return false;
    assert(ref.kindArch() == Reference::KindArch::x86_64);
    switch (ref.kindValue()) {
    case ripRel32GotLoad:
      // A GOT-load (movq _foo@GOTPCREL) can be rewritten to a LEA, so the
      // GOT entry is bypassable when the target is in the linkage unit.
      canBypassGOT = true;
      return true;
    case ripRel32Got:
      canBypassGOT = false;
      return true;
    case imageOffsetGot:
      canBypassGOT = false;
      return true;
    default:
      return false;
    }
  }

  /// Returns true if the reference is a thread-local-variable access.
  bool isTLVAccess(const Reference &ref) const override {
    assert(ref.kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref.kindArch() == Reference::KindArch::x86_64);
    return ref.kindValue() == ripRel32Tlv;
  }

  /// Rewrites a TLV access into a plain rip-relative load (used once the
  /// TLV pass has materialized the thunk).
  void updateReferenceToTLV(const Reference *ref) override {
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref->kindArch() == Reference::KindArch::x86_64);
    assert(ref->kindValue() == ripRel32Tlv);
    const_cast<Reference*>(ref)->setKindValue(ripRel32);
  }

  /// Used by GOTPass to update GOT References
  void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref->kindArch() == Reference::KindArch::x86_64);

    switch (ref->kindValue()) {
    case ripRel32Got:
      assert(targetNowGOT && "target must be GOT");
      LLVM_FALLTHROUGH;
    case ripRel32GotLoad:
      // Either point at the new GOT entry with a plain rip-relative load, or
      // (when the GOT entry was bypassed) turn the MOVQ into a LEA.
      const_cast<Reference *>(ref)
        ->setKindValue(targetNowGOT ? ripRel32 : ripRel32GotLoadNowLea);
      break;
    case imageOffsetGot:
      const_cast<Reference *>(ref)->setKindValue(imageOffset);
      break;
    default:
      llvm_unreachable("unknown GOT reference kind");
    }
  }

  bool needsCompactUnwind() override {
    return true;
  }

  Reference::KindValue imageOffsetKind() override {
    return imageOffset;
  }

  Reference::KindValue imageOffsetKindIndirect() override {
    return imageOffsetGot;
  }

  Reference::KindValue unwindRefToPersonalityFunctionKind() override {
    return ripRel32Got;
  }

  Reference::KindValue unwindRefToCIEKind() override {
    return negDelta32;
  }

  Reference::KindValue unwindRefToFunctionKind() override{
    return unwindFDEToFunction;
  }

  Reference::KindValue lazyImmediateLocationKind() override {
    return lazyImmediateLocation;
  }

  Reference::KindValue unwindRefToEhFrameKind() override {
    return unwindInfoToEhFrame;
  }

  Reference::KindValue pointerKind() override {
    return pointer64;
  }

  /// Compact-unwind encoding meaning "use the DWARF FDE instead".
  uint32_t dwarfCompactUnwindType() override {
    return 0x04000000U;
  }

  const StubInfo &stubInfo() override { return _sStubInfo; }

  bool isNonCallBranch(const Reference &) override {
    return false;
  }

  bool isCallSite(const Reference &) override;
  bool isPointer(const Reference &) override;
  bool isPairedReloc(const normalized::Relocation &) override;

  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
                               const DefinedAtom *inAtom,
                               uint32_t offsetInAtom,
                               uint64_t fixupAddress, bool swap,
                               FindAtomBySectionAndAddress atomFromAddress,
                               FindAtomBySymbolIndex atomFromSymbolIndex,
                               Reference::KindValue *kind,
                               const lld::Atom **target,
                               Reference::Addend *addend) override;
  llvm::Error
      getPairReferenceInfo(const normalized::Relocation &reloc1,
                           const normalized::Relocation &reloc2,
                           const DefinedAtom *inAtom,
                           uint32_t offsetInAtom,
                           uint64_t fixupAddress, bool swap, bool scatterable,
                           FindAtomBySectionAndAddress atomFromAddress,
                           FindAtomBySymbolIndex atomFromSymbolIndex,
                           Reference::KindValue *kind,
                           const lld::Atom **target,
                           Reference::Addend *addend) override;

  // C-string atoms are addressed anonymously, so they need a local symbol
  // when writing a relocatable object.
  bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
    return (atom->contentType() == DefinedAtom::typeCString);
  }

  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
                           FindAddressForAtom findAddress,
                           FindAddressForAtom findSectionAddress,
                           uint64_t imageBase,
                           llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;

  void appendSectionRelocations(const DefinedAtom &atom,
                                uint64_t atomSectionOffset,
                                const Reference &ref,
                                FindSymbolIndexForAtom symbolIndexForAtom,
                                FindSectionIndexForAtom sectionIndexForAtom,
                                FindAddressForAtom addressForAtom,
                                normalized::Relocations &relocs) override;

private:
  static const Registry::KindStrings _sKindStrings[];
  static const StubInfo              _sStubInfo;

  /// All Reference kinds this handler understands.  The first group maps
  /// 1:1 onto mach-o relocation patterns found in .o files; the second group
  /// is synthesized by linker passes (GOT, stubs, unwind-info).
  enum X86_64Kind: Reference::KindValue {
    invalid,               /// for error condition

    // Kinds found in mach-o .o files:
    branch32,              /// ex: call _foo
    ripRel32,              /// ex: movq _foo(%rip), %rax
    ripRel32Minus1,        /// ex: movb $0x12, _foo(%rip)
    ripRel32Minus2,        /// ex: movw $0x1234, _foo(%rip)
    ripRel32Minus4,        /// ex: movl $0x12345678, _foo(%rip)
    ripRel32Anon,          /// ex: movq L1(%rip), %rax
    ripRel32Minus1Anon,    /// ex: movb $0x12, L1(%rip)
    ripRel32Minus2Anon,    /// ex: movw $0x1234, L1(%rip)
    ripRel32Minus4Anon,    /// ex: movl $0x12345678, L1(%rip)
    ripRel32GotLoad,       /// ex: movq  _foo@GOTPCREL(%rip), %rax
    ripRel32Got,           /// ex: pushq _foo@GOTPCREL(%rip)
    ripRel32Tlv,           /// ex: movq  _foo@TLVP(%rip), %rdi
    pointer64,             /// ex: .quad _foo
    pointer64Anon,         /// ex: .quad L1
    delta64,               /// ex: .quad _foo - .
    delta32,               /// ex: .long _foo - .
    delta64Anon,           /// ex: .quad L1 - .
    delta32Anon,           /// ex: .long L1 - .
    negDelta64,            /// ex: .quad . - _foo
    negDelta32,            /// ex: .long . - _foo

    // Kinds introduced by Passes:
    ripRel32GotLoadNowLea, /// Target of GOT load is in linkage unit so
                           ///  "movq  _foo@GOTPCREL(%rip), %rax" can be changed
                           /// to "leaq _foo(%rip), %rax"
    lazyPointer,           /// Location contains a lazy pointer.
    lazyImmediateLocation, /// Location contains immediate value used in stub.

    imageOffset,           /// Location contains offset of atom in final image
    imageOffsetGot,        /// Location contains offset of GOT entry for atom in
                           /// final image (typically personality function).
    unwindFDEToFunction,   /// Nearly delta64, but cannot be rematerialized in
                           /// relocatable object (yay for implicit contracts!).
    unwindInfoToEhFrame,   /// Fix low 24 bits of compact unwind encoding to
                           /// refer to __eh_frame entry.
    tlvInitSectionOffset   /// Location contains offset tlv init-value atom
                           /// within the __thread_data section.
  };

  /// Maps a normalized mach-o relocation to one of the kinds above, or
  /// `invalid` if the pattern is not recognized.
  Reference::KindValue kindFromReloc(const normalized::Relocation &reloc);

  void applyFixupFinal(const Reference &ref, uint8_t *location,
                       uint64_t fixupAddress, uint64_t targetAddress,
                       uint64_t inAtomAddress, uint64_t imageBaseAddress,
                       FindAddressForAtom findSectionAddress);

  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
                             uint64_t fixupAddress,
                             uint64_t targetAddress,
                             uint64_t inAtomAddress);
};
243
// Name table for every X86_64Kind value, consumed by the Registry for
// diagnostics and YAML round-tripping.  Must be terminated with
// LLD_KIND_STRING_END.
const Registry::KindStrings ArchHandler_x86_64::_sKindStrings[] = {
  LLD_KIND_STRING_ENTRY(invalid), LLD_KIND_STRING_ENTRY(branch32),
  LLD_KIND_STRING_ENTRY(ripRel32), LLD_KIND_STRING_ENTRY(ripRel32Minus1),
  LLD_KIND_STRING_ENTRY(ripRel32Minus2), LLD_KIND_STRING_ENTRY(ripRel32Minus4),
  LLD_KIND_STRING_ENTRY(ripRel32Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus1Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus2Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus4Anon),
  LLD_KIND_STRING_ENTRY(ripRel32GotLoad),
  LLD_KIND_STRING_ENTRY(ripRel32GotLoadNowLea),
  LLD_KIND_STRING_ENTRY(ripRel32Got), LLD_KIND_STRING_ENTRY(ripRel32Tlv),
  LLD_KIND_STRING_ENTRY(lazyPointer),
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
  LLD_KIND_STRING_ENTRY(pointer64), LLD_KIND_STRING_ENTRY(pointer64Anon),
  LLD_KIND_STRING_ENTRY(delta32), LLD_KIND_STRING_ENTRY(delta64),
  LLD_KIND_STRING_ENTRY(delta32Anon), LLD_KIND_STRING_ENTRY(delta64Anon),
  LLD_KIND_STRING_ENTRY(negDelta64),
  LLD_KIND_STRING_ENTRY(negDelta32),
  LLD_KIND_STRING_ENTRY(imageOffset), LLD_KIND_STRING_ENTRY(imageOffsetGot),
  LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
  LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),
  LLD_KIND_STRING_ENTRY(tlvInitSectionOffset),
  LLD_KIND_STRING_END
};
268
// Positional-aggregate description of how the Stubs pass synthesizes stubs,
// lazy pointers, and the stub-helper trampolines for lazy binding on x86_64.
// NOTE(review): field order must match ArchHandler::StubInfo exactly — do not
// reorder entries.
const ArchHandler::StubInfo ArchHandler_x86_64::_sStubInfo = {
  "dyld_stub_binder",

  // Lazy pointer references
  { Reference::KindArch::x86_64, pointer64, 0, 0 },
  { Reference::KindArch::x86_64, lazyPointer, 0, 0 },

  // GOT pointer to dyld_stub_binder
  { Reference::KindArch::x86_64, pointer64, 0, 0 },

  // x86_64 code alignment 2^1
  1,

  // Stub size and code
  6,
  { 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 },       // jmp *lazyPointer
  { Reference::KindArch::x86_64, ripRel32, 2, 0 },
  { false, 0, 0, 0 },

  // Stub Helper size and code
  10,
  { 0x68, 0x00, 0x00, 0x00, 0x00,               // pushq $lazy-info-offset
    0xE9, 0x00, 0x00, 0x00, 0x00 },             // jmp helperhelper
  { Reference::KindArch::x86_64, lazyImmediateLocation, 1, 0 },
  { Reference::KindArch::x86_64, branch32, 6, 0 },

  // Stub helper image cache content type
  DefinedAtom::typeNonLazyPointer,

  // Stub Helper-Common size and code
  16,
  // Stub helper alignment
  2,
  { 0x4C, 0x8D, 0x1D, 0x00, 0x00, 0x00, 0x00,   // leaq cache(%rip),%r11
    0x41, 0x53,                                 // push %r11
    0xFF, 0x25, 0x00, 0x00, 0x00, 0x00,         // jmp *binder(%rip)
    0x90 },                                     // nop
  { Reference::KindArch::x86_64, ripRel32, 3, 0 },
  { false, 0, 0, 0 },
  { Reference::KindArch::x86_64, ripRel32, 11, 0 },
  { false, 0, 0, 0 }

};
312
isCallSite(const Reference & ref)313 bool ArchHandler_x86_64::isCallSite(const Reference &ref) {
314 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
315 return false;
316 assert(ref.kindArch() == Reference::KindArch::x86_64);
317 return (ref.kindValue() == branch32);
318 }
319
isPointer(const Reference & ref)320 bool ArchHandler_x86_64::isPointer(const Reference &ref) {
321 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
322 return false;
323 assert(ref.kindArch() == Reference::KindArch::x86_64);
324 Reference::KindValue kind = ref.kindValue();
325 return (kind == pointer64 || kind == pointer64Anon);
326 }
327
isPairedReloc(const Relocation & reloc)328 bool ArchHandler_x86_64::isPairedReloc(const Relocation &reloc) {
329 return (reloc.type == X86_64_RELOC_SUBTRACTOR);
330 }
331
332 Reference::KindValue
kindFromReloc(const Relocation & reloc)333 ArchHandler_x86_64::kindFromReloc(const Relocation &reloc) {
334 switch(relocPattern(reloc)) {
335 case X86_64_RELOC_BRANCH | rPcRel | rExtern | rLength4:
336 return branch32;
337 case X86_64_RELOC_SIGNED | rPcRel | rExtern | rLength4:
338 return ripRel32;
339 case X86_64_RELOC_SIGNED | rPcRel | rLength4:
340 return ripRel32Anon;
341 case X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4:
342 return ripRel32Minus1;
343 case X86_64_RELOC_SIGNED_1 | rPcRel | rLength4:
344 return ripRel32Minus1Anon;
345 case X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4:
346 return ripRel32Minus2;
347 case X86_64_RELOC_SIGNED_2 | rPcRel | rLength4:
348 return ripRel32Minus2Anon;
349 case X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4:
350 return ripRel32Minus4;
351 case X86_64_RELOC_SIGNED_4 | rPcRel | rLength4:
352 return ripRel32Minus4Anon;
353 case X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4:
354 return ripRel32GotLoad;
355 case X86_64_RELOC_GOT | rPcRel | rExtern | rLength4:
356 return ripRel32Got;
357 case X86_64_RELOC_TLV | rPcRel | rExtern | rLength4:
358 return ripRel32Tlv;
359 case X86_64_RELOC_UNSIGNED | rExtern | rLength8:
360 return pointer64;
361 case X86_64_RELOC_UNSIGNED | rLength8:
362 return pointer64Anon;
363 default:
364 return invalid;
365 }
366 }
367
/// Translate one (unpaired) mach-o relocation into a Reference: classify its
/// kind, resolve the target atom (by symbol index for extern relocs, or by
/// section address for anonymous ones), and extract the addend encoded in
/// the instruction bytes.  `swap` is unused on little-endian x86_64.
llvm::Error
ArchHandler_x86_64::getReferenceInfo(const Relocation &reloc,
                                     const DefinedAtom *inAtom,
                                     uint32_t offsetInAtom,
                                     uint64_t fixupAddress, bool swap,
                                     FindAtomBySectionAndAddress atomFromAddress,
                                     FindAtomBySymbolIndex atomFromSymbolIndex,
                                     Reference::KindValue *kind,
                                     const lld::Atom **target,
                                     Reference::Addend *addend) {
  *kind = kindFromReloc(reloc);
  if (*kind == invalid)
    return llvm::make_error<GenericError>("unknown type");
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
  switch (*kind) {
  case branch32:
  case ripRel32:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = *(const little32_t *)fixupContent;
    return llvm::Error::success();
  case ripRel32Minus1:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    // The assembler bakes -N into the instruction stream for SIGNED_N
    // relocs; undo that bias so the Reference addend is the logical one.
    *addend = (int32_t)*(const little32_t *)fixupContent + 1;
    return llvm::Error::success();
  case ripRel32Minus2:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = (int32_t)*(const little32_t *)fixupContent + 2;
    return llvm::Error::success();
  case ripRel32Minus4:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = (int32_t)*(const little32_t *)fixupContent + 4;
    return llvm::Error::success();
  case ripRel32Anon:
    // Anonymous: resolve the target by the address the instruction refers
    // to.  "+4" accounts for rip pointing past the 4-byte immediate.
    targetAddress = fixupAddress + 4 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus1Anon:
    // 4 bytes of immediate plus the 1-byte trailing operand.
    targetAddress = fixupAddress + 5 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus2Anon:
    targetAddress = fixupAddress + 6 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus4Anon:
    targetAddress = fixupAddress + 8 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32GotLoad:
  case ripRel32Got:
  case ripRel32Tlv:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    *addend = *(const little32_t *)fixupContent;
    return llvm::Error::success();
  case tlvInitSectionOffset:
  case pointer64:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
      return ec;
    // If this is the 3rd pointer of a tlv-thunk (i.e. the pointer to the TLV's
    // initial value) we need to handle it specially.
    if (inAtom->contentType() == DefinedAtom::typeThunkTLV &&
        offsetInAtom == 16) {
      *kind = tlvInitSectionOffset;
      assert(*addend == 0 && "TLV-init has non-zero addend?");
    } else
      *addend = *(const little64_t *)fixupContent;
    return llvm::Error::success();
  case pointer64Anon:
    targetAddress = *(const little64_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  default:
    llvm_unreachable("bad reloc kind");
  }
}
444
/// Translate a SUBTRACTOR/UNSIGNED relocation pair into a single delta or
/// negDelta Reference.  reloc1 (SUBTRACTOR) names the value being
/// subtracted; reloc2 (UNSIGNED) names the value being added.  Depending on
/// which side is the atom containing the fixup, the pair becomes a forward
/// delta (delta32/64) or a negated one (negDelta32/64).
llvm::Error
ArchHandler_x86_64::getPairReferenceInfo(const normalized::Relocation &reloc1,
                                         const normalized::Relocation &reloc2,
                                         const DefinedAtom *inAtom,
                                         uint32_t offsetInAtom,
                                         uint64_t fixupAddress, bool swap,
                                         bool scatterable,
                                         FindAtomBySectionAndAddress atomFromAddress,
                                         FindAtomBySymbolIndex atomFromSymbolIndex,
                                         Reference::KindValue *kind,
                                         const lld::Atom **target,
                                         Reference::Addend *addend) {
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
  const lld::Atom *fromTarget;
  // The SUBTRACTOR side is always extern; resolve its symbol first.
  if (auto ec = atomFromSymbolIndex(reloc1.symbol, &fromTarget))
    return ec;

  switch(relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
        X86_64_RELOC_UNSIGNED    | rExtern | rLength8): {
    // 8-byte extern - extern pair.
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    uint64_t encodedAddend = (int64_t)*(const little64_t *)fixupContent;
    if (inAtom == fromTarget) {
      // Fixup lives in the subtrahend's atom: a forward delta.  CFI atoms
      // get the special unwindFDEToFunction kind instead of plain delta64.
      if (inAtom->contentType() == DefinedAtom::typeCFI)
        *kind = unwindFDEToFunction;
      else
        *kind = delta64;
      *addend = encodedAddend + offsetInAtom;
    } else if (inAtom == *target) {
      // Fixup lives in the added atom: a negated delta, so swap the target.
      *kind = negDelta64;
      *addend = encodedAddend - offsetInAtom;
      *target = fromTarget;
    } else
      return llvm::make_error<GenericError>("Invalid pointer diff");
    return llvm::Error::success();
  }
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
        X86_64_RELOC_UNSIGNED    | rExtern | rLength4): {
    // 4-byte extern - extern pair; same logic as above.
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
      return ec;
    uint32_t encodedAddend = (int32_t)*(const little32_t *)fixupContent;
    if (inAtom == fromTarget) {
      *kind = delta32;
      *addend = encodedAddend + offsetInAtom;
    } else if (inAtom == *target) {
      *kind = negDelta32;
      *addend = encodedAddend - offsetInAtom;
      *target = fromTarget;
    } else
      return llvm::make_error<GenericError>("Invalid pointer diff");
    return llvm::Error::success();
  }
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
        X86_64_RELOC_UNSIGNED              | rLength8):
    // 8-byte delta to an anonymous target, resolved by section address.
    if (fromTarget != inAtom)
      return llvm::make_error<GenericError>("pointer diff not in base atom");
    *kind = delta64Anon;
    targetAddress = offsetInAtom + (int64_t)*(const little64_t *)fixupContent;
    return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
        X86_64_RELOC_UNSIGNED              | rLength4):
    // 4-byte delta to an anonymous target.
    if (fromTarget != inAtom)
      return llvm::make_error<GenericError>("pointer diff not in base atom");
    *kind = delta32Anon;
    targetAddress = offsetInAtom + (int32_t)*(const little32_t *)fixupContent;
    return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
  default:
    return llvm::make_error<GenericError>("unknown pair");
  }
}
517
generateAtomContent(const DefinedAtom & atom,bool relocatable,FindAddressForAtom findAddress,FindAddressForAtom findSectionAddress,uint64_t imageBaseAddress,llvm::MutableArrayRef<uint8_t> atomContentBuffer)518 void ArchHandler_x86_64::generateAtomContent(
519 const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
520 FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
521 llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
522 // Copy raw bytes.
523 std::copy(atom.rawContent().begin(), atom.rawContent().end(),
524 atomContentBuffer.begin());
525 // Apply fix-ups.
526 for (const Reference *ref : atom) {
527 uint32_t offset = ref->offsetInAtom();
528 const Atom *target = ref->target();
529 uint64_t targetAddress = 0;
530 if (isa<DefinedAtom>(target))
531 targetAddress = findAddress(*target);
532 uint64_t atomAddress = findAddress(atom);
533 uint64_t fixupAddress = atomAddress + offset;
534 if (relocatable) {
535 applyFixupRelocatable(*ref, &atomContentBuffer[offset],
536 fixupAddress, targetAddress,
537 atomAddress);
538 } else {
539 applyFixupFinal(*ref, &atomContentBuffer[offset],
540 fixupAddress, targetAddress,
541 atomAddress, imageBaseAddress, findSectionAddress);
542 }
543 }
544 }
545
/// Patch one fixup location for final (non-relocatable) output: all targets
/// have known addresses, so every kind is resolved to its concrete value.
void ArchHandler_x86_64::applyFixupFinal(
    const Reference &ref, uint8_t *loc, uint64_t fixupAddress,
    uint64_t targetAddress, uint64_t inAtomAddress, uint64_t imageBaseAddress,
    FindAddressForAtom findSectionAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
  case ripRel32:
  case ripRel32Anon:
  case ripRel32Got:
  case ripRel32GotLoad:
  case ripRel32Tlv:
    // rip points just past the 4-byte immediate.
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    return;
  case pointer64:
  case pointer64Anon:
    *loc64 = targetAddress + ref.addend();
    return;
  case tlvInitSectionOffset:
    // Offset of the init-value atom within its section (__thread_data).
    *loc64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
    return;
  case ripRel32Minus1:
  case ripRel32Minus1Anon:
    // Immediate is followed by 1 more operand byte before the next insn.
    *loc32 = targetAddress - (fixupAddress + 5) + ref.addend();
    return;
  case ripRel32Minus2:
  case ripRel32Minus2Anon:
    *loc32 = targetAddress - (fixupAddress + 6) + ref.addend();
    return;
  case ripRel32Minus4:
  case ripRel32Minus4Anon:
    *loc32 = targetAddress - (fixupAddress + 8) + ref.addend();
    return;
  case delta32:
  case delta32Anon:
    *loc32 = targetAddress - fixupAddress + ref.addend();
    return;
  case delta64:
  case delta64Anon:
  case unwindFDEToFunction:
    *loc64 = targetAddress - fixupAddress + ref.addend();
    return;
  case ripRel32GotLoadNowLea:
    // Change MOVQ to LEA: loc points at the 32-bit displacement, so the
    // opcode byte is two bytes back (0x8B = MOV r64,r/m64 -> 0x8D = LEA).
    assert(loc[-2] == 0x8B);
    loc[-2] = 0x8D;
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    return;
  case negDelta64:
    *loc64 = fixupAddress - targetAddress + ref.addend();
    return;
  case negDelta32:
    *loc32 = fixupAddress - targetAddress + ref.addend();
    return;
  case lazyPointer:
    // Do nothing
    return;
  case lazyImmediateLocation:
    *loc32 = ref.addend();
    return;
  case imageOffset:
  case imageOffsetGot:
    *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
    return;
  case unwindInfoToEhFrame: {
    // Splice the 24-bit __eh_frame section offset into the low bits of the
    // compact unwind encoding, preserving the high byte.
    uint64_t val = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
    assert(val < 0xffffffU && "offset in __eh_frame too large");
    *loc32 = (*loc32 & 0xff000000U) | val;
    return;
  }
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("invalid x86_64 Reference Kind");
}
626
/// Patch one fixup location for relocatable (-r) output: extern targets are
/// left for the emitted relocation to resolve (only the addend is written),
/// while anonymous targets are encoded as section-relative values.
void ArchHandler_x86_64::applyFixupRelocatable(const Reference &ref,
                                               uint8_t *loc,
                                               uint64_t fixupAddress,
                                               uint64_t targetAddress,
                                               uint64_t inAtomAddress)  {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
  case ripRel32:
  case ripRel32Got:
  case ripRel32GotLoad:
  case ripRel32Tlv:
    // Extern reloc will supply the target; just store the addend.
    *loc32 = ref.addend();
    return;
  case ripRel32Anon:
    *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
    return;
  case tlvInitSectionOffset:
  case pointer64:
    *loc64 = ref.addend();
    return;
  case pointer64Anon:
    *loc64 = targetAddress + ref.addend();
    return;
  case ripRel32Minus1:
    // Re-apply the -N bias that SIGNED_N relocations encode in the stream.
    *loc32 = ref.addend() - 1;
    return;
  case ripRel32Minus1Anon:
    *loc32 = (targetAddress - (fixupAddress + 5)) + ref.addend();
    return;
  case ripRel32Minus2:
    *loc32 = ref.addend() - 2;
    return;
  case ripRel32Minus2Anon:
    *loc32 = (targetAddress - (fixupAddress + 6)) + ref.addend();
    return;
  case ripRel32Minus4:
    *loc32 = ref.addend() - 4;
    return;
  case ripRel32Minus4Anon:
    *loc32 = (targetAddress - (fixupAddress + 8)) + ref.addend();
    return;
  case delta32:
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case delta32Anon:
    // The value we write here should be the delta to the target
    // after taking in to account the difference from the fixup back to the
    // last defined label
    // ie, if we have:
    // _base: ...
    // Lfixup: .quad Ltarget - .
    // ...
    // Ltarget:
    //
    // Then we want to encode the value (Ltarget + addend) - (LFixup - _base)
    *loc32 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    return;
  case delta64:
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case delta64Anon:
    // The value we write here should be the delta to the target
    // after taking in to account the difference from the fixup back to the
    // last defined label
    // ie, if we have:
    // _base: ...
    // Lfixup: .quad Ltarget - .
    // ...
    // Ltarget:
    //
    // Then we want to encode the value (Ltarget + addend) - (LFixup - _base)
    *loc64 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    return;
  case negDelta64:
    *loc64 = ref.addend() + fixupAddress - inAtomAddress;
    return;
  case negDelta32:
    *loc32 = ref.addend() + fixupAddress - inAtomAddress;
    return;
  case ripRel32GotLoadNowLea:
    llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
    return;
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    return;
  case imageOffset:
  case imageOffsetGot:
  case unwindInfoToEhFrame:
    llvm_unreachable("fixup implies __unwind_info");
    return;
  case unwindFDEToFunction:
    // Do nothing for now
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown x86_64 Reference Kind");
}
732
/// Emit the mach-o section relocation(s) that re-express one Reference in a
/// relocatable (-r) object.  Delta kinds emit a SUBTRACTOR/UNSIGNED pair
/// (SUBTRACTOR names the subtracted symbol and must come first); pass-only
/// kinds must never reach this point.
void ArchHandler_x86_64::appendSectionRelocations(
                                   const DefinedAtom &atom,
                                   uint64_t atomSectionOffset,
                                   const Reference &ref,
                                   FindSymbolIndexForAtom symbolIndexForAtom,
                                   FindSectionIndexForAtom sectionIndexForAtom,
                                   FindAddressForAtom addressForAtom,
                                   normalized::Relocations &relocs) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_BRANCH | rPcRel | rExtern | rLength4);
    return;
  case ripRel32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Anon:
    // Anonymous targets are identified by section index, not symbol.
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED | rPcRel           | rLength4 );
    return;
  case ripRel32Got:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_GOT | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32GotLoad:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Tlv:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_TLV | rPcRel | rExtern | rLength4 );
    return;
  case tlvInitSectionOffset:
  case pointer64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED  | rExtern | rLength8);
    return;
  case pointer64Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED | rLength8);
    return;
  case ripRel32Minus1:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Minus1Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_1 | rPcRel           | rLength4 );
    return;
  case ripRel32Minus2:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Minus2Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_2 | rPcRel           | rLength4 );
    return;
  case ripRel32Minus4:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4 );
    return;
  case ripRel32Minus4Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_4 | rPcRel           | rLength4 );
    return;
  case delta32:
    // SUBTRACTOR(this atom) first, then UNSIGNED(target).
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED  | rExtern | rLength4 );
    return;
  case delta32Anon:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED            | rLength4 );
    return;
  case delta64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED  | rExtern | rLength8 );
    return;
  case delta64Anon:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED            | rLength8 );
    return;
  case unwindFDEToFunction:
  case unwindInfoToEhFrame:
    // Not representable in a relocatable object; intentionally dropped.
    return;
  case negDelta32:
    // Negated delta: the roles of atom and target are swapped in the pair.
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength4 );
    return;
  case negDelta64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength8 );
    return;
  case ripRel32GotLoadNowLea:
    llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
    return;
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    return;
  case imageOffset:
  case imageOffsetGot:
    llvm_unreachable("__unwind_info references should have been resolved");
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown x86_64 Reference Kind");
}
859
create_x86_64()860 std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_x86_64() {
861 return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_x86_64());
862 }
863
864 } // namespace mach_o
865 } // namespace lld
866