1 //===- PPC64.cpp ----------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "InputFiles.h"
10 #include "OutputSections.h"
11 #include "SymbolTable.h"
12 #include "Symbols.h"
13 #include "SyntheticSections.h"
14 #include "Target.h"
15 #include "Thunks.h"
16 #include "lld/Common/CommonLinkerContext.h"
17 #include "llvm/Support/Endian.h"
18
19 using namespace llvm;
20 using namespace llvm::object;
21 using namespace llvm::support::endian;
22 using namespace llvm::ELF;
23 using namespace lld;
24 using namespace lld::elf;
25
26 constexpr uint64_t ppc64TocOffset = 0x8000;
27 constexpr uint64_t dynamicThreadPointerOffset = 0x8000;
28
29 // Instruction encodings usable in the initial exec TLS sequence: bits 21-30
30 // (the extended opcode) for the X-form and bits 0-5 for the D-form instructions.
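// For example, the X-form extended opcode can be recovered from a full encoding
// with (encoding & 0x7fe) >> 1, which is exactly how relaxTlsIeToLe() extracts
// the secondary opcode below.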
31 enum XFormOpcd {
32 LBZX = 87,
33 LHZX = 279,
34 LWZX = 23,
35 LDX = 21,
36 STBX = 215,
37 STHX = 407,
38 STWX = 151,
39 STDX = 149,
40 ADD = 266,
41 };
42
43 enum DFormOpcd {
44 LBZ = 34,
45 LBZU = 35,
46 LHZ = 40,
47 LHZU = 41,
48 LHAU = 43,
49 LWZ = 32,
50 LWZU = 33,
51 LFSU = 49,
52 LD = 58,
53 LFDU = 51,
54 STB = 38,
55 STBU = 39,
56 STH = 44,
57 STHU = 45,
58 STW = 36,
59 STWU = 37,
60 STFSU = 53,
61 STFDU = 55,
62 STD = 62,
63 ADDI = 14
64 };
65
66 constexpr uint32_t NOP = 0x60000000;
67
68 enum class PPCLegacyInsn : uint32_t {
69 NOINSN = 0,
70 // Loads.
71 LBZ = 0x88000000,
72 LHZ = 0xa0000000,
73 LWZ = 0x80000000,
74 LHA = 0xa8000000,
75 LWA = 0xe8000002,
76 LD = 0xe8000000,
77 LFS = 0xC0000000,
78 LXSSP = 0xe4000003,
79 LFD = 0xc8000000,
80 LXSD = 0xe4000002,
81 LXV = 0xf4000001,
82 LXVP = 0x18000000,
83
84 // Stores.
85 STB = 0x98000000,
86 STH = 0xb0000000,
87 STW = 0x90000000,
88 STD = 0xf8000000,
89 STFS = 0xd0000000,
90 STXSSP = 0xf4000003,
91 STFD = 0xd8000000,
92 STXSD = 0xf4000002,
93 STXV = 0xf4000005,
94 STXVP = 0x18000001
95 };
96 enum class PPCPrefixedInsn : uint64_t {
97 NOINSN = 0,
98 PREFIX_MLS = 0x0610000000000000,
99 PREFIX_8LS = 0x0410000000000000,
100
101 // Loads.
102 PLBZ = PREFIX_MLS,
103 PLHZ = PREFIX_MLS,
104 PLWZ = PREFIX_MLS,
105 PLHA = PREFIX_MLS,
106 PLWA = PREFIX_8LS | 0xa4000000,
107 PLD = PREFIX_8LS | 0xe4000000,
108 PLFS = PREFIX_MLS,
109 PLXSSP = PREFIX_8LS | 0xac000000,
110 PLFD = PREFIX_MLS,
111 PLXSD = PREFIX_8LS | 0xa8000000,
112 PLXV = PREFIX_8LS | 0xc8000000,
113 PLXVP = PREFIX_8LS | 0xe8000000,
114
115 // Stores.
116 PSTB = PREFIX_MLS,
117 PSTH = PREFIX_MLS,
118 PSTW = PREFIX_MLS,
119 PSTD = PREFIX_8LS | 0xf4000000,
120 PSTFS = PREFIX_MLS,
121 PSTXSSP = PREFIX_8LS | 0xbc000000,
122 PSTFD = PREFIX_MLS,
123 PSTXSD = PREFIX_8LS | 0xb8000000,
124 PSTXV = PREFIX_8LS | 0xd8000000,
125 PSTXVP = PREFIX_8LS | 0xf8000000
126 };
127 static bool checkPPCLegacyInsn(uint32_t encoding) {
128 PPCLegacyInsn insn = static_cast<PPCLegacyInsn>(encoding);
129 if (insn == PPCLegacyInsn::NOINSN)
130 return false;
131 #define PCREL_OPT(Legacy, PCRel, InsnMask) \
132 if (insn == PPCLegacyInsn::Legacy) \
133 return true;
134 #include "PPCInsns.def"
135 #undef PCREL_OPT
136 return false;
137 }
138
139 // Masks to apply to legacy instructions when converting them to prefixed,
140 // pc-relative versions. For the most part, the primary opcode is shared
141 // between the legacy instruction and the suffix of its prefixed version.
142 // However, there are some instances where that isn't the case (DS-Form and
143 // DQ-form instructions).
144 enum class LegacyToPrefixMask : uint64_t {
145 NOMASK = 0x0,
146 OPC_AND_RST = 0xffe00000, // Primary opc (0-5) and R[ST] (6-10).
147 ONLY_RST = 0x3e00000, // [RS]T (6-10).
148 ST_STX28_TO5 =
149 0x8000000003e00000, // S/T (6-10) - The [S/T]X bit moves from 28 to 5.
150 };
151
152 namespace {
153 class PPC64 final : public TargetInfo {
154 public:
155 PPC64();
156 int getTlsGdRelaxSkip(RelType type) const override;
157 uint32_t calcEFlags() const override;
158 RelExpr getRelExpr(RelType type, const Symbol &s,
159 const uint8_t *loc) const override;
160 RelType getDynRel(RelType type) const override;
161 int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
162 void writePltHeader(uint8_t *buf) const override;
163 void writePlt(uint8_t *buf, const Symbol &sym,
164 uint64_t pltEntryAddr) const override;
165 void writeIplt(uint8_t *buf, const Symbol &sym,
166 uint64_t pltEntryAddr) const override;
167 void relocate(uint8_t *loc, const Relocation &rel,
168 uint64_t val) const override;
169 void writeGotHeader(uint8_t *buf) const override;
170 bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
171 uint64_t branchAddr, const Symbol &s,
172 int64_t a) const override;
173 uint32_t getThunkSectionSpacing() const override;
174 bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
175 RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
176 RelExpr adjustGotPcExpr(RelType type, int64_t addend,
177 const uint8_t *loc) const override;
178 void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const;
179 void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
180
181 bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
182 uint8_t stOther) const override;
183
184 private:
185 void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
186 void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
187 void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
188 void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
189 };
190 } // namespace
191
192 uint64_t elf::getPPC64TocBase() {
193 // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
194 // TOC starts where the first of these sections starts. We always create a
195 // .got when we see a relocation that uses it, so for us the start is always
196 // the .got.
197 uint64_t tocVA = in.got->getVA();
198
199 // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
200 // thus permitting a full 64 KB segment. Note that the glibc startup
201 // code (crt1.o) assumes that you can get from the TOC base to the
202 // start of the .toc section with only a single (signed) 16-bit relocation.
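// With the bias, a signed 16-bit offset from the TOC base covers
// [tocVA, tocVA + 0x10000), i.e. the full 64 KB window mentioned above.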
203 return tocVA + ppc64TocOffset;
204 }
205
206 unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther) {
207 // The offset is encoded into the 3 most significant bits of the st_other
208 // field, with some special values described in section 3.4.1 of the ABI:
209 // 0 --> Zero offset between the GEP and LEP, and the function does NOT use
210 // the TOC pointer (r2). r2 will hold the same value on returning from
211 // the function as it did on entering the function.
212 // 1 --> Zero offset between the GEP and LEP, and r2 should be treated as a
213 // caller-saved register for all callers.
214 // 2-6 --> The binary logarithm of the offset, e.g.:
215 // 2 --> 2^2 = 4 bytes --> 1 instruction.
216 // 6 --> 2^6 = 64 bytes --> 16 instructions.
217 // 7 --> Reserved.
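// For example, st_other = 0x60 encodes gepToLep = (0x60 >> 5) & 7 = 3, i.e. a
// GEP-to-LEP offset of 1 << 3 = 8 bytes (two instructions).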
218 uint8_t gepToLep = (stOther >> 5) & 7;
219 if (gepToLep < 2)
220 return 0;
221
222 // The value encoded in the st_other bits is the
223 // log-base-2(offset).
224 if (gepToLep < 7)
225 return 1 << gepToLep;
226
227 error("reserved value of 7 in the 3 most-significant-bits of st_other");
228 return 0;
229 }
230
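// A prefixed instruction is stored as two 4-byte words (prefix, then suffix),
// each in the target's byte order. write64() byte-swaps all 8 bytes on a
// little-endian target, so the two halves are swapped here first to compensate.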
231 void elf::writePrefixedInstruction(uint8_t *loc, uint64_t insn) {
232 insn = config->isLE ? insn << 32 | insn >> 32 : insn;
233 write64(loc, insn);
234 }
235
236 static bool addOptional(StringRef name, uint64_t value,
237 std::vector<Defined *> &defined) {
238 Symbol *sym = symtab.find(name);
239 if (!sym || sym->isDefined())
240 return false;
241 sym->resolve(Defined{/*file=*/nullptr, StringRef(), STB_GLOBAL, STV_HIDDEN,
242 STT_FUNC, value,
243 /*size=*/0, /*section=*/nullptr});
244 defined.push_back(cast<Defined>(sym));
245 return true;
246 }
247
248 // If from is 14, write ${prefix}14: firstInsn; ${prefix}15:
249 // firstInsn+0x200008; ...; ${prefix}31: firstInsn+(31-14)*0x200008; $tail
250 // The labels are defined only if they exist in the symbol table.
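// Each 0x200008 step bumps the RT field (bits 6-10, so +0x200000) to the next
// register and the 16-bit displacement by 8 bytes.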
251 static void writeSequence(MutableArrayRef<uint32_t> buf, const char *prefix,
252 int from, uint32_t firstInsn,
253 ArrayRef<uint32_t> tail) {
254 std::vector<Defined *> defined;
255 char name[16];
256 int first;
257 uint32_t *ptr = buf.data();
258 for (int r = from; r < 32; ++r) {
259 format("%s%d", prefix, r).snprint(name, sizeof(name));
260 if (addOptional(name, 4 * (r - from), defined) && defined.size() == 1)
261 first = r - from;
262 write32(ptr++, firstInsn + 0x200008 * (r - from));
263 }
264 for (uint32_t insn : tail)
265 write32(ptr++, insn);
266 assert(ptr == &*buf.end());
267
268 if (defined.empty())
269 return;
270 // The full section content has the extent of [begin, end). We drop unused
271 // instructions and write [first,end).
272 auto *sec = make<InputSection>(
273 nullptr, SHF_ALLOC, SHT_PROGBITS, 4,
274 ArrayRef(reinterpret_cast<uint8_t *>(buf.data() + first),
275 4 * (buf.size() - first)),
276 ".text");
277 ctx.inputSections.push_back(sec);
278 for (Defined *sym : defined) {
279 sym->section = sec;
280 sym->value -= 4 * first;
281 }
282 }
283
284 // Implements some save and restore functions as described by the ELF V2 ABI to
285 // be compatible with GCC. With GCC -Os, when the number of call-saved registers
286 // exceeds a certain threshold, GCC generates _savegpr0_* _restgpr0_* calls and
287 // expects the linker to define them. See
288 // https://sourceware.org/pipermail/binutils/2002-February/017444.html and
289 // https://sourceware.org/pipermail/binutils/2004-August/036765.html . This is
290 // weird because libgcc.a would be the natural place. The linker generation
291 // approach has the advantage that the linker can generate multiple copies to
292 // avoid long branch thunks. However, we don't consider the advantage
293 // significant enough to complicate our trunk implementation, so we take the
294 // simple approach and synthesize .text sections providing the implementation.
295 void elf::addPPC64SaveRestore() {
296 static uint32_t savegpr0[20], restgpr0[21], savegpr1[19], restgpr1[19];
297 constexpr uint32_t blr = 0x4e800020, mtlr_0 = 0x7c0803a6;
298
299 // _restgpr0_14: ld 14, -144(1); _restgpr0_15: ld 15, -136(1); ...
300 // Tail: ld 0, 16(1); mtlr 0; blr
301 writeSequence(restgpr0, "_restgpr0_", 14, 0xe9c1ff70,
302 {0xe8010010, mtlr_0, blr});
303 // _restgpr1_14: ld 14, -144(12); _restgpr1_15: ld 15, -136(12); ...
304 // Tail: blr
305 writeSequence(restgpr1, "_restgpr1_", 14, 0xe9ccff70, {blr});
306 // _savegpr0_14: std 14, -144(1); _savegpr0_15: std 15, -136(1); ...
307 // Tail: std 0, 16(1); blr
308 writeSequence(savegpr0, "_savegpr0_", 14, 0xf9c1ff70, {0xf8010010, blr});
309 // _savegpr1_14: std 14, -144(12); _savegpr1_15: std 15, -136(12); ...
310 // Tail: blr
311 writeSequence(savegpr1, "_savegpr1_", 14, 0xf9ccff70, {blr});
312 }
313
314 // Find the R_PPC64_ADDR64 in .rela.toc with matching offset.
315 template <typename ELFT>
316 static std::pair<Defined *, int64_t>
317 getRelaTocSymAndAddend(InputSectionBase *tocSec, uint64_t offset) {
318 // .rela.toc contains exclusively R_PPC64_ADDR64 relocations sorted by
319 // r_offset: 0, 8, 16, etc. For a given Offset, Offset / 8 gives us the
320 // relocation index in most cases.
321 //
322 // In rare cases a TOC entry may store a constant that doesn't need an
323 // R_PPC64_ADDR64, so the corresponding r_offset is missing. Offset / 8 then
324 // points to a relocation with a larger r_offset, so we probe backwards linearly.
325 // Constants are extremely uncommon in .toc and the extra number of array
326 // accesses can be seen as a small constant.
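// For example, if the entry at offset 8 holds a constant, the relocations sit
// at r_offsets 0, 16, 24, ...; a lookup for offset 16 starts at index 2
// (r_offset 24) and probes down one step to index 1.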
327 ArrayRef<typename ELFT::Rela> relas =
328 tocSec->template relsOrRelas<ELFT>().relas;
329 if (relas.empty())
330 return {};
331 uint64_t index = std::min<uint64_t>(offset / 8, relas.size() - 1);
332 for (;;) {
333 if (relas[index].r_offset == offset) {
334 Symbol &sym = tocSec->getFile<ELFT>()->getRelocTargetSym(relas[index]);
335 return {dyn_cast<Defined>(&sym), getAddend<ELFT>(relas[index])};
336 }
337 if (relas[index].r_offset < offset || index == 0)
338 break;
339 --index;
340 }
341 return {};
342 }
343
344 // When accessing a symbol defined in another translation unit, compilers
345 // reserve a .toc entry, allocate a local label and generate toc-indirect
346 // instructions:
347 //
348 // addis 3, 2, .LC0@toc@ha # R_PPC64_TOC16_HA
349 // ld 3, .LC0@toc@l(3) # R_PPC64_TOC16_LO_DS, load the address from a .toc entry
350 // ld/lwa 3, 0(3) # load the value from the address
351 //
352 // .section .toc,"aw",@progbits
353 // .LC0: .tc var[TC],var
354 //
355 // If var is defined, non-preemptable and addressable with a 32-bit signed
356 // offset from the toc base, the address of var can be computed by adding an
357 // offset to the toc base, saving a load.
358 //
359 // addis 3,2,var@toc@ha # this may be relaxed to a nop,
360 // addi 3,3,var@toc@l # then this becomes addi 3,2,var@toc
361 // ld/lwa 3, 0(3) # load the value from the address
362 //
363 // Returns true if the relaxation is performed.
364 static bool tryRelaxPPC64TocIndirection(const Relocation &rel,
365 uint8_t *bufLoc) {
366 assert(config->tocOptimize);
367 if (rel.addend < 0)
368 return false;
369
370 // If the symbol is not the .toc section, this isn't a toc-indirection.
371 Defined *defSym = dyn_cast<Defined>(rel.sym);
372 if (!defSym || !defSym->isSection() || defSym->section->name != ".toc")
373 return false;
374
375 Defined *d;
376 int64_t addend;
377 auto *tocISB = cast<InputSectionBase>(defSym->section);
378 std::tie(d, addend) =
379 config->isLE ? getRelaTocSymAndAddend<ELF64LE>(tocISB, rel.addend)
380 : getRelaTocSymAndAddend<ELF64BE>(tocISB, rel.addend);
381
382 // Only non-preemptable defined symbols can be relaxed.
383 if (!d || d->isPreemptible)
384 return false;
385
386 // R_PPC64_ADDR64 should have created a canonical PLT for the non-preemptable
387 // ifunc and changed its type to STT_FUNC.
388 assert(!d->isGnuIFunc());
389
390 // Two instructions can materialize a 32-bit signed offset from the toc base.
391 uint64_t tocRelative = d->getVA(addend) - getPPC64TocBase();
392 if (!isInt<32>(tocRelative))
393 return false;
394
395 // Add PPC64TocOffset that will be subtracted by PPC64::relocate().
396 static_cast<const PPC64 &>(*target).relaxGot(bufLoc, rel,
397 tocRelative + ppc64TocOffset);
398 return true;
399 }
400
401 // Relocation masks following the #lo(value), #hi(value), #ha(value),
402 // #higher(value), #highera(value), #highest(value), and #highesta(value)
403 // macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
404 // document.
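// For example, with v = 0x12348000: lo(v) = 0x8000, hi(v) = 0x1234, and
// ha(v) = 0x1235, because the +0x8000 rounding compensates for lo(v) being
// negative when it is later used as a signed 16-bit displacement.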
405 static uint16_t lo(uint64_t v) { return v; }
406 static uint16_t hi(uint64_t v) { return v >> 16; }
407 static uint64_t ha(uint64_t v) { return (v + 0x8000) >> 16; }
408 static uint16_t higher(uint64_t v) { return v >> 32; }
409 static uint16_t highera(uint64_t v) { return (v + 0x8000) >> 32; }
410 static uint16_t highest(uint64_t v) { return v >> 48; }
411 static uint16_t highesta(uint64_t v) { return (v + 0x8000) >> 48; }
412
413 // Extracts the 'PO' field of an instruction encoding.
414 static uint8_t getPrimaryOpCode(uint32_t encoding) { return (encoding >> 26); }
415
416 static bool isDQFormInstruction(uint32_t encoding) {
417 switch (getPrimaryOpCode(encoding)) {
418 default:
419 return false;
420 case 6: // Power10 paired loads/stores (lxvp, stxvp).
421 case 56:
422 // The only instruction with a primary opcode of 56 is `lq`.
423 return true;
424 case 61:
425 // There are both DS and DQ instruction forms with this primary opcode.
426 // Namely `lxv` and `stxv` are the DQ-forms that use it.
427 // Within this opcode, the DS 'XO' bits being set to 01 selects the DQ form.
428 return (encoding & 3) == 0x1;
429 }
430 }
431
432 static bool isDSFormInstruction(PPCLegacyInsn insn) {
433 switch (insn) {
434 default:
435 return false;
436 case PPCLegacyInsn::LWA:
437 case PPCLegacyInsn::LD:
438 case PPCLegacyInsn::LXSD:
439 case PPCLegacyInsn::LXSSP:
440 case PPCLegacyInsn::STD:
441 case PPCLegacyInsn::STXSD:
442 case PPCLegacyInsn::STXSSP:
443 return true;
444 }
445 }
446
447 static PPCLegacyInsn getPPCLegacyInsn(uint32_t encoding) {
448 uint32_t opc = encoding & 0xfc000000;
449
450 // If the primary opcode is shared between multiple instructions, we need to
451 // fix it up to match the actual instruction we are after.
452 if ((opc == 0xe4000000 || opc == 0xe8000000 || opc == 0xf4000000 ||
453 opc == 0xf8000000) &&
454 !isDQFormInstruction(encoding))
455 opc = encoding & 0xfc000003;
456 else if (opc == 0xf4000000)
457 opc = encoding & 0xfc000007;
458 else if (opc == 0x18000000)
459 opc = encoding & 0xfc00000f;
460
461 // If the value is not one of the enumerators in PPCLegacyInsn, we want to
462 // return PPCLegacyInsn::NOINSN.
463 if (!checkPPCLegacyInsn(opc))
464 return PPCLegacyInsn::NOINSN;
465 return static_cast<PPCLegacyInsn>(opc);
466 }
467
468 static PPCPrefixedInsn getPCRelativeForm(PPCLegacyInsn insn) {
469 switch (insn) {
470 #define PCREL_OPT(Legacy, PCRel, InsnMask) \
471 case PPCLegacyInsn::Legacy: \
472 return PPCPrefixedInsn::PCRel
473 #include "PPCInsns.def"
474 #undef PCREL_OPT
475 }
476 return PPCPrefixedInsn::NOINSN;
477 }
478
479 static LegacyToPrefixMask getInsnMask(PPCLegacyInsn insn) {
480 switch (insn) {
481 #define PCREL_OPT(Legacy, PCRel, InsnMask) \
482 case PPCLegacyInsn::Legacy: \
483 return LegacyToPrefixMask::InsnMask
484 #include "PPCInsns.def"
485 #undef PCREL_OPT
486 }
487 return LegacyToPrefixMask::NOMASK;
488 }
489 static uint64_t getPCRelativeForm(uint32_t encoding) {
490 PPCLegacyInsn origInsn = getPPCLegacyInsn(encoding);
491 PPCPrefixedInsn pcrelInsn = getPCRelativeForm(origInsn);
492 if (pcrelInsn == PPCPrefixedInsn::NOINSN)
493 return UINT64_C(-1);
494 LegacyToPrefixMask origInsnMask = getInsnMask(origInsn);
495 uint64_t pcrelEncoding =
496 (uint64_t)pcrelInsn | (encoding & (uint64_t)origInsnMask);
497
498 // If the mask requires moving bit 28 to bit 5, do that now.
499 if (origInsnMask == LegacyToPrefixMask::ST_STX28_TO5)
500 pcrelEncoding |= (encoding & 0x8) << 23;
501 return pcrelEncoding;
502 }
503
504 static bool isInstructionUpdateForm(uint32_t encoding) {
505 switch (getPrimaryOpCode(encoding)) {
506 default:
507 return false;
508 case LBZU:
509 case LHAU:
510 case LHZU:
511 case LWZU:
512 case LFSU:
513 case LFDU:
514 case STBU:
515 case STHU:
516 case STWU:
517 case STFSU:
518 case STFDU:
519 return true;
520 // LWA has the same primary opcode as LD, and the DS bits are what
521 // differentiate between LD/LDU/LWA.
522 case LD:
523 case STD:
524 return (encoding & 3) == 1;
525 }
526 }
527
528 // Compute the total displacement between the prefixed instruction that gets
529 // to the start of the data and the load/store instruction that has the offset
530 // into the data structure.
531 // For example:
532 // paddi 3, 0, 1000, 1
533 // lwz 3, 20(3)
534 // Should add up to 1020 for total displacement.
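// The 34-bit displacement of a prefixed instruction is split into 18 bits in
// the prefix word and 16 bits in the suffix word, which is why the two pieces
// are reassembled below before sign extension.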
535 static int64_t getTotalDisp(uint64_t prefixedInsn, uint32_t accessInsn) {
536 int64_t disp34 = llvm::SignExtend64(
537 ((prefixedInsn & 0x3ffff00000000) >> 16) | (prefixedInsn & 0xffff), 34);
538 int32_t disp16 = llvm::SignExtend32(accessInsn & 0xffff, 16);
539 // For DS and DQ form instructions, we need to mask out the XO bits.
540 if (isDQFormInstruction(accessInsn))
541 disp16 &= ~0xf;
542 else if (isDSFormInstruction(getPPCLegacyInsn(accessInsn)))
543 disp16 &= ~0x3;
544 return disp34 + disp16;
545 }
546
547 // There are a number of places where we want to either read or write an
548 // instruction when handling a half16 relocation type. On big-endian the buffer
549 // pointer is pointing into the middle of the word we want to extract, and on
550 // little-endian it is pointing to the start of the word. These two helpers
551 // simplify reading and writing in that context.
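// For example, a half16 relocation on "addis r3, r2, sym@toc@ha" points at the
// 16-bit immediate field; on big-endian that field occupies the last two bytes
// of the instruction word, so the word itself starts at loc - 2.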
552 static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
553 write32(config->isLE ? loc : loc - 2, insn);
554 }
555
556 static uint32_t readFromHalf16(const uint8_t *loc) {
557 return read32(config->isLE ? loc : loc - 2);
558 }
559
560 static uint64_t readPrefixedInstruction(const uint8_t *loc) {
561 uint64_t fullInstr = read64(loc);
562 return config->isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
563 }
564
565 PPC64::PPC64() {
566 copyRel = R_PPC64_COPY;
567 gotRel = R_PPC64_GLOB_DAT;
568 pltRel = R_PPC64_JMP_SLOT;
569 relativeRel = R_PPC64_RELATIVE;
570 iRelativeRel = R_PPC64_IRELATIVE;
571 symbolicRel = R_PPC64_ADDR64;
572 #ifdef __OpenBSD__
573 pltHeaderSize = 52;
574 #else
575 pltHeaderSize = 60;
576 #endif
577 pltEntrySize = 4;
578 ipltEntrySize = 16; // PPC64PltCallStub::size
579 gotHeaderEntriesNum = 1;
580 gotPltHeaderEntriesNum = 2;
581 needsThunks = true;
582
583 tlsModuleIndexRel = R_PPC64_DTPMOD64;
584 tlsOffsetRel = R_PPC64_DTPREL64;
585
586 tlsGotRel = R_PPC64_TPREL64;
587
588 needsMoreStackNonSplit = false;
589
590 // We need 64K pages (at least under glibc/Linux, the loader won't
591 // set different permissions on a finer granularity than that).
592 defaultMaxPageSize = 65536;
593
594 // The PPC64 ELF ABI v1 spec says:
595 //
596 // It is normally desirable to put segments with different characteristics
597 // in separate 256 Mbyte portions of the address space, to give the
598 // operating system full paging flexibility in the 64-bit address space.
599 //
600 // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
601 // use 0x10000000 as the starting address.
602 defaultImageBase = 0x10000000;
603
604 write32(trapInstr.data(), 0x7fe00008);
605 }
606
607 int PPC64::getTlsGdRelaxSkip(RelType type) const {
608 // A __tls_get_addr call instruction is marked with 2 relocations:
609 //
610 // R_PPC64_TLSGD / R_PPC64_TLSLD: marker relocation
611 // R_PPC64_REL24: __tls_get_addr
612 //
613 // After the relaxation we no longer call __tls_get_addr and should skip both
614 // relocations to not create a false dependence on __tls_get_addr being
615 // defined.
616 if (type == R_PPC64_TLSGD || type == R_PPC64_TLSLD)
617 return 2;
618 return 1;
619 }
620
621 static uint32_t getEFlags(InputFile *file) {
622 if (file->ekind == ELF64BEKind)
623 return cast<ObjFile<ELF64BE>>(file)->getObj().getHeader().e_flags;
624 return cast<ObjFile<ELF64LE>>(file)->getObj().getHeader().e_flags;
625 }
626
627 // This file implements the v2 ABI. This function makes sure that all
628 // object files have v2 or an unspecified version as an ABI version.
629 uint32_t PPC64::calcEFlags() const {
630 for (InputFile *f : ctx.objectFiles) {
631 uint32_t flag = getEFlags(f);
632 if (flag == 1)
633 error(toString(f) + ": ABI version 1 is not supported");
634 else if (flag > 2)
635 error(toString(f) + ": unrecognized e_flags: " + Twine(flag));
636 }
637 return 2;
638 }
639
640 void PPC64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
641 switch (rel.type) {
642 case R_PPC64_TOC16_HA:
643 // Convert "addis reg, 2, .LC0@toc@h" to "addis reg, 2, var@toc@h" or "nop".
644 relocate(loc, rel, val);
645 break;
646 case R_PPC64_TOC16_LO_DS: {
647 // Convert "ld reg, .LC0@toc@l(reg)" to "addi reg, reg, var@toc@l" or
648 // "addi reg, 2, var@toc".
649 uint32_t insn = readFromHalf16(loc);
650 if (getPrimaryOpCode(insn) != LD)
651 error("expected a 'ld' for got-indirect to toc-relative relaxing");
652 writeFromHalf16(loc, (insn & 0x03ffffff) | 0x38000000);
653 relocateNoSym(loc, R_PPC64_TOC16_LO, val);
654 break;
655 }
656 case R_PPC64_GOT_PCREL34: {
657 // Clear the first 8 bits of the prefix and the first 6 bits of the
658 // instruction (the primary opcode).
659 uint64_t insn = readPrefixedInstruction(loc);
660 if ((insn & 0xfc000000) != 0xe4000000)
661 error("expected a 'pld' for got-indirect to pc-relative relaxing");
662 insn &= ~0xff000000fc000000;
663
664 // Replace the cleared bits with the values for PADDI (0x600000038000000);
665 insn |= 0x600000038000000;
666 writePrefixedInstruction(loc, insn);
667 relocate(loc, rel, val);
668 break;
669 }
670 case R_PPC64_PCREL_OPT: {
671 // We can only relax this if the R_PPC64_GOT_PCREL34 at this offset can
672 // be relaxed. The eligibility for the relaxation needs to be determined
673 // on that relocation since this one does not relocate a symbol.
674 uint64_t insn = readPrefixedInstruction(loc);
675 uint32_t accessInsn = read32(loc + rel.addend);
676 uint64_t pcRelInsn = getPCRelativeForm(accessInsn);
677
678 // This error is not necessary for correctness but is emitted for now
679 // to ensure we don't miss these opportunities in real code. It can be
680 // removed at a later date.
681 if (pcRelInsn == UINT64_C(-1)) {
682 errorOrWarn(
683 "unrecognized instruction for R_PPC64_PCREL_OPT relaxation: 0x" +
684 Twine::utohexstr(accessInsn));
685 break;
686 }
687
688 int64_t totalDisp = getTotalDisp(insn, accessInsn);
689 if (!isInt<34>(totalDisp))
690 break; // Displacement doesn't fit.
691 // Convert the PADDI to the prefixed version of accessInsn and convert
692 // accessInsn to a nop.
693 writePrefixedInstruction(loc, pcRelInsn |
694 ((totalDisp & 0x3ffff0000) << 16) |
695 (totalDisp & 0xffff));
696 write32(loc + rel.addend, NOP); // nop accessInsn.
697 break;
698 }
699 default:
700 llvm_unreachable("unexpected relocation type");
701 }
702 }
703
704 void PPC64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
705 uint64_t val) const {
706 // Reference: 3.7.4.2 of the 64-bit ELF V2 abi supplement.
707 // The general dynamic code sequence for a global `x` will look like:
708 // Instruction Relocation Symbol
709 // addis r3, r2, x@got@tlsgd@ha R_PPC64_GOT_TLSGD16_HA x
710 // addi r3, r3, x@got@tlsgd@l R_PPC64_GOT_TLSGD16_LO x
711 // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSGD x
712 // R_PPC64_REL24 __tls_get_addr
713 // nop None None
714
715 // Relaxing to local exec entails converting:
716 // addis r3, r2, x@got@tlsgd@ha into nop
717 // addi r3, r3, x@got@tlsgd@l into addis r3, r13, x@tprel@ha
718 // bl __tls_get_addr(x@tlsgd) into nop
719 // nop into addi r3, r3, x@tprel@l
720
721 switch (rel.type) {
722 case R_PPC64_GOT_TLSGD16_HA:
723 writeFromHalf16(loc, NOP);
724 break;
725 case R_PPC64_GOT_TLSGD16:
726 case R_PPC64_GOT_TLSGD16_LO:
727 writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13
728 relocateNoSym(loc, R_PPC64_TPREL16_HA, val);
729 break;
730 case R_PPC64_GOT_TLSGD_PCREL34:
731 // Relax from paddi r3, 0, x@got@tlsgd@pcrel, 1 to
732 // paddi r3, r13, x@tprel, 0
733 writePrefixedInstruction(loc, 0x06000000386d0000);
734 relocateNoSym(loc, R_PPC64_TPREL34, val);
735 break;
736 case R_PPC64_TLSGD: {
737 // PC Relative Relaxation:
738 // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
739 // nop
740 // TOC Relaxation:
741 // Relax from bl __tls_get_addr(x@tlsgd)
742 // nop
743 // to
744 // nop
745 // addi r3, r3, x@tprel@l
746 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
747 if (locAsInt % 4 == 0) {
748 write32(loc, NOP); // nop
749 write32(loc + 4, 0x38630000); // addi r3, r3
750 // Since we are relocating a half16 type relocation and Loc + 4 points to
751 // the start of an instruction we need to advance the buffer by an extra
752 // 2 bytes on BE.
753 relocateNoSym(loc + 4 + (config->ekind == ELF64BEKind ? 2 : 0),
754 R_PPC64_TPREL16_LO, val);
755 } else if (locAsInt % 4 == 1) {
756 write32(loc - 1, NOP);
757 } else {
758 errorOrWarn("R_PPC64_TLSGD has unexpected byte alignment");
759 }
760 break;
761 }
762 default:
763 llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
764 }
765 }
766
767 void PPC64::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
768 uint64_t val) const {
769 // Reference: 3.7.4.3 of the 64-bit ELF V2 abi supplement.
770 // The local dynamic code sequence for a global `x` will look like:
771 // Instruction Relocation Symbol
772 // addis r3, r2, x@got@tlsld@ha R_PPC64_GOT_TLSLD16_HA x
773 // addi r3, r3, x@got@tlsld@l R_PPC64_GOT_TLSLD16_LO x
774 // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSLD x
775 // R_PPC64_REL24 __tls_get_addr
776 // nop None None
777
778 // Relaxing to local exec entails converting:
779 // addis r3, r2, x@got@tlsld@ha into nop
780 // addi r3, r3, x@got@tlsld@l into addis r3, r13, 0
781 // bl __tls_get_addr(x@tlsgd) into nop
782 // nop into addi r3, r3, 4096
783
784 switch (rel.type) {
785 case R_PPC64_GOT_TLSLD16_HA:
786 writeFromHalf16(loc, NOP);
787 break;
788 case R_PPC64_GOT_TLSLD16_LO:
789 writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13, 0
790 break;
791 case R_PPC64_GOT_TLSLD_PCREL34:
792 // Relax from paddi r3, 0, x1@got@tlsld@pcrel, 1 to
793 // paddi r3, r13, 0x1000, 0
794 writePrefixedInstruction(loc, 0x06000000386d1000);
795 break;
796 case R_PPC64_TLSLD: {
797 // PC Relative Relaxation:
798 // Relax from bl __tls_get_addr@notoc(x@tlsld)
799 // to
800 // nop
801 // TOC Relaxation:
802 // Relax from bl __tls_get_addr(x@tlsld)
803 // nop
804 // to
805 // nop
806 // addi r3, r3, 4096
807 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
808 if (locAsInt % 4 == 0) {
809 write32(loc, NOP);
810 write32(loc + 4, 0x38631000); // addi r3, r3, 4096
811 } else if (locAsInt % 4 == 1) {
812 write32(loc - 1, NOP);
813 } else {
814 errorOrWarn("R_PPC64_TLSLD has unexpected byte alignment");
815 }
816 break;
817 }
818 case R_PPC64_DTPREL16:
819 case R_PPC64_DTPREL16_HA:
820 case R_PPC64_DTPREL16_HI:
821 case R_PPC64_DTPREL16_DS:
822 case R_PPC64_DTPREL16_LO:
823 case R_PPC64_DTPREL16_LO_DS:
824 case R_PPC64_DTPREL34:
825 relocate(loc, rel, val);
826 break;
827 default:
828 llvm_unreachable("unsupported relocation for TLS LD to LE relaxation");
829 }
830 }
831
832 unsigned elf::getPPCDFormOp(unsigned secondaryOp) {
833 switch (secondaryOp) {
834 case LBZX:
835 return LBZ;
836 case LHZX:
837 return LHZ;
838 case LWZX:
839 return LWZ;
840 case LDX:
841 return LD;
842 case STBX:
843 return STB;
844 case STHX:
845 return STH;
846 case STWX:
847 return STW;
848 case STDX:
849 return STD;
850 case ADD:
851 return ADDI;
852 default:
853 return 0;
854 }
855 }
856
857 void PPC64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
858 uint64_t val) const {
859 // The initial exec code sequence for a global `x` will look like:
860 // Instruction Relocation Symbol
861 // addis r9, r2, x@got@tprel@ha R_PPC64_GOT_TPREL16_HA x
862 // ld r9, x@got@tprel@l(r9) R_PPC64_GOT_TPREL16_LO_DS x
863 // add r9, r9, x@tls R_PPC64_TLS x
864
865 // Relaxing to local exec entails converting:
866 // addis r9, r2, x@got@tprel@ha into nop
867 // ld r9, x@got@tprel@l(r9) into addis r9, r13, x@tprel@ha
868 // add r9, r9, x@tls into addi r9, r9, x@tprel@l
869
870 // x@tls R_PPC64_TLS is a relocation which does not compute anything,
871 // it is replaced with r13 (thread pointer).
872
873 // The add instruction in the initial exec sequence has multiple variations
874 // that need to be handled. If we are building an address it will use an add
875 // instruction; if we are accessing memory it will use any of the X-form
876 // indexed load or store instructions.
877
878 unsigned offset = (config->ekind == ELF64BEKind) ? 2 : 0;
879 switch (rel.type) {
880 case R_PPC64_GOT_TPREL16_HA:
881 write32(loc - offset, NOP);
882 break;
883 case R_PPC64_GOT_TPREL16_LO_DS:
884 case R_PPC64_GOT_TPREL16_DS: {
885 uint32_t regNo = read32(loc - offset) & 0x03E00000; // bits 6-10
886 write32(loc - offset, 0x3C0D0000 | regNo); // addis RegNo, r13
887 relocateNoSym(loc, R_PPC64_TPREL16_HA, val);
888 break;
889 }
890 case R_PPC64_GOT_TPREL_PCREL34: {
891 const uint64_t pldRT = readPrefixedInstruction(loc) & 0x0000000003e00000;
892 // paddi RT(from pld), r13, symbol@tprel, 0
893 writePrefixedInstruction(loc, 0x06000000380d0000 | pldRT);
894 relocateNoSym(loc, R_PPC64_TPREL34, val);
895 break;
896 }
897 case R_PPC64_TLS: {
898 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
899 if (locAsInt % 4 == 0) {
900 uint32_t primaryOp = getPrimaryOpCode(read32(loc));
901 if (primaryOp != 31)
902 error("unrecognized instruction for IE to LE R_PPC64_TLS");
903 uint32_t secondaryOp = (read32(loc) & 0x000007FE) >> 1; // bits 21-30
904 uint32_t dFormOp = getPPCDFormOp(secondaryOp);
905 if (dFormOp == 0)
906 error("unrecognized instruction for IE to LE R_PPC64_TLS");
907 write32(loc, ((dFormOp << 26) | (read32(loc) & 0x03FFFFFF)));
908 relocateNoSym(loc + offset, R_PPC64_TPREL16_LO, val);
909 } else if (locAsInt % 4 == 1) {
910 // If the offset is not 4 byte aligned then we have a PCRel type reloc.
911 // This version of the relocation is offset by one byte from the
912 // instruction it references.
913 uint32_t tlsInstr = read32(loc - 1);
914 uint32_t primaryOp = getPrimaryOpCode(tlsInstr);
915 if (primaryOp != 31)
916 errorOrWarn("unrecognized instruction for IE to LE R_PPC64_TLS");
917 uint32_t secondaryOp = (tlsInstr & 0x000007FE) >> 1; // bits 21-30
918 // The add is a special case and should be turned into a nop. The paddi
919 // that comes before it will already have computed the address of the
920 // symbol.
921 if (secondaryOp == 266) {
922 // Check if the add uses the same result register as the input register.
923 uint32_t rt = (tlsInstr & 0x03E00000) >> 21; // bits 6-10
924 uint32_t ra = (tlsInstr & 0x001F0000) >> 16; // bits 11-15
925 if (ra == rt) {
926 write32(loc - 1, NOP);
927 } else {
928 // mr rt, ra
929 write32(loc - 1, 0x7C000378 | (rt << 16) | (ra << 21) | (ra << 11));
930 }
931 } else {
932 uint32_t dFormOp = getPPCDFormOp(secondaryOp);
933 if (dFormOp == 0)
934 errorOrWarn("unrecognized instruction for IE to LE R_PPC64_TLS");
935 write32(loc - 1, ((dFormOp << 26) | (tlsInstr & 0x03FF0000)));
936 }
937 } else {
938 errorOrWarn("R_PPC64_TLS must be either 4 byte aligned or one byte "
939 "offset from 4 byte aligned");
940 }
941 break;
942 }
943 default:
944 llvm_unreachable("unknown relocation for IE to LE");
945 break;
946 }
947 }
948
949 RelExpr PPC64::getRelExpr(RelType type, const Symbol &s,
950 const uint8_t *loc) const {
951 switch (type) {
952 case R_PPC64_NONE:
953 return R_NONE;
954 case R_PPC64_ADDR16:
955 case R_PPC64_ADDR16_DS:
956 case R_PPC64_ADDR16_HA:
957 case R_PPC64_ADDR16_HI:
958 case R_PPC64_ADDR16_HIGH:
959 case R_PPC64_ADDR16_HIGHER:
960 case R_PPC64_ADDR16_HIGHERA:
961 case R_PPC64_ADDR16_HIGHEST:
962 case R_PPC64_ADDR16_HIGHESTA:
963 case R_PPC64_ADDR16_LO:
964 case R_PPC64_ADDR16_LO_DS:
965 case R_PPC64_ADDR32:
966 case R_PPC64_ADDR64:
967 return R_ABS;
968 case R_PPC64_GOT16:
969 case R_PPC64_GOT16_DS:
970 case R_PPC64_GOT16_HA:
971 case R_PPC64_GOT16_HI:
972 case R_PPC64_GOT16_LO:
973 case R_PPC64_GOT16_LO_DS:
974 return R_GOT_OFF;
975 case R_PPC64_TOC16:
976 case R_PPC64_TOC16_DS:
977 case R_PPC64_TOC16_HI:
978 case R_PPC64_TOC16_LO:
979 return R_GOTREL;
980 case R_PPC64_GOT_PCREL34:
981 case R_PPC64_GOT_TPREL_PCREL34:
982 case R_PPC64_PCREL_OPT:
983 return R_GOT_PC;
984 case R_PPC64_TOC16_HA:
985 case R_PPC64_TOC16_LO_DS:
986 return config->tocOptimize ? R_PPC64_RELAX_TOC : R_GOTREL;
987 case R_PPC64_TOC:
988 return R_PPC64_TOCBASE;
989 case R_PPC64_REL14:
990 case R_PPC64_REL24:
991 return R_PPC64_CALL_PLT;
992 case R_PPC64_REL24_NOTOC:
993 return R_PLT_PC;
994 case R_PPC64_REL16_LO:
995 case R_PPC64_REL16_HA:
996 case R_PPC64_REL16_HI:
997 case R_PPC64_REL32:
998 case R_PPC64_REL64:
999 case R_PPC64_PCREL34:
1000 return R_PC;
1001 case R_PPC64_GOT_TLSGD16:
1002 case R_PPC64_GOT_TLSGD16_HA:
1003 case R_PPC64_GOT_TLSGD16_HI:
1004 case R_PPC64_GOT_TLSGD16_LO:
1005 return R_TLSGD_GOT;
1006 case R_PPC64_GOT_TLSGD_PCREL34:
1007 return R_TLSGD_PC;
1008 case R_PPC64_GOT_TLSLD16:
1009 case R_PPC64_GOT_TLSLD16_HA:
1010 case R_PPC64_GOT_TLSLD16_HI:
1011 case R_PPC64_GOT_TLSLD16_LO:
1012 return R_TLSLD_GOT;
1013 case R_PPC64_GOT_TLSLD_PCREL34:
1014 return R_TLSLD_PC;
1015 case R_PPC64_GOT_TPREL16_HA:
1016 case R_PPC64_GOT_TPREL16_LO_DS:
1017 case R_PPC64_GOT_TPREL16_DS:
1018 case R_PPC64_GOT_TPREL16_HI:
1019 return R_GOT_OFF;
1020 case R_PPC64_GOT_DTPREL16_HA:
1021 case R_PPC64_GOT_DTPREL16_LO_DS:
1022 case R_PPC64_GOT_DTPREL16_DS:
1023 case R_PPC64_GOT_DTPREL16_HI:
1024 return R_TLSLD_GOT_OFF;
1025 case R_PPC64_TPREL16:
1026 case R_PPC64_TPREL16_HA:
1027 case R_PPC64_TPREL16_LO:
1028 case R_PPC64_TPREL16_HI:
1029 case R_PPC64_TPREL16_DS:
1030 case R_PPC64_TPREL16_LO_DS:
1031 case R_PPC64_TPREL16_HIGHER:
1032 case R_PPC64_TPREL16_HIGHERA:
1033 case R_PPC64_TPREL16_HIGHEST:
1034 case R_PPC64_TPREL16_HIGHESTA:
1035 case R_PPC64_TPREL34:
1036 return R_TPREL;
1037 case R_PPC64_DTPREL16:
1038 case R_PPC64_DTPREL16_DS:
1039 case R_PPC64_DTPREL16_HA:
1040 case R_PPC64_DTPREL16_HI:
1041 case R_PPC64_DTPREL16_HIGHER:
1042 case R_PPC64_DTPREL16_HIGHERA:
1043 case R_PPC64_DTPREL16_HIGHEST:
1044 case R_PPC64_DTPREL16_HIGHESTA:
1045 case R_PPC64_DTPREL16_LO:
1046 case R_PPC64_DTPREL16_LO_DS:
1047 case R_PPC64_DTPREL64:
1048 case R_PPC64_DTPREL34:
1049 return R_DTPREL;
1050 case R_PPC64_TLSGD:
1051 return R_TLSDESC_CALL;
1052 case R_PPC64_TLSLD:
1053 return R_TLSLD_HINT;
1054 case R_PPC64_TLS:
1055 return R_TLSIE_HINT;
1056 default:
1057 error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
1058 ") against symbol " + toString(s));
1059 return R_NONE;
1060 }
1061 }
1062
1063 RelType PPC64::getDynRel(RelType type) const {
1064 if (type == R_PPC64_ADDR64 || type == R_PPC64_TOC)
1065 return R_PPC64_ADDR64;
1066 return R_PPC64_NONE;
1067 }
1068
1069 int64_t PPC64::getImplicitAddend(const uint8_t *buf, RelType type) const {
1070 switch (type) {
1071 case R_PPC64_NONE:
1072 case R_PPC64_GLOB_DAT:
1073 case R_PPC64_JMP_SLOT:
1074 return 0;
1075 case R_PPC64_REL32:
1076 return SignExtend64<32>(read32(buf));
1077 case R_PPC64_ADDR64:
1078 case R_PPC64_REL64:
1079 case R_PPC64_RELATIVE:
1080 case R_PPC64_IRELATIVE:
1081 case R_PPC64_DTPMOD64:
1082 case R_PPC64_DTPREL64:
1083 case R_PPC64_TPREL64:
1084 return read64(buf);
1085 default:
1086 internalLinkerError(getErrorLocation(buf),
1087 "cannot read addend for relocation " + toString(type));
1088 return 0;
1089 }
1090 }
1091
1092 void PPC64::writeGotHeader(uint8_t *buf) const {
1093 write64(buf, getPPC64TocBase());
1094 }
1095
1096 void PPC64::writePltHeader(uint8_t *buf) const {
1097 int64_t gotPltOffset = in.gotPlt->getVA() - (in.plt->getVA() + 8);
1098
1099 // The generic resolver stub goes first.
1100 write32(buf + 0, 0x7c0802a6); // mflr r0
1101 write32(buf + 4, 0x429f0005); // bcl 20,4*cr7+so,8 <_glink+0x8>
1102 write32(buf + 8, 0x7d6802a6); // mflr r11
1103 write32(buf + 12, 0x7c0803a6); // mtlr r0
1104 write32(buf + 16, 0x7d8b6050); // subf r12, r11, r12
1105 #ifdef __OpenBSD__
1106 write32(buf + 20, 0x380cffd4); // subi r0,r12,44
1107 #else
1108 write32(buf + 20, 0x380cffcc); // subi r0,r12,52
1109 #endif
1110 write32(buf + 24, 0x7800f082); // srdi r0,r0,62,2
1111 #ifdef __OpenBSD__
1112 write32(buf + 28, 0x3d6b0000 | ha(gotPltOffset)); // addis r11,r11,offset@ha
1113 write32(buf + 32, 0x396b0000 | lo(gotPltOffset)); // addi r11,r11,offset@l
1114 #else
1115 write32(buf + 28, 0xe98b002c); // ld r12,44(r11)
1116 write32(buf + 32, 0x7d6c5a14); // add r11,r12,r11
1117 #endif
1118 write32(buf + 36, 0xe98b0000); // ld r12,0(r11)
1119 write32(buf + 40, 0xe96b0008); // ld r11,8(r11)
1120 write32(buf + 44, 0x7d8903a6); // mtctr r12
1121 write32(buf + 48, 0x4e800420); // bctr
1122
1123 #ifndef __OpenBSD__
1124 // The 'bcl' instruction will set the link register to the address of the
1125 // following instruction ('mflr r11'). Here we store the offset from that
1126 // instruction to the first entry in the GotPlt section.
1127 write64(buf + 52, gotPltOffset);
1128 #endif
1129 }
1130
1131 void PPC64::writePlt(uint8_t *buf, const Symbol &sym,
1132 uint64_t /*pltEntryAddr*/) const {
1133 int32_t offset = pltHeaderSize + sym.getPltIdx() * pltEntrySize;
1134 // bl __glink_PLTresolve
1135 write32(buf, 0x48000000 | ((-offset) & 0x03FFFFFc));
1136 }
1137
1138 void PPC64::writeIplt(uint8_t *buf, const Symbol &sym,
1139 uint64_t /*pltEntryAddr*/) const {
1140 writePPC64LoadAndBranch(buf, sym.getGotPltVA() - getPPC64TocBase());
1141 }
1142
1143 static std::pair<RelType, uint64_t> toAddr16Rel(RelType type, uint64_t val) {
1144 // Relocations relative to the toc-base need to be adjusted by the Toc offset.
1145 uint64_t tocBiasedVal = val - ppc64TocOffset;
1146 // Relocations relative to dtv[dtpmod] need to be adjusted by the DTP offset.
1147 uint64_t dtpBiasedVal = val - dynamicThreadPointerOffset;
1148
1149 switch (type) {
1150 // TOC biased relocation.
1151 case R_PPC64_GOT16:
1152 case R_PPC64_GOT_TLSGD16:
1153 case R_PPC64_GOT_TLSLD16:
1154 case R_PPC64_TOC16:
1155 return {R_PPC64_ADDR16, tocBiasedVal};
1156 case R_PPC64_GOT16_DS:
1157 case R_PPC64_TOC16_DS:
1158 case R_PPC64_GOT_TPREL16_DS:
1159 case R_PPC64_GOT_DTPREL16_DS:
1160 return {R_PPC64_ADDR16_DS, tocBiasedVal};
1161 case R_PPC64_GOT16_HA:
1162 case R_PPC64_GOT_TLSGD16_HA:
1163 case R_PPC64_GOT_TLSLD16_HA:
1164 case R_PPC64_GOT_TPREL16_HA:
1165 case R_PPC64_GOT_DTPREL16_HA:
1166 case R_PPC64_TOC16_HA:
1167 return {R_PPC64_ADDR16_HA, tocBiasedVal};
1168 case R_PPC64_GOT16_HI:
1169 case R_PPC64_GOT_TLSGD16_HI:
1170 case R_PPC64_GOT_TLSLD16_HI:
1171 case R_PPC64_GOT_TPREL16_HI:
1172 case R_PPC64_GOT_DTPREL16_HI:
1173 case R_PPC64_TOC16_HI:
1174 return {R_PPC64_ADDR16_HI, tocBiasedVal};
1175 case R_PPC64_GOT16_LO:
1176 case R_PPC64_GOT_TLSGD16_LO:
1177 case R_PPC64_GOT_TLSLD16_LO:
1178 case R_PPC64_TOC16_LO:
1179 return {R_PPC64_ADDR16_LO, tocBiasedVal};
1180 case R_PPC64_GOT16_LO_DS:
1181 case R_PPC64_TOC16_LO_DS:
1182 case R_PPC64_GOT_TPREL16_LO_DS:
1183 case R_PPC64_GOT_DTPREL16_LO_DS:
1184 return {R_PPC64_ADDR16_LO_DS, tocBiasedVal};
1185
1186 // Dynamic Thread pointer biased relocation types.
1187 case R_PPC64_DTPREL16:
1188 return {R_PPC64_ADDR16, dtpBiasedVal};
1189 case R_PPC64_DTPREL16_DS:
1190 return {R_PPC64_ADDR16_DS, dtpBiasedVal};
1191 case R_PPC64_DTPREL16_HA:
1192 return {R_PPC64_ADDR16_HA, dtpBiasedVal};
1193 case R_PPC64_DTPREL16_HI:
1194 return {R_PPC64_ADDR16_HI, dtpBiasedVal};
1195 case R_PPC64_DTPREL16_HIGHER:
1196 return {R_PPC64_ADDR16_HIGHER, dtpBiasedVal};
1197 case R_PPC64_DTPREL16_HIGHERA:
1198 return {R_PPC64_ADDR16_HIGHERA, dtpBiasedVal};
1199 case R_PPC64_DTPREL16_HIGHEST:
1200 return {R_PPC64_ADDR16_HIGHEST, dtpBiasedVal};
1201 case R_PPC64_DTPREL16_HIGHESTA:
1202 return {R_PPC64_ADDR16_HIGHESTA, dtpBiasedVal};
1203 case R_PPC64_DTPREL16_LO:
1204 return {R_PPC64_ADDR16_LO, dtpBiasedVal};
1205 case R_PPC64_DTPREL16_LO_DS:
1206 return {R_PPC64_ADDR16_LO_DS, dtpBiasedVal};
1207 case R_PPC64_DTPREL64:
1208 return {R_PPC64_ADDR64, dtpBiasedVal};
1209
1210 default:
1211 return {type, val};
1212 }
1213 }
1214
1215 static bool isTocOptType(RelType type) {
1216 switch (type) {
1217 case R_PPC64_GOT16_HA:
1218 case R_PPC64_GOT16_LO_DS:
1219 case R_PPC64_TOC16_HA:
1220 case R_PPC64_TOC16_LO_DS:
1221 case R_PPC64_TOC16_LO:
1222 return true;
1223 default:
1224 return false;
1225 }
1226 }
1227
1228 void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
1229 RelType type = rel.type;
1230 bool shouldTocOptimize = isTocOptType(type);
1231 // For dynamic thread pointer relative, toc-relative, and got-indirect
1232 // relocations, proceed in terms of the corresponding ADDR16 relocation type.
1233 std::tie(type, val) = toAddr16Rel(type, val);
1234
1235 switch (type) {
1236 case R_PPC64_ADDR14: {
1237 checkAlignment(loc, val, 4, rel);
1238 // Preserve the AA/LK bits in the branch instruction
1239 uint8_t aalk = loc[3];
1240 write16(loc + 2, (aalk & 3) | (val & 0xfffc));
1241 break;
1242 }
1243 case R_PPC64_ADDR16:
1244 checkIntUInt(loc, val, 16, rel);
1245 write16(loc, val);
1246 break;
1247 case R_PPC64_ADDR32:
1248 checkIntUInt(loc, val, 32, rel);
1249 write32(loc, val);
1250 break;
1251 case R_PPC64_ADDR16_DS:
1252 case R_PPC64_TPREL16_DS: {
1253 checkInt(loc, val, 16, rel);
1254 // DQ-form instructions use bits 28-31 as part of the instruction encoding
1255 // DS-form instructions only use bits 30-31.
1256 uint16_t mask = isDQFormInstruction(readFromHalf16(loc)) ? 0xf : 0x3;
1257 checkAlignment(loc, lo(val), mask + 1, rel);
1258 write16(loc, (read16(loc) & mask) | lo(val));
1259 } break;
1260 case R_PPC64_ADDR16_HA:
1261 case R_PPC64_REL16_HA:
1262 case R_PPC64_TPREL16_HA:
1263 if (config->tocOptimize && shouldTocOptimize && ha(val) == 0)
1264 writeFromHalf16(loc, NOP);
1265 else {
1266 checkInt(loc, val + 0x8000, 32, rel);
1267 write16(loc, ha(val));
1268 }
1269 break;
1270 case R_PPC64_ADDR16_HI:
1271 case R_PPC64_REL16_HI:
1272 case R_PPC64_TPREL16_HI:
1273 checkInt(loc, val, 32, rel);
1274 write16(loc, hi(val));
1275 break;
1276 case R_PPC64_ADDR16_HIGH:
1277 write16(loc, hi(val));
1278 break;
1279 case R_PPC64_ADDR16_HIGHER:
1280 case R_PPC64_TPREL16_HIGHER:
1281 write16(loc, higher(val));
1282 break;
1283 case R_PPC64_ADDR16_HIGHERA:
1284 case R_PPC64_TPREL16_HIGHERA:
1285 write16(loc, highera(val));
1286 break;
1287 case R_PPC64_ADDR16_HIGHEST:
1288 case R_PPC64_TPREL16_HIGHEST:
1289 write16(loc, highest(val));
1290 break;
1291 case R_PPC64_ADDR16_HIGHESTA:
1292 case R_PPC64_TPREL16_HIGHESTA:
1293 write16(loc, highesta(val));
1294 break;
1295 case R_PPC64_ADDR16_LO:
1296 case R_PPC64_REL16_LO:
1297 case R_PPC64_TPREL16_LO:
1298 // When the high-adjusted part of a toc relocation evaluates to 0, it is
1299 // changed into a nop. The lo part then needs to be updated to use the
1300 // toc-pointer register r2 as the base register.
1301 if (config->tocOptimize && shouldTocOptimize && ha(val) == 0) {
1302 uint32_t insn = readFromHalf16(loc);
1303 if (isInstructionUpdateForm(insn))
1304 error(getErrorLocation(loc) +
1305 "can't toc-optimize an update instruction: 0x" +
1306 utohexstr(insn));
1307 writeFromHalf16(loc, (insn & 0xffe00000) | 0x00020000 | lo(val));
1308 } else {
1309 write16(loc, lo(val));
1310 }
1311 break;
1312 case R_PPC64_ADDR16_LO_DS:
1313 case R_PPC64_TPREL16_LO_DS: {
1314 // DQ-form instructions use bits 28-31 as part of the instruction encoding
1315 // DS-form instructions only use bits 30-31.
1316 uint32_t insn = readFromHalf16(loc);
1317 uint16_t mask = isDQFormInstruction(insn) ? 0xf : 0x3;
1318 checkAlignment(loc, lo(val), mask + 1, rel);
1319 if (config->tocOptimize && shouldTocOptimize && ha(val) == 0) {
1320 // When the high-adjusted part of a toc relocation evaluates to 0, it is
1321 // changed into a nop. The lo part then needs to be updated to use the toc
1322 // pointer register r2 as the base register.
1323 if (isInstructionUpdateForm(insn))
1324 error(getErrorLocation(loc) +
1325 "Can't toc-optimize an update instruction: 0x" +
1326 Twine::utohexstr(insn));
1327 insn &= 0xffe00000 | mask;
1328 writeFromHalf16(loc, insn | 0x00020000 | lo(val));
1329 } else {
1330 write16(loc, (read16(loc) & mask) | lo(val));
1331 }
1332 } break;
1333 case R_PPC64_TPREL16:
1334 checkInt(loc, val, 16, rel);
1335 write16(loc, val);
1336 break;
1337 case R_PPC64_REL32:
1338 checkInt(loc, val, 32, rel);
1339 write32(loc, val);
1340 break;
1341 case R_PPC64_ADDR64:
1342 case R_PPC64_REL64:
1343 case R_PPC64_TOC:
1344 write64(loc, val);
1345 break;
1346 case R_PPC64_REL14: {
1347 uint32_t mask = 0x0000FFFC;
1348 checkInt(loc, val, 16, rel);
1349 checkAlignment(loc, val, 4, rel);
1350 write32(loc, (read32(loc) & ~mask) | (val & mask));
1351 break;
1352 }
1353 case R_PPC64_REL24:
1354 case R_PPC64_REL24_NOTOC: {
1355 uint32_t mask = 0x03FFFFFC;
1356 checkInt(loc, val, 26, rel);
1357 checkAlignment(loc, val, 4, rel);
1358 write32(loc, (read32(loc) & ~mask) | (val & mask));
1359 break;
1360 }
1361 case R_PPC64_DTPREL64:
1362 write64(loc, val - dynamicThreadPointerOffset);
1363 break;
1364 case R_PPC64_DTPREL34:
1365 // The Dynamic Thread Vector actually points 0x8000 bytes past the start
1366 // of the TLS block. Therefore, in the case of R_PPC64_DTPREL34 we first
1367 // need to subtract that value and then fall through to the general case.
1368 val -= dynamicThreadPointerOffset;
1369 [[fallthrough]];
1370 case R_PPC64_PCREL34:
1371 case R_PPC64_GOT_PCREL34:
1372 case R_PPC64_GOT_TLSGD_PCREL34:
1373 case R_PPC64_GOT_TLSLD_PCREL34:
1374 case R_PPC64_GOT_TPREL_PCREL34:
1375 case R_PPC64_TPREL34: {
1376 const uint64_t si0Mask = 0x00000003ffff0000;
1377 const uint64_t si1Mask = 0x000000000000ffff;
1378 const uint64_t fullMask = 0x0003ffff0000ffff;
1379 checkInt(loc, val, 34, rel);
1380
1381 uint64_t instr = readPrefixedInstruction(loc) & ~fullMask;
1382 writePrefixedInstruction(loc, instr | ((val & si0Mask) << 16) |
1383 (val & si1Mask));
1384 break;
1385 }
1386 // If we encounter a PCREL_OPT relocation that we won't optimize, do nothing.
1387 case R_PPC64_PCREL_OPT:
1388 break;
1389 default:
1390 llvm_unreachable("unknown relocation");
1391 }
1392 }
1393
1394 bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
1395 uint64_t branchAddr, const Symbol &s, int64_t a) const {
1396 if (type != R_PPC64_REL14 && type != R_PPC64_REL24 &&
1397 type != R_PPC64_REL24_NOTOC)
1398 return false;
1399
1400 // If a function is in the Plt it needs to be called with a call-stub.
1401 if (s.isInPlt())
1402 return true;
1403
1404 // This check looks at the st_other bits of the callee with relocation
1405 // R_PPC64_REL14 or R_PPC64_REL24. If the value is 1, then the callee
1406 // clobbers the TOC and we need an R2 save stub.
1407 if (type != R_PPC64_REL24_NOTOC && (s.stOther >> 5) == 1)
1408 return true;
1409
1410 if (type == R_PPC64_REL24_NOTOC && (s.stOther >> 5) > 1)
1411 return true;
1412
1413 // An undefined weak symbol not in a PLT does not need a thunk. If it is
1414 // hidden, its binding has been converted to local, so we just check
1415 // isUndefined() here. An undefined non-weak symbol has already been diagnosed.
1416 if (s.isUndefined())
1417 return false;
1418
1419 // If the offset exceeds the range of the branch type then it will need
1420 // a range-extending thunk.
1421 // See the comment in getRelocTargetVA() about R_PPC64_CALL.
1422 return !inBranchRange(type, branchAddr,
1423 s.getVA(a) +
1424 getPPC64GlobalEntryToLocalEntryOffset(s.stOther));
1425 }
1426
1427 uint32_t PPC64::getThunkSectionSpacing() const {
1428 // See comment in Arch/ARM.cpp for a more detailed explanation of
1429 // getThunkSectionSpacing(). For PPC64 we pick the constant here based on
1430 // R_PPC64_REL24, which is used by unconditional branch instructions.
1431 // 0x2000000 = (1 << (24 - 1)) * 4
1432 return 0x2000000;
1433 }
1434
1435 bool PPC64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
1436 int64_t offset = dst - src;
1437 if (type == R_PPC64_REL14)
1438 return isInt<16>(offset);
1439 if (type == R_PPC64_REL24 || type == R_PPC64_REL24_NOTOC)
1440 return isInt<26>(offset);
1441 llvm_unreachable("unsupported relocation type used in branch");
1442 }
1443
1444 RelExpr PPC64::adjustTlsExpr(RelType type, RelExpr expr) const {
1445 if (type != R_PPC64_GOT_TLSGD_PCREL34 && expr == R_RELAX_TLS_GD_TO_IE)
1446 return R_RELAX_TLS_GD_TO_IE_GOT_OFF;
1447 if (expr == R_RELAX_TLS_LD_TO_LE)
1448 return R_RELAX_TLS_LD_TO_LE_ABS;
1449 return expr;
1450 }
1451
1452 RelExpr PPC64::adjustGotPcExpr(RelType type, int64_t addend,
1453 const uint8_t *loc) const {
1454 if ((type == R_PPC64_GOT_PCREL34 || type == R_PPC64_PCREL_OPT) &&
1455 config->pcRelOptimize) {
1456 // It only makes sense to optimize pld since paddi means that the address
1457 // of the object in the GOT is required rather than the object itself.
1458 if ((readPrefixedInstruction(loc) & 0xfc000000) == 0xe4000000)
1459 return R_PPC64_RELAX_GOT_PC;
1460 }
1461 return R_GOT_PC;
1462 }
1463
1464 // Reference: 3.7.4.1 of the 64-bit ELF V2 abi supplement.
1465 // The general dynamic code sequence for a global `x` uses 4 instructions.
1466 // Instruction Relocation Symbol
1467 // addis r3, r2, x@got@tlsgd@ha R_PPC64_GOT_TLSGD16_HA x
1468 // addi r3, r3, x@got@tlsgd@l R_PPC64_GOT_TLSGD16_LO x
1469 // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSGD x
1470 // R_PPC64_REL24 __tls_get_addr
1471 // nop None None
1472 //
1473 // Relaxing to initial-exec entails:
1474 // 1) Convert the addis/addi pair that builds the address of the tls_index
1475 // struct for 'x' to an addis/ld pair that loads an offset from a got-entry.
1476 // 2) Convert the call to __tls_get_addr to a nop.
1477 // 3) Convert the nop following the call to an add of the loaded offset to the
1478 // thread pointer.
1479 // Since the nop must directly follow the call, the R_PPC64_TLSGD relocation is
1480 // used as the relaxation hint for both steps 2 and 3.
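// After relaxation the sequence therefore becomes:
//   addis r3, r2, x@got@tprel@ha
//   ld    r3, x@got@tprel@l(r3)
//   nop
//   add   r3, r3, r13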
1481 void PPC64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
1482 uint64_t val) const {
1483 switch (rel.type) {
1484 case R_PPC64_GOT_TLSGD16_HA:
1485 // This is relaxed from addis rT, r2, sym@got@tlsgd@ha to
1486 // addis rT, r2, sym@got@tprel@ha.
1487 relocateNoSym(loc, R_PPC64_GOT_TPREL16_HA, val);
1488 return;
1489 case R_PPC64_GOT_TLSGD16:
1490 case R_PPC64_GOT_TLSGD16_LO: {
1491 // Relax from addi r3, rA, sym@got@tlsgd@l to
1492 // ld r3, sym@got@tprel@l(rA)
1493 uint32_t ra = (readFromHalf16(loc) & (0x1f << 16));
1494 writeFromHalf16(loc, 0xe8600000 | ra);
1495 relocateNoSym(loc, R_PPC64_GOT_TPREL16_LO_DS, val);
1496 return;
1497 }
1498 case R_PPC64_GOT_TLSGD_PCREL34: {
1499 // Relax from paddi r3, 0, sym@got@tlsgd@pcrel, 1 to
1500 // pld r3, sym@got@tprel@pcrel
1501 writePrefixedInstruction(loc, 0x04100000e4600000);
1502 relocateNoSym(loc, R_PPC64_GOT_TPREL_PCREL34, val);
1503 return;
1504 }
1505 case R_PPC64_TLSGD: {
1506 // PC Relative Relaxation:
1507 // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
1508 // nop
1509 // TOC Relaxation:
1510 // Relax from bl __tls_get_addr(x@tlsgd)
1511 // nop
1512 // to
1513 // nop
1514 // add r3, r3, r13
1515 const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
1516 if (locAsInt % 4 == 0) {
1517 write32(loc, NOP); // bl __tls_get_addr(sym@tlsgd) --> nop
1518 write32(loc + 4, 0x7c636A14); // nop --> add r3, r3, r13
1519 } else if (locAsInt % 4 == 1) {
1520 // bl __tls_get_addr(sym@tlsgd) --> add r3, r3, r13
1521 write32(loc - 1, 0x7c636a14);
1522 } else {
1523 errorOrWarn("R_PPC64_TLSGD has unexpected byte alignment");
1524 }
1525 return;
1526 }
1527 default:
1528 llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
1529 }
1530 }
1531
1532 void PPC64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
1533 uint64_t secAddr = sec.getOutputSection()->addr;
1534 if (auto *s = dyn_cast<InputSection>(&sec))
1535 secAddr += s->outSecOff;
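  // Offset of the most recently relaxed R_PPC64_GOT_PCREL34; a paired
  // R_PPC64_PCREL_OPT is only relaxed when it shares this offset.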
  uint64_t lastPPCRelaxedRelocOff = -1;
  for (const Relocation &rel : sec.relocs()) {
    uint8_t *loc = buf + rel.offset;
    const uint64_t val =
        sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
                             secAddr + rel.offset, *rel.sym, rel.expr);
    switch (rel.expr) {
    case R_PPC64_RELAX_GOT_PC: {
      // The R_PPC64_PCREL_OPT relocation must appear immediately after
      // R_PPC64_GOT_PCREL34 in the relocations table at the same offset.
      // We can only relax R_PPC64_PCREL_OPT if we have also relaxed the
      // associated R_PPC64_GOT_PCREL34 since only the latter has an associated
      // symbol. So save the offset when relaxing R_PPC64_GOT_PCREL34 and only
      // relax the other if the saved offset matches.
      if (rel.type == R_PPC64_GOT_PCREL34)
        lastPPCRelaxedRelocOff = rel.offset;
      if (rel.type == R_PPC64_PCREL_OPT &&
          rel.offset != lastPPCRelaxedRelocOff)
        break;
      relaxGot(loc, rel, val);
      break;
    }
    case R_PPC64_RELAX_TOC:
      // rel.sym refers to the STT_SECTION symbol associated to the .toc input
      // section. If an R_PPC64_TOC16_LO (.toc + addend) references the TOC
      // entry, there may be an R_PPC64_TOC16_HA not paired with an
      // R_PPC64_TOC16_LO_DS. Don't relax. This loses some relaxation
      // opportunities but is safe.
      if (ppc64noTocRelax.count({rel.sym, rel.addend}) ||
          !tryRelaxPPC64TocIndirection(rel, loc))
        relocate(loc, rel, val);
      break;
    case R_PPC64_CALL:
      // If this is a call to __tls_get_addr, it may be part of a TLS
      // sequence that has been relaxed and turned into a nop. In this
      // case, we don't want to handle it as a call.
      if (read32(loc) == 0x60000000) // nop
        break;

      // Patch a nop (0x60000000) to an ld.
      if (rel.sym->needsTocRestore) {
        // gcc/gfortran 5.4, 6.3 and earlier versions do not add a nop for
        // recursive calls even if the function is preemptible. This is not
        // wrong in the common case where the function is not preempted at
        // runtime. Just ignore.
        if ((rel.offset + 8 > sec.content().size() ||
             read32(loc + 4) != 0x60000000) &&
            rel.sym->file != sec.file) {
          // Use substr(6) to remove the "__plt_" prefix.
          errorOrWarn(getErrorLocation(loc) + "call to " +
                      lld::toString(*rel.sym).substr(6) +
                      " lacks nop, can't restore toc");
          break;
        }
        write32(loc + 4, 0xe8410018); // ld %r2, 24(%r1)
      }
      relocate(loc, rel, val);
      break;
    case R_RELAX_TLS_GD_TO_IE:
    case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
      relaxTlsGdToIe(loc, rel, val);
      break;
    case R_RELAX_TLS_GD_TO_LE:
      relaxTlsGdToLe(loc, rel, val);
      break;
    case R_RELAX_TLS_LD_TO_LE_ABS:
      relaxTlsLdToLe(loc, rel, val);
      break;
    case R_RELAX_TLS_IE_TO_LE:
      relaxTlsIeToLe(loc, rel, val);
      break;
    default:
      relocate(loc, rel, val);
      break;
    }
  }
}

// The prologue for a split-stack function is expected to look roughly
// like this:
//    .Lglobal_entry_point:
//      # TOC pointer initialization.
//      ...
//    .Llocal_entry_point:
//      # load the __private_ss member of the thread's tcbhead.
//      ld r0,-0x7000-64(r13)
//      # subtract the function's stack size from the stack pointer.
//      addis r12, r1, ha(-stack-frame size)
//      addi r12, r12, l(-stack-frame size)
//      # compare the computed stack pointer to the limit and branch to
//      # allocate_more_stack if more space is needed, otherwise fall through
//      # to the 'normal' function body.
//      cmpld cr7,r12,r0
//      blt- cr7, .Lallocate_more_stack
//
// -) The allocate_more_stack block might be placed after the split-stack
//    prologue and the `blt-` replaced with a `bge+ .Lnormal_func_body`
//    instead.
// -) If either the addis or addi is not needed due to the stack size being
//    smaller than 32K or a multiple of 64K it will be replaced with a nop,
//    but there will always be 2 instructions the linker can overwrite for the
//    adjusted stack size.
//
// The linker's job here is to increase the stack size used in the addis/addi
// pair by split-stack-size-adjust.
// addis r12, r1, ha(-stack-frame size - split-stack-adjust-size)
// addi  r12, r12, l(-stack-frame size - split-stack-adjust-size)
bool PPC64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                             uint8_t stOther) const {
  // If the caller has a global entry point, adjust the buffer past it. The
  // start of the split-stack prologue will be at the local entry point.
  loc += getPPC64GlobalEntryToLocalEntryOffset(stOther);

  // At the very least we expect to see a load of some split-stack data from
  // the tcb, and 2 instructions that calculate the ending stack address this
  // function will require. If there is not enough room for at least 3
  // instructions it can't be a split-stack prologue.
  if (loc + 12 >= end)
    return false;

  // First instruction must be `ld r0, -0x7000-64(r13)`
  if (read32(loc) != 0xe80d8fc0)
    return false;

  int16_t hiImm = 0;
  int16_t loImm = 0;
  // The first instruction of the addis/addi pair can be an addis if the frame
  // size is larger than 32K, or an addi if the size is less than 32K.
  int32_t firstInstr = read32(loc + 4);
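  // Primary opcode 15 is addis and 14 is addi.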
  if (getPrimaryOpCode(firstInstr) == 15) {
    hiImm = firstInstr & 0xFFFF;
  } else if (getPrimaryOpCode(firstInstr) == 14) {
    loImm = firstInstr & 0xFFFF;
  } else {
    return false;
  }

  // The second instruction is either an addi or a nop. If the first
  // instruction was an addi then loImm is already set and the second
  // instruction must be a nop.
  uint32_t secondInstr = read32(loc + 8);
  if (!loImm && getPrimaryOpCode(secondInstr) == 14) {
    loImm = secondInstr & 0xFFFF;
  } else if (secondInstr != NOP) {
    return false;
  }

  // The register operands of the first instruction should be the stack-pointer
  // (r1) as the input (RA) and r12 as the output (RT). If the second
  // instruction is not a nop, then it should use r12 as both input and output.
  auto checkRegOperands = [](uint32_t instr, uint8_t expectedRT,
                             uint8_t expectedRA) {
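    // RT occupies bits 6-10 and RA bits 11-15 of the instruction word (ISA
    // bit numbering), hence the masks and the shifts by 21 and 16.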
    return ((instr & 0x3E00000) >> 21 == expectedRT) &&
           ((instr & 0x1F0000) >> 16 == expectedRA);
  };
  if (!checkRegOperands(firstInstr, 12, 1))
    return false;
  if (secondInstr != NOP && !checkRegOperands(secondInstr, 12, 12))
    return false;

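  // Combine the addis/addi immediates to recover the (negative) frame offset
  // that the prologue applies to the stack pointer.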
  int32_t stackFrameSize = (hiImm * 65536) + loImm;
  // Check that the adjusted size doesn't overflow what we can represent with 2
  // instructions.
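  // The condition is equivalent to stackFrameSize - splitStackAdjustSize being
  // less than INT32_MIN.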
  if (stackFrameSize < config->splitStackAdjustSize + INT32_MIN) {
    error(getErrorLocation(loc) + "split-stack prologue adjustment overflows");
    return false;
  }

  int32_t adjustedStackFrameSize =
      stackFrameSize - config->splitStackAdjustSize;

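  // Re-split the adjusted size into high/low halves for the addis/addi pair;
  // adding 0x8000 before the shift compensates for the sign extension of the
  // low 16-bit immediate.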
  loImm = adjustedStackFrameSize & 0xFFFF;
  hiImm = (adjustedStackFrameSize + 0x8000) >> 16;
  if (hiImm) {
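    // addis r12, r1, imm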
    write32(loc + 4, 0x3D810000 | (uint16_t)hiImm);
    // If the low immediate is zero the second instruction will be a nop.
    secondInstr = loImm ? 0x398C0000 | (uint16_t)loImm : NOP;
    write32(loc + 8, secondInstr);
  } else {
    // addi r12, r1, imm
    write32(loc + 4, (0x39810000) | (uint16_t)loImm);
    write32(loc + 8, NOP);
  }

  return true;
}

TargetInfo *elf::getPPC64TargetInfo() {
  static PPC64 target;
  return &target;
}