1 //===- SyntheticSections.cpp ----------------------------------------------===//
2 //
3 //                             The LLVM Linker
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains linker-synthesized sections. Currently,
11 // synthetic sections are created either as output sections or as input sections,
12 // but we are rewriting code so that all synthetic sections are created as
13 // input sections.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "SyntheticSections.h"
18 #include "Bits.h"
19 #include "Config.h"
20 #include "InputFiles.h"
21 #include "LinkerScript.h"
22 #include "OutputSections.h"
23 #include "SymbolTable.h"
24 #include "Symbols.h"
25 #include "Target.h"
26 #include "Writer.h"
27 #include "lld/Common/ErrorHandler.h"
28 #include "lld/Common/Memory.h"
29 #include "lld/Common/Strings.h"
30 #include "lld/Common/Threads.h"
31 #include "lld/Common/Version.h"
32 #include "llvm/ADT/SetOperations.h"
33 #include "llvm/ADT/StringExtras.h"
34 #include "llvm/BinaryFormat/Dwarf.h"
35 #include "llvm/DebugInfo/DWARF/DWARFDebugPubTable.h"
36 #include "llvm/Object/ELFObjectFile.h"
37 #include "llvm/Support/Compression.h"
38 #include "llvm/Support/Endian.h"
39 #include "llvm/Support/LEB128.h"
40 #include "llvm/Support/MD5.h"
41 #include "llvm/Support/RandomNumberGenerator.h"
42 #include "llvm/Support/SHA1.h"
43 #include "llvm/Support/xxhash.h"
44 #include <cstdlib>
45 #include <thread>
46 
47 using namespace llvm;
48 using namespace llvm::dwarf;
49 using namespace llvm::ELF;
50 using namespace llvm::object;
51 using namespace llvm::support;
52 
53 using namespace lld;
54 using namespace lld::elf;
55 
56 using llvm::support::endian::read32le;
57 using llvm::support::endian::write32le;
58 using llvm::support::endian::write64le;
59 
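// Out-of-line definition of the static constexpr member; required (pre-C++17)
// because the member is odr-used.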
60 constexpr size_t MergeNoTailSection::NumShards;
61 
62 // Returns an LLD version string.
63 static ArrayRef<uint8_t> getVersion() {
64   // Check LLD_VERSION first for ease of testing.
65   // You can get consistent output by using the environment variable.
66   // This is only for testing.
67   StringRef S = getenv("LLD_VERSION");
68   if (S.empty())
69     S = Saver.save(Twine("Linker: ") + getLLDVersion());
70 
71   // +1 to include the terminating '\0'.
72   return {(const uint8_t *)S.data(), S.size() + 1};
73 }
74 
75 // Creates a .comment section containing LLD version info.
76 // With this feature, you can identify LLD-generated binaries easily
77 // by "readelf --string-dump .comment <file>".
78 // The returned object is a mergeable string section.
79 MergeInputSection *elf::createCommentSection() {
80   return make<MergeInputSection>(SHF_MERGE | SHF_STRINGS, SHT_PROGBITS, 1,
81                                  getVersion(), ".comment");
82 }
83 
84 // .MIPS.abiflags section.
85 template <class ELFT>
86 MipsAbiFlagsSection<ELFT>::MipsAbiFlagsSection(Elf_Mips_ABIFlags Flags)
87     : SyntheticSection(SHF_ALLOC, SHT_MIPS_ABIFLAGS, 8, ".MIPS.abiflags"),
88       Flags(Flags) {
89   this->Entsize = sizeof(Elf_Mips_ABIFlags);
90 }
91 
92 template <class ELFT> void MipsAbiFlagsSection<ELFT>::writeTo(uint8_t *Buf) {
93   memcpy(Buf, &Flags, sizeof(Flags));
94 }
95 
96 template <class ELFT>
97 MipsAbiFlagsSection<ELFT> *MipsAbiFlagsSection<ELFT>::create() {
98   Elf_Mips_ABIFlags Flags = {};
99   bool Create = false;
100 
101   for (InputSectionBase *Sec : InputSections) {
102     if (Sec->Type != SHT_MIPS_ABIFLAGS)
103       continue;
104     Sec->Live = false;
105     Create = true;
106 
107     std::string Filename = toString(Sec->File);
108     const size_t Size = Sec->data().size();
109     // Older versions of BFD (such as the default FreeBSD linker) concatenate
110     // .MIPS.abiflags sections instead of merging them. To allow for this case
111     // (or potential zero padding) we ignore everything after the first Elf_Mips_ABIFlags.
112     if (Size < sizeof(Elf_Mips_ABIFlags)) {
113       error(Filename + ": invalid size of .MIPS.abiflags section: got " +
114             Twine(Size) + " instead of " + Twine(sizeof(Elf_Mips_ABIFlags)));
115       return nullptr;
116     }
117     auto *S = reinterpret_cast<const Elf_Mips_ABIFlags *>(Sec->data().data());
118     if (S->version != 0) {
119       error(Filename + ": unexpected .MIPS.abiflags version " +
120             Twine(S->version));
121       return nullptr;
122     }
123 
124     // LLD checks ISA compatibility in calcMipsEFlags(). Here we just
125     // select the highest number of ISA/Rev/Ext.
126     Flags.isa_level = std::max(Flags.isa_level, S->isa_level);
127     Flags.isa_rev = std::max(Flags.isa_rev, S->isa_rev);
128     Flags.isa_ext = std::max(Flags.isa_ext, S->isa_ext);
129     Flags.gpr_size = std::max(Flags.gpr_size, S->gpr_size);
130     Flags.cpr1_size = std::max(Flags.cpr1_size, S->cpr1_size);
131     Flags.cpr2_size = std::max(Flags.cpr2_size, S->cpr2_size);
132     Flags.ases |= S->ases;
133     Flags.flags1 |= S->flags1;
134     Flags.flags2 |= S->flags2;
135     Flags.fp_abi = elf::getMipsFpAbiFlag(Flags.fp_abi, S->fp_abi, Filename);
136   }
137 
138   if (Create)
139     return make<MipsAbiFlagsSection<ELFT>>(Flags);
140   return nullptr;
141 }
142 
143 // .MIPS.options section.
144 template <class ELFT>
145 MipsOptionsSection<ELFT>::MipsOptionsSection(Elf_Mips_RegInfo Reginfo)
146     : SyntheticSection(SHF_ALLOC, SHT_MIPS_OPTIONS, 8, ".MIPS.options"),
147       Reginfo(Reginfo) {
148   this->Entsize = sizeof(Elf_Mips_Options) + sizeof(Elf_Mips_RegInfo);
149 }
150 
151 template <class ELFT> void MipsOptionsSection<ELFT>::writeTo(uint8_t *Buf) {
152   auto *Options = reinterpret_cast<Elf_Mips_Options *>(Buf);
153   Options->kind = ODK_REGINFO;
154   Options->size = getSize();
155 
156   if (!Config->Relocatable)
157     Reginfo.ri_gp_value = In.MipsGot->getGp();
158   memcpy(Buf + sizeof(Elf_Mips_Options), &Reginfo, sizeof(Reginfo));
159 }
160 
161 template <class ELFT>
162 MipsOptionsSection<ELFT> *MipsOptionsSection<ELFT>::create() {
163   // N64 ABI only.
164   if (!ELFT::Is64Bits)
165     return nullptr;
166 
167   std::vector<InputSectionBase *> Sections;
168   for (InputSectionBase *Sec : InputSections)
169     if (Sec->Type == SHT_MIPS_OPTIONS)
170       Sections.push_back(Sec);
171 
172   if (Sections.empty())
173     return nullptr;
174 
175   Elf_Mips_RegInfo Reginfo = {};
176   for (InputSectionBase *Sec : Sections) {
177     Sec->Live = false;
178 
179     std::string Filename = toString(Sec->File);
180     ArrayRef<uint8_t> D = Sec->data();
181 
182     while (!D.empty()) {
183       if (D.size() < sizeof(Elf_Mips_Options)) {
184         error(Filename + ": invalid size of .MIPS.options section");
185         break;
186       }
187 
188       auto *Opt = reinterpret_cast<const Elf_Mips_Options *>(D.data());
189       if (Opt->kind == ODK_REGINFO) {
190         Reginfo.ri_gprmask |= Opt->getRegInfo().ri_gprmask;
191         Sec->getFile<ELFT>()->MipsGp0 = Opt->getRegInfo().ri_gp_value;
192         break;
193       }
194 
195       if (!Opt->size)
196         fatal(Filename + ": zero option descriptor size");
197       D = D.slice(Opt->size);
198     }
199   }
200 
201   return make<MipsOptionsSection<ELFT>>(Reginfo);
202 }
203 
204 // MIPS .reginfo section.
205 template <class ELFT>
206 MipsReginfoSection<ELFT>::MipsReginfoSection(Elf_Mips_RegInfo Reginfo)
207     : SyntheticSection(SHF_ALLOC, SHT_MIPS_REGINFO, 4, ".reginfo"),
208       Reginfo(Reginfo) {
209   this->Entsize = sizeof(Elf_Mips_RegInfo);
210 }
211 
212 template <class ELFT> void MipsReginfoSection<ELFT>::writeTo(uint8_t *Buf) {
213   if (!Config->Relocatable)
214     Reginfo.ri_gp_value = In.MipsGot->getGp();
215   memcpy(Buf, &Reginfo, sizeof(Reginfo));
216 }
217 
218 template <class ELFT>
219 MipsReginfoSection<ELFT> *MipsReginfoSection<ELFT>::create() {
220   // Section should be alive for O32 and N32 ABIs only.
221   if (ELFT::Is64Bits)
222     return nullptr;
223 
224   std::vector<InputSectionBase *> Sections;
225   for (InputSectionBase *Sec : InputSections)
226     if (Sec->Type == SHT_MIPS_REGINFO)
227       Sections.push_back(Sec);
228 
229   if (Sections.empty())
230     return nullptr;
231 
232   Elf_Mips_RegInfo Reginfo = {};
233   for (InputSectionBase *Sec : Sections) {
234     Sec->Live = false;
235 
236     if (Sec->data().size() != sizeof(Elf_Mips_RegInfo)) {
237       error(toString(Sec->File) + ": invalid size of .reginfo section");
238       return nullptr;
239     }
240 
241     auto *R = reinterpret_cast<const Elf_Mips_RegInfo *>(Sec->data().data());
242     Reginfo.ri_gprmask |= R->ri_gprmask;
243     Sec->getFile<ELFT>()->MipsGp0 = R->ri_gp_value;
244   }
245 
246   return make<MipsReginfoSection<ELFT>>(Reginfo);
247 }
248 
249 InputSection *elf::createInterpSection() {
250   // StringSaver guarantees that the returned string ends with '\0'.
251   StringRef S = Saver.save(Config->DynamicLinker);
252   ArrayRef<uint8_t> Contents = {(const uint8_t *)S.data(), S.size() + 1};
253 
254   auto *Sec = make<InputSection>(nullptr, SHF_ALLOC, SHT_PROGBITS, 1, Contents,
255                                  ".interp");
256   Sec->Live = true;
257   return Sec;
258 }
259 
260 Defined *elf::addSyntheticLocal(StringRef Name, uint8_t Type, uint64_t Value,
261                                 uint64_t Size, InputSectionBase &Section) {
262   auto *S = make<Defined>(Section.File, Name, STB_LOCAL, STV_DEFAULT, Type,
263                           Value, Size, &Section);
264   if (In.SymTab)
265     In.SymTab->addSymbol(S);
266   return S;
267 }
268 
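// Returns the size in bytes of the build ID hash: 8 for "fast" (xxHash64),
// 16 for MD5 or UUID, 20 for SHA-1, and the literal length for a hex string.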
269 static size_t getHashSize() {
270   switch (Config->BuildId) {
271   case BuildIdKind::Fast:
272     return 8;
273   case BuildIdKind::Md5:
274   case BuildIdKind::Uuid:
275     return 16;
276   case BuildIdKind::Sha1:
277     return 20;
278   case BuildIdKind::Hexstring:
279     return Config->BuildIdVector.size();
280   default:
281     llvm_unreachable("unknown BuildIdKind");
282   }
283 }
284 
285 BuildIdSection::BuildIdSection()
286     : SyntheticSection(SHF_ALLOC, SHT_NOTE, 4, ".note.gnu.build-id"),
287       HashSize(getHashSize()) {}
288 
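// Writes the fixed part of the GNU build ID note: namesz (4), descsz (the hash
// size), the NT_GNU_BUILD_ID type, and the "GNU" name. The descriptor itself
// starts at offset 16 and is filled in later by writeBuildId().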
289 void BuildIdSection::writeTo(uint8_t *Buf) {
290   write32(Buf, 4);                      // Name size
291   write32(Buf + 4, HashSize);           // Content size
292   write32(Buf + 8, NT_GNU_BUILD_ID);    // Type
293   memcpy(Buf + 12, "GNU", 4);           // Name string
294   HashBuf = Buf + 16;
295 }
296 
297 // Splits one uint8 array into chunks of at most ChunkSize bytes.
298 static std::vector<ArrayRef<uint8_t>> split(ArrayRef<uint8_t> Arr,
299                                             size_t ChunkSize) {
300   std::vector<ArrayRef<uint8_t>> Ret;
301   while (Arr.size() > ChunkSize) {
302     Ret.push_back(Arr.take_front(ChunkSize));
303     Arr = Arr.drop_front(ChunkSize);
304   }
305   if (!Arr.empty())
306     Ret.push_back(Arr);
307   return Ret;
308 }
309 
310 // Computes a hash value of Data using a given hash function.
311 // In order to utilize multiple cores, we first split data into 1MB
312 // chunks, compute a hash for each chunk, and then compute a hash value
313 // of the hash values.
314 void BuildIdSection::computeHash(
315     llvm::ArrayRef<uint8_t> Data,
316     std::function<void(uint8_t *Dest, ArrayRef<uint8_t> Arr)> HashFn) {
317   std::vector<ArrayRef<uint8_t>> Chunks = split(Data, 1024 * 1024);
318   std::vector<uint8_t> Hashes(Chunks.size() * HashSize);
319 
320   // Compute hash values.
321   parallelForEachN(0, Chunks.size(), [&](size_t I) {
322     HashFn(Hashes.data() + I * HashSize, Chunks[I]);
323   });
324 
325   // Write to the final output buffer.
326   HashFn(HashBuf, Hashes);
327 }
328 
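// A BssSection is a synthetic SHT_NOBITS section: it reserves zero-initialized
// space (e.g. for copy relocations and common symbols) but occupies no bytes
// in the output file.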
329 BssSection::BssSection(StringRef Name, uint64_t Size, uint32_t Alignment)
330     : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_NOBITS, Alignment, Name) {
331   this->Bss = true;
332   this->Size = Size;
333 }
334 
335 void BuildIdSection::writeBuildId(ArrayRef<uint8_t> Buf) {
336   switch (Config->BuildId) {
337   case BuildIdKind::Fast:
338     computeHash(Buf, [](uint8_t *Dest, ArrayRef<uint8_t> Arr) {
339       write64le(Dest, xxHash64(Arr));
340     });
341     break;
342   case BuildIdKind::Md5:
343     computeHash(Buf, [](uint8_t *Dest, ArrayRef<uint8_t> Arr) {
344       memcpy(Dest, MD5::hash(Arr).data(), 16);
345     });
346     break;
347   case BuildIdKind::Sha1:
348     computeHash(Buf, [](uint8_t *Dest, ArrayRef<uint8_t> Arr) {
349       memcpy(Dest, SHA1::hash(Arr).data(), 20);
350     });
351     break;
352   case BuildIdKind::Uuid:
353     if (auto EC = getRandomBytes(HashBuf, HashSize))
354       error("entropy source failure: " + EC.message());
355     break;
356   case BuildIdKind::Hexstring:
357     memcpy(HashBuf, Config->BuildIdVector.data(), Config->BuildIdVector.size());
358     break;
359   default:
360     llvm_unreachable("unknown BuildIdKind");
361   }
362 }
363 
364 EhFrameSection::EhFrameSection()
365     : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 1, ".eh_frame") {}
366 
367 // Search for an existing CIE record or create a new one.
368 // CIE records from input object files are uniquified by their contents
369 // and where their relocations point to.
370 template <class ELFT, class RelTy>
371 CieRecord *EhFrameSection::addCie(EhSectionPiece &Cie, ArrayRef<RelTy> Rels) {
372   Symbol *Personality = nullptr;
373   unsigned FirstRelI = Cie.FirstRelocation;
374   if (FirstRelI != (unsigned)-1)
375     Personality =
376         &Cie.Sec->template getFile<ELFT>()->getRelocTargetSym(Rels[FirstRelI]);
377 
378   // Search for an existing CIE by CIE contents/relocation target pair.
379   CieRecord *&Rec = CieMap[{Cie.data(), Personality}];
380 
381   // If not found, create a new one.
382   if (!Rec) {
383     Rec = make<CieRecord>();
384     Rec->Cie = &Cie;
385     CieRecords.push_back(Rec);
386   }
387   return Rec;
388 }
389 
390 // There is one FDE per function. Returns true if a given FDE
391 // points to a live function.
392 template <class ELFT, class RelTy>
393 bool EhFrameSection::isFdeLive(EhSectionPiece &Fde, ArrayRef<RelTy> Rels) {
394   auto *Sec = cast<EhInputSection>(Fde.Sec);
395   unsigned FirstRelI = Fde.FirstRelocation;
396 
397   // An FDE should point to some function because FDEs are used to describe
398   // functions. That is, however, not always the case due to an issue with
399   // ld.gold -r: it may discard functions but leave their corresponding FDEs
400   // behind, which results in bad .eh_frame sections. To deal with that, we
401   // ignore such FDEs.
402   if (FirstRelI == (unsigned)-1)
403     return false;
404 
405   const RelTy &Rel = Rels[FirstRelI];
406   Symbol &B = Sec->template getFile<ELFT>()->getRelocTargetSym(Rel);
407 
408   // FDEs for garbage-collected or merged-by-ICF sections are dead.
409   if (auto *D = dyn_cast<Defined>(&B))
410     if (SectionBase *Sec = D->Section)
411       return Sec->Live;
412   return false;
413 }
414 
415 // .eh_frame is a sequence of CIE or FDE records. In general, there
416 // is one CIE record per input object file which is followed by
417 // a list of FDEs. This function searches for an existing CIE or creates a new
418 // one, and associates FDEs with that CIE.
419 template <class ELFT, class RelTy>
420 void EhFrameSection::addSectionAux(EhInputSection *Sec, ArrayRef<RelTy> Rels) {
421   OffsetToCie.clear();
422   for (EhSectionPiece &Piece : Sec->Pieces) {
423     // The empty record is the end marker.
424     if (Piece.Size == 4)
425       return;
426 
427     size_t Offset = Piece.InputOff;
428     uint32_t ID = read32(Piece.data().data() + 4);
429     if (ID == 0) {
430       OffsetToCie[Offset] = addCie<ELFT>(Piece, Rels);
431       continue;
432     }
433 
434     uint32_t CieOffset = Offset + 4 - ID;
435     CieRecord *Rec = OffsetToCie[CieOffset];
436     if (!Rec)
437       fatal(toString(Sec) + ": invalid CIE reference");
438 
439     if (!isFdeLive<ELFT>(Piece, Rels))
440       continue;
441     Rec->Fdes.push_back(&Piece);
442     NumFdes++;
443   }
444 }
445 
446 template <class ELFT> void EhFrameSection::addSection(InputSectionBase *C) {
447   auto *Sec = cast<EhInputSection>(C);
448   Sec->Parent = this;
449 
450   Alignment = std::max(Alignment, Sec->Alignment);
451   Sections.push_back(Sec);
452 
453   for (auto *DS : Sec->DependentSections)
454     DependentSections.push_back(DS);
455 
456   if (Sec->Pieces.empty())
457     return;
458 
459   if (Sec->AreRelocsRela)
460     addSectionAux<ELFT>(Sec, Sec->template relas<ELFT>());
461   else
462     addSectionAux<ELFT>(Sec, Sec->template rels<ELFT>());
463 }
464 
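// Copies a CIE or FDE record to the output buffer, zero-fills the trailing
// padding up to the word-size boundary, and rewrites the length field (which
// does not count the length field itself).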
465 static void writeCieFde(uint8_t *Buf, ArrayRef<uint8_t> D) {
466   memcpy(Buf, D.data(), D.size());
467 
468   size_t Aligned = alignTo(D.size(), Config->Wordsize);
469 
470   // Zero-clear trailing padding if it exists.
471   memset(Buf + D.size(), 0, Aligned - D.size());
472 
473   // Fix the size field. -4 since size does not include the size field itself.
474   write32(Buf, Aligned - 4);
475 }
476 
477 void EhFrameSection::finalizeContents() {
478   assert(!this->Size); // Not finalized.
479   size_t Off = 0;
480   for (CieRecord *Rec : CieRecords) {
481     Rec->Cie->OutputOff = Off;
482     Off += alignTo(Rec->Cie->Size, Config->Wordsize);
483 
484     for (EhSectionPiece *Fde : Rec->Fdes) {
485       Fde->OutputOff = Off;
486       Off += alignTo(Fde->Size, Config->Wordsize);
487     }
488   }
489 
490   // The LSB standard does not allow a .eh_frame section with zero
491   // Call Frame Information records. glibc unwind-dw2-fde.c
492   // classify_object_over_fdes expects a CIE record of length 0 as a
493   // terminator. Thus we add one unconditionally.
494   Off += 4;
495 
496   this->Size = Off;
497 }
498 
499 // Returns data for .eh_frame_hdr. .eh_frame_hdr is a binary search table
500 // used to look up the FDE that applies to a given address. This function
501 // returns a list of such address/FDE pairs.
502 std::vector<EhFrameSection::FdeData> EhFrameSection::getFdeData() const {
503   uint8_t *Buf = getParent()->Loc + OutSecOff;
504   std::vector<FdeData> Ret;
505 
506   uint64_t VA = In.EhFrameHdr->getVA();
507   for (CieRecord *Rec : CieRecords) {
508     uint8_t Enc = getFdeEncoding(Rec->Cie);
509     for (EhSectionPiece *Fde : Rec->Fdes) {
510       uint64_t Pc = getFdePc(Buf, Fde->OutputOff, Enc);
511       uint64_t FdeVA = getParent()->Addr + Fde->OutputOff;
512       if (!isInt<32>(Pc - VA))
513         fatal(toString(Fde->Sec) + ": PC offset is too large: 0x" +
514               Twine::utohexstr(Pc - VA));
515       Ret.push_back({uint32_t(Pc - VA), uint32_t(FdeVA - VA)});
516     }
517   }
518 
519   // Sort the FDE list by PC and remove duplicates. Usually there is only
520   // one FDE per PC (i.e. per function), but if ICF merges two functions
521   // into one, more than one FDE can point to the same address.
522   auto Less = [](const FdeData &A, const FdeData &B) {
523     return A.PcRel < B.PcRel;
524   };
525   std::stable_sort(Ret.begin(), Ret.end(), Less);
526   auto Eq = [](const FdeData &A, const FdeData &B) {
527     return A.PcRel == B.PcRel;
528   };
529   Ret.erase(std::unique(Ret.begin(), Ret.end(), Eq), Ret.end());
530 
531   return Ret;
532 }
533 
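// Reads an address field from an FDE using the given DW_EH_PE_* size encoding.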
534 static uint64_t readFdeAddr(uint8_t *Buf, int Size) {
535   switch (Size) {
536   case DW_EH_PE_udata2:
537     return read16(Buf);
538   case DW_EH_PE_sdata2:
539     return (int16_t)read16(Buf);
540   case DW_EH_PE_udata4:
541     return read32(Buf);
542   case DW_EH_PE_sdata4:
543     return (int32_t)read32(Buf);
544   case DW_EH_PE_udata8:
545   case DW_EH_PE_sdata8:
546     return read64(Buf);
547   case DW_EH_PE_absptr:
548     return readUint(Buf);
549   }
550   fatal("unknown FDE size encoding");
551 }
552 
553 // Returns the VA to which a given FDE (on a mmap'ed buffer) applies.
554 // We need it to create the .eh_frame_hdr section.
555 uint64_t EhFrameSection::getFdePc(uint8_t *Buf, size_t FdeOff,
556                                   uint8_t Enc) const {
557   // The starting address to which this FDE applies is
558   // stored at FDE + 8 byte.
559   size_t Off = FdeOff + 8;
560   uint64_t Addr = readFdeAddr(Buf + Off, Enc & 0xf);
561   if ((Enc & 0x70) == DW_EH_PE_absptr)
562     return Addr;
563   if ((Enc & 0x70) == DW_EH_PE_pcrel)
564     return Addr + getParent()->Addr + Off;
565   fatal("unknown FDE size relative encoding");
566 }
567 
568 void EhFrameSection::writeTo(uint8_t *Buf) {
569   // Write CIE and FDE records.
570   for (CieRecord *Rec : CieRecords) {
571     size_t CieOffset = Rec->Cie->OutputOff;
572     writeCieFde(Buf + CieOffset, Rec->Cie->data());
573 
574     for (EhSectionPiece *Fde : Rec->Fdes) {
575       size_t Off = Fde->OutputOff;
576       writeCieFde(Buf + Off, Fde->data());
577 
578       // FDE's second word should have the offset to an associated CIE.
579       // Write it.
580       write32(Buf + Off + 4, Off + 4 - CieOffset);
581     }
582   }
583 
584   // Apply relocations. .eh_frame section contents are not contiguous
585   // in the output buffer, but relocateAlloc() still works because
586   // getOffset() takes care of discontiguous section pieces.
587   for (EhInputSection *S : Sections)
588     S->relocateAlloc(Buf, nullptr);
589 }
590 
591 GotSection::GotSection()
592     : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
593                        Target->GotEntrySize, ".got") {
594   // PPC64 saves the ElfSym::GlobalOffsetTable .TOC. as the first entry in the
595   // .got. If there are no references to .TOC. in the symbol table,
596   // ElfSym::GlobalOffsetTable will not be defined and we won't need to save
597   // .TOC. in the .got. When it is defined, we increase NumEntries by the number
598   // of entries used to emit ElfSym::GlobalOffsetTable.
599   if (ElfSym::GlobalOffsetTable && !Target->GotBaseSymInGotPlt)
600     NumEntries += Target->GotHeaderEntriesNum;
601 }
602 
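// Reserves a single GOT slot for Sym and records the slot index in the symbol.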
603 void GotSection::addEntry(Symbol &Sym) {
604   Sym.GotIndex = NumEntries;
605   ++NumEntries;
606 }
607 
608 bool GotSection::addDynTlsEntry(Symbol &Sym) {
609   if (Sym.GlobalDynIndex != -1U)
610     return false;
611   Sym.GlobalDynIndex = NumEntries;
612   // Global Dynamic TLS entries take two GOT slots.
613   NumEntries += 2;
614   return true;
615 }
616 
617 // Reserves TLS entries for a TLS module ID and a TLS block offset.
618 // In total it takes two GOT slots.
619 bool GotSection::addTlsIndex() {
620   if (TlsIndexOff != uint32_t(-1))
621     return false;
622   TlsIndexOff = NumEntries * Config->Wordsize;
623   NumEntries += 2;
624   return true;
625 }
626 
627 uint64_t GotSection::getGlobalDynAddr(const Symbol &B) const {
628   return this->getVA() + B.GlobalDynIndex * Config->Wordsize;
629 }
630 
631 uint64_t GotSection::getGlobalDynOffset(const Symbol &B) const {
632   return B.GlobalDynIndex * Config->Wordsize;
633 }
634 
635 void GotSection::finalizeContents() {
636   Size = NumEntries * Config->Wordsize;
637 }
638 
639 bool GotSection::empty() const {
640   // We need to emit a GOT even if it's empty if there's a relocation that is
641   // relative to the GOT (such as GOTOFFREL), or a symbol that points to the GOT
642   // (i.e. _GLOBAL_OFFSET_TABLE_) that the target defines relative to the .got.
643   return NumEntries == 0 && !HasGotOffRel &&
644          !(ElfSym::GlobalOffsetTable && !Target->GotBaseSymInGotPlt);
645 }
646 
647 void GotSection::writeTo(uint8_t *Buf) {
648   // Buf points to the start of this section's buffer,
649   // whereas InputSectionBase::relocateAlloc() expects its argument
650   // to point to the start of the output section.
651   Target->writeGotHeader(Buf);
652   relocateAlloc(Buf - OutSecOff, Buf - OutSecOff + Size);
653 }
654 
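// MIPS GOT "page" entries hold 64K-aligned page addresses. getMipsPageAddr()
// rounds an address to the nearest 0x10000 boundary (hence the 0x8000 bias),
// and getMipsPageCount() returns an upper bound on the number of such pages
// covered by a section of the given size.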
655 static uint64_t getMipsPageAddr(uint64_t Addr) {
656   return (Addr + 0x8000) & ~0xffff;
657 }
658 
659 static uint64_t getMipsPageCount(uint64_t Size) {
660   return (Size + 0xfffe) / 0xffff + 1;
661 }
662 
663 MipsGotSection::MipsGotSection()
664     : SyntheticSection(SHF_ALLOC | SHF_WRITE | SHF_MIPS_GPREL, SHT_PROGBITS, 16,
665                        ".got") {}
666 
667 void MipsGotSection::addEntry(InputFile &File, Symbol &Sym, int64_t Addend,
668                               RelExpr Expr) {
669   FileGot &G = getGot(File);
670   if (Expr == R_MIPS_GOT_LOCAL_PAGE) {
671     if (const OutputSection *OS = Sym.getOutputSection())
672       G.PagesMap.insert({OS, {}});
673     else
674       G.Local16.insert({{nullptr, getMipsPageAddr(Sym.getVA(Addend))}, 0});
675   } else if (Sym.isTls())
676     G.Tls.insert({&Sym, 0});
677   else if (Sym.IsPreemptible && Expr == R_ABS)
678     G.Relocs.insert({&Sym, 0});
679   else if (Sym.IsPreemptible)
680     G.Global.insert({&Sym, 0});
681   else if (Expr == R_MIPS_GOT_OFF32)
682     G.Local32.insert({{&Sym, Addend}, 0});
683   else
684     G.Local16.insert({{&Sym, Addend}, 0});
685 }
686 
687 void MipsGotSection::addDynTlsEntry(InputFile &File, Symbol &Sym) {
688   getGot(File).DynTlsSymbols.insert({&Sym, 0});
689 }
690 
691 void MipsGotSection::addTlsIndex(InputFile &File) {
692   getGot(File).DynTlsSymbols.insert({nullptr, 0});
693 }
694 
695 size_t MipsGotSection::FileGot::getEntriesNum() const {
696   return getPageEntriesNum() + Local16.size() + Global.size() + Relocs.size() +
697          Tls.size() + DynTlsSymbols.size() * 2;
698 }
699 
700 size_t MipsGotSection::FileGot::getPageEntriesNum() const {
701   size_t Num = 0;
702   for (const std::pair<const OutputSection *, FileGot::PageBlock> &P : PagesMap)
703     Num += P.second.Count;
704   return Num;
705 }
706 
707 size_t MipsGotSection::FileGot::getIndexedEntriesNum() const {
708   size_t Count = getPageEntriesNum() + Local16.size() + Global.size();
709   // If there are relocation-only entries in the GOT, TLS entries
710   // are allocated after them. TLS entries should be addressable
711   // by 16-bit index so count both reloc-only and TLS entries.
712   if (!Tls.empty() || !DynTlsSymbols.empty())
713     Count += Relocs.size() + Tls.size() + DynTlsSymbols.size() * 2;
714   return Count;
715 }
716 
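// Returns the FileGot that belongs to the given input file, lazily creating
// one on first use and remembering its index in the file.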
717 MipsGotSection::FileGot &MipsGotSection::getGot(InputFile &F) {
718   if (!F.MipsGotIndex.hasValue()) {
719     Gots.emplace_back();
720     Gots.back().File = &F;
721     F.MipsGotIndex = Gots.size() - 1;
722   }
723   return Gots[*F.MipsGotIndex];
724 }
725 
726 uint64_t MipsGotSection::getPageEntryOffset(const InputFile *F,
727                                             const Symbol &Sym,
728                                             int64_t Addend) const {
729   const FileGot &G = Gots[*F->MipsGotIndex];
730   uint64_t Index = 0;
731   if (const OutputSection *OutSec = Sym.getOutputSection()) {
732     uint64_t SecAddr = getMipsPageAddr(OutSec->Addr);
733     uint64_t SymAddr = getMipsPageAddr(Sym.getVA(Addend));
734     Index = G.PagesMap.lookup(OutSec).FirstIndex + (SymAddr - SecAddr) / 0xffff;
735   } else {
736     Index = G.Local16.lookup({nullptr, getMipsPageAddr(Sym.getVA(Addend))});
737   }
738   return Index * Config->Wordsize;
739 }
740 
741 uint64_t MipsGotSection::getSymEntryOffset(const InputFile *F, const Symbol &S,
742                                            int64_t Addend) const {
743   const FileGot &G = Gots[*F->MipsGotIndex];
744   Symbol *Sym = const_cast<Symbol *>(&S);
745   if (Sym->isTls())
746     return G.Tls.lookup(Sym) * Config->Wordsize;
747   if (Sym->IsPreemptible)
748     return G.Global.lookup(Sym) * Config->Wordsize;
749   return G.Local16.lookup({Sym, Addend}) * Config->Wordsize;
750 }
751 
752 uint64_t MipsGotSection::getTlsIndexOffset(const InputFile *F) const {
753   const FileGot &G = Gots[*F->MipsGotIndex];
754   return G.DynTlsSymbols.lookup(nullptr) * Config->Wordsize;
755 }
756 
757 uint64_t MipsGotSection::getGlobalDynOffset(const InputFile *F,
758                                             const Symbol &S) const {
759   const FileGot &G = Gots[*F->MipsGotIndex];
760   Symbol *Sym = const_cast<Symbol *>(&S);
761   return G.DynTlsSymbols.lookup(Sym) * Config->Wordsize;
762 }
763 
764 const Symbol *MipsGotSection::getFirstGlobalEntry() const {
765   if (Gots.empty())
766     return nullptr;
767   const FileGot &PrimGot = Gots.front();
768   if (!PrimGot.Global.empty())
769     return PrimGot.Global.front().first;
770   if (!PrimGot.Relocs.empty())
771     return PrimGot.Relocs.front().first;
772   return nullptr;
773 }
774 
775 unsigned MipsGotSection::getLocalEntriesNum() const {
776   if (Gots.empty())
777     return HeaderEntriesNum;
778   return HeaderEntriesNum + Gots.front().getPageEntriesNum() +
779          Gots.front().Local16.size();
780 }
781 
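// Tries to merge the entries of Src into Dst. The merge succeeds only if the
// combined GOT (plus the header, for the primary GOT) still fits into the
// maximum GOT size; otherwise Dst is left unchanged and false is returned.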
782 bool MipsGotSection::tryMergeGots(FileGot &Dst, FileGot &Src, bool IsPrimary) {
783   FileGot Tmp = Dst;
784   set_union(Tmp.PagesMap, Src.PagesMap);
785   set_union(Tmp.Local16, Src.Local16);
786   set_union(Tmp.Global, Src.Global);
787   set_union(Tmp.Relocs, Src.Relocs);
788   set_union(Tmp.Tls, Src.Tls);
789   set_union(Tmp.DynTlsSymbols, Src.DynTlsSymbols);
790 
791   size_t Count = IsPrimary ? HeaderEntriesNum : 0;
792   Count += Tmp.getIndexedEntriesNum();
793 
794   if (Count * Config->Wordsize > Config->MipsGotSize)
795     return false;
796 
797   std::swap(Tmp, Dst);
798   return true;
799 }
800 
801 void MipsGotSection::finalizeContents() { updateAllocSize(); }
802 
803 bool MipsGotSection::updateAllocSize() {
804   Size = HeaderEntriesNum * Config->Wordsize;
805   for (const FileGot &G : Gots)
806     Size += G.getEntriesNum() * Config->Wordsize;
807   return false;
808 }
809 
810 template <class ELFT> void MipsGotSection::build() {
811   if (Gots.empty())
812     return;
813 
814   std::vector<FileGot> MergedGots(1);
815 
816   // For each GOT, move non-preemptible symbols from the `Global`
817   // to the `Local16` list. A preemptible symbol might become non-preemptible
818   // if, for example, it gets a related copy relocation.
819   for (FileGot &Got : Gots) {
820     for (auto &P: Got.Global)
821       if (!P.first->IsPreemptible)
822         Got.Local16.insert({{P.first, 0}, 0});
823     Got.Global.remove_if([&](const std::pair<Symbol *, size_t> &P) {
824       return !P.first->IsPreemptible;
825     });
826   }
827 
828   // For each GOT, remove the "reloc-only" entry if there is a "global"
829   // entry for the same symbol, and append local entries that are indexed
830   // using 32-bit values after the 16-bit entries.
831   for (FileGot &Got : Gots) {
832     Got.Relocs.remove_if([&](const std::pair<Symbol *, size_t> &P) {
833       return Got.Global.count(P.first);
834     });
835     set_union(Got.Local16, Got.Local32);
836     Got.Local32.clear();
837   }
838 
839   // Evaluate the number of "reloc-only" entries in the resulting GOT.
840   // To do that, put all unique "reloc-only" and "global" entries
841   // from all GOTs into the future primary GOT.
842   FileGot *PrimGot = &MergedGots.front();
843   for (FileGot &Got : Gots) {
844     set_union(PrimGot->Relocs, Got.Global);
845     set_union(PrimGot->Relocs, Got.Relocs);
846     Got.Relocs.clear();
847   }
848 
849   // Evaluate number of "page" entries in each GOT.
850   for (FileGot &Got : Gots) {
851     for (std::pair<const OutputSection *, FileGot::PageBlock> &P :
852          Got.PagesMap) {
853       const OutputSection *OS = P.first;
854       uint64_t SecSize = 0;
855       for (BaseCommand *Cmd : OS->SectionCommands) {
856         if (auto *ISD = dyn_cast<InputSectionDescription>(Cmd))
857           for (InputSection *IS : ISD->Sections) {
858             uint64_t Off = alignTo(SecSize, IS->Alignment);
859             SecSize = Off + IS->getSize();
860           }
861       }
862       P.second.Count = getMipsPageCount(SecSize);
863     }
864   }
865 
866   // Merge GOTs. Try to join as many GOTs as possible but do not exceed the
867   // maximum GOT size. First, try to fill the primary GOT because
868   // the primary GOT can be accessed in the most efficient way. If that
869   // is not possible, try to fill the last GOT in the list, and finally
870   // create a new GOT if both attempts fail.
871   for (FileGot &SrcGot : Gots) {
872     InputFile *File = SrcGot.File;
873     if (tryMergeGots(MergedGots.front(), SrcGot, true)) {
874       File->MipsGotIndex = 0;
875     } else {
876       // If this is the first time we failed to merge with the primary GOT,
877       // MergedGots.back() will also be the primary GOT. We must make sure not
878       // to try to merge again with IsPrimary=false, as otherwise, if the
879       // inputs are just right, we could allow the primary GOT to become 1 or 2
880       // words too big due to ignoring the header size.
881       if (MergedGots.size() == 1 ||
882           !tryMergeGots(MergedGots.back(), SrcGot, false)) {
883         MergedGots.emplace_back();
884         std::swap(MergedGots.back(), SrcGot);
885       }
886       File->MipsGotIndex = MergedGots.size() - 1;
887     }
888   }
889   std::swap(Gots, MergedGots);
890 
891   // Reduce the number of "reloc-only" entries in the primary GOT by
892   // removing the "global" entries that already exist in the primary GOT.
893   PrimGot = &Gots.front();
894   PrimGot->Relocs.remove_if([&](const std::pair<Symbol *, size_t> &P) {
895     return PrimGot->Global.count(P.first);
896   });
897 
898   // Calculate indexes for each GOT entry.
899   size_t Index = HeaderEntriesNum;
900   for (FileGot &Got : Gots) {
901     Got.StartIndex = &Got == PrimGot ? 0 : Index;
902     for (std::pair<const OutputSection *, FileGot::PageBlock> &P :
903          Got.PagesMap) {
904       // For each output section referenced by GOT page relocations, calculate
905       // and save into PagesMap an upper bound on the number of MIPS GOT entries
906       // required to store page addresses of local symbols. We assume the worst
907       // case: each 64kb page of the output section has at least one GOT
908       // relocation against it. We also take into account the case where the
909       // section crosses page boundaries.
910       P.second.FirstIndex = Index;
911       Index += P.second.Count;
912     }
913     for (auto &P: Got.Local16)
914       P.second = Index++;
915     for (auto &P: Got.Global)
916       P.second = Index++;
917     for (auto &P: Got.Relocs)
918       P.second = Index++;
919     for (auto &P: Got.Tls)
920       P.second = Index++;
921     for (auto &P: Got.DynTlsSymbols) {
922       P.second = Index;
923       Index += 2;
924     }
925   }
926 
927   // Update Symbol::GotIndex field to use this
928   // value later in the `sortMipsSymbols` function.
929   for (auto &P : PrimGot->Global)
930     P.first->GotIndex = P.second;
931   for (auto &P : PrimGot->Relocs)
932     P.first->GotIndex = P.second;
933 
934   // Create dynamic relocations.
935   for (FileGot &Got : Gots) {
936     // Create dynamic relocations for TLS entries.
937     for (std::pair<Symbol *, size_t> &P : Got.Tls) {
938       Symbol *S = P.first;
939       uint64_t Offset = P.second * Config->Wordsize;
940       if (S->IsPreemptible)
941         In.RelaDyn->addReloc(Target->TlsGotRel, this, Offset, S);
942     }
943     for (std::pair<Symbol *, size_t> &P : Got.DynTlsSymbols) {
944       Symbol *S = P.first;
945       uint64_t Offset = P.second * Config->Wordsize;
946       if (S == nullptr) {
947         if (!Config->Pic)
948           continue;
949         In.RelaDyn->addReloc(Target->TlsModuleIndexRel, this, Offset, S);
950       } else {
951         // When building a shared library we still need a dynamic relocation
952         // for the module index. Therefore only checking for
953         // S->IsPreemptible is not sufficient (this happens e.g. for
954         // thread-locals that have been marked as local through a linker script)
955         if (!S->IsPreemptible && !Config->Pic)
956           continue;
957         In.RelaDyn->addReloc(Target->TlsModuleIndexRel, this, Offset, S);
958         // However, we can skip writing the TLS offset reloc for non-preemptible
959         // symbols since it is known even in shared libraries
960         if (!S->IsPreemptible)
961           continue;
962         Offset += Config->Wordsize;
963         In.RelaDyn->addReloc(Target->TlsOffsetRel, this, Offset, S);
964       }
965     }
966 
967     // Do not create dynamic relocations for non-TLS
968     // entries in the primary GOT.
969     if (&Got == PrimGot)
970       continue;
971 
972     // Dynamic relocations for "global" entries.
973     for (const std::pair<Symbol *, size_t> &P : Got.Global) {
974       uint64_t Offset = P.second * Config->Wordsize;
975       In.RelaDyn->addReloc(Target->RelativeRel, this, Offset, P.first);
976     }
977     if (!Config->Pic)
978       continue;
979     // Dynamic relocations for "local" entries in case of PIC.
980     for (const std::pair<const OutputSection *, FileGot::PageBlock> &L :
981          Got.PagesMap) {
982       size_t PageCount = L.second.Count;
983       for (size_t PI = 0; PI < PageCount; ++PI) {
984         uint64_t Offset = (L.second.FirstIndex + PI) * Config->Wordsize;
985         In.RelaDyn->addReloc({Target->RelativeRel, this, Offset, L.first,
986                               int64_t(PI * 0x10000)});
987       }
988     }
989     for (const std::pair<GotEntry, size_t> &P : Got.Local16) {
990       uint64_t Offset = P.second * Config->Wordsize;
991       In.RelaDyn->addReloc({Target->RelativeRel, this, Offset, true,
992                             P.first.first, P.first.second});
993     }
994   }
995 }
996 
997 bool MipsGotSection::empty() const {
998   // We add the .got section to the result for dynamic MIPS target because
999   // its address and properties are mentioned in the .dynamic section.
1000   return Config->Relocatable;
1001 }
1002 
1003 uint64_t MipsGotSection::getGp(const InputFile *F) const {
1004   // For files without a related GOT, or files that refer to the primary GOT,
1005   // return the "common" _gp value. For secondary GOTs, calculate
1006   // individual _gp values.
1007   if (!F || !F->MipsGotIndex.hasValue() || *F->MipsGotIndex == 0)
1008     return ElfSym::MipsGp->getVA(0);
1009   return getVA() + Gots[*F->MipsGotIndex].StartIndex * Config->Wordsize +
1010          0x7ff0;
1011 }
1012 
1013 void MipsGotSection::writeTo(uint8_t *Buf) {
1014   // Set the MSB of the second GOT slot. This is not required by any
1015   // MIPS ABI documentation, though.
1016   //
1017   // There is a comment in glibc saying that "The MSB of got[1] of a
1018   // gnu object is set to identify gnu objects," and in GNU gold it
1019   // says "the second entry will be used by some runtime loaders".
1020   // But how this field is being used is unclear.
1021   //
1022   // We are not really willing to mimic other linkers' behaviors
1023   // without understanding why they do that, but because all files
1024   // generated by GNU tools have this special GOT value, and because
1025   // we've been doing this for years, it is probably a safe bet to
1026   // keep doing this for now. We really need to revisit this to see
1027   // whether we actually have to do it.
1028   writeUint(Buf + Config->Wordsize, (uint64_t)1 << (Config->Wordsize * 8 - 1));
1029   for (const FileGot &G : Gots) {
1030     auto Write = [&](size_t I, const Symbol *S, int64_t A) {
1031       uint64_t VA = A;
1032       if (S) {
1033         VA = S->getVA(A);
1034         if (S->StOther & STO_MIPS_MICROMIPS)
1035           VA |= 1;
1036       }
1037       writeUint(Buf + I * Config->Wordsize, VA);
1038     };
1039     // Write 'page address' entries to the local part of the GOT.
1040     for (const std::pair<const OutputSection *, FileGot::PageBlock> &L :
1041          G.PagesMap) {
1042       size_t PageCount = L.second.Count;
1043       uint64_t FirstPageAddr = getMipsPageAddr(L.first->Addr);
1044       for (size_t PI = 0; PI < PageCount; ++PI)
1045         Write(L.second.FirstIndex + PI, nullptr, FirstPageAddr + PI * 0x10000);
1046     }
1047     // Local, global, TLS, and reloc-only entries.
1048     // If a TLS entry has a corresponding dynamic relocation, leave it
1049     // initialized to zero. Otherwise, write the adjusted TLS symbol's value.
1050     // To calculate the adjustments, use offsets for thread-local storage.
1051     // https://www.linux-mips.org/wiki/NPTL
1052     for (const std::pair<GotEntry, size_t> &P : G.Local16)
1053       Write(P.second, P.first.first, P.first.second);
1054     // Write VA to the primary GOT only. For secondary GOTs that
1055     // will be done by REL32 dynamic relocations.
1056     if (&G == &Gots.front())
1057       for (const std::pair<const Symbol *, size_t> &P : G.Global)
1058         Write(P.second, P.first, 0);
1059     for (const std::pair<Symbol *, size_t> &P : G.Relocs)
1060       Write(P.second, P.first, 0);
1061     for (const std::pair<Symbol *, size_t> &P : G.Tls)
1062       Write(P.second, P.first, P.first->IsPreemptible ? 0 : -0x7000);
1063     for (const std::pair<Symbol *, size_t> &P : G.DynTlsSymbols) {
1064       if (P.first == nullptr && !Config->Pic)
1065         Write(P.second, nullptr, 1);
1066       else if (P.first && !P.first->IsPreemptible) {
1067         // If we are emitting PIC code with relocations we mustn't write
1068         // anything to the GOT here. When using Elf_Rel relocations the value
1069         // one will be treated as an addend and will cause crashes at runtime
1070         if (!Config->Pic)
1071           Write(P.second, nullptr, 1);
1072         Write(P.second + 1, P.first, -0x8000);
1073       }
1074     }
1075   }
1076 }
1077 
1078 // On PowerPC the .plt section is used to hold the table of function addresses
1079 // instead of the .got.plt, and the type is SHT_NOBITS similar to a .bss
1080 // section. I don't know why we have a BSS style type for the section but it is
1081 // consistent across both 64-bit PowerPC ABIs as well as the 32-bit PowerPC ABI.
1082 GotPltSection::GotPltSection()
1083     : SyntheticSection(SHF_ALLOC | SHF_WRITE,
1084                        Config->EMachine == EM_PPC64 ? SHT_NOBITS : SHT_PROGBITS,
1085                        Target->GotPltEntrySize,
1086                        Config->EMachine == EM_PPC64 ? ".plt" : ".got.plt") {}
1087 
1088 void GotPltSection::addEntry(Symbol &Sym) {
1089   assert(Sym.PltIndex == Entries.size());
1090   Entries.push_back(&Sym);
1091 }
1092 
1093 size_t GotPltSection::getSize() const {
1094   return (Target->GotPltHeaderEntriesNum + Entries.size()) *
1095          Target->GotPltEntrySize;
1096 }
1097 
1098 void GotPltSection::writeTo(uint8_t *Buf) {
1099   Target->writeGotPltHeader(Buf);
1100   Buf += Target->GotPltHeaderEntriesNum * Target->GotPltEntrySize;
1101   for (const Symbol *B : Entries) {
1102     Target->writeGotPlt(Buf, *B);
1103     Buf += Config->Wordsize;
1104   }
1105 }
1106 
1107 bool GotPltSection::empty() const {
1108   // We need to emit a GOT.PLT even if it's empty if there's a symbol that
1109   // references the _GLOBAL_OFFSET_TABLE_ and the Target defines the symbol
1110   // relative to the .got.plt section.
1111   return Entries.empty() &&
1112          !(ElfSym::GlobalOffsetTable && Target->GotBaseSymInGotPlt);
1113 }
1114 
1115 static StringRef getIgotPltName() {
1116   // On ARM the IgotPltSection is part of the GotSection.
1117   if (Config->EMachine == EM_ARM)
1118     return ".got";
1119 
1120   // On PowerPC64 the GotPltSection is renamed to '.plt' so the IgotPltSection
1121   // needs to be named the same.
1122   if (Config->EMachine == EM_PPC64)
1123     return ".plt";
1124 
1125   return ".got.plt";
1126 }
1127 
1128 // On PowerPC64 the GotPltSection type is SHT_NOBITS so we have to follow suit
1129 // with the IgotPltSection.
1130 IgotPltSection::IgotPltSection()
1131     : SyntheticSection(SHF_ALLOC | SHF_WRITE,
1132                        Config->EMachine == EM_PPC64 ? SHT_NOBITS : SHT_PROGBITS,
1133                        Target->GotPltEntrySize, getIgotPltName()) {}
1134 
1135 void IgotPltSection::addEntry(Symbol &Sym) {
1136   Sym.IsInIgot = true;
1137   assert(Sym.PltIndex == Entries.size());
1138   Entries.push_back(&Sym);
1139 }
1140 
1141 size_t IgotPltSection::getSize() const {
1142   return Entries.size() * Target->GotPltEntrySize;
1143 }
1144 
1145 void IgotPltSection::writeTo(uint8_t *Buf) {
1146   for (const Symbol *B : Entries) {
1147     Target->writeIgotPlt(Buf, *B);
1148     Buf += Config->Wordsize;
1149   }
1150 }
1151 
1152 StringTableSection::StringTableSection(StringRef Name, bool Dynamic)
1153     : SyntheticSection(Dynamic ? (uint64_t)SHF_ALLOC : 0, SHT_STRTAB, 1, Name),
1154       Dynamic(Dynamic) {
1155   // ELF string tables start with a NUL byte.
1156   addString("");
1157 }
1158 
1159 // Adds a string to the string table. If HashIt is true we hash and check for
1160 // duplicates. It is optional because the names of global symbols are already
1161 // unique, so hashing them again has a big cost for a small benefit: uniquing
1162 // them with some other string that happens to be the same.
1163 unsigned StringTableSection::addString(StringRef S, bool HashIt) {
1164   if (HashIt) {
1165     auto R = StringMap.insert(std::make_pair(S, this->Size));
1166     if (!R.second)
1167       return R.first->second;
1168   }
1169   unsigned Ret = this->Size;
1170   this->Size = this->Size + S.size() + 1;
1171   Strings.push_back(S);
1172   return Ret;
1173 }
1174 
1175 void StringTableSection::writeTo(uint8_t *Buf) {
1176   for (StringRef S : Strings) {
1177     memcpy(Buf, S.data(), S.size());
1178     Buf[S.size()] = '\0';
1179     Buf += S.size() + 1;
1180   }
1181 }
1182 
1183 // Returns the number of version definition entries. Because the first entry
1184 // is for the version definition itself, it is the number of versioned symbols
1185 // plus one. Note that we don't support multiple versions yet.
1186 static unsigned getVerDefNum() { return Config->VersionDefinitions.size() + 1; }
1187 
1188 template <class ELFT>
1189 DynamicSection<ELFT>::DynamicSection()
1190     : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_DYNAMIC, Config->Wordsize,
1191                        ".dynamic") {
1192   this->Entsize = ELFT::Is64Bits ? 16 : 8;
1193 
1194   // The .dynamic section is not writable on MIPS, nor on Fuchsia OS,
1195   // which passes -z rodynamic.
1196   // See "Special Section" in Chapter 4 in the following document:
1197   // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
1198   if (Config->EMachine == EM_MIPS || Config->ZRodynamic)
1199     this->Flags = SHF_ALLOC;
1200 
1201   // Add strings to .dynstr early so that .dynstr's size will be
1202   // fixed early.
1203   for (StringRef S : Config->FilterList)
1204     addInt(DT_FILTER, In.DynStrTab->addString(S));
1205   for (StringRef S : Config->AuxiliaryList)
1206     addInt(DT_AUXILIARY, In.DynStrTab->addString(S));
1207 
1208   if (!Config->Rpath.empty())
1209     addInt(Config->EnableNewDtags ? DT_RUNPATH : DT_RPATH,
1210            In.DynStrTab->addString(Config->Rpath));
1211 
1212   for (InputFile *File : SharedFiles) {
1213     SharedFile<ELFT> *F = cast<SharedFile<ELFT>>(File);
1214     if (F->IsNeeded)
1215       addInt(DT_NEEDED, In.DynStrTab->addString(F->SoName));
1216   }
1217   if (!Config->SoName.empty())
1218     addInt(DT_SONAME, In.DynStrTab->addString(Config->SoName));
1219 }
1220 
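// The following helpers append a (tag, value) pair to the .dynamic section.
// The value is stored as a closure and evaluated later, in writeTo(), because
// most addresses and sizes are not known yet when the entries are added.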
1221 template <class ELFT>
1222 void DynamicSection<ELFT>::add(int32_t Tag, std::function<uint64_t()> Fn) {
1223   Entries.push_back({Tag, Fn});
1224 }
1225 
1226 template <class ELFT>
1227 void DynamicSection<ELFT>::addInt(int32_t Tag, uint64_t Val) {
1228   Entries.push_back({Tag, [=] { return Val; }});
1229 }
1230 
1231 template <class ELFT>
1232 void DynamicSection<ELFT>::addInSec(int32_t Tag, InputSection *Sec) {
1233   Entries.push_back({Tag, [=] { return Sec->getVA(0); }});
1234 }
1235 
1236 template <class ELFT>
1237 void DynamicSection<ELFT>::addInSecRelative(int32_t Tag, InputSection *Sec) {
1238   size_t TagOffset = Entries.size() * Entsize;
1239   Entries.push_back(
1240       {Tag, [=] { return Sec->getVA(0) - (getVA() + TagOffset); }});
1241 }
1242 
1243 template <class ELFT>
1244 void DynamicSection<ELFT>::addOutSec(int32_t Tag, OutputSection *Sec) {
1245   Entries.push_back({Tag, [=] { return Sec->Addr; }});
1246 }
1247 
1248 template <class ELFT>
1249 void DynamicSection<ELFT>::addSize(int32_t Tag, OutputSection *Sec) {
1250   Entries.push_back({Tag, [=] { return Sec->Size; }});
1251 }
1252 
1253 template <class ELFT>
1254 void DynamicSection<ELFT>::addSym(int32_t Tag, Symbol *Sym) {
1255   Entries.push_back({Tag, [=] { return Sym->getVA(); }});
1256 }
1257 
1258 // A linker script may assign the RELA relocation sections to the same
1259 // output section. When this occurs we cannot just use the OutputSection
1260 // size. Moreover, the range [DT_JMPREL, DT_JMPREL + DT_PLTRELSZ) is permitted
1261 // to overlap with the range [DT_RELA, DT_RELA + DT_RELASZ).
1262 static uint64_t addPltRelSz() {
1263   size_t Size = In.RelaPlt->getSize();
1264   if (In.RelaIplt->getParent() == In.RelaPlt->getParent() &&
1265       In.RelaIplt->Name == In.RelaPlt->Name)
1266     Size += In.RelaIplt->getSize();
1267   return Size;
1268 }
1269 
1270 // Add remaining entries to complete .dynamic contents.
1271 template <class ELFT> void DynamicSection<ELFT>::finalizeContents() {
1272   // Set DT_FLAGS and DT_FLAGS_1.
1273   uint32_t DtFlags = 0;
1274   uint32_t DtFlags1 = 0;
1275   if (Config->Bsymbolic)
1276     DtFlags |= DF_SYMBOLIC;
1277   if (Config->ZGlobal)
1278     DtFlags1 |= DF_1_GLOBAL;
1279   if (Config->ZInitfirst)
1280     DtFlags1 |= DF_1_INITFIRST;
1281   if (Config->ZInterpose)
1282     DtFlags1 |= DF_1_INTERPOSE;
1283   if (Config->ZNodefaultlib)
1284     DtFlags1 |= DF_1_NODEFLIB;
1285   if (Config->ZNodelete)
1286     DtFlags1 |= DF_1_NODELETE;
1287   if (Config->ZNodlopen)
1288     DtFlags1 |= DF_1_NOOPEN;
1289   if (Config->ZNow) {
1290     DtFlags |= DF_BIND_NOW;
1291     DtFlags1 |= DF_1_NOW;
1292   }
1293   if (Config->ZOrigin) {
1294     DtFlags |= DF_ORIGIN;
1295     DtFlags1 |= DF_1_ORIGIN;
1296   }
1297   if (!Config->ZText)
1298     DtFlags |= DF_TEXTREL;
1299 
1300   if (DtFlags)
1301     addInt(DT_FLAGS, DtFlags);
1302   if (DtFlags1)
1303     addInt(DT_FLAGS_1, DtFlags1);
1304 
1305   // DT_DEBUG is a pointer to debug information used by debuggers at runtime. We
1306   // need it for each process, so we don't write it for DSOs. The loader writes
1307   // the pointer into this entry.
1308   //
1309   // DT_DEBUG is the only .dynamic entry that needs to be written to. Some
1310   // systems (currently only Fuchsia OS) provide other means to give the
1311   // debugger this information. Such systems may choose to make .dynamic read-only.
1312   // If the target is such a system (used -z rodynamic) don't write DT_DEBUG.
1313   if (!Config->Shared && !Config->Relocatable && !Config->ZRodynamic)
1314     addInt(DT_DEBUG, 0);
1315 
1316   if (OutputSection *Sec = In.DynStrTab->getParent())
1317     this->Link = Sec->SectionIndex;
1318 
1319   if (!In.RelaDyn->empty()) {
1320     addInSec(In.RelaDyn->DynamicTag, In.RelaDyn);
1321     addSize(In.RelaDyn->SizeDynamicTag, In.RelaDyn->getParent());
1322 
1323     bool IsRela = Config->IsRela;
1324     addInt(IsRela ? DT_RELAENT : DT_RELENT,
1325            IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel));
1326 
1327     // MIPS dynamic loader does not support RELCOUNT tag.
1328     // The problem is in the tight relation between dynamic
1329     // relocations and GOT. So do not emit this tag on MIPS.
1330     if (Config->EMachine != EM_MIPS) {
1331       size_t NumRelativeRels = In.RelaDyn->getRelativeRelocCount();
1332       if (Config->ZCombreloc && NumRelativeRels)
1333         addInt(IsRela ? DT_RELACOUNT : DT_RELCOUNT, NumRelativeRels);
1334     }
1335   }
1336   if (In.RelrDyn && !In.RelrDyn->Relocs.empty()) {
1337     addInSec(Config->UseAndroidRelrTags ? DT_ANDROID_RELR : DT_RELR,
1338              In.RelrDyn);
1339     addSize(Config->UseAndroidRelrTags ? DT_ANDROID_RELRSZ : DT_RELRSZ,
1340             In.RelrDyn->getParent());
1341     addInt(Config->UseAndroidRelrTags ? DT_ANDROID_RELRENT : DT_RELRENT,
1342            sizeof(Elf_Relr));
1343   }
1344   // The .rel[a].plt section usually consists of two parts, containing plt and
1345   // iplt relocations. It is possible to have only iplt relocations in the
1346   // output. In that case RelaPlt is empty and has zero offset, the same offset
1347   // that RelaIplt has. We still want to emit proper dynamic tags for that
1348   // case, so here we always use RelaPlt as a marker for the beginning of the
1349   // .rel[a].plt section.
1350   if (In.RelaPlt->getParent()->Live) {
1351     addInSec(DT_JMPREL, In.RelaPlt);
1352     Entries.push_back({DT_PLTRELSZ, addPltRelSz});
1353     switch (Config->EMachine) {
1354     case EM_MIPS:
1355       addInSec(DT_MIPS_PLTGOT, In.GotPlt);
1356       break;
1357     case EM_SPARCV9:
1358       addInSec(DT_PLTGOT, In.Plt);
1359       break;
1360     default:
1361       addInSec(DT_PLTGOT, In.GotPlt);
1362       break;
1363     }
1364     addInt(DT_PLTREL, Config->IsRela ? DT_RELA : DT_REL);
1365   }
1366 
1367   addInSec(DT_SYMTAB, In.DynSymTab);
1368   addInt(DT_SYMENT, sizeof(Elf_Sym));
1369   addInSec(DT_STRTAB, In.DynStrTab);
1370   addInt(DT_STRSZ, In.DynStrTab->getSize());
1371   if (!Config->ZText)
1372     addInt(DT_TEXTREL, 0);
1373   if (In.GnuHashTab)
1374     addInSec(DT_GNU_HASH, In.GnuHashTab);
1375   if (In.HashTab)
1376     addInSec(DT_HASH, In.HashTab);
1377 
1378   if (Out::PreinitArray) {
1379     addOutSec(DT_PREINIT_ARRAY, Out::PreinitArray);
1380     addSize(DT_PREINIT_ARRAYSZ, Out::PreinitArray);
1381   }
1382   if (Out::InitArray) {
1383     addOutSec(DT_INIT_ARRAY, Out::InitArray);
1384     addSize(DT_INIT_ARRAYSZ, Out::InitArray);
1385   }
1386   if (Out::FiniArray) {
1387     addOutSec(DT_FINI_ARRAY, Out::FiniArray);
1388     addSize(DT_FINI_ARRAYSZ, Out::FiniArray);
1389   }
1390 
1391   if (Symbol *B = Symtab->find(Config->Init))
1392     if (B->isDefined())
1393       addSym(DT_INIT, B);
1394   if (Symbol *B = Symtab->find(Config->Fini))
1395     if (B->isDefined())
1396       addSym(DT_FINI, B);
1397 
1398   bool HasVerNeed = InX<ELFT>::VerNeed->getNeedNum() != 0;
1399   if (HasVerNeed || In.VerDef)
1400     addInSec(DT_VERSYM, InX<ELFT>::VerSym);
1401   if (In.VerDef) {
1402     addInSec(DT_VERDEF, In.VerDef);
1403     addInt(DT_VERDEFNUM, getVerDefNum());
1404   }
1405   if (HasVerNeed) {
1406     addInSec(DT_VERNEED, InX<ELFT>::VerNeed);
1407     addInt(DT_VERNEEDNUM, InX<ELFT>::VerNeed->getNeedNum());
1408   }
1409 
1410   if (Config->EMachine == EM_MIPS) {
1411     addInt(DT_MIPS_RLD_VERSION, 1);
1412     addInt(DT_MIPS_FLAGS, RHF_NOTPOT);
1413     addInt(DT_MIPS_BASE_ADDRESS, Target->getImageBase());
1414     addInt(DT_MIPS_SYMTABNO, In.DynSymTab->getNumSymbols());
1415 
1416     add(DT_MIPS_LOCAL_GOTNO, [] { return In.MipsGot->getLocalEntriesNum(); });
1417 
1418     if (const Symbol *B = In.MipsGot->getFirstGlobalEntry())
1419       addInt(DT_MIPS_GOTSYM, B->DynsymIndex);
1420     else
1421       addInt(DT_MIPS_GOTSYM, In.DynSymTab->getNumSymbols());
1422     addInSec(DT_PLTGOT, In.MipsGot);
1423     if (In.MipsRldMap) {
1424       if (!Config->Pie)
1425         addInSec(DT_MIPS_RLD_MAP, In.MipsRldMap);
1426       // Store the offset to the .rld_map section
1427       // relative to the address of the tag.
1428       addInSecRelative(DT_MIPS_RLD_MAP_REL, In.MipsRldMap);
1429     }
1430   }
1431 
1432   // The Glink dynamic tag is required by the V2 ABI if the PLT section isn't empty.
1433   if (Config->EMachine == EM_PPC64 && !In.Plt->empty()) {
1434     // The Glink tag points to 32 bytes before the first lazy symbol resolution
1435     // stub, which starts directly after the header.
1436     Entries.push_back({DT_PPC64_GLINK, [=] {
1437                          unsigned Offset = Target->PltHeaderSize - 32;
1438                          return In.Plt->getVA(0) + Offset;
1439                        }});
1440   }
1441 
1442   addInt(DT_NULL, 0);
1443 
1444   getParent()->Link = this->Link;
1445   this->Size = Entries.size() * this->Entsize;
1446 }
1447 
writeTo(uint8_t * Buf)1448 template <class ELFT> void DynamicSection<ELFT>::writeTo(uint8_t *Buf) {
1449   auto *P = reinterpret_cast<Elf_Dyn *>(Buf);
1450 
1451   for (std::pair<int32_t, std::function<uint64_t()>> &KV : Entries) {
1452     P->d_tag = KV.first;
1453     P->d_un.d_val = KV.second();
1454     ++P;
1455   }
1456 }
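// As a hedged illustration (hypothetical values, not output of any particular
// link): the Entries filled in by finalizeContents() above become an array of
// Elf_Dyn {d_tag, d_val} pairs that a tool such as "readelf -d" renders roughly
// as
//   (RELA)    address of .rela.dyn      (RELASZ)  size of .rela.dyn
//   (SYMTAB)  address of .dynsym        (STRTAB)  address of .dynstr
//   (STRSZ)   size of .dynstr           (NULL)    0   <- terminator
// The exact set and order of tags depends on the target and the command line.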
1457 
getOffset() const1458 uint64_t DynamicReloc::getOffset() const {
1459   return InputSec->getVA(OffsetInSec);
1460 }
1461 
computeAddend() const1462 int64_t DynamicReloc::computeAddend() const {
1463   if (UseSymVA)
1464     return Sym->getVA(Addend);
1465   if (!OutputSec)
1466     return Addend;
1467   // See the comment in the DynamicReloc ctor.
1468   return getMipsPageAddr(OutputSec->Addr) + Addend;
1469 }
1470 
getSymIndex() const1471 uint32_t DynamicReloc::getSymIndex() const {
1472   if (Sym && !UseSymVA)
1473     return Sym->DynsymIndex;
1474   return 0;
1475 }
1476 
RelocationBaseSection(StringRef Name,uint32_t Type,int32_t DynamicTag,int32_t SizeDynamicTag)1477 RelocationBaseSection::RelocationBaseSection(StringRef Name, uint32_t Type,
1478                                              int32_t DynamicTag,
1479                                              int32_t SizeDynamicTag)
1480     : SyntheticSection(SHF_ALLOC, Type, Config->Wordsize, Name),
1481       DynamicTag(DynamicTag), SizeDynamicTag(SizeDynamicTag) {}
1482 
addReloc(RelType DynType,InputSectionBase * IS,uint64_t OffsetInSec,Symbol * Sym)1483 void RelocationBaseSection::addReloc(RelType DynType, InputSectionBase *IS,
1484                                      uint64_t OffsetInSec, Symbol *Sym) {
1485   addReloc({DynType, IS, OffsetInSec, false, Sym, 0});
1486 }
1487 
addReloc(RelType DynType,InputSectionBase * InputSec,uint64_t OffsetInSec,Symbol * Sym,int64_t Addend,RelExpr Expr,RelType Type)1488 void RelocationBaseSection::addReloc(RelType DynType,
1489                                      InputSectionBase *InputSec,
1490                                      uint64_t OffsetInSec, Symbol *Sym,
1491                                      int64_t Addend, RelExpr Expr,
1492                                      RelType Type) {
1493   // Write the addends to the relocated address if required. We skip
1494   // it if the written value would be zero.
1495   if (Config->WriteAddends && (Expr != R_ADDEND || Addend != 0))
1496     InputSec->Relocations.push_back({Expr, Type, OffsetInSec, Addend, Sym});
1497   addReloc({DynType, InputSec, OffsetInSec, Expr != R_ADDEND, Sym, Addend});
1498 }
1499 
addReloc(const DynamicReloc & Reloc)1500 void RelocationBaseSection::addReloc(const DynamicReloc &Reloc) {
1501   if (Reloc.Type == Target->RelativeRel)
1502     ++NumRelativeRelocs;
1503   Relocs.push_back(Reloc);
1504 }
1505 
finalizeContents()1506 void RelocationBaseSection::finalizeContents() {
1507   // When linking glibc statically, .rel{,a}.plt contains R_*_IRELATIVE
1508   // relocations due to IFUNC (e.g. strcpy). sh_link will be set to 0 in that
1509   // case.
1510   InputSection *SymTab = Config->Relocatable ? In.SymTab : In.DynSymTab;
1511   if (SymTab && SymTab->getParent())
1512     getParent()->Link = SymTab->getParent()->SectionIndex;
1513   else
1514     getParent()->Link = 0;
1515 
1516   if (In.RelaPlt == this)
1517     getParent()->Info = In.GotPlt->getParent()->SectionIndex;
1518   if (In.RelaIplt == this)
1519     getParent()->Info = In.IgotPlt->getParent()->SectionIndex;
1520 }
1521 
RelrBaseSection()1522 RelrBaseSection::RelrBaseSection()
1523     : SyntheticSection(SHF_ALLOC,
1524                        Config->UseAndroidRelrTags ? SHT_ANDROID_RELR : SHT_RELR,
1525                        Config->Wordsize, ".relr.dyn") {}
1526 
1527 template <class ELFT>
encodeDynamicReloc(typename ELFT::Rela * P,const DynamicReloc & Rel)1528 static void encodeDynamicReloc(typename ELFT::Rela *P,
1529                                const DynamicReloc &Rel) {
1530   if (Config->IsRela)
1531     P->r_addend = Rel.computeAddend();
1532   P->r_offset = Rel.getOffset();
1533   P->setSymbolAndType(Rel.getSymIndex(), Rel.Type, Config->IsMips64EL);
1534 }
1535 
1536 template <class ELFT>
RelocationSection(StringRef Name,bool Sort)1537 RelocationSection<ELFT>::RelocationSection(StringRef Name, bool Sort)
1538     : RelocationBaseSection(Name, Config->IsRela ? SHT_RELA : SHT_REL,
1539                             Config->IsRela ? DT_RELA : DT_REL,
1540                             Config->IsRela ? DT_RELASZ : DT_RELSZ),
1541       Sort(Sort) {
1542   this->Entsize = Config->IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
1543 }
1544 
compRelocations(const DynamicReloc & A,const DynamicReloc & B)1545 static bool compRelocations(const DynamicReloc &A, const DynamicReloc &B) {
1546   bool AIsRel = A.Type == Target->RelativeRel;
1547   bool BIsRel = B.Type == Target->RelativeRel;
1548   if (AIsRel != BIsRel)
1549     return AIsRel;
1550   return A.getSymIndex() < B.getSymIndex();
1551 }
1552 
writeTo(uint8_t * Buf)1553 template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *Buf) {
1554   if (Sort)
1555     std::stable_sort(Relocs.begin(), Relocs.end(), compRelocations);
1556 
1557   for (const DynamicReloc &Rel : Relocs) {
1558     encodeDynamicReloc<ELFT>(reinterpret_cast<Elf_Rela *>(Buf), Rel);
1559     Buf += Config->IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
1560   }
1561 }
1562 
getRelocOffset()1563 template <class ELFT> unsigned RelocationSection<ELFT>::getRelocOffset() {
1564   return this->Entsize * Relocs.size();
1565 }
1566 
1567 template <class ELFT>
AndroidPackedRelocationSection(StringRef Name)1568 AndroidPackedRelocationSection<ELFT>::AndroidPackedRelocationSection(
1569     StringRef Name)
1570     : RelocationBaseSection(
1571           Name, Config->IsRela ? SHT_ANDROID_RELA : SHT_ANDROID_REL,
1572           Config->IsRela ? DT_ANDROID_RELA : DT_ANDROID_REL,
1573           Config->IsRela ? DT_ANDROID_RELASZ : DT_ANDROID_RELSZ) {
1574   this->Entsize = 1;
1575 }
1576 
1577 template <class ELFT>
updateAllocSize()1578 bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() {
1579   // This function computes the contents of an Android-format packed relocation
1580   // section.
1581   //
1582   // This format compresses relocations by using relocation groups to factor out
1583   // fields that are common between relocations and storing deltas from previous
1584   // relocations in SLEB128 format (which has a short representation for small
1585   // numbers). A good example of a relocation type with common fields is
1586   // R_*_RELATIVE, which is normally used to represent function pointers in
1587   // vtables. In the REL format, each relative relocation has the same r_info
1588   // field, and is only different from other relative relocations in terms of
1589   // the r_offset field. By sorting relocations by offset, grouping them by
1590   // r_info and representing each relocation with only the delta from the
1591   // previous offset, each 8-byte relocation can be compressed to as little as 1
1592   // byte (or less with run-length encoding). This relocation packer was able to
1593   // reduce the size of the relocation section in an Android Chromium DSO from
1594   // 2,911,184 bytes to 174,693 bytes, or 6% of the original size.
1595   //
1596   // A relocation section consists of a header containing the literal bytes
1597   // 'APS2' followed by a sequence of SLEB128-encoded integers. The first two
1598   // elements are the total number of relocations in the section and an initial
1599   // r_offset value. The remaining elements define a sequence of relocation
1600   // groups. Each relocation group starts with a header consisting of the
1601   // following elements:
1602   //
1603   // - the number of relocations in the relocation group
1604   // - flags for the relocation group
1605   // - (if RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG is set) the r_offset delta
1606   //   for each relocation in the group.
1607   // - (if RELOCATION_GROUPED_BY_INFO_FLAG is set) the value of the r_info
1608   //   field for each relocation in the group.
1609   // - (if RELOCATION_GROUP_HAS_ADDEND_FLAG and
1610   //   RELOCATION_GROUPED_BY_ADDEND_FLAG are set) the r_addend delta for
1611   //   each relocation in the group.
1612   //
1613   // Following the relocation group header are descriptions of each of the
1614   // relocations in the group. They consist of the following elements:
1615   //
1616   // - (if RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG is not set) the r_offset
1617   //   delta for this relocation.
1618   // - (if RELOCATION_GROUPED_BY_INFO_FLAG is not set) the value of the r_info
1619   //   field for this relocation.
1620   // - (if RELOCATION_GROUP_HAS_ADDEND_FLAG is set and
1621   //   RELOCATION_GROUPED_BY_ADDEND_FLAG is not set) the r_addend delta for
1622   //   this relocation.
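  //
  // A rough worked example (hypothetical offsets, REL form with no addends):
  // three relative relocations at 0x1000, 0x1008 and 0x1010 would be emitted by
  // the ungrouped-relatives path below, after the 'APS2' magic, as the SLEB128
  // sequence
  //
  //   3                                  total relocation count
  //   0                                  initial r_offset
  //   3                                  group header: relocation count
  //   RELOCATION_GROUPED_BY_INFO_FLAG    group flags
  //   R_*_RELATIVE                       shared r_info for the whole group
  //   0x1000, 8, 8                       per-relocation r_offset deltas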
1623 
1624   size_t OldSize = RelocData.size();
1625 
1626   RelocData = {'A', 'P', 'S', '2'};
1627   raw_svector_ostream OS(RelocData);
1628   auto Add = [&](int64_t V) { encodeSLEB128(V, OS); };
1629 
1630   // The format header includes the number of relocations and the initial
1631   // offset (we set this to zero because the first relocation group will
1632   // perform the initial adjustment).
1633   Add(Relocs.size());
1634   Add(0);
1635 
1636   std::vector<Elf_Rela> Relatives, NonRelatives;
1637 
1638   for (const DynamicReloc &Rel : Relocs) {
1639     Elf_Rela R;
1640     encodeDynamicReloc<ELFT>(&R, Rel);
1641 
1642     if (R.getType(Config->IsMips64EL) == Target->RelativeRel)
1643       Relatives.push_back(R);
1644     else
1645       NonRelatives.push_back(R);
1646   }
1647 
1648   llvm::sort(Relatives, [](const Elf_Rel &A, const Elf_Rel &B) {
1649     return A.r_offset < B.r_offset;
1650   });
1651 
1652   // Try to find groups of relative relocations which are spaced one word
1653   // apart from one another. These generally correspond to vtable entries. The
1654   // format allows these groups to be encoded using a sort of run-length
1655   // encoding, but each group will cost 7 bytes in addition to the offset from
1656   // the previous group, so it is only profitable to do this for groups of
1657   // size 8 or larger.
1658   std::vector<Elf_Rela> UngroupedRelatives;
1659   std::vector<std::vector<Elf_Rela>> RelativeGroups;
1660   for (auto I = Relatives.begin(), E = Relatives.end(); I != E;) {
1661     std::vector<Elf_Rela> Group;
1662     do {
1663       Group.push_back(*I++);
1664     } while (I != E && (I - 1)->r_offset + Config->Wordsize == I->r_offset);
1665 
1666     if (Group.size() < 8)
1667       UngroupedRelatives.insert(UngroupedRelatives.end(), Group.begin(),
1668                                 Group.end());
1669     else
1670       RelativeGroups.emplace_back(std::move(Group));
1671   }
1672 
1673   unsigned HasAddendIfRela =
1674       Config->IsRela ? RELOCATION_GROUP_HAS_ADDEND_FLAG : 0;
1675 
1676   uint64_t Offset = 0;
1677   uint64_t Addend = 0;
1678 
1679   // Emit the run-length encoding for the groups of adjacent relative
1680   // relocations. Each group is represented using two groups in the packed
1681   // format. The first is used to set the current offset to the start of the
1682   // group (and also encodes the first relocation), and the second encodes the
1683   // remaining relocations.
1684   for (std::vector<Elf_Rela> &G : RelativeGroups) {
1685     // The first relocation in the group.
1686     Add(1);
1687     Add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG |
1688         RELOCATION_GROUPED_BY_INFO_FLAG | HasAddendIfRela);
1689     Add(G[0].r_offset - Offset);
1690     Add(Target->RelativeRel);
1691     if (Config->IsRela) {
1692       Add(G[0].r_addend - Addend);
1693       Addend = G[0].r_addend;
1694     }
1695 
1696     // The remaining relocations.
1697     Add(G.size() - 1);
1698     Add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG |
1699         RELOCATION_GROUPED_BY_INFO_FLAG | HasAddendIfRela);
1700     Add(Config->Wordsize);
1701     Add(Target->RelativeRel);
1702     if (Config->IsRela) {
1703       for (auto I = G.begin() + 1, E = G.end(); I != E; ++I) {
1704         Add(I->r_addend - Addend);
1705         Addend = I->r_addend;
1706       }
1707     }
1708 
1709     Offset = G.back().r_offset;
1710   }
1711 
1712   // Now the ungrouped relatives.
1713   if (!UngroupedRelatives.empty()) {
1714     Add(UngroupedRelatives.size());
1715     Add(RELOCATION_GROUPED_BY_INFO_FLAG | HasAddendIfRela);
1716     Add(Target->RelativeRel);
1717     for (Elf_Rela &R : UngroupedRelatives) {
1718       Add(R.r_offset - Offset);
1719       Offset = R.r_offset;
1720       if (Config->IsRela) {
1721         Add(R.r_addend - Addend);
1722         Addend = R.r_addend;
1723       }
1724     }
1725   }
1726 
1727   // Finally the non-relative relocations.
1728   llvm::sort(NonRelatives, [](const Elf_Rela &A, const Elf_Rela &B) {
1729     return A.r_offset < B.r_offset;
1730   });
1731   if (!NonRelatives.empty()) {
1732     Add(NonRelatives.size());
1733     Add(HasAddendIfRela);
1734     for (Elf_Rela &R : NonRelatives) {
1735       Add(R.r_offset - Offset);
1736       Offset = R.r_offset;
1737       Add(R.r_info);
1738       if (Config->IsRela) {
1739         Add(R.r_addend - Addend);
1740         Addend = R.r_addend;
1741       }
1742     }
1743   }
1744 
1745   // Don't allow the section to shrink; otherwise the size of the section can
1746   // oscillate infinitely.
1747   if (RelocData.size() < OldSize)
1748     RelocData.append(OldSize - RelocData.size(), 0);
1749 
1750   // Returns whether the section size changed. We need to keep recomputing both
1751   // section layout and the contents of this section until the size converges
1752   // because changing this section's size can affect section layout, which in
1753   // turn can affect the sizes of the LEB-encoded integers stored in this
1754   // section.
1755   return RelocData.size() != OldSize;
1756 }
1757 
RelrSection()1758 template <class ELFT> RelrSection<ELFT>::RelrSection() {
1759   this->Entsize = Config->Wordsize;
1760 }
1761 
updateAllocSize()1762 template <class ELFT> bool RelrSection<ELFT>::updateAllocSize() {
1763   // This function computes the contents of an SHT_RELR packed relocation
1764   // section.
1765   //
1766   // Proposal for adding SHT_RELR sections to generic-abi is here:
1767   //   https://groups.google.com/forum/#!topic/generic-abi/bX460iggiKg
1768   //
1769   // The encoded sequence of Elf64_Relr entries in a SHT_RELR section looks
1770   // like [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBBB1 ... ]
1771   //
1772   // i.e. start with an address, followed by any number of bitmaps. The address
1773   // entry encodes 1 relocation. The subsequent bitmap entries encode up to 63
1774   // relocations each, at subsequent offsets following the last address entry.
1775   //
1776   // The bitmap entries must have 1 in the least significant bit. The assumption
1777   // here is that an address cannot have 1 in lsb. Odd addresses are not
1778   // supported.
1779   //
1780   // Excluding the least significant bit in the bitmap, each non-zero bit in
1781   // the bitmap represents a relocation to be applied to a corresponding machine
1782   // word that follows the base address word. The second least significant bit
1783   // represents the machine word immediately following the initial address, and
1784   // each bit that follows represents the next word, in linear order. As such,
1785   // a single bitmap can encode up to 31 relocations in a 32-bit object, and
1786   // 63 relocations in a 64-bit object.
1787   //
1788   // This encoding has a couple of interesting properties:
1789   // 1. Looking at any entry, it is clear whether it's an address or a bitmap:
1790   //    even means address, odd means bitmap.
1791   // 2. Just a simple list of addresses is a valid encoding.
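  //
  // A small worked example (hypothetical offsets, 64-bit target): relative
  // relocations at 0x10000, 0x10008 and 0x10010 are encoded by the loop below
  // as two Elf64_Relr words:
  //
  //   0x10000              even => address entry, covers the word at 0x10000
  //   (0b11 << 1) | 1 = 7  odd  => bitmap; stored bits 1 and 2 cover the words
  //                        at 0x10008 and 0x10010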
1792 
1793   size_t OldSize = RelrRelocs.size();
1794   RelrRelocs.clear();
1795 
1796   // Same as Config->Wordsize but faster because this is a compile-time
1797   // constant.
1798   const size_t Wordsize = sizeof(typename ELFT::uint);
1799 
1800   // Number of bits to use for the relocation offsets bitmap.
1801   // Must be either 63 or 31.
1802   const size_t NBits = Wordsize * 8 - 1;
1803 
1804   // Get offsets for all relative relocations and sort them.
1805   std::vector<uint64_t> Offsets;
1806   for (const RelativeReloc &Rel : Relocs)
1807     Offsets.push_back(Rel.getOffset());
1808   llvm::sort(Offsets.begin(), Offsets.end());
1809 
1810   // For each leading relocation, find following ones that can be folded
1811   // as a bitmap and fold them.
1812   for (size_t I = 0, E = Offsets.size(); I < E;) {
1813     // Add a leading relocation.
1814     RelrRelocs.push_back(Elf_Relr(Offsets[I]));
1815     uint64_t Base = Offsets[I] + Wordsize;
1816     ++I;
1817 
1818     // Find foldable relocations to construct bitmaps.
1819     while (I < E) {
1820       uint64_t Bitmap = 0;
1821 
1822       while (I < E) {
1823         uint64_t Delta = Offsets[I] - Base;
1824 
1825         // If it is too far, it cannot be folded.
1826         if (Delta >= NBits * Wordsize)
1827           break;
1828 
1829         // If it is not a multiple of wordsize away, it cannot be folded.
1830         if (Delta % Wordsize)
1831           break;
1832 
1833         // Fold it.
1834         Bitmap |= 1ULL << (Delta / Wordsize);
1835         ++I;
1836       }
1837 
1838       if (!Bitmap)
1839         break;
1840 
1841       RelrRelocs.push_back(Elf_Relr((Bitmap << 1) | 1));
1842       Base += NBits * Wordsize;
1843     }
1844   }
1845 
1846   return RelrRelocs.size() != OldSize;
1847 }
1848 
SymbolTableBaseSection(StringTableSection & StrTabSec)1849 SymbolTableBaseSection::SymbolTableBaseSection(StringTableSection &StrTabSec)
1850     : SyntheticSection(StrTabSec.isDynamic() ? (uint64_t)SHF_ALLOC : 0,
1851                        StrTabSec.isDynamic() ? SHT_DYNSYM : SHT_SYMTAB,
1852                        Config->Wordsize,
1853                        StrTabSec.isDynamic() ? ".dynsym" : ".symtab"),
1854       StrTabSec(StrTabSec) {}
1855 
1856 // Orders symbols according to their positions in the GOT,
1857 // in compliance with MIPS ABI rules.
1858 // See "Global Offset Table" in Chapter 5 in the following document
1859 // for a detailed description:
1860 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
sortMipsSymbols(const SymbolTableEntry & L,const SymbolTableEntry & R)1861 static bool sortMipsSymbols(const SymbolTableEntry &L,
1862                             const SymbolTableEntry &R) {
1863   // Sort entries related to non-local preemptible symbols by GOT indexes.
1864   // All other entries go to the beginning of a dynsym in arbitrary order.
1865   if (L.Sym->isInGot() && R.Sym->isInGot())
1866     return L.Sym->GotIndex < R.Sym->GotIndex;
1867   if (!L.Sym->isInGot() && !R.Sym->isInGot())
1868     return false;
1869   return !L.Sym->isInGot();
1870 }
1871 
finalizeContents()1872 void SymbolTableBaseSection::finalizeContents() {
1873   if (OutputSection *Sec = StrTabSec.getParent())
1874     getParent()->Link = Sec->SectionIndex;
1875 
1876   if (this->Type != SHT_DYNSYM) {
1877     sortSymTabSymbols();
1878     return;
1879   }
1880 
1881   // If it is a .dynsym, there should be no local symbols, but we need
1882   // to do a few things for the dynamic linker.
1883 
1884   // Section's Info field has the index of the first non-local symbol.
1885   // Because the first symbol entry is a null entry, 1 is the first.
1886   getParent()->Info = 1;
1887 
1888   if (In.GnuHashTab) {
1889     // NB: It also sorts Symbols to meet the GNU hash table requirements.
1890     In.GnuHashTab->addSymbols(Symbols);
1891   } else if (Config->EMachine == EM_MIPS) {
1892     std::stable_sort(Symbols.begin(), Symbols.end(), sortMipsSymbols);
1893   }
1894 
1895   size_t I = 0;
1896   for (const SymbolTableEntry &S : Symbols)
1897     S.Sym->DynsymIndex = ++I;
1898 }
1899 
1900 // The ELF spec requires that all local symbols precede global symbols, so we
1901 // sort symbol entries in this function. (For .dynsym, we don't do that because
1902 // symbols for dynamic linking are inherently all globals.)
1903 //
1904 // Aside from the above, we put local symbols into groups, each starting with an
1905 // STT_FILE symbol. That is convenient for identifying where local symbols
1906 // come from.
sortSymTabSymbols()1907 void SymbolTableBaseSection::sortSymTabSymbols() {
1908   // Move all local symbols before global symbols.
1909   auto E = std::stable_partition(
1910       Symbols.begin(), Symbols.end(), [](const SymbolTableEntry &S) {
1911         return S.Sym->isLocal() || S.Sym->computeBinding() == STB_LOCAL;
1912       });
1913   size_t NumLocals = E - Symbols.begin();
1914   getParent()->Info = NumLocals + 1;
1915 
1916   // We want to group the local symbols by file. For that we rebuild the local
1917   // part of the symbols vector. We do not need to care about the STT_FILE
1918   // symbols, they are already naturally placed first in each group. That
1919   // happens because STT_FILE is always the first symbol in the object and hence
1920   // precedes all other local symbols we add for a file.
1921   MapVector<InputFile *, std::vector<SymbolTableEntry>> Arr;
1922   for (const SymbolTableEntry &S : llvm::make_range(Symbols.begin(), E))
1923     Arr[S.Sym->File].push_back(S);
1924 
1925   auto I = Symbols.begin();
1926   for (std::pair<InputFile *, std::vector<SymbolTableEntry>> &P : Arr)
1927     for (SymbolTableEntry &Entry : P.second)
1928       *I++ = Entry;
1929 }
1930 
addSymbol(Symbol * B)1931 void SymbolTableBaseSection::addSymbol(Symbol *B) {
1932   // Adding a local symbol to a .dynsym is a bug.
1933   assert(this->Type != SHT_DYNSYM || !B->isLocal());
1934 
1935   bool HashIt = B->isLocal();
1936   Symbols.push_back({B, StrTabSec.addString(B->getName(), HashIt)});
1937 }
1938 
getSymbolIndex(Symbol * Sym)1939 size_t SymbolTableBaseSection::getSymbolIndex(Symbol *Sym) {
1940   // Initializes symbol lookup tables lazily. This is used only
1941   // for -r or -emit-relocs.
1942   llvm::call_once(OnceFlag, [&] {
1943     SymbolIndexMap.reserve(Symbols.size());
1944     size_t I = 0;
1945     for (const SymbolTableEntry &E : Symbols) {
1946       if (E.Sym->Type == STT_SECTION)
1947         SectionIndexMap[E.Sym->getOutputSection()] = ++I;
1948       else
1949         SymbolIndexMap[E.Sym] = ++I;
1950     }
1951   });
1952 
1953   // Section symbols are mapped based on their output sections
1954   // to maintain their semantics.
1955   if (Sym->Type == STT_SECTION)
1956     return SectionIndexMap.lookup(Sym->getOutputSection());
1957   return SymbolIndexMap.lookup(Sym);
1958 }
1959 
1960 template <class ELFT>
SymbolTableSection(StringTableSection & StrTabSec)1961 SymbolTableSection<ELFT>::SymbolTableSection(StringTableSection &StrTabSec)
1962     : SymbolTableBaseSection(StrTabSec) {
1963   this->Entsize = sizeof(Elf_Sym);
1964 }
1965 
getCommonSec(Symbol * Sym)1966 static BssSection *getCommonSec(Symbol *Sym) {
1967   if (!Config->DefineCommon)
1968     if (auto *D = dyn_cast<Defined>(Sym))
1969       return dyn_cast_or_null<BssSection>(D->Section);
1970   return nullptr;
1971 }
1972 
getSymSectionIndex(Symbol * Sym)1973 static uint32_t getSymSectionIndex(Symbol *Sym) {
1974   if (getCommonSec(Sym))
1975     return SHN_COMMON;
1976   if (!isa<Defined>(Sym) || Sym->NeedsPltAddr)
1977     return SHN_UNDEF;
1978   if (const OutputSection *OS = Sym->getOutputSection())
1979     return OS->SectionIndex >= SHN_LORESERVE ? (uint32_t)SHN_XINDEX
1980                                              : OS->SectionIndex;
1981   return SHN_ABS;
1982 }
1983 
1984 // Write the internal symbol table contents to the output symbol table.
writeTo(uint8_t * Buf)1985 template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *Buf) {
1986   // The first entry is a null entry as per the ELF spec.
1987   memset(Buf, 0, sizeof(Elf_Sym));
1988   Buf += sizeof(Elf_Sym);
1989 
1990   auto *ESym = reinterpret_cast<Elf_Sym *>(Buf);
1991 
1992   for (SymbolTableEntry &Ent : Symbols) {
1993     Symbol *Sym = Ent.Sym;
1994 
1995     // Set st_info and st_other.
1996     ESym->st_other = 0;
1997     if (Sym->isLocal()) {
1998       ESym->setBindingAndType(STB_LOCAL, Sym->Type);
1999     } else {
2000       ESym->setBindingAndType(Sym->computeBinding(), Sym->Type);
2001       ESym->setVisibility(Sym->Visibility);
2002     }
2003 
2004     // The 3 most significant bits of st_other are used by OpenPOWER ABI.
2005     // See getPPC64GlobalEntryToLocalEntryOffset() for more details.
2006     if (Config->EMachine == EM_PPC64)
2007       ESym->st_other |= Sym->StOther & 0xe0;
2008 
2009     ESym->st_name = Ent.StrTabOffset;
2010     ESym->st_shndx = getSymSectionIndex(Ent.Sym);
2011 
2012     // Copy symbol size if it is a defined symbol. st_size is not significant
2013     // for undefined symbols, so whether copying it or not is up to us if that's
2014     // the case. We'll leave it as zero because by not setting a value, we can
2015     // get the exact same outputs for two sets of input files that differ only
2016     // in undefined symbol size in DSOs.
2017     if (ESym->st_shndx == SHN_UNDEF)
2018       ESym->st_size = 0;
2019     else
2020       ESym->st_size = Sym->getSize();
2021 
2022     // st_value is usually an address of a symbol, but that has a
2023     // special meaning for uninstantiated common symbols (this can
2024     // occur if -r is given).
2025     if (BssSection *CommonSec = getCommonSec(Ent.Sym))
2026       ESym->st_value = CommonSec->Alignment;
2027     else
2028       ESym->st_value = Sym->getVA();
2029 
2030     ++ESym;
2031   }
2032 
2033   // On MIPS we need to mark a symbol which has a PLT entry and requires
2034   // pointer equality with the STO_MIPS_PLT flag. That is necessary to help the
2035   // dynamic linker distinguish such symbols from MIPS lazy-binding stubs.
2036   // https://sourceware.org/ml/binutils/2008-07/txt00000.txt
2037   if (Config->EMachine == EM_MIPS) {
2038     auto *ESym = reinterpret_cast<Elf_Sym *>(Buf);
2039 
2040     for (SymbolTableEntry &Ent : Symbols) {
2041       Symbol *Sym = Ent.Sym;
2042       if (Sym->isInPlt() && Sym->NeedsPltAddr)
2043         ESym->st_other |= STO_MIPS_PLT;
2044       if (isMicroMips()) {
2045         // Set the STO_MIPS_MICROMIPS flag and the least significant address
2046         // bit for a defined microMIPS symbol, or for a symbol that should
2047         // point to its PLT entry (in the case of microMIPS, PLT entries
2048         // always contain microMIPS code).
2049         if (Sym->isDefined() &&
2050             ((Sym->StOther & STO_MIPS_MICROMIPS) || Sym->NeedsPltAddr)) {
2051           if (StrTabSec.isDynamic())
2052             ESym->st_value |= 1;
2053           ESym->st_other |= STO_MIPS_MICROMIPS;
2054         }
2055       }
2056       if (Config->Relocatable)
2057         if (auto *D = dyn_cast<Defined>(Sym))
2058           if (isMipsPIC<ELFT>(D))
2059             ESym->st_other |= STO_MIPS_PIC;
2060       ++ESym;
2061     }
2062   }
2063 }
2064 
SymtabShndxSection()2065 SymtabShndxSection::SymtabShndxSection()
2066     : SyntheticSection(0, SHT_SYMTAB_SHNDX, 4, ".symtab_shndxr") {
2067   this->Entsize = 4;
2068 }
2069 
writeTo(uint8_t * Buf)2070 void SymtabShndxSection::writeTo(uint8_t *Buf) {
2071   // We write an array of 32-bit values, where each value has a 1:1 association
2072   // with an entry in .symtab. If the corresponding entry contains SHN_XINDEX,
2073   // we need to write the actual index; otherwise, we must write SHN_UNDEF (0).
2074   Buf += 4; // Ignore .symtab[0] entry.
2075   for (const SymbolTableEntry &Entry : In.SymTab->getSymbols()) {
2076     if (getSymSectionIndex(Entry.Sym) == SHN_XINDEX)
2077       write32(Buf, Entry.Sym->getOutputSection()->SectionIndex);
2078     Buf += 4;
2079   }
2080 }
2081 
empty() const2082 bool SymtabShndxSection::empty() const {
2083   // SHT_SYMTAB can hold symbols with section index values up to
2084   // SHN_LORESERVE. If we need more, we want to use the SHT_SYMTAB_SHNDX
2085   // extension section. The problem is that the final section indices are
2086   // revealed a bit too late, and we do not know them here. For simplicity, we
2087   // just always create a .symtab_shndxr section when the number of output sections is huge.
2088   size_t Size = 0;
2089   for (BaseCommand *Base : Script->SectionCommands)
2090     if (isa<OutputSection>(Base))
2091       ++Size;
2092   return Size < SHN_LORESERVE;
2093 }
2094 
finalizeContents()2095 void SymtabShndxSection::finalizeContents() {
2096   getParent()->Link = In.SymTab->getParent()->SectionIndex;
2097 }
2098 
getSize() const2099 size_t SymtabShndxSection::getSize() const {
2100   return In.SymTab->getNumSymbols() * 4;
2101 }
2102 
2103 // .hash and .gnu.hash sections contain on-disk hash tables that map
2104 // symbol names to their dynamic symbol table indices. Their purpose
2105 // is to help the dynamic linker resolve symbols quickly. If ELF files
2106 // don't have them, the dynamic linker has to do linear search on all
2107 // dynamic symbols, which makes programs slower. Therefore, a .hash
2108 // section is added to a DSO by default. A .gnu.hash is added if you
2109 // give the -hash-style=gnu or -hash-style=both option.
2110 //
2111 // The Unix semantics of resolving dynamic symbols is somewhat expensive.
2112 // Each ELF file has a list of DSOs that the ELF file depends on and a
2113 // list of dynamic symbols that need to be resolved from any of the
2114 // DSOs. That means resolving all dynamic symbols takes O(m*n) time,
2115 // where m is the number of DSOs and n is the number of dynamic
2116 // symbols. For modern large programs, both m and n are large. So
2117 // making each step faster by using hash tables substantially
2118 // improves the time it takes to load programs.
2119 //
2120 // (Note that this is not the only way to design the shared library.
2121 // For instance, the Windows DLL takes a different approach. On
2122 // Windows, each dynamic symbol has the name of the DLL from which the symbol
2123 // has to be resolved. That makes the cost of symbol resolution O(n).
2124 // This disables some hacky techniques you can use on Unix such as
2125 // LD_PRELOAD, but this is arguably better semantics than the Unix ones.)
2126 //
2127 // Due to historical reasons, we have two different hash tables, .hash
2128 // and .gnu.hash. They are for the same purpose, and .gnu.hash is a new
2129 // and better version of .hash. .hash is just an on-disk hash table, but
2130 // .gnu.hash has a bloom filter in addition to a hash table to skip
2131 // DSOs very quickly. If you are sure that your dynamic linker knows
2132 // about .gnu.hash, you want to specify -hash-style=gnu. Otherwise, a
2133 // safe bet is to specify -hash-style=both for backward compatibility.
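//
// For intuition, here is a rough sketch of the lookup a dynamic loader performs
// against the .gnu.hash data written below (simplified pseudocode, not the code
// of any particular loader; SymOffset is the "skipped symbols" count stored in
// the header):
//
//   H = hashGnu(Name);
//   W = Bloom[(H / C) % MaskWords];                  // C = 8 * Wordsize
//   if (((W >> (H % C)) & (W >> ((H >> Shift2) % C)) & 1) == 0)
//     return NotFound;                               // cheap rejection
//   for (uint32_t I = Buckets[H % NBuckets]; I != 0; ++I) {
//     uint32_t HashVal = HashValues[I - SymOffset];
//     if ((HashVal | 1) == (H | 1) && nameMatches(I, Name))
//       return I;
//     if (HashVal & 1)                               // LSB 1 ends the chain
//       break;
//   }
//   return NotFound;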
GnuHashTableSection()2134 GnuHashTableSection::GnuHashTableSection()
2135     : SyntheticSection(SHF_ALLOC, SHT_GNU_HASH, Config->Wordsize, ".gnu.hash") {
2136 }
2137 
finalizeContents()2138 void GnuHashTableSection::finalizeContents() {
2139   if (OutputSection *Sec = In.DynSymTab->getParent())
2140     getParent()->Link = Sec->SectionIndex;
2141 
2142   // Compute the bloom filter size in words. We want to allocate 12
2143   // bits for each symbol. The size must be a power of two.
2144   if (Symbols.empty()) {
2145     MaskWords = 1;
2146   } else {
2147     uint64_t NumBits = Symbols.size() * 12;
2148     MaskWords = NextPowerOf2(NumBits / (Config->Wordsize * 8));
2149   }
2150 
2151   Size = 16;                            // Header
2152   Size += Config->Wordsize * MaskWords; // Bloom filter
2153   Size += NBuckets * 4;                 // Hash buckets
2154   Size += Symbols.size() * 4;           // Hash values
2155 }
2156 
writeTo(uint8_t * Buf)2157 void GnuHashTableSection::writeTo(uint8_t *Buf) {
2158   // The output buffer is not guaranteed to be zero-cleared because we pre-
2159   // fill executable sections with trap instructions. This is a precaution
2160   // for that case, which happens only when -no-rosegment is given.
2161   memset(Buf, 0, Size);
2162 
2163   // Write a header.
2164   write32(Buf, NBuckets);
2165   write32(Buf + 4, In.DynSymTab->getNumSymbols() - Symbols.size());
2166   write32(Buf + 8, MaskWords);
2167   write32(Buf + 12, Shift2);
2168   Buf += 16;
2169 
2170   // Write a bloom filter and a hash table.
2171   writeBloomFilter(Buf);
2172   Buf += Config->Wordsize * MaskWords;
2173   writeHashTable(Buf);
2174 }
2175 
2176 // This function writes a Bloom filter in which each symbol sets 2 bits. This
2177 // filter alone usually filters out 80% or more of all symbol lookups [1].
2178 // The dynamic linker uses the hash table only when a symbol is not
2179 // filtered out by a bloom filter.
2180 //
2181 // [1] Ulrich Drepper (2011), "How To Write Shared Libraries" (Ver. 4.1.2),
2182 //     p.9, https://www.akkadia.org/drepper/dsohowto.pdf
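//
// As a hedged numeric illustration (assuming Shift2 == 26, as the [26:31]
// comment below suggests for 64-bit outputs): with MaskWords == 4, a symbol
// whose hash is 0x08000034 selects word (Hash / 64) & 3 == 0 and sets bits
// Hash % 64 == 52 and (Hash >> 26) % 64 == 2 in that word.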
writeBloomFilter(uint8_t * Buf)2183 void GnuHashTableSection::writeBloomFilter(uint8_t *Buf) {
2184   unsigned C = Config->Is64 ? 64 : 32;
2185   for (const Entry &Sym : Symbols) {
2186     // When C = 64, we choose a word using hash bits [6:...] and set two bits
2187     // in that word to 1, selected by hash bits [0:5] and [26:31].
2188     size_t I = (Sym.Hash / C) & (MaskWords - 1);
2189     uint64_t Val = readUint(Buf + I * Config->Wordsize);
2190     Val |= uint64_t(1) << (Sym.Hash % C);
2191     Val |= uint64_t(1) << ((Sym.Hash >> Shift2) % C);
2192     writeUint(Buf + I * Config->Wordsize, Val);
2193   }
2194 }
2195 
writeHashTable(uint8_t * Buf)2196 void GnuHashTableSection::writeHashTable(uint8_t *Buf) {
2197   uint32_t *Buckets = reinterpret_cast<uint32_t *>(Buf);
2198   uint32_t OldBucket = -1;
2199   uint32_t *Values = Buckets + NBuckets;
2200   for (auto I = Symbols.begin(), E = Symbols.end(); I != E; ++I) {
2201     // Write a hash value. It represents a sequence of chains that share the
2202     // same hash modulo value. The last element of each chain is marked by
2203     // setting its LSB to 1.
2204     uint32_t Hash = I->Hash;
2205     bool IsLastInChain = (I + 1) == E || I->BucketIdx != (I + 1)->BucketIdx;
2206     Hash = IsLastInChain ? Hash | 1 : Hash & ~1;
2207     write32(Values++, Hash);
2208 
2209     if (I->BucketIdx == OldBucket)
2210       continue;
2211     // Write a hash bucket. Hash buckets contain indices in the following hash
2212     // value table.
2213     write32(Buckets + I->BucketIdx, I->Sym->DynsymIndex);
2214     OldBucket = I->BucketIdx;
2215   }
2216 }
2217 
hashGnu(StringRef Name)2218 static uint32_t hashGnu(StringRef Name) {
2219   uint32_t H = 5381;
2220   for (uint8_t C : Name)
2221     H = (H << 5) + H + C;
2222   return H;
2223 }
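// For example, hashGnu("") == 5381 and hashGnu("a") == 5381 * 33 + 'a' == 177670;
// each iteration is equivalent to H = H * 33 + C, i.e. the classic DJB2 hash.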
2224 
2225 // Add symbols to this symbol hash table. Note that this function
2226 // destructively sorts a given vector -- which is needed because the
2227 // GNU-style hash table imposes some sorting requirements.
addSymbols(std::vector<SymbolTableEntry> & V)2228 void GnuHashTableSection::addSymbols(std::vector<SymbolTableEntry> &V) {
2229   // We cannot use 'auto' for Mid because GCC 6.1 cannot deduce
2230   // its type correctly.
2231   std::vector<SymbolTableEntry>::iterator Mid =
2232       std::stable_partition(V.begin(), V.end(), [](const SymbolTableEntry &S) {
2233         return !S.Sym->isDefined();
2234       });
2235 
2236   // We chose load factor 4 for the on-disk hash table. For each hash
2237   // collision, the dynamic linker will compare a uint32_t hash value.
2238   // Since the integer comparison is quite fast, we believe we can
2239   // make the load factor even larger. 4 is just a conservative choice.
2240   //
2241   // Note that we don't want to create a zero-sized hash table because
2242   // the Android loader as of 2018 doesn't like a .gnu.hash containing such
2243   // a table. If that's the case, we create a hash table with one unused
2244   // dummy slot.
2245   NBuckets = std::max<size_t>((V.end() - Mid) / 4, 1);
2246 
2247   if (Mid == V.end())
2248     return;
2249 
2250   for (SymbolTableEntry &Ent : llvm::make_range(Mid, V.end())) {
2251     Symbol *B = Ent.Sym;
2252     uint32_t Hash = hashGnu(B->getName());
2253     uint32_t BucketIdx = Hash % NBuckets;
2254     Symbols.push_back({B, Ent.StrTabOffset, Hash, BucketIdx});
2255   }
2256 
2257   std::stable_sort(
2258       Symbols.begin(), Symbols.end(),
2259       [](const Entry &L, const Entry &R) { return L.BucketIdx < R.BucketIdx; });
2260 
2261   V.erase(Mid, V.end());
2262   for (const Entry &Ent : Symbols)
2263     V.push_back({Ent.Sym, Ent.StrTabOffset});
2264 }
2265 
HashTableSection()2266 HashTableSection::HashTableSection()
2267     : SyntheticSection(SHF_ALLOC, SHT_HASH, 4, ".hash") {
2268   this->Entsize = 4;
2269 }
2270 
finalizeContents()2271 void HashTableSection::finalizeContents() {
2272   if (OutputSection *Sec = In.DynSymTab->getParent())
2273     getParent()->Link = Sec->SectionIndex;
2274 
2275   unsigned NumEntries = 2;                       // nbucket and nchain.
2276   NumEntries += In.DynSymTab->getNumSymbols();   // The chain entries.
2277 
2278   // Create as many buckets as there are symbols.
2279   NumEntries += In.DynSymTab->getNumSymbols();
2280   this->Size = NumEntries * 4;
2281 }
2282 
writeTo(uint8_t * Buf)2283 void HashTableSection::writeTo(uint8_t *Buf) {
2284   // See comment in GnuHashTableSection::writeTo.
2285   memset(Buf, 0, Size);
2286 
2287   unsigned NumSymbols = In.DynSymTab->getNumSymbols();
2288 
2289   uint32_t *P = reinterpret_cast<uint32_t *>(Buf);
2290   write32(P++, NumSymbols); // nbucket
2291   write32(P++, NumSymbols); // nchain
2292 
2293   uint32_t *Buckets = P;
2294   uint32_t *Chains = P + NumSymbols;
2295 
2296   for (const SymbolTableEntry &S : In.DynSymTab->getSymbols()) {
2297     Symbol *Sym = S.Sym;
2298     StringRef Name = Sym->getName();
2299     unsigned I = Sym->DynsymIndex;
2300     uint32_t Hash = hashSysV(Name) % NumSymbols;
2301     Chains[I] = Buckets[Hash];
2302     write32(Buckets + Hash, I);
2303   }
2304 }
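// For intuition, a sketch of the lookup a dynamic loader performs against this
// table (simplified pseudocode only): hash the name, pick a bucket, then follow
// the chain until index 0 (STN_UNDEF) is reached:
//
//   for (uint32_t I = Buckets[hashSysV(Name) % NBuckets]; I != 0; I = Chains[I])
//     if (nameMatches(DynSym[I], Name))
//       return I;
//   return NotFound;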
2305 
2306 // On PowerPC64 the lazy symbol resolvers go into the `global linkage table`
2307 // in the .glink section, rather than the typical .plt section.
PltSection(bool IsIplt)2308 PltSection::PltSection(bool IsIplt)
2309     : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16,
2310                        Config->EMachine == EM_PPC64 ? ".glink" : ".plt"),
2311       HeaderSize(!IsIplt || Config->ZRetpolineplt ? Target->PltHeaderSize : 0),
2312       IsIplt(IsIplt) {
2313   // The PLT needs to be writable on SPARC as the dynamic linker will
2314   // modify the instructions in the PLT entries.
2315   if (Config->EMachine == EM_SPARCV9)
2316     this->Flags |= SHF_WRITE;
2317 }
2318 
writeTo(uint8_t * Buf)2319 void PltSection::writeTo(uint8_t *Buf) {
2320   // At the beginning of the PLT or a retpoline IPLT, we have code to call the
2321   // dynamic linker to resolve dynsyms at runtime. Write such code.
2322   if (HeaderSize > 0)
2323     Target->writePltHeader(Buf);
2324   size_t Off = HeaderSize;
2325   // The IPlt is immediately after the Plt, account for this in RelOff
2326   unsigned PltOff = getPltRelocOff();
2327 
2328   for (auto &I : Entries) {
2329     const Symbol *B = I.first;
2330     unsigned RelOff = I.second + PltOff;
2331     uint64_t Got = B->getGotPltVA();
2332     uint64_t Plt = this->getVA() + Off;
2333     Target->writePlt(Buf + Off, Got, Plt, B->PltIndex, RelOff);
2334     Off += Target->PltEntrySize;
2335   }
2336 }
2337 
addEntry(Symbol & Sym)2338 template <class ELFT> void PltSection::addEntry(Symbol &Sym) {
2339   Sym.PltIndex = Entries.size();
2340   RelocationBaseSection *PltRelocSection = In.RelaPlt;
2341   if (IsIplt) {
2342     PltRelocSection = In.RelaIplt;
2343     Sym.IsInIplt = true;
2344   }
2345   unsigned RelOff =
2346       static_cast<RelocationSection<ELFT> *>(PltRelocSection)->getRelocOffset();
2347   Entries.push_back(std::make_pair(&Sym, RelOff));
2348 }
2349 
getSize() const2350 size_t PltSection::getSize() const {
2351   return HeaderSize + Entries.size() * Target->PltEntrySize;
2352 }
2353 
2354 // Some architectures require additional symbols in the PLT section. For
2355 // example, ARM uses mapping symbols to aid disassembly.
addSymbols()2356 void PltSection::addSymbols() {
2357   // The PLT may have symbols defined for the header; the IPLT has no header.
2358   if (!IsIplt)
2359     Target->addPltHeaderSymbols(*this);
2360   size_t Off = HeaderSize;
2361   for (size_t I = 0; I < Entries.size(); ++I) {
2362     Target->addPltSymbols(*this, Off);
2363     Off += Target->PltEntrySize;
2364   }
2365 }
2366 
getPltRelocOff() const2367 unsigned PltSection::getPltRelocOff() const {
2368   return IsIplt ? In.Plt->getSize() : 0;
2369 }
2370 
2371 // The string hash function for .gdb_index.
computeGdbHash(StringRef S)2372 static uint32_t computeGdbHash(StringRef S) {
2373   uint32_t H = 0;
2374   for (uint8_t C : S)
2375     H = H * 67 + toLower(C) - 113;
2376   return H;
2377 }
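// Note that the hash is case-insensitive because each byte is lowered first;
// for example, computeGdbHash("Foo") == computeGdbHash("foo"), and with
// unsigned wrap-around computeGdbHash("a") == 0xfffffff0.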
2378 
GdbIndexSection()2379 GdbIndexSection::GdbIndexSection()
2380     : SyntheticSection(0, SHT_PROGBITS, 1, ".gdb_index") {}
2381 
2382 // Returns the desired size of an on-disk hash table for a .gdb_index section.
2383 // There's a tradeoff between size and collision rate. We aim for 75% utilization.
computeSymtabSize() const2384 size_t GdbIndexSection::computeSymtabSize() const {
2385   return std::max<size_t>(NextPowerOf2(Symbols.size() * 4 / 3), 1024);
2386 }
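// For example (hypothetical count): 3,000 symbols give
// NextPowerOf2(3000 * 4 / 3) == NextPowerOf2(4000) == 4096 slots, i.e. about
// 73% utilization; very small symbol counts fall back to the 1024-slot minimum.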
2387 
2388 // Compute the output section size.
initOutputSize()2389 void GdbIndexSection::initOutputSize() {
2390   Size = sizeof(GdbIndexHeader) + computeSymtabSize() * 8;
2391 
2392   for (GdbChunk &Chunk : Chunks)
2393     Size += Chunk.CompilationUnits.size() * 16 + Chunk.AddressAreas.size() * 20;
2394 
2395   // Add the constant pool size if it exists.
2396   if (!Symbols.empty()) {
2397     GdbSymbol &Sym = Symbols.back();
2398     Size += Sym.NameOff + Sym.Name.size() + 1;
2399   }
2400 }
2401 
getDebugInfoSections()2402 static std::vector<InputSection *> getDebugInfoSections() {
2403   std::vector<InputSection *> Ret;
2404   for (InputSectionBase *S : InputSections)
2405     if (InputSection *IS = dyn_cast<InputSection>(S))
2406       if (IS->Name == ".debug_info")
2407         Ret.push_back(IS);
2408   return Ret;
2409 }
2410 
readCuList(DWARFContext & Dwarf)2411 static std::vector<GdbIndexSection::CuEntry> readCuList(DWARFContext &Dwarf) {
2412   std::vector<GdbIndexSection::CuEntry> Ret;
2413   for (std::unique_ptr<DWARFUnit> &Cu : Dwarf.compile_units())
2414     Ret.push_back({Cu->getOffset(), Cu->getLength() + 4});
2415   return Ret;
2416 }
2417 
2418 static std::vector<GdbIndexSection::AddressEntry>
readAddressAreas(DWARFContext & Dwarf,InputSection * Sec)2419 readAddressAreas(DWARFContext &Dwarf, InputSection *Sec) {
2420   std::vector<GdbIndexSection::AddressEntry> Ret;
2421 
2422   uint32_t CuIdx = 0;
2423   for (std::unique_ptr<DWARFUnit> &Cu : Dwarf.compile_units()) {
2424     Expected<DWARFAddressRangesVector> Ranges = Cu->collectAddressRanges();
2425     if (!Ranges) {
2426       error(toString(Sec) + ": " + toString(Ranges.takeError()));
2427       return {};
2428     }
2429 
2430     ArrayRef<InputSectionBase *> Sections = Sec->File->getSections();
2431     for (DWARFAddressRange &R : *Ranges) {
2432       InputSectionBase *S = Sections[R.SectionIndex];
2433       if (!S || S == &InputSection::Discarded || !S->Live)
2434         continue;
2435       // Range list with zero size has no effect.
2436       if (R.LowPC == R.HighPC)
2437         continue;
2438       auto *IS = cast<InputSection>(S);
2439       uint64_t Offset = IS->getOffsetInFile();
2440       Ret.push_back({IS, R.LowPC - Offset, R.HighPC - Offset, CuIdx});
2441     }
2442     ++CuIdx;
2443   }
2444 
2445   return Ret;
2446 }
2447 
2448 template <class ELFT>
2449 static std::vector<GdbIndexSection::NameAttrEntry>
readPubNamesAndTypes(const LLDDwarfObj<ELFT> & Obj,const std::vector<GdbIndexSection::CuEntry> & CUs)2450 readPubNamesAndTypes(const LLDDwarfObj<ELFT> &Obj,
2451                      const std::vector<GdbIndexSection::CuEntry> &CUs) {
2452   const DWARFSection &PubNames = Obj.getGnuPubNamesSection();
2453   const DWARFSection &PubTypes = Obj.getGnuPubTypesSection();
2454 
2455   std::vector<GdbIndexSection::NameAttrEntry> Ret;
2456   for (const DWARFSection *Pub : {&PubNames, &PubTypes}) {
2457     DWARFDebugPubTable Table(Obj, *Pub, Config->IsLE, true);
2458     for (const DWARFDebugPubTable::Set &Set : Table.getData()) {
2459       // The value written into the constant pool is Kind << 24 | CuIndex. As we
2460       // don't know how many compilation units precede this object to compute
2461       // CuIndex, we compute (Kind << 24 | CuIndexInThisObject) instead, and add
2462       // the number of preceding compilation units later.
2463       uint32_t I =
2464           lower_bound(CUs, Set.Offset,
2465                       [](GdbIndexSection::CuEntry CU, uint32_t Offset) {
2466                         return CU.CuOffset < Offset;
2467                       }) -
2468           CUs.begin();
2469       for (const DWARFDebugPubTable::Entry &Ent : Set.Entries)
2470         Ret.push_back({{Ent.Name, computeGdbHash(Ent.Name)},
2471                        (Ent.Descriptor.toBits() << 24) | I});
2472     }
2473   }
2474   return Ret;
2475 }
2476 
2477 // Create a list of symbols from a given list of symbol names and types
2478 // by uniquifying them by name.
2479 static std::vector<GdbIndexSection::GdbSymbol>
createSymbols(ArrayRef<std::vector<GdbIndexSection::NameAttrEntry>> NameAttrs,const std::vector<GdbIndexSection::GdbChunk> & Chunks)2480 createSymbols(ArrayRef<std::vector<GdbIndexSection::NameAttrEntry>> NameAttrs,
2481               const std::vector<GdbIndexSection::GdbChunk> &Chunks) {
2482   typedef GdbIndexSection::GdbSymbol GdbSymbol;
2483   typedef GdbIndexSection::NameAttrEntry NameAttrEntry;
2484 
2485   // For each chunk, compute the number of compilation units preceding it.
2486   uint32_t CuIdx = 0;
2487   std::vector<uint32_t> CuIdxs(Chunks.size());
2488   for (uint32_t I = 0, E = Chunks.size(); I != E; ++I) {
2489     CuIdxs[I] = CuIdx;
2490     CuIdx += Chunks[I].CompilationUnits.size();
2491   }
2492 
2493   // The number of symbols we will handle in this function is of the order
2494   // of millions for very large executables, so we use multi-threading to
2495   // speed it up.
2496   size_t NumShards = 32;
2497   size_t Concurrency = 1;
2498   if (ThreadsEnabled)
2499     Concurrency =
2500         std::min<size_t>(PowerOf2Floor(hardware_concurrency()), NumShards);
2501 
2502   // A sharded map to uniquify symbols by name.
2503   std::vector<DenseMap<CachedHashStringRef, size_t>> Map(NumShards);
2504   size_t Shift = 32 - countTrailingZeros(NumShards);
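  // With NumShards == 32, countTrailingZeros(32) == 5, so Shift == 27 and the
  // shard index computed below is simply the top 5 bits of the 32-bit name hash.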
2505 
2506   // Instantiate GdbSymbols while uniquifying them by name.
2507   std::vector<std::vector<GdbSymbol>> Symbols(NumShards);
2508   parallelForEachN(0, Concurrency, [&](size_t ThreadId) {
2509     uint32_t I = 0;
2510     for (ArrayRef<NameAttrEntry> Entries : NameAttrs) {
2511       for (const NameAttrEntry &Ent : Entries) {
2512         size_t ShardId = Ent.Name.hash() >> Shift;
2513         if ((ShardId & (Concurrency - 1)) != ThreadId)
2514           continue;
2515 
2516         uint32_t V = Ent.CuIndexAndAttrs + CuIdxs[I];
2517         size_t &Idx = Map[ShardId][Ent.Name];
2518         if (Idx) {
2519           Symbols[ShardId][Idx - 1].CuVector.push_back(V);
2520           continue;
2521         }
2522 
2523         Idx = Symbols[ShardId].size() + 1;
2524         Symbols[ShardId].push_back({Ent.Name, {V}, 0, 0});
2525       }
2526       ++I;
2527     }
2528   });
2529 
2530   size_t NumSymbols = 0;
2531   for (ArrayRef<GdbSymbol> V : Symbols)
2532     NumSymbols += V.size();
2533 
2534   // The return type is a flattened vector, so we'll copy each vector's
2535   // contents to Ret.
2536   std::vector<GdbSymbol> Ret;
2537   Ret.reserve(NumSymbols);
2538   for (std::vector<GdbSymbol> &Vec : Symbols)
2539     for (GdbSymbol &Sym : Vec)
2540       Ret.push_back(std::move(Sym));
2541 
2542   // CU vectors and symbol names are adjacent in the output file.
2543   // We can compute their offsets in the output file now.
2544   size_t Off = 0;
2545   for (GdbSymbol &Sym : Ret) {
2546     Sym.CuVectorOff = Off;
2547     Off += (Sym.CuVector.size() + 1) * 4;
2548   }
2549   for (GdbSymbol &Sym : Ret) {
2550     Sym.NameOff = Off;
2551     Off += Sym.Name.size() + 1;
2552   }
2553 
2554   return Ret;
2555 }
2556 
2557 // Returns a newly-created .gdb_index section.
create()2558 template <class ELFT> GdbIndexSection *GdbIndexSection::create() {
2559   std::vector<InputSection *> Sections = getDebugInfoSections();
2560 
2561   // .debug_gnu_pub{names,types} are useless in executables.
2562   // They are present in input object files solely for creating
2563   // a .gdb_index. So we can remove them from the output.
2564   for (InputSectionBase *S : InputSections)
2565     if (S->Name == ".debug_gnu_pubnames" || S->Name == ".debug_gnu_pubtypes")
2566       S->Live = false;
2567 
2568   std::vector<GdbChunk> Chunks(Sections.size());
2569   std::vector<std::vector<NameAttrEntry>> NameAttrs(Sections.size());
2570 
2571   parallelForEachN(0, Sections.size(), [&](size_t I) {
2572     ObjFile<ELFT> *File = Sections[I]->getFile<ELFT>();
2573     DWARFContext Dwarf(make_unique<LLDDwarfObj<ELFT>>(File));
2574 
2575     Chunks[I].Sec = Sections[I];
2576     Chunks[I].CompilationUnits = readCuList(Dwarf);
2577     Chunks[I].AddressAreas = readAddressAreas(Dwarf, Sections[I]);
2578     NameAttrs[I] = readPubNamesAndTypes<ELFT>(
2579         static_cast<const LLDDwarfObj<ELFT> &>(Dwarf.getDWARFObj()),
2580         Chunks[I].CompilationUnits);
2581   });
2582 
2583   auto *Ret = make<GdbIndexSection>();
2584   Ret->Chunks = std::move(Chunks);
2585   Ret->Symbols = createSymbols(NameAttrs, Ret->Chunks);
2586   Ret->initOutputSize();
2587   return Ret;
2588 }
2589 
writeTo(uint8_t * Buf)2590 void GdbIndexSection::writeTo(uint8_t *Buf) {
2591   // Write the header.
2592   auto *Hdr = reinterpret_cast<GdbIndexHeader *>(Buf);
2593   uint8_t *Start = Buf;
2594   Hdr->Version = 7;
2595   Buf += sizeof(*Hdr);
2596 
2597   // Write the CU list.
2598   Hdr->CuListOff = Buf - Start;
2599   for (GdbChunk &Chunk : Chunks) {
2600     for (CuEntry &Cu : Chunk.CompilationUnits) {
2601       write64le(Buf, Chunk.Sec->OutSecOff + Cu.CuOffset);
2602       write64le(Buf + 8, Cu.CuLength);
2603       Buf += 16;
2604     }
2605   }
2606 
2607   // Write the address area.
2608   Hdr->CuTypesOff = Buf - Start;
2609   Hdr->AddressAreaOff = Buf - Start;
2610   uint32_t CuOff = 0;
2611   for (GdbChunk &Chunk : Chunks) {
2612     for (AddressEntry &E : Chunk.AddressAreas) {
2613       uint64_t BaseAddr = E.Section->getVA(0);
2614       write64le(Buf, BaseAddr + E.LowAddress);
2615       write64le(Buf + 8, BaseAddr + E.HighAddress);
2616       write32le(Buf + 16, E.CuIndex + CuOff);
2617       Buf += 20;
2618     }
2619     CuOff += Chunk.CompilationUnits.size();
2620   }
2621 
2622   // Write the on-disk open-addressing hash table containing symbols.
2623   Hdr->SymtabOff = Buf - Start;
2624   size_t SymtabSize = computeSymtabSize();
2625   uint32_t Mask = SymtabSize - 1;
2626 
2627   for (GdbSymbol &Sym : Symbols) {
2628     uint32_t H = Sym.Name.hash();
2629     uint32_t I = H & Mask;
2630     uint32_t Step = ((H * 17) & Mask) | 1;
2631 
2632     while (read32le(Buf + I * 8))
2633       I = (I + Step) & Mask;
2634 
2635     write32le(Buf + I * 8, Sym.NameOff);
2636     write32le(Buf + I * 8 + 4, Sym.CuVectorOff);
2637   }
2638 
2639   Buf += SymtabSize * 8;
2640 
2641   // Write the string pool.
2642   Hdr->ConstantPoolOff = Buf - Start;
2643   parallelForEach(Symbols, [&](GdbSymbol &Sym) {
2644     memcpy(Buf + Sym.NameOff, Sym.Name.data(), Sym.Name.size());
2645   });
2646 
2647   // Write the CU vectors.
2648   for (GdbSymbol &Sym : Symbols) {
2649     write32le(Buf, Sym.CuVector.size());
2650     Buf += 4;
2651     for (uint32_t Val : Sym.CuVector) {
2652       write32le(Buf, Val);
2653       Buf += 4;
2654     }
2655   }
2656 }
2657 
empty() const2658 bool GdbIndexSection::empty() const { return Chunks.empty(); }
2659 
EhFrameHeader()2660 EhFrameHeader::EhFrameHeader()
2661     : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 4, ".eh_frame_hdr") {}
2662 
2663 // .eh_frame_hdr contains a binary search table of pointers to FDEs.
2664 // Each entry of the search table consists of two values,
2665 // the initial PC that the FDE covers, and the FDE's address.
2666 // It is sorted by PC.
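//
// For reference, the header written below is laid out as follows (derived from
// the writes in writeTo(); offsets are in bytes):
//   +0  version (1)
//   +1  eh_frame_ptr encoding   DW_EH_PE_pcrel   | DW_EH_PE_sdata4
//   +2  fde_count encoding      DW_EH_PE_udata4
//   +3  table encoding          DW_EH_PE_datarel | DW_EH_PE_sdata4
//   +4  eh_frame_ptr            .eh_frame address, relative to this field
//   +8  fde_count
//   +12 search table            (initial PC, FDE address) pairs, both relative
//                               to the start of .eh_frame_hdr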
writeTo(uint8_t * Buf)2667 void EhFrameHeader::writeTo(uint8_t *Buf) {
2668   typedef EhFrameSection::FdeData FdeData;
2669 
2670   std::vector<FdeData> Fdes = In.EhFrame->getFdeData();
2671 
2672   Buf[0] = 1;
2673   Buf[1] = DW_EH_PE_pcrel | DW_EH_PE_sdata4;
2674   Buf[2] = DW_EH_PE_udata4;
2675   Buf[3] = DW_EH_PE_datarel | DW_EH_PE_sdata4;
2676   write32(Buf + 4, In.EhFrame->getParent()->Addr - this->getVA() - 4);
2677   write32(Buf + 8, Fdes.size());
2678   Buf += 12;
2679 
2680   for (FdeData &Fde : Fdes) {
2681     write32(Buf, Fde.PcRel);
2682     write32(Buf + 4, Fde.FdeVARel);
2683     Buf += 8;
2684   }
2685 }
2686 
getSize() const2687 size_t EhFrameHeader::getSize() const {
2688   // .eh_frame_hdr has a 12-byte header followed by an 8-byte search table entry per FDE.
2689   return 12 + In.EhFrame->NumFdes * 8;
2690 }
2691 
empty() const2692 bool EhFrameHeader::empty() const { return In.EhFrame->empty(); }
2693 
VersionDefinitionSection()2694 VersionDefinitionSection::VersionDefinitionSection()
2695     : SyntheticSection(SHF_ALLOC, SHT_GNU_verdef, sizeof(uint32_t),
2696                        ".gnu.version_d") {}
2697 
getFileDefName()2698 static StringRef getFileDefName() {
2699   if (!Config->SoName.empty())
2700     return Config->SoName;
2701   return Config->OutputFile;
2702 }
2703 
finalizeContents()2704 void VersionDefinitionSection::finalizeContents() {
2705   FileDefNameOff = In.DynStrTab->addString(getFileDefName());
2706   for (VersionDefinition &V : Config->VersionDefinitions)
2707     V.NameOff = In.DynStrTab->addString(V.Name);
2708 
2709   if (OutputSection *Sec = In.DynStrTab->getParent())
2710     getParent()->Link = Sec->SectionIndex;
2711 
2712   // sh_info should be set to the number of definitions. This fact is missing
2713   // from the documentation, but was confirmed by the binutils community:
2714   // https://sourceware.org/ml/binutils/2014-11/msg00355.html
2715   getParent()->Info = getVerDefNum();
2716 }
2717 
void VersionDefinitionSection::writeOne(uint8_t *Buf, uint32_t Index,
                                        StringRef Name, size_t NameOff) {
  uint16_t Flags = Index == 1 ? VER_FLG_BASE : 0;

  // Write a verdef.
  write16(Buf, 1);                  // vd_version
  write16(Buf + 2, Flags);          // vd_flags
  write16(Buf + 4, Index);          // vd_ndx
  write16(Buf + 6, 1);              // vd_cnt
  write32(Buf + 8, hashSysV(Name)); // vd_hash
  write32(Buf + 12, 20);            // vd_aux
  write32(Buf + 16, 28);            // vd_next

  // Write a veraux.
  write32(Buf + 20, NameOff); // vda_name
  write32(Buf + 24, 0);       // vda_next
}

void VersionDefinitionSection::writeTo(uint8_t *Buf) {
  writeOne(Buf, 1, getFileDefName(), FileDefNameOff);

  for (VersionDefinition &V : Config->VersionDefinitions) {
    Buf += EntrySize;
    writeOne(Buf, V.Id, V.Name, V.NameOff);
  }

  // Need to terminate the last version definition.
  write32(Buf + 16, 0); // vd_next
}

size_t VersionDefinitionSection::getSize() const {
  return EntrySize * getVerDefNum();
}

// .gnu.version is a table where each entry is 2 bytes long.
template <class ELFT>
VersionTableSection<ELFT>::VersionTableSection()
    : SyntheticSection(SHF_ALLOC, SHT_GNU_versym, sizeof(uint16_t),
                       ".gnu.version") {
  this->Entsize = 2;
}

template <class ELFT> void VersionTableSection<ELFT>::finalizeContents() {
  // As of June 2016 the GNU docs do not mention that the sh_link field should
  // be set, but the Sun docs do. readelf also relies on this field.
  getParent()->Link = In.DynSymTab->getParent()->SectionIndex;
}

template <class ELFT> size_t VersionTableSection<ELFT>::getSize() const {
  return (In.DynSymTab->getSymbols().size() + 1) * 2;
}

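// Writes one version index per dynamic symbol. The initial two bytes
// correspond to the null symbol at index 0 and are skipped, leaving that
// entry as zero (VER_NDX_LOCAL).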
template <class ELFT> void VersionTableSection<ELFT>::writeTo(uint8_t *Buf) {
  Buf += 2;
  for (const SymbolTableEntry &S : In.DynSymTab->getSymbols()) {
    write16(Buf, S.Sym->VersionId);
    Buf += 2;
  }
}

template <class ELFT> bool VersionTableSection<ELFT>::empty() const {
  return !In.VerDef && InX<ELFT>::VerNeed->empty();
}

template <class ELFT>
VersionNeedSection<ELFT>::VersionNeedSection()
    : SyntheticSection(SHF_ALLOC, SHT_GNU_verneed, sizeof(uint32_t),
                       ".gnu.version_r") {
  // Identifiers in the verneed section start at 2 because 0 and 1 are reserved
  // for VER_NDX_LOCAL and VER_NDX_GLOBAL.
  // If a verdef section exists, it reserves the following identifiers, so we
  // start after those.
  NextIndex = getVerDefNum() + 1;
}

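// Records that SS, a symbol defined in a shared library, needs the version
// identified by its VerdefIndex. On first use this sets up the Elf_Verneed
// bookkeeping for the DSO and the Elf_Vernaux bookkeeping for the referenced
// Elf_Verdef, allocating a fresh version index that is then stored in
// SS->VersionId.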
template <class ELFT> void VersionNeedSection<ELFT>::addSymbol(Symbol *SS) {
  auto &File = cast<SharedFile<ELFT>>(*SS->File);
  if (SS->VerdefIndex == VER_NDX_GLOBAL) {
    SS->VersionId = VER_NDX_GLOBAL;
    return;
  }

  // If we don't already know that we need an Elf_Verneed for this DSO, prepare
  // to create one by adding it to our needed list and creating a dynstr entry
  // for the soname.
  if (File.VerdefMap.empty())
    Needed.push_back({&File, In.DynStrTab->addString(File.SoName)});
  const typename ELFT::Verdef *Ver = File.Verdefs[SS->VerdefIndex];
  typename SharedFile<ELFT>::NeededVer &NV = File.VerdefMap[Ver];

  // If we don't already know that we need an Elf_Vernaux for this Elf_Verdef,
  // prepare to create one by allocating a version identifier and creating a
  // dynstr entry for the version name.
  if (NV.Index == 0) {
    NV.StrTab = In.DynStrTab->addString(File.getStringTable().data() +
                                        Ver->getAux()->vda_name);
    NV.Index = NextIndex++;
  }
  SS->VersionId = NV.Index;
}

template <class ELFT> void VersionNeedSection<ELFT>::writeTo(uint8_t *Buf) {
  // The Elf_Verneeds need to appear first, followed by the Elf_Vernauxs.
  auto *Verneed = reinterpret_cast<Elf_Verneed *>(Buf);
  auto *Vernaux = reinterpret_cast<Elf_Vernaux *>(Verneed + Needed.size());

  for (std::pair<SharedFile<ELFT> *, size_t> &P : Needed) {
    // Create an Elf_Verneed for this DSO.
    Verneed->vn_version = 1;
    Verneed->vn_cnt = P.first->VerdefMap.size();
    Verneed->vn_file = P.second;
    Verneed->vn_aux =
        reinterpret_cast<char *>(Vernaux) - reinterpret_cast<char *>(Verneed);
    Verneed->vn_next = sizeof(Elf_Verneed);
    ++Verneed;

    // Create the Elf_Vernauxs for this Elf_Verneed. The loop iterates over
    // VerdefMap, which will only contain references to needed version
    // definitions. Each Elf_Vernaux is based on the information contained in
    // the Elf_Verdef in the source DSO. This loop iterates over a std::map of
    // pointers, but is deterministic because the pointers refer to Elf_Verdef
    // data structures within a single input file.
    for (auto &NV : P.first->VerdefMap) {
      Vernaux->vna_hash = NV.first->vd_hash;
      Vernaux->vna_flags = 0;
      Vernaux->vna_other = NV.second.Index;
      Vernaux->vna_name = NV.second.StrTab;
      Vernaux->vna_next = sizeof(Elf_Vernaux);
      ++Vernaux;
    }

    Vernaux[-1].vna_next = 0;
  }
  Verneed[-1].vn_next = 0;
}

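// For SHT_GNU_verneed, sh_link points at .dynstr and sh_info holds the number
// of Elf_Verneed records.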
template <class ELFT> void VersionNeedSection<ELFT>::finalizeContents() {
  if (OutputSection *Sec = In.DynStrTab->getParent())
    getParent()->Link = Sec->SectionIndex;
  getParent()->Info = Needed.size();
}

template <class ELFT> size_t VersionNeedSection<ELFT>::getSize() const {
  unsigned Size = Needed.size() * sizeof(Elf_Verneed);
  for (const std::pair<SharedFile<ELFT> *, size_t> &P : Needed)
    Size += P.first->VerdefMap.size() * sizeof(Elf_Vernaux);
  return Size;
}

template <class ELFT> bool VersionNeedSection<ELFT>::empty() const {
  return getNeedNum() == 0;
}

void MergeSyntheticSection::addSection(MergeInputSection *MS) {
  MS->Parent = this;
  Sections.push_back(MS);
}

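// MergeTailSection collects all pieces into a single StringTableBuilder; the
// tail-merge optimization itself (sharing storage between strings that are
// suffixes of other strings) happens when finalize() is called in
// finalizeContents() below.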
MergeTailSection::MergeTailSection(StringRef Name, uint32_t Type,
                                   uint64_t Flags, uint32_t Alignment)
    : MergeSyntheticSection(Name, Type, Flags, Alignment),
      Builder(StringTableBuilder::RAW, Alignment) {}

size_t MergeTailSection::getSize() const { return Builder.getSize(); }

void MergeTailSection::writeTo(uint8_t *Buf) { Builder.write(Buf); }

void MergeTailSection::finalizeContents() {
  // Add all string pieces to the string table builder to create section
  // contents.
  for (MergeInputSection *Sec : Sections)
    for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
      if (Sec->Pieces[I].Live)
        Builder.add(Sec->getData(I));

  // Fix the string table content. After this, the contents will never change.
  Builder.finalize();

  // finalize() fixed the tail-optimized strings, so we can now get the offset
  // of each string. Save it to the corresponding StringPiece for easy access.
  for (MergeInputSection *Sec : Sections)
    for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
      if (Sec->Pieces[I].Live)
        Sec->Pieces[I].OutputOff = Builder.getOffset(Sec->getData(I));
}

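// Each shard was finalized independently, so the section contents are simply
// the shards written at the in-section offsets computed in finalizeContents().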
void MergeNoTailSection::writeTo(uint8_t *Buf) {
  for (size_t I = 0; I < NumShards; ++I)
    Shards[I].write(Buf + ShardOffsets[I]);
}

// This function is very hot (i.e. it can take several seconds to finish)
// because the number of inputs can be on the order of millions. So we use
// multi-threading.
//
// For any strings S and T, we know S is not mergeable with T if S's hash
// value is different from T's. If that's the case, we can safely put S and
// T into different string builders without worrying about merge misses.
// We do it in parallel.
void MergeNoTailSection::finalizeContents() {
  // Initialize string table builders.
  for (size_t I = 0; I < NumShards; ++I)
    Shards.emplace_back(StringTableBuilder::RAW, Alignment);

  // Concurrency level. Must be a power of 2 to avoid expensive modulo
  // operations in the following tight loop.
  size_t Concurrency = 1;
  if (ThreadsEnabled)
    Concurrency =
        std::min<size_t>(PowerOf2Floor(hardware_concurrency()), NumShards);

  // Add section pieces to the builders.
  parallelForEachN(0, Concurrency, [&](size_t ThreadId) {
    for (MergeInputSection *Sec : Sections) {
      for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I) {
        size_t ShardId = getShardId(Sec->Pieces[I].Hash);
        if ((ShardId & (Concurrency - 1)) == ThreadId && Sec->Pieces[I].Live)
          Sec->Pieces[I].OutputOff = Shards[ShardId].add(Sec->getData(I));
      }
    }
  });

  // Compute an in-section offset for each shard.
  size_t Off = 0;
  for (size_t I = 0; I < NumShards; ++I) {
    Shards[I].finalizeInOrder();
    if (Shards[I].getSize() > 0)
      Off = alignTo(Off, Alignment);
    ShardOffsets[I] = Off;
    Off += Shards[I].getSize();
  }
  Size = Off;

  // So far, section pieces have offsets from the beginning of their shards,
  // but we want offsets from the beginning of the whole section. Fix them.
  parallelForEach(Sections, [&](MergeInputSection *Sec) {
    for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
      if (Sec->Pieces[I].Live)
        Sec->Pieces[I].OutputOff +=
            ShardOffsets[getShardId(Sec->Pieces[I].Hash)];
  });
}

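// Chooses a merge strategy: the more expensive tail-merging builder is used
// only for string sections when optimizing at -O2 or higher; everything else
// gets the cheaper no-tail-merge variant.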
static MergeSyntheticSection *createMergeSynthetic(StringRef Name,
                                                   uint32_t Type,
                                                   uint64_t Flags,
                                                   uint32_t Alignment) {
  bool ShouldTailMerge = (Flags & SHF_STRINGS) && Config->Optimize >= 2;
  if (ShouldTailMerge)
    return make<MergeTailSection>(Name, Type, Flags, Alignment);
  return make<MergeNoTailSection>(Name, Type, Flags, Alignment);
}

template <class ELFT> void elf::splitSections() {
  // splitIntoPieces needs to be called on each MergeInputSection
  // before calling finalizeContents().
  parallelForEach(InputSections, [](InputSectionBase *Sec) {
    if (auto *S = dyn_cast<MergeInputSection>(Sec))
      S->splitIntoPieces();
    else if (auto *Eh = dyn_cast<EhInputSection>(Sec))
      Eh->split<ELFT>();
  });
}

// This function scans over the input sections to create mergeable
// synthetic sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
void elf::mergeSections() {
  std::vector<MergeSyntheticSection *> MergeSections;
  for (InputSectionBase *&S : InputSections) {
    MergeInputSection *MS = dyn_cast<MergeInputSection>(S);
    if (!MS)
      continue;

    // We do not want to handle sections that are not alive, so just remove
    // them instead of trying to merge.
    if (!MS->Live) {
      S = nullptr;
      continue;
    }

    StringRef OutsecName = getOutputSectionName(MS);
    uint32_t Alignment = std::max<uint32_t>(MS->Alignment, MS->Entsize);

    auto I = llvm::find_if(MergeSections, [=](MergeSyntheticSection *Sec) {
      // While we could create a single synthetic section for two different
      // values of Entsize, it is better to take Entsize into consideration.
      //
      // With a single synthetic section no two pieces with different Entsize
      // could be equal, so we may as well have two sections.
      //
      // Using Entsize here also allows us to propagate it to the synthetic
      // section.
      return Sec->Name == OutsecName && Sec->Flags == MS->Flags &&
             Sec->Entsize == MS->Entsize && Sec->Alignment == Alignment;
    });
    if (I == MergeSections.end()) {
      MergeSyntheticSection *Syn =
          createMergeSynthetic(OutsecName, MS->Type, MS->Flags, Alignment);
      MergeSections.push_back(Syn);
      I = std::prev(MergeSections.end());
      S = Syn;
      Syn->Entsize = MS->Entsize;
    } else {
      S = nullptr;
    }
    (*I)->addSection(MS);
  }
  for (auto *MS : MergeSections)
    MS->finalizeContents();

  std::vector<InputSectionBase *> &V = InputSections;
  V.erase(std::remove(V.begin(), V.end(), nullptr), V.end());
}

MipsRldMapSection::MipsRldMapSection()
    : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, Config->Wordsize,
                       ".rld_map") {}

ARMExidxSentinelSection::ARMExidxSentinelSection()
    : SyntheticSection(SHF_ALLOC | SHF_LINK_ORDER, SHT_ARM_EXIDX,
                       Config->Wordsize, ".ARM.exidx") {}

// Write a terminating sentinel entry to the end of the .ARM.exidx table.
// This section will have been sorted last in the .ARM.exidx table.
// This table entry will have the form:
// | PREL31 upper bound of code that has exception tables | EXIDX_CANTUNWIND |
// The sentinel must have the PREL31 value of an address higher than any
// address described by any other table entry.
void ARMExidxSentinelSection::writeTo(uint8_t *Buf) {
  assert(Highest);
  uint64_t S = Highest->getVA(Highest->getSize());
  uint64_t P = getVA();
  Target->relocateOne(Buf, R_ARM_PREL31, S - P);
  write32le(Buf + 4, 1);
}

// The sentinel has to be removed if there are no other .ARM.exidx entries.
bool ARMExidxSentinelSection::empty() const {
  for (InputSection *IS : getInputSections(getParent()))
    if (!isa<ARMExidxSentinelSection>(IS))
      return false;
  return true;
}

bool ARMExidxSentinelSection::classof(const SectionBase *D) {
  return D->kind() == InputSectionBase::Synthetic && D->Type == SHT_ARM_EXIDX;
}

ThunkSection::ThunkSection(OutputSection *OS, uint64_t Off)
    : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS,
                       Config->Wordsize, ".text.thunk") {
  this->Parent = OS;
  this->OutSecOff = Off;
}

void ThunkSection::addThunk(Thunk *T) {
  Thunks.push_back(T);
  T->addSymbols(*this);
}

void ThunkSection::writeTo(uint8_t *Buf) {
  for (Thunk *T : Thunks)
    T->writeTo(Buf + T->Offset);
}

InputSection *ThunkSection::getTargetInputSection() const {
  if (Thunks.empty())
    return nullptr;
  const Thunk *T = Thunks.front();
  return T->getTargetInputSection();
}

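// Lays out the thunks sequentially, honoring each thunk's alignment, and
// updates the section size. Returns true if the size changed since the last
// call, so the caller knows whether another pass is needed.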
bool ThunkSection::assignOffsets() {
  uint64_t Off = 0;
  for (Thunk *T : Thunks) {
    Off = alignTo(Off, T->Alignment);
    T->setOffset(Off);
    uint32_t Size = T->size();
    T->getThunkTargetSym()->Size = Size;
    Off += Size;
  }
  bool Changed = Off != Size;
  Size = Off;
  return Changed;
}

// If linking position-dependent code, the table will store the addresses
// directly in the binary, so the section has type SHT_PROGBITS. If linking
// position-independent code, the section has type SHT_NOBITS since it will be
// allocated and filled in by the dynamic linker.
PPC64LongBranchTargetSection::PPC64LongBranchTargetSection()
    : SyntheticSection(SHF_ALLOC | SHF_WRITE,
                       Config->Pic ? SHT_NOBITS : SHT_PROGBITS, 8,
                       ".branch_lt") {}

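// Registers Sym as needing an entry in .branch_lt and remembers the entry's
// index in Sym.PPC64BranchltIndex.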
void PPC64LongBranchTargetSection::addEntry(Symbol &Sym) {
  assert(Sym.PPC64BranchltIndex == 0xffff);
  Sym.PPC64BranchltIndex = Entries.size();
  Entries.push_back(&Sym);
}

size_t PPC64LongBranchTargetSection::getSize() const {
  return Entries.size() * 8;
}

void PPC64LongBranchTargetSection::writeTo(uint8_t *Buf) {
  assert(Target->GotPltEntrySize == 8);
  // If linking non-pic, we have the final addresses of the targets, and they
  // get written to the table directly. For pic, the dynamic linker will
  // allocate the section and fill it in.
  if (Config->Pic)
    return;

  for (const Symbol *Sym : Entries) {
    assert(Sym->getVA());
    // Need calls to branch to the local entry-point since a long-branch
    // must be a local-call.
    write64(Buf,
            Sym->getVA() + getPPC64GlobalEntryToLocalEntryOffset(Sym->StOther));
    Buf += Target->GotPltEntrySize;
  }
}

bool PPC64LongBranchTargetSection::empty() const {
  // `removeUnusedSyntheticSections()` is called before thunk allocation, which
  // is too early to determine whether this section will be empty or not. We
  // need Finalized to keep the section alive until after thunk creation.
  // Finalized only gets set to true once `finalizeSections()` is called after
  // thunk creation. Because of this, if we don't create any long-branch thunks
  // we end up with an empty .branch_lt section in the binary.
  return Finalized && Entries.empty();
}

InStruct elf::In;

template GdbIndexSection *GdbIndexSection::create<ELF32LE>();
template GdbIndexSection *GdbIndexSection::create<ELF32BE>();
template GdbIndexSection *GdbIndexSection::create<ELF64LE>();
template GdbIndexSection *GdbIndexSection::create<ELF64BE>();

template void elf::splitSections<ELF32LE>();
template void elf::splitSections<ELF32BE>();
template void elf::splitSections<ELF64LE>();
template void elf::splitSections<ELF64BE>();

template void EhFrameSection::addSection<ELF32LE>(InputSectionBase *);
template void EhFrameSection::addSection<ELF32BE>(InputSectionBase *);
template void EhFrameSection::addSection<ELF64LE>(InputSectionBase *);
template void EhFrameSection::addSection<ELF64BE>(InputSectionBase *);

template void PltSection::addEntry<ELF32LE>(Symbol &Sym);
template void PltSection::addEntry<ELF32BE>(Symbol &Sym);
template void PltSection::addEntry<ELF64LE>(Symbol &Sym);
template void PltSection::addEntry<ELF64BE>(Symbol &Sym);

template void MipsGotSection::build<ELF32LE>();
template void MipsGotSection::build<ELF32BE>();
template void MipsGotSection::build<ELF64LE>();
template void MipsGotSection::build<ELF64BE>();

template class elf::MipsAbiFlagsSection<ELF32LE>;
template class elf::MipsAbiFlagsSection<ELF32BE>;
template class elf::MipsAbiFlagsSection<ELF64LE>;
template class elf::MipsAbiFlagsSection<ELF64BE>;

template class elf::MipsOptionsSection<ELF32LE>;
template class elf::MipsOptionsSection<ELF32BE>;
template class elf::MipsOptionsSection<ELF64LE>;
template class elf::MipsOptionsSection<ELF64BE>;

template class elf::MipsReginfoSection<ELF32LE>;
template class elf::MipsReginfoSection<ELF32BE>;
template class elf::MipsReginfoSection<ELF64LE>;
template class elf::MipsReginfoSection<ELF64BE>;

template class elf::DynamicSection<ELF32LE>;
template class elf::DynamicSection<ELF32BE>;
template class elf::DynamicSection<ELF64LE>;
template class elf::DynamicSection<ELF64BE>;

template class elf::RelocationSection<ELF32LE>;
template class elf::RelocationSection<ELF32BE>;
template class elf::RelocationSection<ELF64LE>;
template class elf::RelocationSection<ELF64BE>;

template class elf::AndroidPackedRelocationSection<ELF32LE>;
template class elf::AndroidPackedRelocationSection<ELF32BE>;
template class elf::AndroidPackedRelocationSection<ELF64LE>;
template class elf::AndroidPackedRelocationSection<ELF64BE>;

template class elf::RelrSection<ELF32LE>;
template class elf::RelrSection<ELF32BE>;
template class elf::RelrSection<ELF64LE>;
template class elf::RelrSection<ELF64BE>;

template class elf::SymbolTableSection<ELF32LE>;
template class elf::SymbolTableSection<ELF32BE>;
template class elf::SymbolTableSection<ELF64LE>;
template class elf::SymbolTableSection<ELF64BE>;

template class elf::VersionTableSection<ELF32LE>;
template class elf::VersionTableSection<ELF32BE>;
template class elf::VersionTableSection<ELF64LE>;
template class elf::VersionTableSection<ELF64BE>;

template class elf::VersionNeedSection<ELF32LE>;
template class elf::VersionNeedSection<ELF32BE>;
template class elf::VersionNeedSection<ELF64LE>;
template class elf::VersionNeedSection<ELF64BE>;