1 //===- OutputSections.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "OutputSections.h"
10 #include "Config.h"
11 #include "InputFiles.h"
12 #include "LinkerScript.h"
13 #include "Symbols.h"
14 #include "SyntheticSections.h"
15 #include "Target.h"
16 #include "lld/Common/Arrays.h"
17 #include "lld/Common/Memory.h"
18 #include "llvm/BinaryFormat/Dwarf.h"
19 #include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB
20 #include "llvm/Support/Compression.h"
21 #include "llvm/Support/Parallel.h"
22 #include "llvm/Support/Path.h"
23 #include "llvm/Support/TimeProfiler.h"
24 #if LLVM_ENABLE_ZLIB
25 #include <zlib.h>
26 #endif
27 #if LLVM_ENABLE_ZSTD
28 #include <zstd.h>
29 #endif
30 
31 using namespace llvm;
32 using namespace llvm::dwarf;
33 using namespace llvm::object;
34 using namespace llvm::support::endian;
35 using namespace llvm::ELF;
36 using namespace lld;
37 using namespace lld::elf;
38 
// Out-of-line definitions for the static members of Out (declared in
// OutputSections.h). These are assigned elsewhere during the link.
uint8_t *Out::bufferStart;
PhdrEntry *Out::tlsPhdr;
OutputSection *Out::elfHeader;
OutputSection *Out::programHeaders;
OutputSection *Out::preinitArray;
OutputSection *Out::initArray;
OutputSection *Out::finiArray;

// The global list of output sections (see OutputSections.h).
SmallVector<OutputSection *, 0> elf::outputSections;
48 
49 uint32_t OutputSection::getPhdrFlags() const {
50   uint32_t ret = 0;
51   if (config->emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
52     ret |= PF_R;
53   if (flags & SHF_WRITE)
54     ret |= PF_W;
55   if (flags & SHF_EXECINSTR)
56     ret |= PF_X;
57   return ret;
58 }
59 
60 template <class ELFT>
61 void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
62   shdr->sh_entsize = entsize;
63   shdr->sh_addralign = addralign;
64   shdr->sh_type = type;
65   shdr->sh_offset = offset;
66   shdr->sh_flags = flags;
67   shdr->sh_info = info;
68   shdr->sh_link = link;
69   shdr->sh_addr = addr;
70   shdr->sh_size = size;
71   shdr->sh_name = shName;
72 }
73 
// A fresh output section starts with no entry size and byte alignment; the
// real values are merged in from input sections by commitSection().
OutputSection::OutputSection(StringRef name, uint32_t type, uint64_t flags)
    : SectionBase(Output, name, flags, /*Entsize*/ 0, /*Alignment*/ 1, type,
                  /*Info*/ 0, /*Link*/ 0) {}
77 
// We allow sections of types listed below to be merged into a
79 // single progbits section. This is typically done by linker
80 // scripts. Merging nobits and progbits will force disk space
81 // to be allocated for nobits sections. Other ones don't require
82 // any special treatment on top of progbits, so there doesn't
83 // seem to be a harm in merging them.
84 //
85 // NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
86 // them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
87 static bool canMergeToProgbits(unsigned type) {
88   return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
89          type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
90          type == SHT_NOTE ||
91          (type == SHT_X86_64_UNWIND && config->emachine == EM_X86_64);
92 }
93 
94 // Record that isec will be placed in the OutputSection. isec does not become
95 // permanent until finalizeInputSections() is called. The function should not be
96 // used after finalizeInputSections() is called. If you need to add an
97 // InputSection post finalizeInputSections(), then you must do the following:
98 //
99 // 1. Find or create an InputSectionDescription to hold InputSection.
100 // 2. Add the InputSection to the InputSectionDescription::sections.
101 // 3. Call commitSection(isec).
102 void OutputSection::recordSection(InputSectionBase *isec) {
103   partition = isec->partition;
104   isec->parent = this;
105   if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
106     commands.push_back(make<InputSectionDescription>(""));
107   auto *isd = cast<InputSectionDescription>(commands.back());
108   isd->sectionBases.push_back(isec);
109 }
110 
111 // Update fields (type, flags, alignment, etc) according to the InputSection
112 // isec. Also check whether the InputSection flags and type are consistent with
113 // other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (LLVM_UNLIKELY(type != isec->type)) {
    if (hasInputSections || typeIsSet) {
      if (typeIsSet || !canMergeToProgbits(type) ||
          !canMergeToProgbits(isec->type)) {
        // Changing the type of a (NOLOAD) section is fishy, but some projects
        // (e.g. https://github.com/ClangBuiltLinux/linux/issues/1597)
        // traditionally rely on the behavior. Issue a warning to not break
        // them. Other types get an error.
        auto diagnose = type == SHT_NOBITS ? warn : errorOrWarn;
        diagnose("section type mismatch for " + isec->name + "\n>>> " +
                 toString(isec) + ": " +
                 getELFSectionTypeName(config->emachine, isec->type) +
                 "\n>>> output section " + name + ": " +
                 getELFSectionTypeName(config->emachine, type));
      }
      // Mergeable type mismatches collapse to SHT_PROGBITS unless a linker
      // script pinned the type explicitly.
      if (!typeIsSet)
        type = SHT_PROGBITS;
    } else {
      // First section seen and no explicit type: adopt the input's type.
      type = isec->type;
    }
  }
  if (!hasInputSections) {
    // If IS is the first section to be added to this section,
    // initialize type, entsize and flags from isec.
    hasInputSections = true;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if new type or flags are compatible with existing ones.
    // A mismatch in SHF_TLS between members is reported as an error.
    if ((flags ^ isec->flags) & SHF_TLS)
      error("incompatible section flags for " + name + "\n>>> " +
            toString(isec) + ": 0x" + utohexstr(isec->flags) +
            "\n>>> output section " + name + ": 0x" + utohexstr(flags));
  }

  isec->parent = this;
  // Merge flags. On ARM, SHF_ARM_PURECODE survives only if *every* member has
  // it (AND semantics); all other flags accumulate (OR semantics).
  uint64_t andMask =
      config->emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  // The output alignment is the maximum of all member alignments.
  addralign = std::max(addralign, isec->addralign);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different size we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}
168 
169 static MergeSyntheticSection *createMergeSynthetic(StringRef name,
170                                                    uint32_t type,
171                                                    uint64_t flags,
172                                                    uint32_t addralign) {
173   if ((flags & SHF_STRINGS) && config->optimize >= 2)
174     return make<MergeTailSection>(name, type, flags, addralign);
175   return make<MergeNoTailSection>(name, type, flags, addralign);
176 }
177 
178 // This function scans over the InputSectionBase list sectionBases to create
179 // InputSectionDescription::sections.
180 //
181 // It removes MergeInputSections from the input section array and adds
182 // new synthetic sections at the location of the first input section
183 // that it replaces. It then finalizes each synthetic section in order
184 // to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections() {
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        // Non-mergeable input sections are carried over unchanged.
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize in here also allows us to propagate it to the synthetic
        // section.
        //
        // SHF_STRINGS section with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        // No compatible synthetic section yet: create one at the position of
        // the first merge section it replaces.
        MergeSyntheticSection *syn =
            createMergeSynthetic(name, ms->type, ms->flags, ms->addralign);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  // Lay out the deduplicated pieces to assign each an output offset.
  for (auto *ms : mergeSections)
    ms->finalizeContents();
}
240 
241 static void sortByOrder(MutableArrayRef<InputSection *> in,
242                         llvm::function_ref<int(InputSectionBase *s)> order) {
243   std::vector<std::pair<int, InputSection *>> v;
244   for (InputSection *s : in)
245     v.push_back({order(s), s});
246   llvm::stable_sort(v, less_first());
247 
248   for (size_t i = 0; i < v.size(); ++i)
249     in[i] = v[i].second;
250 }
251 
252 uint64_t elf::getHeaderSize() {
253   if (config->oFormatBinary)
254     return 0;
255   return Out::elfHeader->size + Out::programHeaders->size;
256 }
257 
258 void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
259   assert(isLive());
260   for (SectionCommand *b : commands)
261     if (auto *isd = dyn_cast<InputSectionDescription>(b))
262       sortByOrder(isd->sections, order);
263 }
264 
265 static void nopInstrFill(uint8_t *buf, size_t size) {
266   if (size == 0)
267     return;
268   unsigned i = 0;
269   if (size == 0)
270     return;
271   std::vector<std::vector<uint8_t>> nopFiller = *target->nopInstrs;
272   unsigned num = size / nopFiller.back().size();
273   for (unsigned c = 0; c < num; ++c) {
274     memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
275     i += nopFiller.back().size();
276   }
277   unsigned remaining = size - i;
278   if (!remaining)
279     return;
280   assert(nopFiller[remaining - 1].size() == remaining);
281   memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
282 }
283 
284 // Fill [Buf, Buf + Size) with Filler.
285 // This is used for linker script "=fillexp" command.
// Fill [buf, buf + size) by repeating the 4-byte filler pattern, truncating
// the final (possibly partial) copy. Used for the linker script "=fillexp"
// command.
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t pos = 0;
  while (pos + 4 < size) {
    memcpy(buf + pos, filler.data(), 4);
    pos += 4;
  }
  memcpy(buf + pos, filler.data(), size - pos);
}
293 
294 #if LLVM_ENABLE_ZLIB
// Compress `in` as a raw DEFLATE stream at the given level. `flush` is
// Z_SYNC_FLUSH for all shards but the last (so the shard ends on a byte
// boundary and can be concatenated with the next) and Z_FINISH for the final
// shard.
static SmallVector<uint8_t, 0> deflateShard(ArrayRef<uint8_t> in, int level,
                                            int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  // avail_out != 0 means deflate had room left, i.e. all input was consumed.
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
323 #endif
324 
325 // Compress section contents if this section contains debug info.
template <class ELFT> void OutputSection::maybeCompress() {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  // Compress only DWARF debug sections.
  if (config->compressDebugSections == DebugCompressionType::None ||
      (flags & SHF_ALLOC) || !name.startswith(".debug_") || size == 0)
    return;

  llvm::TimeTraceScope timeScope("Compress debug sections");
  compressed.uncompressedSize = size;
  auto buf = std::make_unique<uint8_t[]>(size);
  // Write uncompressed data to a temporary zero-initialized buffer.
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(buf.get(), tg);
  }

#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API which permits parallel workers working
  // on the stream. See http://facebook.github.io/zstd/zstd_manual.html
  // "Streaming compression - HowTo".
  if (config->compressDebugSections == DebugCompressionType::Zstd) {
    // Allocate a buffer of half of the input size, and grow it by 1.5x if
    // insufficient.
    compressed.shards = std::make_unique<SmallVector<uint8_t, 0>[]>(1);
    SmallVector<uint8_t, 0> &out = compressed.shards[0];
    out.resize_for_overwrite(std::max<size_t>(size / 2, 32));
    size_t pos = 0;

    ZSTD_CCtx *cctx = ZSTD_createCCtx();
    // Ignore error if zstd was not built with ZSTD_MULTITHREAD.
    (void)ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers,
                                 parallel::strategy.compute_thread_count());
    ZSTD_outBuffer zob = {out.data(), out.size(), 0};
    ZSTD_EndDirective directive = ZSTD_e_continue;
    const size_t blockSize = ZSTD_CStreamInSize();
    do {
      // Feed input in blockSize chunks; switch to ZSTD_e_end on the final
      // chunk so the frame is terminated.
      const size_t n = std::min(static_cast<size_t>(size - pos), blockSize);
      if (n == size - pos)
        directive = ZSTD_e_end;
      ZSTD_inBuffer zib = {buf.get() + pos, n, 0};
      size_t bytesRemaining = 0;
      while (zib.pos != zib.size ||
             (directive == ZSTD_e_end && bytesRemaining != 0)) {
        if (zob.pos == zob.size) {
          // Output buffer is full: grow by 1.5x and re-point the out buffer.
          out.resize_for_overwrite(out.size() * 3 / 2);
          zob.dst = out.data();
          zob.size = out.size();
        }
        bytesRemaining = ZSTD_compressStream2(cctx, &zob, &zib, directive);
        assert(!ZSTD_isError(bytesRemaining));
      }
      pos += n;
    } while (directive != ZSTD_e_end);
    out.resize(zob.pos);
    ZSTD_freeCCtx(cctx);

    // Final contents: the compression header followed by the zstd frame.
    size = sizeof(Elf_Chdr) + out.size();
    flags |= SHF_COMPRESSED;
    return;
  }
#endif

#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // the fastest. If -O2 is given, we use level 6 to compress debug info more by
  // ~15%. We found that level 7 to 9 doesn't make much difference (~1% more
  // compression) while they take significant amount of time (~2x), so level 6
  // seems enough.
  const int level = config->optimize >= 2 ? 6 : Z_BEST_SPEED;

  // Split input into 1-MiB shards.
  constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();

  // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
  // shards but the last to flush the output to a byte boundary to be
  // concatenated with the next shard.
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);
  auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
  parallelFor(0, numShards, [&](size_t i) {
    shardsOut[i] = deflateShard(shardsIn[i], level,
                                i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
    shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
  });

  // Update section size and combine Adler-32 checksums.
  uint32_t checksum = 1;       // Initial Adler-32 value
  size = sizeof(Elf_Chdr) + 2; // Elf_Chdr and zlib header
  for (size_t i = 0; i != numShards; ++i) {
    size += shardsOut[i].size();
    checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
  }
  size += 4; // checksum

  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  compressed.checksum = checksum;
  flags |= SHF_COMPRESSED;
#endif
}
429 
430 static void writeInt(uint8_t *buf, uint64_t data, uint64_t size) {
431   if (size == 1)
432     *buf = data;
433   else if (size == 2)
434     write16(buf, data);
435   else if (size == 4)
436     write32(buf, data);
437   else if (size == 8)
438     write64(buf, data);
439   else
440     llvm_unreachable("unsupported Size argument");
441 }
442 
// Write this section's contents into `buf`. Pre-compressed debug sections are
// emitted from their stored shards; otherwise each input section is written
// at its assigned offset, with filler bytes in the gaps between them.
template <class ELFT>
void OutputSection::writeTo(uint8_t *buf, parallel::TaskGroup &tg) {
  llvm::TimeTraceScope timeScope("Write sections", name);
  if (type == SHT_NOBITS)
    return;

  // If --compress-debug-section is specified and if this is a debug section,
  // we've already compressed section contents. If that's the case,
  // just write it down.
  if (compressed.shards) {
    auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
    chdr->ch_size = compressed.uncompressedSize;
    chdr->ch_addralign = addralign;
    buf += sizeof(*chdr);
    if (config->compressDebugSections == DebugCompressionType::Zstd) {
      // zstd output is a single shard.
      chdr->ch_type = ELFCOMPRESS_ZSTD;
      memcpy(buf, compressed.shards[0].data(), compressed.shards[0].size());
      return;
    }
    chdr->ch_type = ELFCOMPRESS_ZLIB;

    // Compute shard offsets.
    auto offsets = std::make_unique<size_t[]>(compressed.numShards);
    offsets[0] = 2; // zlib header
    for (size_t i = 1; i != compressed.numShards; ++i)
      offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();

    buf[0] = 0x78; // CMF
    buf[1] = 0x01; // FLG: best speed
    // Shards are byte-aligned after the sync flush, so they can be copied to
    // their precomputed offsets in parallel.
    parallelFor(0, compressed.numShards, [&](size_t i) {
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });

    // Trailing Adler-32 checksum (combined per-shard in maybeCompress()).
    write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
    return;
  }

  // Write leading padding.
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  std::array<uint8_t, 4> filler = getFiller();
  bool nonZeroFiller = read32(filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);

  // Write the input sections in [begin, end) plus the gap after each one.
  auto fn = [=](size_t begin, size_t end) {
    size_t numSections = sections.size();
    for (size_t i = begin; i != end; ++i) {
      InputSection *isec = sections[i];
      if (auto *s = dyn_cast<SyntheticSection>(isec))
        s->writeTo(buf + isec->outSecOff);
      else
        isec->writeTo<ELFT>(buf + isec->outSecOff);

      // Fill gaps between sections.
      if (nonZeroFiller) {
        uint8_t *start = buf + isec->outSecOff + isec->getSize();
        uint8_t *end;
        if (i + 1 == numSections)
          end = buf + size;
        else
          end = buf + sections[i + 1]->outSecOff;
        if (isec->nopFiller) {
          assert(target->nopInstrs);
          nopInstrFill(start, end - start);
        } else
          fill(start, end - start, filler);
      }
    }
  };

  // If there is any BYTE()-family command (rare), write the section content
  // first then process BYTE to overwrite the filler content. The write is
  // serial due to the limitation of llvm/Support/Parallel.h.
  bool written = false;
  size_t numSections = sections.size();
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd)) {
      if (!std::exchange(written, true))
        fn(0, numSections);
      writeInt(buf + data->offset, data->expression().getValue(), data->size);
    }
  if (written || !numSections)
    return;

  // There is no data command. Write content asynchronously to overlap the write
  // time with other output sections. Note, if a linker script specifies
  // overlapping output sections (needs --noinhibit-exec or --no-check-sections
  // to suppress the error), the output may be non-deterministic.
  const size_t taskSizeLimit = 4 << 20;
  for (size_t begin = 0, i = 0, taskSize = 0;;) {
    taskSize += sections[i]->getSize();
    bool done = ++i == numSections;
    if (done || taskSize >= taskSizeLimit) {
      tg.execute([=] { fn(begin, i); });
      if (done)
        break;
      begin = i;
      taskSize = 0;
    }
  }
}
545 
// Fill in sh_link, sh_info and the final size of a SHT_GROUP output section.
// `section` is the first input group section, or null if none survived.
static void finalizeShtGroup(OutputSection *os, InputSection *section) {
  // sh_link field for SHT_GROUP sections should contain the section index of
  // the symbol table.
  os->link = in.symTab->getParent()->sectionIndex;

  // Without a surviving member there is nothing else to update.
  if (!section)
    return;

  // sh_info then contains the index of an entry in the symbol table section
  // which provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = in.symTab->getSymbolIndex(symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  // slice(1) skips the leading flags word; only distinct output sections of
  // the surviving members are counted.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(&idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}
568 
// Fill in sh_link/sh_info (and related flags) now that section indices of the
// output sections are known.
void OutputSection::finalize() {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field. We
    // need to translate the InputSection sh_link to the OutputSection sh_link,
    // all InputSections in the OutputSection have the same dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(this, first);
    return;
  }

  // The rest applies only to relocation sections kept by --emit-relocs.
  if (!config->copyRelocs || (type != SHT_RELA && type != SHT_REL))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first' so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the empty
  // synthetic .rela.plt and first can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
}
606 
607 // Returns true if S is in one of the many forms the compiler driver may pass
608 // crtbegin files.
609 //
610 // Gcc uses any of crtbegin[<empty>|S|T].o.
611 // Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
612 
613 static bool isCrt(StringRef s, StringRef beginEnd) {
614   s = sys::path::filename(s);
615   if (!s.consume_back(".o"))
616     return false;
617   if (s.consume_front("clang_rt."))
618     return s.consume_front(beginEnd);
619   return s.consume_front(beginEnd) && s.size() <= 1;
620 }
621 
622 // .ctors and .dtors are sorted by this order:
623 //
624 // 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
625 // 2. The section is named ".ctors" or ".dtors" (priority: 65536).
626 // 3. The section has an optional priority value in the form of ".ctors.N" or
627 //    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
628 // 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
629 //
630 // For 2 and 3, the sections are sorted by priority from high to low, e.g.
631 // .ctors (65536), .ctors.00100 (65436), .ctors.00200 (65336).  In GNU ld's
632 // internal linker scripts, the sorting is by string comparison which can
633 // achieve the same goal given the optional priority values are of the same
634 // length.
635 //
636 // In an ideal world, we don't need this function because .init_array and
637 // .ctors are duplicate features (and .init_array is newer.) However, there
638 // are too many real-world use cases of .ctors, so we had no choice to
639 // support that with this rather ad-hoc semantics.
640 static bool compCtors(const InputSection *a, const InputSection *b) {
641   bool beginA = isCrt(a->file->getName(), "crtbegin");
642   bool beginB = isCrt(b->file->getName(), "crtbegin");
643   if (beginA != beginB)
644     return beginA;
645   bool endA = isCrt(a->file->getName(), "crtend");
646   bool endB = isCrt(b->file->getName(), "crtend");
647   if (endA != endB)
648     return endB;
649   return getPriority(a->name) > getPriority(b->name);
650 }
651 
652 // Sorts input sections by the special rules for .ctors and .dtors.
653 // Unfortunately, the rules are different from the one for .{init,fini}_array.
654 // Read the comment above.
655 void OutputSection::sortCtorsDtors() {
656   assert(commands.size() == 1);
657   auto *isd = cast<InputSectionDescription>(commands[0]);
658   llvm::stable_sort(isd->sections, compCtors);
659 }
660 
661 // If an input string is in the form of "foo.N" where N is a number, return N
662 // (65535-N if .ctors.N or .dtors.N). Otherwise, returns 65536, which is one
663 // greater than the lowest priority.
664 int elf::getPriority(StringRef s) {
665   size_t pos = s.rfind('.');
666   if (pos == StringRef::npos)
667     return 65536;
668   int v = 65536;
669   if (to_integer(s.substr(pos + 1), v, 10) &&
670       (pos == 6 && (s.startswith(".ctors") || s.startswith(".dtors"))))
671     v = 65535 - v;
672   return v;
673 }
674 
675 InputSection *elf::getFirstInputSection(const OutputSection *os) {
676   for (SectionCommand *cmd : os->commands)
677     if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
678       if (!isd->sections.empty())
679         return isd->sections[0];
680   return nullptr;
681 }
682 
// Return all input sections of `os`. If exactly one InputSectionDescription
// contributes sections, the returned ArrayRef aliases it directly and
// `storage` stays empty; otherwise the sections are concatenated into
// `storage` and the result points there. The result is only valid as long as
// `storage` is alive and unmodified.
ArrayRef<InputSection *>
elf::getInputSections(const OutputSection &os,
                      SmallVector<InputSection *, 0> &storage) {
  ArrayRef<InputSection *> ret;
  storage.clear();
  for (SectionCommand *cmd : os.commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    if (ret.empty()) {
      // First description seen: borrow its array without copying.
      ret = isd->sections;
    } else {
      // A second description appears: fall back to copying into storage.
      if (storage.empty())
        storage.assign(ret.begin(), ret.end());
      storage.insert(storage.end(), isd->sections.begin(), isd->sections.end());
    }
  }
  return storage.empty() ? ret : ArrayRef(storage);
}
702 
703 // Sorts input sections by section name suffixes, so that .foo.N comes
704 // before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
705 // We want to keep the original order if the priorities are the same
706 // because the compiler keeps the original initialization order in a
707 // translation unit and we need to respect that.
708 // For more detail, read the section of the GCC's manual about init_priority.
709 void OutputSection::sortInitFini() {
710   // Sort sections by priority.
711   sort([](InputSectionBase *s) { return getPriority(s->name); });
712 }
713 
714 std::array<uint8_t, 4> OutputSection::getFiller() {
715   if (filler)
716     return *filler;
717   if (flags & SHF_EXECINSTR)
718     return target->trapInstr;
719   return {0, 0, 0, 0};
720 }
721 
// Verify that the addend stored in the output image at each dynamic
// relocation's target matches the addend we intended to write (only invoked
// when config->writeAddends && config->checkDynamicRelocs, per the asserts).
void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
  assert(config->writeAddends && config->checkDynamicRelocs);
  assert(type == SHT_REL || type == SHT_RELA);
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  parallelFor(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]);
    if (!sec)
      return;
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      // Address in the output buffer of the word the relocation patches.
      const uint8_t *relocTarget =
          bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        internalLinkerError(
            getErrorLocation(relocTarget),
            "wrote incorrect addend value 0x" + utohexstr(writtenAddend) +
                " instead of 0x" + utohexstr(addend) +
                " for dynamic relocation " + toString(rel.type) +
                " at offset 0x" + utohexstr(rel.getOffset()) +
                (rel.sym ? " against symbol " + toString(*rel.sym) : ""));
    }
  });
}
757 
// Explicit instantiations for every supported ELF variant, so the template
// definitions above can stay in this .cpp file.
template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>();
template void OutputSection::maybeCompress<ELF32BE>();
template void OutputSection::maybeCompress<ELF64LE>();
template void OutputSection::maybeCompress<ELF64BE>();
776