1 //===- Chunks.cpp ---------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "Chunks.h"
#include "InputFiles.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "Writer.h"
14 #include "lld/Common/ErrorHandler.h"
15 #include "llvm/ADT/Twine.h"
16 #include "llvm/BinaryFormat/COFF.h"
17 #include "llvm/Object/COFF.h"
18 #include "llvm/Support/Debug.h"
19 #include "llvm/Support/Endian.h"
20 #include "llvm/Support/raw_ostream.h"
21 #include <algorithm>
22 
23 using namespace llvm;
24 using namespace llvm::object;
25 using namespace llvm::support::endian;
26 using namespace llvm::COFF;
27 using llvm::support::ulittle32_t;
28 
29 namespace lld {
30 namespace coff {
31 
32 SectionChunk::SectionChunk(ObjFile *f, const coff_section *h)
33     : Chunk(SectionKind), file(f), header(h), repl(this) {
34   // Initialize relocs.
35   setRelocs(file->getCOFFObj()->getRelocations(header));
36 
37   // Initialize sectionName.
38   StringRef sectionName;
39   if (Expected<StringRef> e = file->getCOFFObj()->getSectionName(header))
40     sectionName = *e;
41   sectionNameData = sectionName.data();
42   sectionNameSize = sectionName.size();
43 
44   setAlignment(header->getAlignment());
45 
46   hasData = !(header->Characteristics & IMAGE_SCN_CNT_UNINITIALIZED_DATA);
47 
48   // If linker GC is disabled, every chunk starts out alive.  If linker GC is
49   // enabled, treat non-comdat sections as roots. Generally optimized object
50   // files will be built with -ffunction-sections or /Gy, so most things worth
51   // stripping will be in a comdat.
52   live = !config->doGC || !isCOMDAT();
53 }
54 
55 // SectionChunk is one of the most frequently allocated classes, so it is
56 // important to keep it as compact as possible. As of this writing, the number
57 // below is the size of this class on x64 platforms.
58 static_assert(sizeof(SectionChunk) <= 88, "SectionChunk grew unexpectedly");
59 
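// COFF relocations carry no explicit addend (they are REL-style, in ELF
// terms); the addend lives in the bytes being relocated. Hence these helpers
// read the existing value, add or OR in the computed value, and write the
// result back.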
60 static void add16(uint8_t *p, int16_t v) { write16le(p, read16le(p) + v); }
61 static void add32(uint8_t *p, int32_t v) { write32le(p, read32le(p) + v); }
62 static void add64(uint8_t *p, int64_t v) { write64le(p, read64le(p) + v); }
63 static void or16(uint8_t *p, uint16_t v) { write16le(p, read16le(p) | v); }
64 static void or32(uint8_t *p, uint32_t v) { write32le(p, read32le(p) | v); }
65 
// Verify that the given section is an appropriate target for SECREL
// relocations. The check is relaxed because, unfortunately, debug sections
// contain section-relative relocations against absolute symbols.
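// A SECREL relocation stores the 32-bit offset of the target symbol from the
// start of the output section containing it; it is used mostly by debug info
// and TLS, so an absolute symbol (which has no output section) is an error
// only outside of CodeView sections.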
69 static bool checkSecRel(const SectionChunk *sec, OutputSection *os) {
70   if (os)
71     return true;
72   if (sec->isCodeView())
73     return false;
74   error("SECREL relocation cannot be applied to absolute symbols");
75   return false;
76 }
77 
78 static void applySecRel(const SectionChunk *sec, uint8_t *off,
79                         OutputSection *os, uint64_t s) {
80   if (!checkSecRel(sec, os))
81     return;
82   uint64_t secRel = s - os->getRVA();
83   if (secRel > UINT32_MAX) {
84     error("overflow in SECREL relocation in section: " + sec->getSectionName());
85     return;
86   }
87   add32(off, secRel);
88 }
89 
90 static void applySecIdx(uint8_t *off, OutputSection *os) {
  // An absolute symbol doesn't have a section index, but a section index
  // relocation against an absolute symbol should resolve to one plus the
  // last output section index. This is required for compatibility with MSVC.
94   if (os)
95     add16(off, os->sectionIndex);
96   else
97     add16(off, DefinedAbsolute::numOutputSections + 1);
98 }
99 
100 void SectionChunk::applyRelX64(uint8_t *off, uint16_t type, OutputSection *os,
101                                uint64_t s, uint64_t p) const {
102   switch (type) {
103   case IMAGE_REL_AMD64_ADDR32:   add32(off, s + config->imageBase); break;
104   case IMAGE_REL_AMD64_ADDR64:   add64(off, s + config->imageBase); break;
105   case IMAGE_REL_AMD64_ADDR32NB: add32(off, s); break;
106   case IMAGE_REL_AMD64_REL32:    add32(off, s - p - 4); break;
107   case IMAGE_REL_AMD64_REL32_1:  add32(off, s - p - 5); break;
108   case IMAGE_REL_AMD64_REL32_2:  add32(off, s - p - 6); break;
109   case IMAGE_REL_AMD64_REL32_3:  add32(off, s - p - 7); break;
110   case IMAGE_REL_AMD64_REL32_4:  add32(off, s - p - 8); break;
111   case IMAGE_REL_AMD64_REL32_5:  add32(off, s - p - 9); break;
112   case IMAGE_REL_AMD64_SECTION:  applySecIdx(off, os); break;
113   case IMAGE_REL_AMD64_SECREL:   applySecRel(this, off, os, s); break;
114   default:
115     error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
116           toString(file));
117   }
118 }
119 
120 void SectionChunk::applyRelX86(uint8_t *off, uint16_t type, OutputSection *os,
121                                uint64_t s, uint64_t p) const {
122   switch (type) {
123   case IMAGE_REL_I386_ABSOLUTE: break;
124   case IMAGE_REL_I386_DIR32:    add32(off, s + config->imageBase); break;
125   case IMAGE_REL_I386_DIR32NB:  add32(off, s); break;
126   case IMAGE_REL_I386_REL32:    add32(off, s - p - 4); break;
127   case IMAGE_REL_I386_SECTION:  applySecIdx(off, os); break;
128   case IMAGE_REL_I386_SECREL:   applySecRel(this, off, os, s); break;
129   default:
130     error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
131           toString(file));
132   }
133 }
134 
static void applyMOV(uint8_t *off, uint16_t v) {
  write16le(off,
            (read16le(off) & 0xfbf0) | ((v & 0x800) >> 1) | ((v >> 12) & 0xf));
  write16le(off + 2,
            (read16le(off + 2) & 0x8f00) | ((v & 0x700) << 4) | (v & 0xff));
}
139 
140 static uint16_t readMOV(uint8_t *off, bool movt) {
141   uint16_t op1 = read16le(off);
142   if ((op1 & 0xfbf0) != (movt ? 0xf2c0 : 0xf240))
143     error("unexpected instruction in " + Twine(movt ? "MOVT" : "MOVW") +
144           " instruction in MOV32T relocation");
145   uint16_t op2 = read16le(off + 2);
146   if ((op2 & 0x8000) != 0)
147     error("unexpected instruction in " + Twine(movt ? "MOVT" : "MOVW") +
148           " instruction in MOV32T relocation");
149   return (op2 & 0x00ff) | ((op2 >> 4) & 0x0700) | ((op1 << 1) & 0x0800) |
150          ((op1 & 0x000f) << 12);
151 }
152 
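// A MOV32T relocation covers a Thumb-2 movw/movt pair that materializes a
// 32-bit value: movw holds the low 16 bits and movt the high 16 bits, each
// scattered across the two halfwords of its instruction (imm4:i:imm3:imm8).
// For example, loading 0x12345678 uses movw with 0x5678 followed by movt
// with 0x1234. applyMOV32T reads the packed immediate already present, adds
// v to it, and writes the two halves back.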
153 void applyMOV32T(uint8_t *off, uint32_t v) {
154   uint16_t immW = readMOV(off, false);    // read MOVW operand
155   uint16_t immT = readMOV(off + 4, true); // read MOVT operand
156   uint32_t imm = immW | (immT << 16);
157   v += imm;                         // add the immediate offset
158   applyMOV(off, v);           // set MOVW operand
159   applyMOV(off + 4, v >> 16); // set MOVT operand
160 }
161 
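// Thumb-2 branch offsets are counted in halfwords and are scattered across
// both 16-bit halves of the instruction. BRANCH20T (conditional B<c>.W)
// carries a signed 20-bit halfword offset, hence the isInt<21> check on the
// byte offset below; BRANCH24T and BLX23T (B.W/BL/BLX) carry a signed 24-bit
// halfword offset whose top bits are encoded as J1 = NOT(I1) XOR S and
// J2 = NOT(I2) XOR S, which is what applyBranch24T computes.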
162 static void applyBranch20T(uint8_t *off, int32_t v) {
163   if (!isInt<21>(v))
164     error("relocation out of range");
165   uint32_t s = v < 0 ? 1 : 0;
166   uint32_t j1 = (v >> 19) & 1;
167   uint32_t j2 = (v >> 18) & 1;
168   or16(off, (s << 10) | ((v >> 12) & 0x3f));
169   or16(off + 2, (j1 << 13) | (j2 << 11) | ((v >> 1) & 0x7ff));
170 }
171 
172 void applyBranch24T(uint8_t *off, int32_t v) {
173   if (!isInt<25>(v))
174     error("relocation out of range");
175   uint32_t s = v < 0 ? 1 : 0;
176   uint32_t j1 = ((~v >> 23) & 1) ^ s;
177   uint32_t j2 = ((~v >> 22) & 1) ^ s;
178   or16(off, (s << 10) | ((v >> 12) & 0x3ff));
179   // Clear out the J1 and J2 bits which may be set.
  write16le(off + 2, (read16le(off + 2) & 0xd000) | (j1 << 13) | (j2 << 11) |
                         ((v >> 1) & 0x7ff));
181 }
182 
183 void SectionChunk::applyRelARM(uint8_t *off, uint16_t type, OutputSection *os,
184                                uint64_t s, uint64_t p) const {
  // Pointers to Thumb code must have the LSB set.
186   uint64_t sx = s;
187   if (os && (os->header.Characteristics & IMAGE_SCN_MEM_EXECUTE))
188     sx |= 1;
189   switch (type) {
190   case IMAGE_REL_ARM_ADDR32:    add32(off, sx + config->imageBase); break;
191   case IMAGE_REL_ARM_ADDR32NB:  add32(off, sx); break;
192   case IMAGE_REL_ARM_MOV32T:    applyMOV32T(off, sx + config->imageBase); break;
193   case IMAGE_REL_ARM_BRANCH20T: applyBranch20T(off, sx - p - 4); break;
194   case IMAGE_REL_ARM_BRANCH24T: applyBranch24T(off, sx - p - 4); break;
195   case IMAGE_REL_ARM_BLX23T:    applyBranch24T(off, sx - p - 4); break;
196   case IMAGE_REL_ARM_SECTION:   applySecIdx(off, os); break;
197   case IMAGE_REL_ARM_SECREL:    applySecRel(this, off, os, s); break;
198   case IMAGE_REL_ARM_REL32:     add32(off, sx - p - 4); break;
199   default:
200     error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
201           toString(file));
202   }
203 }
204 
205 // Interpret the existing immediate value as a byte offset to the
206 // target symbol, then update the instruction with the immediate as
207 // the page offset from the current instruction to the target.
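// The 21-bit ADRP/ADR immediate is split into a 2-bit immlo field (bits
// 30:29) and a 19-bit immhi field (bits 23:5), which is why the immediate is
// extracted as ((orig >> 29) & 0x3) | ((orig >> 3) & 0x1FFFFC) below. With
// shift == 12 (ADRP) the immediate counts 4 KiB pages; with shift == 0 (ADR)
// it counts bytes.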
208 void applyArm64Addr(uint8_t *off, uint64_t s, uint64_t p, int shift) {
209   uint32_t orig = read32le(off);
210   uint64_t imm = ((orig >> 29) & 0x3) | ((orig >> 3) & 0x1FFFFC);
211   s += imm;
212   imm = (s >> shift) - (p >> shift);
213   uint32_t immLo = (imm & 0x3) << 29;
214   uint32_t immHi = (imm & 0x1FFFFC) << 3;
215   uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);
216   write32le(off, (orig & ~mask) | immLo | immHi);
217 }
218 
// Update the immediate field in an AArch64 ldr, str, or add instruction.
// rangeLimit optionally restricts the written immediate to 12 - rangeLimit
// bits; the high bits beyond that range are masked off.
222 void applyArm64Imm(uint8_t *off, uint64_t imm, uint32_t rangeLimit) {
223   uint32_t orig = read32le(off);
224   imm += (orig >> 10) & 0xFFF;
225   orig &= ~(0xFFF << 10);
226   write32le(off, orig | ((imm & (0xFFF >> rangeLimit)) << 10));
227 }
228 
// Add the 12-bit page offset to the existing immediate.
// Ldr/str instructions store their immediate scaled by the load/store size
// (giving a larger range for larger loads/stores). The immediate is stored
// in that scaled form both before and after fixing up the relocation.
// Even though larger loads/stores have a larger range, limit the effective
// offset to 12 bits, since it is intended to be a page offset.
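// For example, an 8-byte load (ldr x0, [x1, #off]) has size == 3, so a page
// offset of 0x10 is stored as imm12 == 2 and must be a multiple of 8.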
237 static void applyArm64Ldr(uint8_t *off, uint64_t imm) {
238   uint32_t orig = read32le(off);
239   uint32_t size = orig >> 30;
240   // 0x04000000 indicates SIMD/FP registers
241   // 0x00800000 indicates 128 bit
242   if ((orig & 0x4800000) == 0x4800000)
243     size += 4;
244   if ((imm & ((1 << size) - 1)) != 0)
245     error("misaligned ldr/str offset");
246   applyArm64Imm(off, imm >> size, size);
247 }
248 
249 static void applySecRelLow12A(const SectionChunk *sec, uint8_t *off,
250                               OutputSection *os, uint64_t s) {
251   if (checkSecRel(sec, os))
252     applyArm64Imm(off, (s - os->getRVA()) & 0xfff, 0);
253 }
254 
255 static void applySecRelHigh12A(const SectionChunk *sec, uint8_t *off,
256                                OutputSection *os, uint64_t s) {
257   if (!checkSecRel(sec, os))
258     return;
259   uint64_t secRel = (s - os->getRVA()) >> 12;
  if (secRel > 0xfff) {
261     error("overflow in SECREL_HIGH12A relocation in section: " +
262           sec->getSectionName());
263     return;
264   }
265   applyArm64Imm(off, secRel & 0xfff, 0);
266 }
267 
268 static void applySecRelLdr(const SectionChunk *sec, uint8_t *off,
269                            OutputSection *os, uint64_t s) {
270   if (checkSecRel(sec, os))
271     applyArm64Ldr(off, (s - os->getRVA()) & 0xfff);
272 }
273 
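// AArch64 B/BL instructions hold a signed 26-bit word offset (+/- 128 MiB),
// so the byte offset must fit in 28 bits; its low two bits are dropped.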
274 void applyArm64Branch26(uint8_t *off, int64_t v) {
275   if (!isInt<28>(v))
276     error("relocation out of range");
277   or32(off, (v & 0x0FFFFFFC) >> 2);
278 }
279 
280 static void applyArm64Branch19(uint8_t *off, int64_t v) {
281   if (!isInt<21>(v))
282     error("relocation out of range");
283   or32(off, (v & 0x001FFFFC) << 3);
284 }
285 
286 static void applyArm64Branch14(uint8_t *off, int64_t v) {
287   if (!isInt<16>(v))
288     error("relocation out of range");
289   or32(off, (v & 0x0000FFFC) << 3);
290 }
291 
292 void SectionChunk::applyRelARM64(uint8_t *off, uint16_t type, OutputSection *os,
293                                  uint64_t s, uint64_t p) const {
294   switch (type) {
295   case IMAGE_REL_ARM64_PAGEBASE_REL21: applyArm64Addr(off, s, p, 12); break;
296   case IMAGE_REL_ARM64_REL21:          applyArm64Addr(off, s, p, 0); break;
297   case IMAGE_REL_ARM64_PAGEOFFSET_12A: applyArm64Imm(off, s & 0xfff, 0); break;
298   case IMAGE_REL_ARM64_PAGEOFFSET_12L: applyArm64Ldr(off, s & 0xfff); break;
299   case IMAGE_REL_ARM64_BRANCH26:       applyArm64Branch26(off, s - p); break;
300   case IMAGE_REL_ARM64_BRANCH19:       applyArm64Branch19(off, s - p); break;
301   case IMAGE_REL_ARM64_BRANCH14:       applyArm64Branch14(off, s - p); break;
302   case IMAGE_REL_ARM64_ADDR32:         add32(off, s + config->imageBase); break;
303   case IMAGE_REL_ARM64_ADDR32NB:       add32(off, s); break;
304   case IMAGE_REL_ARM64_ADDR64:         add64(off, s + config->imageBase); break;
305   case IMAGE_REL_ARM64_SECREL:         applySecRel(this, off, os, s); break;
306   case IMAGE_REL_ARM64_SECREL_LOW12A:  applySecRelLow12A(this, off, os, s); break;
307   case IMAGE_REL_ARM64_SECREL_HIGH12A: applySecRelHigh12A(this, off, os, s); break;
308   case IMAGE_REL_ARM64_SECREL_LOW12L:  applySecRelLdr(this, off, os, s); break;
309   case IMAGE_REL_ARM64_SECTION:        applySecIdx(off, os); break;
310   case IMAGE_REL_ARM64_REL32:          add32(off, s - p - 4); break;
311   default:
312     error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
313           toString(file));
314   }
315 }
316 
317 static void maybeReportRelocationToDiscarded(const SectionChunk *fromChunk,
318                                              Defined *sym,
319                                              const coff_relocation &rel) {
320   // Don't report these errors when the relocation comes from a debug info
321   // section or in mingw mode. MinGW mode object files (built by GCC) can
322   // have leftover sections with relocations against discarded comdat
323   // sections. Such sections are left as is, with relocations untouched.
324   if (fromChunk->isCodeView() || fromChunk->isDWARF() || config->mingw)
325     return;
326 
327   // Get the name of the symbol. If it's null, it was discarded early, so we
328   // have to go back to the object file.
329   ObjFile *file = fromChunk->file;
330   StringRef name;
331   if (sym) {
332     name = sym->getName();
333   } else {
334     COFFSymbolRef coffSym =
335         check(file->getCOFFObj()->getSymbol(rel.SymbolTableIndex));
336     name = check(file->getCOFFObj()->getSymbolName(coffSym));
337   }
338 
339   std::vector<std::string> symbolLocations =
340       getSymbolLocations(file, rel.SymbolTableIndex);
341 
342   std::string out;
343   llvm::raw_string_ostream os(out);
  os << "relocation against symbol in discarded section: " << name;
345   for (const std::string &s : symbolLocations)
346     os << s;
347   error(os.str());
348 }
349 
350 void SectionChunk::writeTo(uint8_t *buf) const {
351   if (!hasData)
352     return;
353   // Copy section contents from source object file to output file.
354   ArrayRef<uint8_t> a = getContents();
355   if (!a.empty())
356     memcpy(buf, a.data(), a.size());
357 
358   // Apply relocations.
359   size_t inputSize = getSize();
360   for (const coff_relocation &rel : getRelocs()) {
361     // Check for an invalid relocation offset. This check isn't perfect, because
362     // we don't have the relocation size, which is only known after checking the
363     // machine and relocation type. As a result, a relocation may overwrite the
364     // beginning of the following input section.
365     if (rel.VirtualAddress >= inputSize) {
366       error("relocation points beyond the end of its parent section");
367       continue;
368     }
369 
370     applyRelocation(buf + rel.VirtualAddress, rel);
371   }
372 }
373 
374 void SectionChunk::applyRelocation(uint8_t *off,
375                                    const coff_relocation &rel) const {
376   auto *sym = dyn_cast_or_null<Defined>(file->getSymbol(rel.SymbolTableIndex));
377 
378   // Get the output section of the symbol for this relocation.  The output
379   // section is needed to compute SECREL and SECTION relocations used in debug
380   // info.
381   Chunk *c = sym ? sym->getChunk() : nullptr;
382   OutputSection *os = c ? c->getOutputSection() : nullptr;
383 
384   // Skip the relocation if it refers to a discarded section, and diagnose it
385   // as an error if appropriate. If a symbol was discarded early, it may be
386   // null. If it was discarded late, the output section will be null, unless
387   // it was an absolute or synthetic symbol.
388   if (!sym ||
389       (!os && !isa<DefinedAbsolute>(sym) && !isa<DefinedSynthetic>(sym))) {
390     maybeReportRelocationToDiscarded(this, sym, rel);
391     return;
392   }
393 
394   uint64_t s = sym->getRVA();
395 
396   // Compute the RVA of the relocation for relative relocations.
397   uint64_t p = rva + rel.VirtualAddress;
398   switch (config->machine) {
399   case AMD64:
400     applyRelX64(off, rel.Type, os, s, p);
401     break;
402   case I386:
403     applyRelX86(off, rel.Type, os, s, p);
404     break;
405   case ARMNT:
406     applyRelARM(off, rel.Type, os, s, p);
407     break;
408   case ARM64:
409     applyRelARM64(off, rel.Type, os, s, p);
410     break;
411   default:
412     llvm_unreachable("unknown machine type");
413   }
414 }
415 
416 // Defend against unsorted relocations. This may be overly conservative.
417 void SectionChunk::sortRelocations() {
418   auto cmpByVa = [](const coff_relocation &l, const coff_relocation &r) {
419     return l.VirtualAddress < r.VirtualAddress;
420   };
421   if (llvm::is_sorted(getRelocs(), cmpByVa))
422     return;
423   warn("some relocations in " + file->getName() + " are not sorted");
424   MutableArrayRef<coff_relocation> newRelocs(
425       bAlloc.Allocate<coff_relocation>(relocsSize), relocsSize);
426   memcpy(newRelocs.data(), relocsData, relocsSize * sizeof(coff_relocation));
427   llvm::sort(newRelocs, cmpByVa);
428   setRelocs(newRelocs);
429 }
430 
431 // Similar to writeTo, but suitable for relocating a subsection of the overall
432 // section.
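// This is used when pieces of a section are copied out individually, e.g.
// when the PDB/CodeView writer emits .debug$S subsections one at a time;
// nextRelocIndex lets callers walk the (sorted) relocation list incrementally
// across consecutive subsections.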
433 void SectionChunk::writeAndRelocateSubsection(ArrayRef<uint8_t> sec,
434                                               ArrayRef<uint8_t> subsec,
435                                               uint32_t &nextRelocIndex,
436                                               uint8_t *buf) const {
437   assert(!subsec.empty() && !sec.empty());
438   assert(sec.begin() <= subsec.begin() && subsec.end() <= sec.end() &&
439          "subsection is not part of this section");
440   size_t vaBegin = std::distance(sec.begin(), subsec.begin());
441   size_t vaEnd = std::distance(sec.begin(), subsec.end());
442   memcpy(buf, subsec.data(), subsec.size());
443   for (; nextRelocIndex < relocsSize; ++nextRelocIndex) {
444     const coff_relocation &rel = relocsData[nextRelocIndex];
445     // Only apply relocations that apply to this subsection. These checks
446     // assume that all subsections completely contain their relocations.
447     // Relocations must not straddle the beginning or end of a subsection.
448     if (rel.VirtualAddress < vaBegin)
449       continue;
450     if (rel.VirtualAddress + 1 >= vaEnd)
451       break;
452     applyRelocation(&buf[rel.VirtualAddress - vaBegin], rel);
453   }
454 }
455 
456 void SectionChunk::addAssociative(SectionChunk *child) {
457   // Insert this child at the head of the list.
458   assert(child->assocChildren == nullptr &&
459          "associated sections cannot have their own associated children");
460   child->assocChildren = assocChildren;
461   assocChildren = child;
462 }
463 
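// Map a static relocation type to the base relocation type the loader must
// apply if the image is rebased. Only relocations that embed a full absolute
// virtual address need a base relocation; everything else (RVA-based,
// PC-relative, or section-relative) is unaffected by rebasing and maps to
// IMAGE_REL_BASED_ABSOLUTE, which means "no fixup needed".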
464 static uint8_t getBaserelType(const coff_relocation &rel) {
465   switch (config->machine) {
466   case AMD64:
467     if (rel.Type == IMAGE_REL_AMD64_ADDR64)
468       return IMAGE_REL_BASED_DIR64;
469     return IMAGE_REL_BASED_ABSOLUTE;
470   case I386:
471     if (rel.Type == IMAGE_REL_I386_DIR32)
472       return IMAGE_REL_BASED_HIGHLOW;
473     return IMAGE_REL_BASED_ABSOLUTE;
474   case ARMNT:
475     if (rel.Type == IMAGE_REL_ARM_ADDR32)
476       return IMAGE_REL_BASED_HIGHLOW;
477     if (rel.Type == IMAGE_REL_ARM_MOV32T)
478       return IMAGE_REL_BASED_ARM_MOV32T;
479     return IMAGE_REL_BASED_ABSOLUTE;
480   case ARM64:
481     if (rel.Type == IMAGE_REL_ARM64_ADDR64)
482       return IMAGE_REL_BASED_DIR64;
483     return IMAGE_REL_BASED_ABSOLUTE;
484   default:
485     llvm_unreachable("unknown machine type");
486   }
487 }
488 
489 // Windows-specific.
490 // Collect all locations that contain absolute addresses, which need to be
491 // fixed by the loader if load-time relocation is needed.
492 // Only called when base relocation is enabled.
493 void SectionChunk::getBaserels(std::vector<Baserel> *res) {
494   for (const coff_relocation &rel : getRelocs()) {
495     uint8_t ty = getBaserelType(rel);
496     if (ty == IMAGE_REL_BASED_ABSOLUTE)
497       continue;
498     Symbol *target = file->getSymbol(rel.SymbolTableIndex);
499     if (!target || isa<DefinedAbsolute>(target))
500       continue;
501     res->emplace_back(rva + rel.VirtualAddress, ty);
502   }
503 }
504 
// MinGW specific.
// Check whether a static relocation of the given type can be deferred and
// handled at runtime as a pseudo relocation (for a reference to a module
// local variable that turned out to actually need to be imported from
// another DLL). This returns the size the relocation is supposed to update,
// in bits, or 0 if the relocation cannot be handled as a runtime pseudo
// relocation.
512 static int getRuntimePseudoRelocSize(uint16_t type) {
  // Only relocations that contain either an absolute address or a plain
  // relative offset can be handled, since the runtime pseudo reloc
  // implementation adds 8/16/32/64-bit values to a memory address.
516   //
517   // Given a pseudo relocation entry,
518   //
519   // typedef struct {
520   //   DWORD sym;
521   //   DWORD target;
522   //   DWORD flags;
523   // } runtime_pseudo_reloc_item_v2;
524   //
525   // the runtime relocation performs this adjustment:
526   //     *(base + .target) += *(base + .sym) - (base + .sym)
527   //
  // This works both for absolute addresses (IMAGE_REL_*_ADDR32/64,
  // IMAGE_REL_I386_DIR32), where the memory location initially contains
  // the address of the IAT slot, and for relative addresses
  // (IMAGE_REL_*_REL32), where the memory location originally contains the
  // relative offset to the IAT slot.
533   //
534   // This requires the target address to be writable, either directly out of
535   // the image, or temporarily changed at runtime with VirtualProtect.
536   // Since this only operates on direct address values, it doesn't work for
537   // ARM/ARM64 relocations, other than the plain ADDR32/ADDR64 relocations.
538   switch (config->machine) {
539   case AMD64:
540     switch (type) {
541     case IMAGE_REL_AMD64_ADDR64:
542       return 64;
543     case IMAGE_REL_AMD64_ADDR32:
544     case IMAGE_REL_AMD64_REL32:
545     case IMAGE_REL_AMD64_REL32_1:
546     case IMAGE_REL_AMD64_REL32_2:
547     case IMAGE_REL_AMD64_REL32_3:
548     case IMAGE_REL_AMD64_REL32_4:
549     case IMAGE_REL_AMD64_REL32_5:
550       return 32;
551     default:
552       return 0;
553     }
554   case I386:
555     switch (type) {
556     case IMAGE_REL_I386_DIR32:
557     case IMAGE_REL_I386_REL32:
558       return 32;
559     default:
560       return 0;
561     }
562   case ARMNT:
563     switch (type) {
564     case IMAGE_REL_ARM_ADDR32:
565       return 32;
566     default:
567       return 0;
568     }
569   case ARM64:
570     switch (type) {
571     case IMAGE_REL_ARM64_ADDR64:
572       return 64;
573     case IMAGE_REL_ARM64_ADDR32:
574       return 32;
575     default:
576       return 0;
577     }
578   default:
579     llvm_unreachable("unknown machine type");
580   }
581 }
582 
583 // MinGW specific.
// Append information to the provided vector about all relocations that
// need to be handled at runtime as runtime pseudo relocations (references
// to module-local variables that turned out to actually need to be
// imported from another DLL).
588 void SectionChunk::getRuntimePseudoRelocs(
589     std::vector<RuntimePseudoReloc> &res) {
590   for (const coff_relocation &rel : getRelocs()) {
591     auto *target =
592         dyn_cast_or_null<Defined>(file->getSymbol(rel.SymbolTableIndex));
593     if (!target || !target->isRuntimePseudoReloc)
594       continue;
595     int sizeInBits = getRuntimePseudoRelocSize(rel.Type);
596     if (sizeInBits == 0) {
597       error("unable to automatically import from " + target->getName() +
598             " with relocation type " +
599             file->getCOFFObj()->getRelocationTypeName(rel.Type) + " in " +
600             toString(file));
601       continue;
602     }
603     // sizeInBits is used to initialize the Flags field; currently no
604     // other flags are defined.
    res.emplace_back(target, this, rel.VirtualAddress, sizeInBits);
607   }
608 }
609 
610 bool SectionChunk::isCOMDAT() const {
611   return header->Characteristics & IMAGE_SCN_LNK_COMDAT;
612 }
613 
614 void SectionChunk::printDiscardedMessage() const {
615   // Removed by dead-stripping. If it's removed by ICF, ICF already
616   // printed out the name, so don't repeat that here.
617   if (sym && this == repl)
618     message("Discarded " + sym->getName());
619 }
620 
621 StringRef SectionChunk::getDebugName() const {
622   if (sym)
623     return sym->getName();
624   return "";
625 }
626 
627 ArrayRef<uint8_t> SectionChunk::getContents() const {
628   ArrayRef<uint8_t> a;
629   cantFail(file->getCOFFObj()->getSectionContents(header, a));
630   return a;
631 }
632 
633 ArrayRef<uint8_t> SectionChunk::consumeDebugMagic() {
634   assert(isCodeView());
635   return consumeDebugMagic(getContents(), getSectionName());
636 }
637 
638 ArrayRef<uint8_t> SectionChunk::consumeDebugMagic(ArrayRef<uint8_t> data,
639                                                   StringRef sectionName) {
640   if (data.empty())
641     return {};
642 
643   // First 4 bytes are section magic.
644   if (data.size() < 4)
645     fatal("the section is too short: " + sectionName);
646 
647   if (!sectionName.startswith(".debug$"))
648     fatal("invalid section: " + sectionName);
649 
650   uint32_t magic = support::endian::read32le(data.data());
651   uint32_t expectedMagic = sectionName == ".debug$H"
652                                ? DEBUG_HASHES_SECTION_MAGIC
653                                : DEBUG_SECTION_MAGIC;
654   if (magic != expectedMagic) {
655     warn("ignoring section " + sectionName + " with unrecognized magic 0x" +
656          utohexstr(magic));
657     return {};
658   }
659   return data.slice(4);
660 }
661 
662 SectionChunk *SectionChunk::findByName(ArrayRef<SectionChunk *> sections,
663                                        StringRef name) {
664   for (SectionChunk *c : sections)
665     if (c->getSectionName() == name)
666       return c;
667   return nullptr;
668 }
669 
670 void SectionChunk::replace(SectionChunk *other) {
671   p2Align = std::max(p2Align, other->p2Align);
672   other->repl = repl;
673   other->live = false;
674 }
675 
676 uint32_t SectionChunk::getSectionNumber() const {
677   DataRefImpl r;
678   r.p = reinterpret_cast<uintptr_t>(header);
679   SectionRef s(r, file->getCOFFObj());
680   return s.getIndex() + 1;
681 }
682 
683 CommonChunk::CommonChunk(const COFFSymbolRef s) : sym(s) {
684   // The value of a common symbol is its size. Align all common symbols smaller
685   // than 32 bytes naturally, i.e. round the size up to the next power of two.
686   // This is what MSVC link.exe does.
687   setAlignment(std::min(32U, uint32_t(PowerOf2Ceil(sym.getValue()))));
688   hasData = false;
689 }
690 
691 uint32_t CommonChunk::getOutputCharacteristics() const {
692   return IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ |
693          IMAGE_SCN_MEM_WRITE;
694 }
695 
696 void StringChunk::writeTo(uint8_t *buf) const {
697   memcpy(buf, str.data(), str.size());
698   buf[str.size()] = '\0';
699 }
700 
701 ImportThunkChunkX64::ImportThunkChunkX64(Defined *s) : ImportThunkChunk(s) {
  // The Intel Optimization Manual says that all branch targets
  // should be 16-byte aligned. The MSVC linker does this too.
704   setAlignment(16);
705 }
706 
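// The thunk template (importThunkX86, see Chunks.h) is an indirect JMP
// through the symbol's import address table entry. On x64 the operand
// written at offset 2 is a RIP-relative displacement measured from the end
// of the thunk, so no base relocation is needed; on x86 it is an absolute
// VA, which is why ImportThunkChunkX86 also registers a base relocation at
// offset 2.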
707 void ImportThunkChunkX64::writeTo(uint8_t *buf) const {
708   memcpy(buf, importThunkX86, sizeof(importThunkX86));
  // The first two bytes are the JMP opcode. Fill in its 32-bit operand.
710   write32le(buf + 2, impSymbol->getRVA() - rva - getSize());
711 }
712 
713 void ImportThunkChunkX86::getBaserels(std::vector<Baserel> *res) {
714   res->emplace_back(getRVA() + 2);
715 }
716 
717 void ImportThunkChunkX86::writeTo(uint8_t *buf) const {
718   memcpy(buf, importThunkX86, sizeof(importThunkX86));
  // The first two bytes are the JMP opcode. Fill in its 32-bit operand.
  write32le(buf + 2, impSymbol->getRVA() + config->imageBase);
722 }
723 
724 void ImportThunkChunkARM::getBaserels(std::vector<Baserel> *res) {
725   res->emplace_back(getRVA(), IMAGE_REL_BASED_ARM_MOV32T);
726 }
727 
728 void ImportThunkChunkARM::writeTo(uint8_t *buf) const {
729   memcpy(buf, importThunkARM, sizeof(importThunkARM));
  // Fix the movw and movt operands.
731   applyMOV32T(buf, impSymbol->getRVA() + config->imageBase);
732 }
733 
734 void ImportThunkChunkARM64::writeTo(uint8_t *buf) const {
735   int64_t off = impSymbol->getRVA() & 0xfff;
736   memcpy(buf, importThunkARM64, sizeof(importThunkARM64));
737   applyArm64Addr(buf, impSymbol->getRVA(), rva, 12);
738   applyArm64Ldr(buf + 4, off);
739 }
740 
741 // A Thumb2, PIC, non-interworking range extension thunk.
742 const uint8_t armThunk[] = {
743     0x40, 0xf2, 0x00, 0x0c, // P:  movw ip,:lower16:S - (P + (L1-P) + 4)
744     0xc0, 0xf2, 0x00, 0x0c, //     movt ip,:upper16:S - (P + (L1-P) + 4)
745     0xe7, 0x44,             // L1: add  pc, ip
746 };
747 
748 size_t RangeExtensionThunkARM::getSize() const {
749   assert(config->machine == ARMNT);
750   return sizeof(armThunk);
751 }
752 
753 void RangeExtensionThunkARM::writeTo(uint8_t *buf) const {
754   assert(config->machine == ARMNT);
755   uint64_t offset = target->getRVA() - rva - 12;
756   memcpy(buf, armThunk, sizeof(armThunk));
757   applyMOV32T(buf, uint32_t(offset));
758 }
759 
760 // A position independent ARM64 adrp+add thunk, with a maximum range of
761 // +/- 4 GB, which is enough for any PE-COFF.
762 const uint8_t arm64Thunk[] = {
763     0x10, 0x00, 0x00, 0x90, // adrp x16, Dest
764     0x10, 0x02, 0x00, 0x91, // add  x16, x16, :lo12:Dest
765     0x00, 0x02, 0x1f, 0xd6, // br   x16
766 };
767 
768 size_t RangeExtensionThunkARM64::getSize() const {
769   assert(config->machine == ARM64);
770   return sizeof(arm64Thunk);
771 }
772 
773 void RangeExtensionThunkARM64::writeTo(uint8_t *buf) const {
774   assert(config->machine == ARM64);
775   memcpy(buf, arm64Thunk, sizeof(arm64Thunk));
776   applyArm64Addr(buf + 0, target->getRVA(), rva, 12);
777   applyArm64Imm(buf + 4, target->getRVA() & 0xfff, 0);
778 }
779 
780 void LocalImportChunk::getBaserels(std::vector<Baserel> *res) {
781   res->emplace_back(getRVA());
782 }
783 
784 size_t LocalImportChunk::getSize() const { return config->wordsize; }
785 
786 void LocalImportChunk::writeTo(uint8_t *buf) const {
787   if (config->is64()) {
788     write64le(buf, sym->getRVA() + config->imageBase);
789   } else {
790     write32le(buf, sym->getRVA() + config->imageBase);
791   }
792 }
793 
794 void RVATableChunk::writeTo(uint8_t *buf) const {
795   ulittle32_t *begin = reinterpret_cast<ulittle32_t *>(buf);
796   size_t cnt = 0;
797   for (const ChunkAndOffset &co : syms)
798     begin[cnt++] = co.inputChunk->getRVA() + co.offset;
  llvm::sort(begin, begin + cnt);
800   assert(std::unique(begin, begin + cnt) == begin + cnt &&
801          "RVA tables should be de-duplicated");
802 }
803 
804 // MinGW specific, for the "automatic import of variables from DLLs" feature.
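// The table starts with a 12-byte header of three DWORDs (0, 0, 1), where
// the trailing 1 selects the v2 format, followed by one 12-byte entry per
// relocation: { sym RVA, target RVA, flags }. writeTo() below emits exactly
// this layout.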
805 size_t PseudoRelocTableChunk::getSize() const {
806   if (relocs.empty())
807     return 0;
808   return 12 + 12 * relocs.size();
809 }
810 
811 // MinGW specific.
812 void PseudoRelocTableChunk::writeTo(uint8_t *buf) const {
813   if (relocs.empty())
814     return;
815 
816   ulittle32_t *table = reinterpret_cast<ulittle32_t *>(buf);
817   // This is the list header, to signal the runtime pseudo relocation v2
818   // format.
819   table[0] = 0;
820   table[1] = 0;
821   table[2] = 1;
822 
823   size_t idx = 3;
824   for (const RuntimePseudoReloc &rpr : relocs) {
825     table[idx + 0] = rpr.sym->getRVA();
826     table[idx + 1] = rpr.target->getRVA() + rpr.targetOffset;
827     table[idx + 2] = rpr.flags;
828     idx += 3;
829   }
830 }
831 
// Windows-specific. This class represents a block in the .reloc section.
// The format is described below.
834 //
835 // On Windows, each DLL is linked against a fixed base address and
836 // usually loaded to that address. However, if there's already another
837 // DLL that overlaps, the loader has to relocate it. To do that, DLLs
838 // contain .reloc sections which contain offsets that need to be fixed
839 // up at runtime. If the loader finds that a DLL cannot be loaded to its
// desired base address, it loads it somewhere else, and adds <actual
// base address> - <desired base address> to each offset that is
842 // specified by the .reloc section. In ELF terms, .reloc sections
843 // contain relative relocations in REL format (as opposed to RELA.)
844 //
845 // This already significantly reduces the size of relocations compared
846 // to ELF .rel.dyn, but Windows does more to reduce it (probably because
847 // it was invented for PCs in the late '80s or early '90s.)  Offsets in
848 // .reloc are grouped by page where the page size is 12 bits, and
849 // offsets sharing the same page address are stored consecutively to
850 // represent them with less space. This is very similar to the page
851 // table which is grouped by (multiple stages of) pages.
852 //
853 // For example, let's say we have 0x00030, 0x00500, 0x00700, 0x00A00,
854 // 0x20004, and 0x20008 in a .reloc section for x64. The uppermost 4
855 // bits have a type IMAGE_REL_BASED_DIR64 or 0xA. In the section, they
856 // are represented like this:
857 //
858 //   0x00000  -- page address (4 bytes)
859 //   16       -- size of this block (4 bytes)
860 //     0xA030 -- entries (2 bytes each)
861 //     0xA500
862 //     0xA700
863 //     0xAA00
864 //   0x20000  -- page address (4 bytes)
865 //   12       -- size of this block (4 bytes)
866 //     0xA004 -- entries (2 bytes each)
867 //     0xA008
868 //
869 // Usually we have a lot of relocations for each page, so the number of
870 // bytes for one .reloc entry is close to 2 bytes on average.
871 BaserelChunk::BaserelChunk(uint32_t page, Baserel *begin, Baserel *end) {
  // The block header consists of a 4-byte page RVA and a 4-byte block size.
  // Each entry is 2 bytes. The last entry may be padding.
874   data.resize(alignTo((end - begin) * 2 + 8, 4));
875   uint8_t *p = data.data();
876   write32le(p, page);
877   write32le(p + 4, data.size());
878   p += 8;
879   for (Baserel *i = begin; i != end; ++i) {
880     write16le(p, (i->type << 12) | (i->rva - page));
881     p += 2;
882   }
883 }
884 
885 void BaserelChunk::writeTo(uint8_t *buf) const {
886   memcpy(buf, data.data(), data.size());
887 }
888 
889 uint8_t Baserel::getDefaultType() {
890   switch (config->machine) {
891   case AMD64:
892   case ARM64:
893     return IMAGE_REL_BASED_DIR64;
894   case I386:
895   case ARMNT:
896     return IMAGE_REL_BASED_HIGHLOW;
897   default:
898     llvm_unreachable("unknown machine type");
899   }
900 }
901 
902 MergeChunk *MergeChunk::instances[Log2MaxSectionAlignment + 1] = {};
903 
904 MergeChunk::MergeChunk(uint32_t alignment)
905     : builder(StringTableBuilder::RAW, alignment) {
906   setAlignment(alignment);
907 }
908 
909 void MergeChunk::addSection(SectionChunk *c) {
910   assert(isPowerOf2_32(c->getAlignment()));
911   uint8_t p2Align = llvm::Log2_32(c->getAlignment());
912   assert(p2Align < array_lengthof(instances));
913   auto *&mc = instances[p2Align];
914   if (!mc)
915     mc = make<MergeChunk>(c->getAlignment());
916   mc->sections.push_back(c);
917 }
918 
919 void MergeChunk::finalizeContents() {
920   assert(!finalized && "should only finalize once");
921   for (SectionChunk *c : sections)
922     if (c->live)
923       builder.add(toStringRef(c->getContents()));
924   builder.finalize();
925   finalized = true;
926 }
927 
928 void MergeChunk::assignSubsectionRVAs() {
929   for (SectionChunk *c : sections) {
930     if (!c->live)
931       continue;
932     size_t off = builder.getOffset(toStringRef(c->getContents()));
933     c->setRVA(rva + off);
934   }
935 }
936 
937 uint32_t MergeChunk::getOutputCharacteristics() const {
938   return IMAGE_SCN_MEM_READ | IMAGE_SCN_CNT_INITIALIZED_DATA;
939 }
940 
941 size_t MergeChunk::getSize() const {
942   return builder.getSize();
943 }
944 
945 void MergeChunk::writeTo(uint8_t *buf) const {
946   builder.write(buf);
947 }
948 
949 // MinGW specific.
950 size_t AbsolutePointerChunk::getSize() const { return config->wordsize; }
951 
952 void AbsolutePointerChunk::writeTo(uint8_t *buf) const {
953   if (config->is64()) {
954     write64le(buf, value);
955   } else {
956     write32le(buf, value);
957   }
958 }
959 
960 } // namespace coff
961 } // namespace lld
962