1 //===- ConcatOutputSection.cpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ConcatOutputSection.h"
10 #include "Config.h"
11 #include "OutputSegment.h"
12 #include "SymbolTable.h"
13 #include "Symbols.h"
14 #include "SyntheticSections.h"
15 #include "Target.h"
16 #include "lld/Common/CommonLinkerContext.h"
17 #include "llvm/BinaryFormat/MachO.h"
18 #include "llvm/Support/ScopedPrinter.h"
19 #include "llvm/Support/TimeProfiler.h"
20 
21 using namespace llvm;
22 using namespace llvm::MachO;
23 using namespace lld;
24 using namespace lld::macho;
25 
// All ConcatOutputSections, keyed by (segment name, section name). MapVector
// preserves insertion order, so iterating yields sections in creation order.
MapVector<NamePair, ConcatOutputSection *> macho::concatOutputSections;
27 
28 void ConcatOutputSection::addInput(ConcatInputSection *input) {
29   assert(input->parent == this);
30   if (inputs.empty()) {
31     align = input->align;
32     flags = input->getFlags();
33   } else {
34     align = std::max(align, input->align);
35     finalizeFlags(input);
36   }
37   inputs.push_back(input);
38 }
39 
40 // Branch-range extension can be implemented in two ways, either through ...
41 //
// (1) Branch islands: Single branch instructions (also of limited range),
//     that might be chained in multiple hops to reach the desired
//     destination. On ARM64, as many as 16 branch islands may be needed to
//     hop between opposite ends of a 2 GiB program. LD64 uses branch
//     islands exclusively, even when it needs excessive hops.
47 //
48 // (2) Thunks: Instruction(s) to load the destination address into a scratch
49 //     register, followed by a register-indirect branch. Thunks are
50 //     constructed to reach any arbitrary address, so need not be
51 //     chained. Although thunks need not be chained, a program might need
52 //     multiple thunks to the same destination distributed throughout a large
53 //     program so that all call sites can have one within range.
54 //
// The optimal approach is to mix islands for destinations within two hops,
// and use thunks for destinations at greater distance. For now, we only
// implement thunks. TODO: Add support for branch islands.
58 //
59 // Internally -- as expressed in LLD's data structures -- a
60 // branch-range-extension thunk comprises ...
61 //
62 // (1) new Defined privateExtern symbol for the thunk named
63 //     <FUNCTION>.thunk.<SEQUENCE>, which references ...
64 // (2) new InputSection, which contains ...
65 // (3.1) new data for the instructions to load & branch to the far address +
66 // (3.2) new Relocs on instructions to load the far address, which reference ...
67 // (4.1) existing Defined extern symbol for the real function in __text, or
68 // (4.2) existing DylibSymbol for the real function in a dylib
69 //
70 // Nearly-optimal thunk-placement algorithm features:
71 //
72 // * Single pass: O(n) on the number of call sites.
73 //
74 // * Accounts for the exact space overhead of thunks - no heuristics
75 //
76 // * Exploits the full range of call instructions - forward & backward
77 //
78 // Data:
79 //
80 // * DenseMap<Symbol *, ThunkInfo> thunkMap: Maps the function symbol
81 //   to its thunk bookkeeper.
82 //
83 // * struct ThunkInfo (bookkeeper): Call instructions have limited range, and
84 //   distant call sites might be unable to reach the same thunk, so multiple
85 //   thunks are necessary to serve all call sites in a very large program. A
86 //   thunkInfo stores state for all thunks associated with a particular
87 //   function: (a) thunk symbol, (b) input section containing stub code, and
88 //   (c) sequence number for the active thunk incarnation. When an old thunk
89 //   goes out of range, we increment the sequence number and create a new
90 //   thunk named <FUNCTION>.thunk.<SEQUENCE>.
91 //
92 // * A thunk incarnation comprises (a) private-extern Defined symbol pointing
93 //   to (b) an InputSection holding machine instructions (similar to a MachO
94 //   stub), and (c) Reloc(s) that reference the real function for fixing-up
95 //   the stub code.
96 //
97 // * std::vector<InputSection *> MergedInputSection::thunks: A vector parallel
98 //   to the inputs vector. We store new thunks via cheap vector append, rather
99 //   than costly insertion into the inputs vector.
100 //
101 // Control Flow:
102 //
103 // * During address assignment, MergedInputSection::finalize() examines call
104 //   sites by ascending address and creates thunks.  When a function is beyond
105 //   the range of a call site, we need a thunk. Place it at the largest
106 //   available forward address from the call site. Call sites increase
107 //   monotonically and thunks are always placed as far forward as possible;
108 //   thus, we place thunks at monotonically increasing addresses. Once a thunk
109 //   is placed, it and all previous input-section addresses are final.
110 //
111 // * ConcatInputSection::finalize() and ConcatInputSection::writeTo() merge
112 //   the inputs and thunks vectors (both ordered by ascending address), which
113 //   is simple and cheap.
114 
// Bookkeeping for branch-range-extension thunks, keyed by the function symbol
// each thunk targets. See the "Data:" section of the comment above.
DenseMap<Symbol *, ThunkInfo> lld::macho::thunkMap;
116 
117 // Determine whether we need thunks, which depends on the target arch -- RISC
118 // (i.e., ARM) generally does because it has limited-range branch/call
119 // instructions, whereas CISC (i.e., x86) generally doesn't. RISC only needs
120 // thunks for programs so large that branch source & destination addresses
121 // might differ more than the range of branch instruction(s).
bool ConcatOutputSection::needsThunks() const {
  if (!target->usesThunks())
    return false;
  // Compute the end address of this section as if all inputs were laid out
  // back-to-back, each at its required alignment.
  uint64_t isecAddr = addr;
  for (ConcatInputSection *isec : inputs)
    isecAddr = alignTo(isecAddr, isec->align) + isec->getSize();
  // If the whole section plus the trailing __stubs fits inside the narrower
  // of the backward/forward branch ranges, every call site can reach every
  // destination directly, so no thunks are needed.
  if (isecAddr - addr + in.stubs->getSize() <=
      std::min(target->backwardBranchRange, target->forwardBranchRange))
    return false;
  // Yes, this program is large enough to need thunks.
  // NOTE: This walk has a deliberate side effect: it seeds thunkMap and the
  // per-isec hasCallSites bits that finalize() relies on.
  for (ConcatInputSection *isec : inputs) {
    for (Reloc &r : isec->relocs) {
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      auto *sym = r.referent.get<Symbol *>();
      // Pre-populate the thunkMap and memoize call site counts for every
      // InputSection and ThunkInfo. We do this for the benefit of
      // ConcatOutputSection::estimateStubsInRangeVA()
      ThunkInfo &thunkInfo = thunkMap[sym];
      // Knowing ThunkInfo call site count will help us know whether or not we
      // might need to create more for this referent at the time we are
      // estimating distance to __stubs in estimateStubsInRangeVA().
      ++thunkInfo.callSiteCount;
      // We can avoid work on InputSections that have no BRANCH relocs.
      isec->hasCallSites = true;
    }
  }
  return true;
}
151 
152 // Since __stubs is placed after __text, we must estimate the address
153 // beyond which stubs are within range of a simple forward branch.
154 // This is called exactly once, when the last input section has been finalized.
uint64_t ConcatOutputSection::estimateStubsInRangeVA(size_t callIdx) const {
  // Tally the functions which still have call sites remaining to process,
  // which yields the maximum number of thunks we might yet place.
  size_t maxPotentialThunks = 0;
  for (auto &tp : thunkMap) {
    ThunkInfo &ti = tp.second;
    // This overcounts: Only sections that are in forward jump range from the
    // currently-active section get finalized, and all input sections are
    // finalized when estimateStubsInRangeVA() is called. So only backward
    // jumps will need thunks, but we count all jumps.
    if (ti.callSitesUsed < ti.callSiteCount)
      maxPotentialThunks += 1;
  }
  // Tally the total size of input sections remaining to process.
  uint64_t isecVA = inputs[callIdx]->getVA();
  uint64_t isecEnd = isecVA;
  for (size_t i = callIdx; i < inputs.size(); i++) {
    InputSection *isec = inputs[i];
    isecEnd = alignTo(isecEnd, isec->align) + isec->getSize();
  }
  // Estimate the address after which call sites can safely call stubs
  // directly rather than through intermediary thunks.
  uint64_t forwardBranchRange = target->forwardBranchRange;
  assert(isecEnd > forwardBranchRange &&
         "should not run thunk insertion if all code fits in jump range");
  assert(isecEnd - isecVA <= forwardBranchRange &&
         "should only finalize sections in jump range");
  // Worst case: all remaining text, plus one thunk per still-pending
  // function, plus all of __stubs must fit within forwardBranchRange of the
  // call site; solve for the lowest call-site VA where that holds.
  uint64_t stubsInRangeVA = isecEnd + maxPotentialThunks * target->thunkSize +
                            in.stubs->getSize() - forwardBranchRange;
  log("thunks = " + std::to_string(thunkMap.size()) +
      ", potential = " + std::to_string(maxPotentialThunks) +
      ", stubs = " + std::to_string(in.stubs->getSize()) + ", isecVA = " +
      to_hexString(isecVA) + ", threshold = " + to_hexString(stubsInRangeVA) +
      ", isecEnd = " + to_hexString(isecEnd) +
      ", tail = " + to_hexString(isecEnd - isecVA) +
      ", slop = " + to_hexString(forwardBranchRange - (isecEnd - isecVA)));
  return stubsInRangeVA;
}
193 
// Assign final addresses to all input sections, inserting branch-range
// extension thunks where a BRANCH reloc cannot reach its referent. See the
// long "Control Flow" comment above for the overall algorithm.
void ConcatOutputSection::finalize() {
  uint64_t isecAddr = addr;
  uint64_t isecFileOff = fileOff;
  // Assign the next available address & file offset to one input section (or
  // thunk) and mark it final; addresses are never revisited afterward.
  auto finalizeOne = [&](ConcatInputSection *isec) {
    isecAddr = alignTo(isecAddr, isec->align);
    isecFileOff = alignTo(isecFileOff, isec->align);
    isec->outSecOff = isecAddr - addr;
    isec->isFinal = true;
    isecAddr += isec->getSize();
    isecFileOff += isec->getFileSize();
  };

  // Fast path: everything is in branch range, so just lay out all inputs in
  // order with no reloc inspection.
  if (!needsThunks()) {
    for (ConcatInputSection *isec : inputs)
      finalizeOne(isec);
    size = isecAddr - addr;
    fileSize = isecFileOff - fileOff;
    return;
  }

  uint64_t forwardBranchRange = target->forwardBranchRange;
  uint64_t backwardBranchRange = target->backwardBranchRange;
  uint64_t stubsInRangeVA = TargetInfo::outOfRangeVA;
  size_t thunkSize = target->thunkSize;
  // Statistics for the log() call at the end of this function.
  size_t relocCount = 0;
  size_t callSiteCount = 0;
  size_t thunkCallCount = 0;
  size_t thunkCount = 0;

  // Walk all sections in order. Finalize all sections that are less than
  // forwardBranchRange in front of it.
  // isecVA is the address of the current section.
  // isecAddr is the start address of the first non-finalized section.

  // inputs[finalIdx] is for finalization (address-assignment)
  size_t finalIdx = 0;
  // Kick-off by ensuring that the first input section has an address
  for (size_t callIdx = 0, endIdx = inputs.size(); callIdx < endIdx;
       ++callIdx) {
    if (finalIdx == callIdx)
      finalizeOne(inputs[finalIdx++]);
    ConcatInputSection *isec = inputs[callIdx];
    assert(isec->isFinal);
    uint64_t isecVA = isec->getVA();

    // Assign addresses up-to the forward branch-range limit.
    // Every call instruction needs a small number of bytes (on Arm64: 4),
    // and each inserted thunk needs a slightly larger number of bytes
    // (on Arm64: 12). If a section starts with a branch instruction and
    // contains several branch instructions in succession, then the distance
    // from the current position to the position where the thunks are inserted
    // grows. So leave room for a bunch of thunks.
    unsigned slop = 256 * thunkSize;
    while (finalIdx < endIdx && isecAddr + inputs[finalIdx]->getSize() <
                                    isecVA + forwardBranchRange - slop)
      finalizeOne(inputs[finalIdx++]);

    // Sections without BRANCH relocs (flagged by needsThunks()) need no
    // further per-reloc processing.
    if (!isec->hasCallSites)
      continue;

    if (finalIdx == endIdx && stubsInRangeVA == TargetInfo::outOfRangeVA) {
      // When we have finalized all input sections, __stubs (destined
      // to follow __text) comes within range of forward branches and
      // we can estimate the threshold address after which we can
      // reach any stub with a forward branch. Note that although it
      // sits in the middle of a loop, this code executes only once.
      // It is in the loop because we need to call it at the proper
      // time: the earliest call site from which the end of __text
      // (and start of __stubs) comes within range of a forward branch.
      stubsInRangeVA = estimateStubsInRangeVA(callIdx);
    }
    // Process relocs by ascending address, i.e., ascending offset within isec
    std::vector<Reloc> &relocs = isec->relocs;
    // FIXME: This property does not hold for object files produced by ld64's
    // `-r` mode.
    assert(is_sorted(relocs,
                     [](Reloc &a, Reloc &b) { return a.offset > b.offset; }));
    for (Reloc &r : reverse(relocs)) {
      ++relocCount;
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      ++callSiteCount;
      // Calculate branch reachability boundaries
      uint64_t callVA = isecVA + r.offset;
      // Clamp at zero: the backward range may exceed the call's own address.
      uint64_t lowVA =
          backwardBranchRange < callVA ? callVA - backwardBranchRange : 0;
      uint64_t highVA = callVA + forwardBranchRange;
      // Calculate our call referent address
      auto *funcSym = r.referent.get<Symbol *>();
      ThunkInfo &thunkInfo = thunkMap[funcSym];
      // The referent is not reachable, so we need to use a thunk ...
      if (funcSym->isInStubs() && callVA >= stubsInRangeVA) {
        assert(callVA != TargetInfo::outOfRangeVA);
        // ... Oh, wait! We are close enough to the end that __stubs
        // are now within range of a simple forward branch.
        continue;
      }
      uint64_t funcVA = funcSym->resolveBranchVA();
      ++thunkInfo.callSitesUsed;
      if (lowVA <= funcVA && funcVA <= highVA) {
        // The referent is reachable with a simple call instruction.
        continue;
      }
      ++thunkInfo.thunkCallCount;
      ++thunkCallCount;
      // If an existing thunk is reachable, use it ...
      if (thunkInfo.sym) {
        uint64_t thunkVA = thunkInfo.isec->getVA();
        if (lowVA <= thunkVA && thunkVA <= highVA) {
          r.referent = thunkInfo.sym;
          continue;
        }
      }
      // ... otherwise, create a new thunk.
      if (isecAddr > highVA) {
        // There were too many consecutive branch instructions for `slop`
        // above. If you hit this: For the current algorithm, just bumping up
        // slop above and trying again is probably simplest. (See also PR51578
        // comment 5).
        fatal(Twine(__FUNCTION__) + ": FIXME: thunk range overrun");
      }
      thunkInfo.isec =
          make<ConcatInputSection>(isec->getSegName(), isec->getName());
      thunkInfo.isec->parent = this;

      // This code runs after dead code removal. Need to set the `live` bit
      // on the thunk isec so that asserts that check that only live sections
      // get written are happy.
      thunkInfo.isec->live = true;

      StringRef thunkName = saver().save(funcSym->getName() + ".thunk." +
                                         std::to_string(thunkInfo.sequence++));
      r.referent = thunkInfo.sym = symtab->addDefined(
          thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0,
          /*size=*/thunkSize, /*isWeakDef=*/false, /*isPrivateExtern=*/true,
          /*isThumb=*/false, /*isReferencedDynamically=*/false,
          /*noDeadStrip=*/false, /*isWeakDefCanBeHidden=*/false);
      thunkInfo.sym->used = true;
      target->populateThunk(thunkInfo.isec, funcSym);
      // Place the thunk at the current frontier of address assignment; it and
      // everything before it are now final.
      finalizeOne(thunkInfo.isec);
      thunks.push_back(thunkInfo.isec);
      ++thunkCount;
    }
  }
  size = isecAddr - addr;
  fileSize = isecFileOff - fileOff;

  log("thunks for " + parent->name + "," + name +
      ": funcs = " + std::to_string(thunkMap.size()) +
      ", relocs = " + std::to_string(relocCount) +
      ", all calls = " + std::to_string(callSiteCount) +
      ", thunk calls = " + std::to_string(thunkCallCount) +
      ", thunks = " + std::to_string(thunkCount));
}
348 
349 void ConcatOutputSection::writeTo(uint8_t *buf) const {
350   // Merge input sections from thunk & ordinary vectors
351   size_t i = 0, ie = inputs.size();
352   size_t t = 0, te = thunks.size();
353   while (i < ie || t < te) {
354     while (i < ie && (t == te || inputs[i]->empty() ||
355                       inputs[i]->outSecOff < thunks[t]->outSecOff)) {
356       inputs[i]->writeTo(buf + inputs[i]->outSecOff);
357       ++i;
358     }
359     while (t < te && (i == ie || thunks[t]->outSecOff < inputs[i]->outSecOff)) {
360       thunks[t]->writeTo(buf + thunks[t]->outSecOff);
361       ++t;
362     }
363   }
364 }
365 
366 void ConcatOutputSection::finalizeFlags(InputSection *input) {
367   switch (sectionType(input->getFlags())) {
368   default /*type-unspec'ed*/:
369     // FIXME: Add additional logic here when supporting emitting obj files.
370     break;
371   case S_4BYTE_LITERALS:
372   case S_8BYTE_LITERALS:
373   case S_16BYTE_LITERALS:
374   case S_CSTRING_LITERALS:
375   case S_ZEROFILL:
376   case S_LAZY_SYMBOL_POINTERS:
377   case S_MOD_TERM_FUNC_POINTERS:
378   case S_THREAD_LOCAL_REGULAR:
379   case S_THREAD_LOCAL_ZEROFILL:
380   case S_THREAD_LOCAL_VARIABLES:
381   case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
382   case S_THREAD_LOCAL_VARIABLE_POINTERS:
383   case S_NON_LAZY_SYMBOL_POINTERS:
384   case S_SYMBOL_STUBS:
385     flags |= input->getFlags();
386     break;
387   }
388 }
389 
390 ConcatOutputSection *
391 ConcatOutputSection::getOrCreateForInput(const InputSection *isec) {
392   NamePair names = maybeRenameSection({isec->getSegName(), isec->getName()});
393   ConcatOutputSection *&osec = concatOutputSections[names];
394   if (!osec)
395     osec = make<ConcatOutputSection>(names.second);
396   return osec;
397 }
398 
399 NamePair macho::maybeRenameSection(NamePair key) {
400   auto newNames = config->sectionRenameMap.find(key);
401   if (newNames != config->sectionRenameMap.end())
402     return newNames->second;
403   return key;
404 }
405