//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls and updating the call graph. The decisions of which calls
// are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed. To experiment
/// with disabling it and relying fully on lifetime marker based stack
/// coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);

extern cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats;

static cl::opt<std::string> CGSCCInlineReplayFile(
    "cgscc-inline-replay", cl::init(""), cl::value_desc("filename"),
    cl::desc(
        "Optimization remarks file containing inline remarks to be replayed "
        "by inlining from cgscc inline remarks."),
    cl::Hidden);

static cl::opt<bool> InlineEnablePriorityOrder(
    "inline-enable-priority-order", cl::Hidden, cl::init(false),
    cl::desc("Enable the priority inline order for the inliner"));

LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should
/// always explicitly call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

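/// Map from an array type to the allocas of that type made available for
/// reuse by earlier inlining into the caller. (Descriptive comment added
/// here; the alias itself is undocumented in the original.)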
using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>;

/// Look at all of the allocas that we inlined through this call site. If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs. One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, often allocas are no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type. These are usually not promoted because someone is using a
/// variable index into them. These are also often the most important ones to
/// merge.
///
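/// For example (illustrative only, not from a real test case): if two
/// separate call sites inlined into the same caller each introduce a
/// "%buf = alloca [16 x i32]", the second alloca can be RAUW'd to the first
/// and erased, since the two inlined bodies are assumed never to be live at
/// the same time.
///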
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all. This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
                                     InlinedArrayAllocasTy &InlinedArrayAllocas,
                                     int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if the call site was inlined from
  // some other call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A. Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining. When we process
  // the C call site we don't want to do any alloca merging between X and Y
  // because their scopes are not disjoint. We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas but this isn't likely to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;

  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, E = IFI.StaticAllocas.size(); AllocaNo != E;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one. Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function. Also, AllocasForType can be empty of course!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      Align Align1 = AI->getAlign();
      Align Align2 = AvailableAlloca->getAlign();

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare
      // success!
      LLVM_DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI
                        << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation when a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 > Align2)
        AvailableAlloca->setAlignment(AI->getAlign());

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static InlineResult inlineCallIfPossible(
    CallBase &CB, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CB.getCalledFunction();
  Function *Caller = CB.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  InlineResult IR = InlineFunction(CB, IFI, &AAR, InsertLifetime);
  if (!IR.isSuccess())
    return IR;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return IR; // success
}

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
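///
/// The InlineHistory vector stores (Function, ParentID) pairs forming
/// parent-pointer chains: walking the ParentID links from a call site's
/// history ID visits every function on the inlining path that produced the
/// call site. For example (illustrative): inlining B into A records entry
/// 0 = {B, -1}; the call sites this exposes carry history ID 0 and refuse to
/// re-inline B. If one of them, a call to C, is inlined next, entry
/// 1 = {C, 0} is recorded, and call sites with history ID 1 refuse both C
/// and B.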
static bool inlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool LegacyInlinerBase::doInitialization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.setModuleInfo(CG.getModule());
  return false; // No changes to CallGraph.
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI,
                std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallBase &CB)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallBase *, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        auto *CB = dyn_cast<CallBase>(&I);
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CB || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it. If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (Function *Callee = CB->getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CB, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
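  // (Note the unstable partition below: a call site into the SCC is swapped
  // with the element at the shrinking tail boundary, and I is decremented so
  // the element swapped into slot I is re-examined on the next iteration.)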
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned I = 0; I < FirstCallInSCC; ++I)
    if (Function *F = CallSites[I].first->getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[I--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate in the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    // CallSites may be modified inside, so a ranged for loop cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      auto &P = CallSites[CSi];
      CallBase &CB = *P.first;
      const int InlineHistoryID = P.second;

      Function *Caller = CB.getCaller();
      Function *Callee = CB.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller));

      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the inline path for the function did not include the callee
        // itself. If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        if (InlineHistoryID != -1 &&
            inlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) {
          setInlineRemark(CB, "recursive");
          continue;
        }
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand. With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      auto OIC = shouldInline(CB, GetInlineCost, ORE);
      // If the policy determines that we should not inline this call site,
      // skip it.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size. This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << " -> Deleting dead call: " << CB << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        setInlineRemark(CB, "trivially dead");
        CG[Caller]->removeCallEdgeFor(CB);
        CB.eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get DebugLoc to report. CB will be invalid after Inliner.
        DebugLoc DLoc = CB.getDebugLoc();
        BasicBlock *Block = CB.getParent();

        // Attempt to inline the function.
        using namespace ore;

        InlineResult IR = inlineCallIfPossible(
            CB, InlineInfo, InlinedArrayAllocas, InlineHistoryID,
            InsertLifetime, AARGetter, ImportedFunctionsStats);
        if (!IR.isSuccess()) {
          setInlineRemark(CB, std::string(IR.getFailureReason()) + "; " +
                                  inlineCostStr(*OIC));
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller) << ": "
                   << NV("Reason", IR.getFailureReason());
          });
          continue;
        }
        ++NumInlined;

        emitInlinedInto(ORE, DLoc, Block, *Callee, *Caller, *OIC);

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

#ifndef NDEBUG
          // Make sure there are no duplicates in the inline candidates. This
          // could happen when a callsite is simplified to reuse the return
          // value of another callsite during function cloning; the other
          // callsite will be reconsidered here.
          DenseSet<CallBase *> DbgCallSites;
          for (auto &II : CallSites)
            DbgCallSites.insert(II.first);
#endif

          for (Value *Ptr : InlineInfo.InlinedCalls) {
#ifndef NDEBUG
            assert(DbgCallSites.count(dyn_cast<CallBase>(Ptr)) == 0);
#endif
            CallSites.push_back(
                std::make_pair(dyn_cast<CallBase>(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet, this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << " -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(
      SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime,
      [&](CallBase &CB) { return getInlineCost(CB); }, LegacyAARGetter(*this),
      ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of
/// processing to avoid breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}

/// Remove dead functions that are not included in the DNR (Do Not Remove)
/// list.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the node for the callee to be removed from the call graph and
    // deleted.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program. Insert the dead ones in the FunctionsToRemove set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this, it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

InlineAdvisor &
InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
                        FunctionAnalysisManager &FAM, Module &M) {
  if (OwnedAdvisor)
    return *OwnedAdvisor;

  auto *IAA = MAM.getCachedResult<InlineAdvisorAnalysis>(M);
  if (!IAA) {
    // It should still be possible to run the inliner as a stand-alone SCC pass,
    // for test scenarios. In that case, we default to the
    // DefaultInlineAdvisor, which doesn't need to keep state between SCC pass
    // runs. It also uses just the default InlineParams.
    // In this case, we need to use the provided FAM, which is valid for the
    // duration of the inliner pass, and thus the lifetime of the owned advisor.
    // The one we would get from the MAM can be invalidated as a result of the
    // inliner's activity.
    OwnedAdvisor =
        std::make_unique<DefaultInlineAdvisor>(M, FAM, getInlineParams());

    if (!CGSCCInlineReplayFile.empty())
      OwnedAdvisor = std::make_unique<ReplayInlineAdvisor>(
          M, FAM, M.getContext(), std::move(OwnedAdvisor),
          CGSCCInlineReplayFile,
          /*EmitRemarks=*/true);

    return *OwnedAdvisor;
  }
  assert(IAA->getAdvisor() &&
         "Expected a present InlineAdvisorAnalysis to also have an "
         "InlineAdvisor initialized");
  return *IAA->getAdvisor();
}

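/// Abstract interface over the worklist of inline candidates. Concrete
/// implementations decide the order in which call sites are popped for
/// inlining: DefaultInlineOrder below is FIFO, while PriorityInlineOrder
/// pops the most desirable call site first.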
template <typename T> class InlineOrder {
public:
  using reference = T &;
  using const_reference = const T &;

  virtual ~InlineOrder() {}

  virtual size_t size() = 0;

  virtual void push(const T &Elt) = 0;

  virtual T pop() = 0;

  virtual const_reference front() = 0;

  virtual void erase_if(function_ref<bool(T)> Pred) = 0;

  bool empty() { return !size(); }
};

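/// FIFO inline order: call sites are visited in the order they were pushed.
/// Rather than erasing popped elements, FirstIndex simply advances past them,
/// so pop() is O(1) and leaves the underlying container untouched.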
template <typename T, typename Container = SmallVector<T, 16>>
class DefaultInlineOrder : public InlineOrder<T> {
  using reference = T &;
  using const_reference = const T &;

public:
  size_t size() override { return Calls.size() - FirstIndex; }

  void push(const T &Elt) override { Calls.push_back(Elt); }

  T pop() override {
    assert(size() > 0);
    return Calls[FirstIndex++];
  }

  const_reference front() override {
    assert(size() > 0);
    return Calls[FirstIndex];
  }

  void erase_if(function_ref<bool(T)> Pred) override {
    Calls.erase(std::remove_if(Calls.begin() + FirstIndex, Calls.end(), Pred),
                Calls.end());
  }

private:
  Container Calls;
  size_t FirstIndex = 0;
};

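/// Inlining priority based on callee size: a call site whose callee has fewer
/// IR instructions is considered more desirable to inline.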
class Priority {
public:
  Priority(int Size) : Size(Size) {}

  static bool isMoreDesirable(const Priority &S1, const Priority &S2) {
    return S1.Size < S2.Size;
  }

  static Priority evaluate(CallBase *CB) {
    Function *Callee = CB->getCalledFunction();
    return Priority(Callee->getInstructionCount());
  }

  int Size;
};

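/// Priority-ordered worklist of inline candidates: a binary heap of
/// (call site, priority) pairs, with each call site's inline-history ID kept
/// in a side map so that pop() can hand back the (CallBase *, int) pairs the
/// inliner works with.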
template <typename PriorityT>
class PriorityInlineOrder : public InlineOrder<std::pair<CallBase *, int>> {
  using T = std::pair<CallBase *, int>;
  using HeapT = std::pair<CallBase *, PriorityT>;
  using reference = T &;
  using const_reference = const T &;

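  // std::push_heap and friends build a max-heap with respect to this
  // comparator, so cmp() returns true when P2 is *more* desirable than P1;
  // the most desirable call site therefore surfaces at Heap.front().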
  static bool cmp(const HeapT &P1, const HeapT &P2) {
    return PriorityT::isMoreDesirable(P2.second, P1.second);
  }

  // A call site could become less desirable for inlining because of the size
  // growth from prior inlining into the callee. This method is used to lazily
  // update the desirability of a call site if it's decreasing. It is only
  // called on pop() or front(), not every time the desirability changes. When
  // the desirability of the front call site decreases, an updated one would be
  // pushed right back into the heap. For simplicity, those cases where
  // the desirability of a call site increases are ignored here.
  void adjust() {
    bool Changed = false;
    do {
      CallBase *CB = Heap.front().first;
      const PriorityT PreviousGoodness = Heap.front().second;
      const PriorityT CurrentGoodness = PriorityT::evaluate(CB);
      Changed = PriorityT::isMoreDesirable(PreviousGoodness, CurrentGoodness);
      if (Changed) {
        std::pop_heap(Heap.begin(), Heap.end(), cmp);
        Heap.pop_back();
        Heap.push_back({CB, CurrentGoodness});
        std::push_heap(Heap.begin(), Heap.end(), cmp);
      }
    } while (Changed);
  }

public:
  size_t size() override { return Heap.size(); }

  void push(const T &Elt) override {
    CallBase *CB = Elt.first;
    const int InlineHistoryID = Elt.second;
    const PriorityT Goodness = PriorityT::evaluate(CB);

    Heap.push_back({CB, Goodness});
    std::push_heap(Heap.begin(), Heap.end(), cmp);
    InlineHistoryMap[CB] = InlineHistoryID;
  }

  T pop() override {
    assert(size() > 0);
    adjust();

    CallBase *CB = Heap.front().first;
    T Result = std::make_pair(CB, InlineHistoryMap[CB]);
    InlineHistoryMap.erase(CB);
    std::pop_heap(Heap.begin(), Heap.end(), cmp);
    Heap.pop_back();
    return Result;
  }

  const_reference front() override {
    assert(size() > 0);
    adjust();

    CallBase *CB = Heap.front().first;
    return *InlineHistoryMap.find(CB);
  }

  void erase_if(function_ref<bool(T)> Pred) override {
    auto PredWrapper = [=](HeapT P) -> bool {
      return Pred(std::make_pair(P.first, 0));
    };
    Heap.erase(std::remove_if(Heap.begin(), Heap.end(), PredWrapper),
               Heap.end());
    std::make_heap(Heap.begin(), Heap.end(), cmp);
  }

private:
  SmallVector<HeapT, 16> Heap;
  DenseMap<CallBase *, int> InlineHistoryMap;
};

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const auto &MAMProxy =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG);
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  InlineAdvisor &Advisor = getAdvisor(MAMProxy, FAM, M);
  Advisor.onPassEntry();

  auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(); });

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end. PriorityInlineOrder is optional here; when it is enabled, a
  // smaller callee has a higher priority to be inlined.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can handle gracefully the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they become
  // too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on this
  // bottom-up behavior. As a consequence, with heavily connected *SCCs* of
  // functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the SCC
  // and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a superlinear fashion.
  std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>> Calls;
  if (InlineEnablePriorityOrder)
    Calls = std::make_unique<PriorityInlineOrder<Priority>>();
  else
    Calls = std::make_unique<DefaultInlineOrder<std::pair<CallBase *, int>>>();
  assert(Calls != nullptr && "Expected an initialized InlineOrder");

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned value
    // after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using the instruction sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls->push({CB, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls->empty())
    return PreservedAnalyses::all();

  // Capture updatable variable for the current SCC.
  auto *C = &InitialC;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones that
  // got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Loop forward over all of the calls.
  while (!Calls->empty()) {
    // We expect the calls to typically be batched with sequences of calls that
    // have the same caller, so we first set up some shared infrastructure for
    // this caller. We also do any pruning we can at this layer on the caller
    // alone.
    Function &F = *Calls->front().first->getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C) {
      Calls->pop();
      continue;
    }

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"
                      << " Function size: " << F.getInstructionCount()
                      << "\n");

    auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };

    // Now process as many calls as we have within this caller in the sequence.
    // We bail out as soon as the caller has to change so we can update the
    // call graph and prepare the context of that new caller.
    bool DidInline = false;
    while (!Calls->empty() && Calls->front().first->getCaller() == &F) {
      auto P = Calls->pop();
      CallBase *CB = P.first;
      const int InlineHistoryID = P.second;
      Function &Callee = *CB->getCalledFunction();

      if (InlineHistoryID != -1 &&
          inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        setInlineRemark(*CB, "recursive");
        continue;
      }

      // Check if this inlining may repeat breaking an SCC apart that has
      // already been split once before. In that case, inlining here may
      // trigger infinite inlining, much like is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC iterations
      // and thus hidden from the full inline history.
      if (CG.lookupSCC(*CG.lookup(Callee)) == C &&
          UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive SCC split");
        continue;
      }

      auto Advice = Advisor.getAdvice(*CB, OnlyMandatory);
      // Check whether we want to inline this callsite.
      if (!Advice->isInliningRecommended()) {
        Advice->recordUnattemptedInlining();
        continue;
      }

      // Setup the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          /*cg=*/nullptr, GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      InlineResult IR =
          InlineFunction(*CB, IFI, &FAM.getResult<AAManager>(*CB->getCaller()));
      if (!IR.isSuccess()) {
        Advice->recordUnsuccessfulInlining(IR);
        continue;
      }

      DidInline = true;
      InlinedCallees.insert(&Callee);
      ++NumInlined;

      LLVM_DEBUG(dbgs() << " Size after inlining: "
                        << F.getInstructionCount() << "\n");

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});

        for (CallBase *ICB : reverse(IFI.InlinedCallSites)) {
          Function *NewCallee = ICB->getCalledFunction();
          assert(!(NewCallee && NewCallee->isIntrinsic()) &&
                 "Intrinsic calls should not be tracked.");
          if (!NewCallee) {
            // Try to promote an indirect (virtual) call without waiting for
            // the post-inline cleanup and the next DevirtSCCRepeatedPass
            // iteration because the next iteration may not happen and we may
            // miss inlining it.
            if (tryPromoteCall(*ICB))
              NewCallee = ICB->getCalledFunction();
          }
          if (NewCallee)
            if (!NewCallee->isDeclaration())
              Calls->push({ICB, NewHistoryID});
        }
      }

      // Merge the attributes based on the inlining.
      AttributeFuncs::mergeAttributesForInlining(F, Callee);

      // For local functions, check whether this makes the callee trivially
      // dead. In that case, we can drop the body of the function eagerly
      // which may reduce the number of callers of other functions to one,
      // changing inline cost thresholds.
      bool CalleeWasDeleted = false;
      if (Callee.hasLocalLinkage()) {
        // To check this we also need to nuke any dead constant uses (perhaps
        // made dead by this operation on other functions).
        Callee.removeDeadConstantUsers();
        if (Callee.use_empty() && !CG.isLibFunction(Callee)) {
          Calls->erase_if([&](const std::pair<CallBase *, int> &Call) {
            return Call.first->getCaller() == &Callee;
          });
          // Clear the body and queue the function itself for deletion when we
          // finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(!is_contained(DeadFunctions, &Callee) &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
          CalleeWasDeleted = true;
        }
      }
      if (CalleeWasDeleted)
        Advice->recordInliningWithCalleeDeleted();
      else
        Advice->recordInlining();
    }

    if (!DidInline)
      continue;
    Changed = true;

    // At this point, since we have made changes we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.

    // Inside the update, we also update the FunctionAnalysisManager in the
    // proxy for this particular SCC. We do this as the SCC may have changed and
    // as we're going to mutate this particular function we want to make sure
    // the proxy is in place to forward any invalidation events.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForCGSCCPass(CG, *C, N, AM, UR, FAM);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the SCC
    // mutation, we will revisit this function and potentially re-inline. If we
    // do, and that re-inlining also has the potential to mutate the SCC
    // structure, the infinite inlining problem can manifest through infinite
    // SCC splits and merges. To avoid this, we capture the originating caller
    // node and the SCC containing the call edge. This is a slight
    // over-approximation of the possible inlining decisions that must be
    // avoided, but is relatively efficient to store. We use C != OldC to know
    // when a new SCC is generated and the original SCC may be generated via
    // merge in later iterations.
    //
    // It is also possible that even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged
    // into the same one as itself, and the original SCC will be added into
    // UR.CWorklist again; we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history, we should look for a more efficient way of tracking it.
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the CGSCC
  // pass manager in the process.
  //
  // Note that this walks a pointer set which has non-deterministic order but
  // that is OK as all we do is delete things and add pointers to unordered
  // sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // If the updated SCC was the one containing the deleted function, clear it.
    if (&DeadC == UR.UpdatedC)
      UR.UpdatedC = nullptr;

    // And delete the actual function from the module.
    // The Advisor may use Function pointers to efficiently index various
    // internal maps, e.g. for memoization. Function cleanup passes like
    // argument promotion create new functions. It is possible for a new
    // function to be allocated at the address of a deleted function. We could
    // index using names, but that's inefficient. Alternatively, we let the
    // Advisor free the functions when it sees fit.
    DeadF->getBasicBlockList().clear();
    M.getFunctionList().remove(DeadF);

    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  // Even if we change the IR, we update the core CGSCC data structures and so
  // can preserve the proxy to the function analysis manager.
  PreservedAnalyses PA;
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  return PA;
}

ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params,
                                                   bool MandatoryFirst,
                                                   InliningAdvisorMode Mode,
                                                   unsigned MaxDevirtIterations)
    : Params(Params), Mode(Mode), MaxDevirtIterations(MaxDevirtIterations),
      PM(), MPM() {
  // Run the inliner first. The theory is that we are walking bottom-up and so
  // the callees have already been fully optimized, and we want to inline them
  // into the callers so that our optimizations can reflect that.
  // For PreLinkThinLTO pass, we disable hot-caller heuristic for sample PGO
  // because it makes profile annotation in the backend inaccurate.
  if (MandatoryFirst)
    PM.addPass(InlinerPass(/*OnlyMandatory*/ true));
  PM.addPass(InlinerPass());
}

PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M,
                                                ModuleAnalysisManager &MAM) {
  auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M);
  if (!IAA.tryCreate(Params, Mode, CGSCCInlineReplayFile)) {
    M.getContext().emitError(
        "Could not setup Inlining Advisor for the requested "
        "mode and/or options");
    return PreservedAnalyses::all();
  }

  // We wrap the CGSCC pipeline in a devirtualization repeater. This will try
  // to detect when we devirtualize indirect calls and iterate the SCC passes
  // in that case to try and catch knock-on inlining or function attrs
  // opportunities. Then we add it to the module pipeline by walking the SCCs
  // in postorder (or bottom-up).
  // If MaxDevirtIterations is 0, we just don't use the devirtualization
  // wrapper.
  if (MaxDevirtIterations == 0)
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(PM)));
  else
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
        createDevirtSCCRepeatedPass(std::move(PM), MaxDevirtIterations)));
  MPM.run(M, MAM);

  IAA.clear();

  // The ModulePassManager has already taken care of invalidating analyses.
  return PreservedAnalyses::all();
}