//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls and updating the call graph. The decisions of which calls
// are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ReplayInlineAdvisor.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted,
          "Number of functions deleted because all callers were found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed. To experiment
/// with disabling it and relying fully on lifetime marker based stack
/// coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);

static cl::opt<int> IntraSCCCostMultiplier(
    "intra-scc-cost-multiplier", cl::init(2), cl::Hidden,
    cl::desc(
        "Cost multiplier to multiply onto inlined call sites where the "
        "new call was previously an intra-SCC call (not relevant when the "
        "original call was already intra-SCC). This can accumulate over "
        "multiple inlinings (e.g. if a call site already had a cost "
        "multiplier and one of its inlined calls was also subject to "
        "this, the inlined call would have the original multiplier "
        "multiplied by intra-scc-cost-multiplier). This is to prevent tons of "
        "inlining through a child SCC which can cause terrible compile times"));

/// A flag for testing, so we can print the content of the advisor when running
/// it as part of the default (e.g. -O3) pipeline.
static cl::opt<bool> KeepAdvisorForPrinting("keep-inline-advisor-for-printing",
                                            cl::init(false), cl::Hidden);

/// Allows printing the contents of the advisor after each SCC inliner pass.
static cl::opt<bool>
    EnablePostSCCAdvisorPrinting("enable-scc-inline-advisor-printing",
                                 cl::init(false), cl::Hidden);

namespace llvm {
extern cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats;
}

static cl::opt<std::string> CGSCCInlineReplayFile(
    "cgscc-inline-replay", cl::init(""), cl::value_desc("filename"),
    cl::desc(
        "Optimization remarks file containing inline remarks to be replayed "
        "by cgscc inlining."),
    cl::Hidden);

static cl::opt<ReplayInlinerSettings::Scope> CGSCCInlineReplayScope(
    "cgscc-inline-replay-scope",
    cl::init(ReplayInlinerSettings::Scope::Function),
    cl::values(clEnumValN(ReplayInlinerSettings::Scope::Function, "Function",
                          "Replay on functions that have remarks associated "
                          "with them (default)"),
               clEnumValN(ReplayInlinerSettings::Scope::Module, "Module",
                          "Replay on the entire module")),
    cl::desc("Whether inline replay should be applied to the entire "
             "Module or just the Functions (default) that are present as "
             "callers in remarks during cgscc inlining."),
    cl::Hidden);

static cl::opt<ReplayInlinerSettings::Fallback> CGSCCInlineReplayFallback(
    "cgscc-inline-replay-fallback",
    cl::init(ReplayInlinerSettings::Fallback::Original),
    cl::values(
        clEnumValN(ReplayInlinerSettings::Fallback::Original, "Original",
                   "All decisions not in replay are sent to the original "
                   "advisor (default)"),
        clEnumValN(ReplayInlinerSettings::Fallback::AlwaysInline,
                   "AlwaysInline", "All decisions not in replay are inlined"),
        clEnumValN(ReplayInlinerSettings::Fallback::NeverInline, "NeverInline",
                   "All decisions not in replay are not inlined")),
    cl::desc(
        "How cgscc inline replay treats sites that don't come from the replay. "
        "Original: defers to original advisor, AlwaysInline: inline all sites "
        "not in replay, NeverInline: inline no sites not in replay"),
    cl::Hidden);

static cl::opt<CallSiteFormat::Format> CGSCCInlineReplayFormat(
    "cgscc-inline-replay-format",
    cl::init(CallSiteFormat::Format::LineColumnDiscriminator),
    cl::values(
        clEnumValN(CallSiteFormat::Format::Line, "Line", "<Line Number>"),
        clEnumValN(CallSiteFormat::Format::LineColumn, "LineColumn",
                   "<Line Number>:<Column Number>"),
        clEnumValN(CallSiteFormat::Format::LineDiscriminator,
                   "LineDiscriminator", "<Line Number>.<Discriminator>"),
        clEnumValN(CallSiteFormat::Format::LineColumnDiscriminator,
                   "LineColumnDiscriminator",
                   "<Line Number>:<Column Number>.<Discriminator> (default)")),
    cl::desc("How cgscc inline replay file is formatted"), cl::Hidden);
LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should
/// always explicitly call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

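/// Map from array types to the inlined (static) allocas of that type which are
/// available for reuse in the caller.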
using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>;

/// Look at all of the allocas that we inlined through this call site. If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs. One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, often allocas are no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type. These are usually not promoted because someone is using a
/// variable index into them. These are also often the most important ones to
/// merge.
///
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all. This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
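///
/// As an illustrative (hypothetical) IR sketch: after inlining two callees
/// into the same caller, the disjoint-lifetime array allocas
///   %buf1 = alloca [16 x i32]   ; from the first inlined callee
///   %buf2 = alloca [16 x i32]   ; from the second inlined callee
/// can share one stack slot, so %buf2 is RAUW'd to %buf1 and erased.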
static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
                                     InlinedArrayAllocasTy &InlinedArrayAllocas,
                                     int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if the call site was inlined from
  // some other call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A. Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining. When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint. We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas but this isn't likely to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;

  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, E = IFI.StaticAllocas.size(); AllocaNo != E;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one. Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function. Also, AllocasForType can of course be empty!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      Align Align1 = AI->getAlign();
      Align Align2 = AvailableAlloca->getAlign();

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare
      // success!
      LLVM_DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI
                        << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation when a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 > Align2)
        AvailableAlloca->setAlignment(AI->getAlign());

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static InlineResult inlineCallIfPossible(
    CallBase &CB, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CB.getCalledFunction();
  Function *Caller = CB.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  InlineResult IR =
      InlineFunction(CB, IFI,
                     /*MergeAttributes=*/true, &AAR, InsertLifetime);
  if (!IR.isSuccess())
    return IR;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return IR; // success
}

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
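///
/// For example, with InlineHistory = {{A, -1}, {B, 0}}, an ID of 1 denotes a
/// call site created by inlining B, which was itself exposed by inlining A;
/// the chain 1 -> 0 -> -1 is what gets walked here.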
static bool inlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool LegacyInlinerBase::doInitialization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.setModuleInfo(CG.getModule());
  return false; // No changes to CallGraph.
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI,
                std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallBase &CB)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallBase *, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
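  // For example, inlining callee B into a top-level call site (history ID -1)
  // appends {B, -1} at index 0, and any call sites exposed by that inlining
  // are queued with history ID 0.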
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        auto *CB = dyn_cast<CallBase>(&I);
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CB || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it. If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (Function *Callee = CB->getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CB, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
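  // Note that after each swap the element newly placed at index I has not yet
  // been examined, so I is decremented to revisit that slot on the next
  // iteration.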
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned I = 0; I < FirstCallInSCC; ++I)
    if (Function *F = CallSites[I].first->getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[I--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate in this outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    // CallSites may be modified while we iterate, so a range-based for loop
    // cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      auto &P = CallSites[CSi];
      CallBase &CB = *P.first;
      const int InlineHistoryID = P.second;

      Function *Caller = CB.getCaller();
      Function *Callee = CB.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller));

      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the inline path for the call did not include the callee itself.
        // If it did, we would be recursively inlining the same function, which
        // would expose the same call sites again and cause us to inline
        // infinitely.
        if (InlineHistoryID != -1 &&
            inlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) {
          setInlineRemark(CB, "recursive");
          continue;
        }
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand. With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      auto OIC = shouldInline(CB, GetInlineCost, ORE);
      // If the policy determines that we should not inline this call site,
      // move on to the next one.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size. This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead call: " << CB << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        setInlineRemark(CB, "trivially dead");
        CG[Caller]->removeCallEdgeFor(CB);
        CB.eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get DebugLoc to report. CB will be invalid after Inliner.
        DebugLoc DLoc = CB.getDebugLoc();
        BasicBlock *Block = CB.getParent();

        // Attempt to inline the function.
        using namespace ore;

        InlineResult IR = inlineCallIfPossible(
            CB, InlineInfo, InlinedArrayAllocas, InlineHistoryID,
            InsertLifetime, AARGetter, ImportedFunctionsStats);
        if (!IR.isSuccess()) {
          setInlineRemark(CB, std::string(IR.getFailureReason()) + "; " +
                                  inlineCostStr(*OIC));
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller) << ": "
                   << NV("Reason", IR.getFailureReason());
          });
          continue;
        }
        ++NumInlined;

        emitInlinedIntoBasedOnCost(ORE, DLoc, Block, *Callee, *Caller, *OIC);

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

#ifndef NDEBUG
          // Make sure there are no duplicates in the inline candidates. This
          // could happen when a call site is simplified to reuse the return
          // value of another call site during function cloning, in which case
          // the other call site will be reconsidered here.
          DenseSet<CallBase *> DbgCallSites;
          for (auto &II : CallSites)
            DbgCallSites.insert(II.first);
#endif

          for (Value *Ptr : InlineInfo.InlinedCalls) {
#ifndef NDEBUG
            assert(DbgCallSites.count(dyn_cast<CallBase>(Ptr)) == 0);
#endif
            CallSites.push_back(
                std::make_pair(dyn_cast<CallBase>(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet, as this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(
      SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime,
      [&](CallBase &CB) { return getInlineCost(CB); }, LegacyAARGetter(*this),
      ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of
/// processing to avoid breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}

/// Remove dead functions that are not included in DNR (Do Not Remove) list.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the node to be removed from the call graph and deleted.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program. Insert the dead ones in the FunctionsToRemove set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that we are iterating in a non-stable order here, but that is OK:
  // it doesn't matter which order the functions are deleted in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

InlineAdvisor &
InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
                        FunctionAnalysisManager &FAM, Module &M) {
  if (OwnedAdvisor)
    return *OwnedAdvisor;

  auto *IAA = MAM.getCachedResult<InlineAdvisorAnalysis>(M);
  if (!IAA) {
    // It should still be possible to run the inliner as a stand-alone SCC pass,
    // for test scenarios. In that case, we default to the
    // DefaultInlineAdvisor, which doesn't need to keep state between SCC pass
    // runs. It also uses just the default InlineParams.
    // In this case, we need to use the provided FAM, which is valid for the
    // duration of the inliner pass, and thus the lifetime of the owned advisor.
    // The one we would get from the MAM can be invalidated as a result of the
    // inliner's activity.
    OwnedAdvisor = std::make_unique<DefaultInlineAdvisor>(
        M, FAM, getInlineParams(),
        InlineContext{LTOPhase, InlinePass::CGSCCInliner});

    if (!CGSCCInlineReplayFile.empty())
      OwnedAdvisor = getReplayInlineAdvisor(
          M, FAM, M.getContext(), std::move(OwnedAdvisor),
          ReplayInlinerSettings{CGSCCInlineReplayFile,
                                CGSCCInlineReplayScope,
                                CGSCCInlineReplayFallback,
                                {CGSCCInlineReplayFormat}},
          /*EmitRemarks=*/true,
          InlineContext{LTOPhase, InlinePass::ReplayCGSCCInliner});

    return *OwnedAdvisor;
  }
  assert(IAA->getAdvisor() &&
         "Expected a present InlineAdvisorAnalysis to also have an "
         "InlineAdvisor initialized");
  return *IAA->getAdvisor();
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const auto &MAMProxy =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG);
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  InlineAdvisor &Advisor = getAdvisor(MAMProxy, FAM, M);
  Advisor.onPassEntry(&InitialC);

  auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(&InitialC); });

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end. Optionally, a PriorityInlineOrder can be used here, in which case
  // smaller callees get a higher priority to be inlined.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can handle gracefully the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they become
  // too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on this
  // bottom-up behavior. As a consequence, with heavily connected *SCCs* of
  // functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the SCC
  // and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a superlinear fashion.
  SmallVector<std::pair<CallBase *, int>, 16> Calls;

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned value
    // after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using instructions sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls.push_back({CB, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls.empty())
    return PreservedAnalyses::all();

  // Capture updatable variable for the current SCC.
  auto *C = &InitialC;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones that
  // got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Track potentially dead non-local functions with comdats to see if they can
  // be deleted as a batch after inlining.
  SmallVector<Function *, 4> DeadFunctionsInComdats;

  // Loop forward over all of the calls. Note that we cannot cache the size as
  // inlining can introduce new calls that need to be processed.
  for (int I = 0; I < (int)Calls.size(); ++I) {
    // We expect the calls to typically be batched with sequences of calls that
    // have the same caller, so we first set up some shared infrastructure for
    // this caller. We also do any pruning we can at this layer on the caller
    // alone.
    Function &F = *Calls[I].first->getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C)
      continue;

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"
                      << "    Function size: " << F.getInstructionCount()
                      << "\n");

    auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };

    // Now process as many calls as we have within this caller in the sequence.
    // We bail out as soon as the caller has to change so we can update the
    // call graph and prepare the context of that new caller.
    bool DidInline = false;
    for (; I < (int)Calls.size() && Calls[I].first->getCaller() == &F; ++I) {
      auto &P = Calls[I];
      CallBase *CB = P.first;
      const int InlineHistoryID = P.second;
      Function &Callee = *CB->getCalledFunction();

      if (InlineHistoryID != -1 &&
          inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        LLVM_DEBUG(dbgs() << "Skipping inlining due to history: " << F.getName()
                          << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive");
        continue;
      }

      // Check if this inlining may repeat breaking an SCC apart that has
      // already been split once before. In that case, inlining here may
      // trigger infinite inlining, much like is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC iterations
      // and thus hidden from the full inline history.
      LazyCallGraph::SCC *CalleeSCC = CG.lookupSCC(*CG.lookup(Callee));
      if (CalleeSCC == C && UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive SCC split");
        continue;
      }

      std::unique_ptr<InlineAdvice> Advice =
          Advisor.getAdvice(*CB, OnlyMandatory);

      // Check whether we want to inline this callsite.
      if (!Advice)
        continue;

      if (!Advice->isInliningRecommended()) {
        Advice->recordUnattemptedInlining();
        continue;
      }

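      // Scale for any intra-SCC cost multiplier accumulated on this call site
      // by earlier inlinings (see IntraSCCCostMultiplier above); an absent
      // attribute means a multiplier of 1.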
      int CBCostMult =
          getStringFnAttrAsInt(
              *CB, InlineConstants::FunctionInlineCostMultiplierAttributeName)
              .value_or(1);

      // Set up the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          /*cg=*/nullptr, GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      InlineResult IR =
          InlineFunction(*CB, IFI, /*MergeAttributes=*/true,
                         &FAM.getResult<AAManager>(*CB->getCaller()));
      if (!IR.isSuccess()) {
        Advice->recordUnsuccessfulInlining(IR);
        continue;
      }

      DidInline = true;
      InlinedCallees.insert(&Callee);
      ++NumInlined;

      LLVM_DEBUG(dbgs() << "    Size after inlining: "
                        << F.getInstructionCount() << "\n");

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});

        for (CallBase *ICB : reverse(IFI.InlinedCallSites)) {
          Function *NewCallee = ICB->getCalledFunction();
          assert(!(NewCallee && NewCallee->isIntrinsic()) &&
                 "Intrinsic calls should not be tracked.");
          if (!NewCallee) {
            // Try to promote an indirect (virtual) call without waiting for
            // the post-inline cleanup and the next DevirtSCCRepeatedPass
            // iteration because the next iteration may not happen and we may
            // miss inlining it.
            if (tryPromoteCall(*ICB))
              NewCallee = ICB->getCalledFunction();
          }
          if (NewCallee) {
            if (!NewCallee->isDeclaration()) {
              Calls.push_back({ICB, NewHistoryID});
              // Continually inlining through an SCC can result in huge compile
              // times and bloated code since we arbitrarily stop at some point
              // when the inliner decides it's not profitable to inline anymore.
              // We attempt to mitigate this by making these calls exponentially
              // more expensive.
              // This doesn't apply to calls in the same SCC since if we do
              // inline through the SCC the function will end up being
              // self-recursive which the inliner bails out on, and inlining
              // within an SCC is necessary for performance.
              if (CalleeSCC != C &&
                  CalleeSCC == CG.lookupSCC(CG.get(*NewCallee))) {
                Attribute NewCBCostMult = Attribute::get(
                    M.getContext(),
                    InlineConstants::FunctionInlineCostMultiplierAttributeName,
                    itostr(CBCostMult * IntraSCCCostMultiplier));
                ICB->addFnAttr(NewCBCostMult);
              }
            }
          }
        }
      }

      // For local functions or discardable functions without comdats, check
      // whether this makes the callee trivially dead. In that case, we can drop
      // the body of the function eagerly which may reduce the number of callers
      // of other functions to one, changing inline cost thresholds. Non-local
      // discardable functions with comdats are checked later on.
      bool CalleeWasDeleted = false;
      if (Callee.isDiscardableIfUnused() && Callee.hasZeroLiveUses() &&
          !CG.isLibFunction(Callee)) {
        if (Callee.hasLocalLinkage() || !Callee.hasComdat()) {
          Calls.erase(
              std::remove_if(Calls.begin() + I + 1, Calls.end(),
                             [&](const std::pair<CallBase *, int> &Call) {
                               return Call.first->getCaller() == &Callee;
                             }),
              Calls.end());

          // Clear the body and queue the function itself for deletion when we
          // finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(!is_contained(DeadFunctions, &Callee) &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
          CalleeWasDeleted = true;
        } else {
          DeadFunctionsInComdats.push_back(&Callee);
        }
      }
      if (CalleeWasDeleted)
        Advice->recordInliningWithCalleeDeleted();
      else
        Advice->recordInlining();
    }

    // Back the call index up by one to put us in a good position to go around
    // the outer loop.
    --I;

    if (!DidInline)
      continue;
    Changed = true;

    // At this point, since we have made changes we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.

    // Inside the update, we also update the FunctionAnalysisManager in the
    // proxy for this particular SCC. We do this as the SCC may have changed and
    // as we're going to mutate this particular function we want to make sure
    // the proxy is in place to forward any invalidation events.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForCGSCCPass(CG, *C, N, AM, UR, FAM);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the SCC
    // mutation, we will revisit this function and potentially re-inline. If we
    // do, and that re-inlining also has the potential to mutate the SCC
    // structure, the infinite inlining problem can manifest through infinite
    // SCC splits and merges. To avoid this, we capture the originating caller
    // node and the SCC containing the call edge. This is a slight
    // overapproximation of the possible inlining decisions that must be
    // avoided, but is relatively efficient to store. We use C != OldC to know
    // when a new SCC is generated and the original SCC may be generated via
    // merge in later iterations.
    //
    // It is also possible that even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged
    // into the same one as itself, and the original SCC will be added into
    // UR.CWorklist again; we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history, we should look for a more efficient way of tracking it.
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();

    // Invalidate analyses for this function now so that we don't have to
    // invalidate analyses for all functions in this SCC later.
    FAM.invalidate(F, PreservedAnalyses::none());
  }

  // We must ensure that we only delete functions with comdats if every function
  // in the comdat is going to be deleted.
  if (!DeadFunctionsInComdats.empty()) {
    filterDeadComdatFunctions(DeadFunctionsInComdats);
    for (auto *Callee : DeadFunctionsInComdats)
      Callee->dropAllReferences();
    DeadFunctions.append(DeadFunctionsInComdats);
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the CGSCC
  // pass manager in the process.
  //
  // Note that the order in which these functions get deleted is not
  // significant: all we do is delete them and add their pointers to unordered
  // sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // If the updated SCC was the one containing the deleted function, clear it.
    if (&DeadC == UR.UpdatedC)
      UR.UpdatedC = nullptr;

    // And delete the actual function from the module.
    M.getFunctionList().erase(DeadF);

    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  // Even if we change the IR, we update the core CGSCC data structures and so
  // can preserve the proxy to the function analysis manager.
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  // We have already invalidated all analyses on modified functions.
  PA.preserveSet<AllAnalysesOn<Function>>();
  return PA;
}

ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params,
                                                   bool MandatoryFirst,
                                                   InlineContext IC,
                                                   InliningAdvisorMode Mode,
                                                   unsigned MaxDevirtIterations)
    : Params(Params), IC(IC), Mode(Mode),
      MaxDevirtIterations(MaxDevirtIterations) {
  // Run the inliner first. The theory is that we are walking bottom-up and so
  // the callees have already been fully optimized, and we want to inline them
  // into the callers so that our optimizations can reflect that.
  // For PreLinkThinLTO pass, we disable hot-caller heuristic for sample PGO
  // because it makes profile annotation in the backend inaccurate.
  if (MandatoryFirst) {
    PM.addPass(InlinerPass(/*OnlyMandatory*/ true));
    if (EnablePostSCCAdvisorPrinting)
      PM.addPass(InlineAdvisorAnalysisPrinterPass(dbgs()));
  }
  PM.addPass(InlinerPass());
  if (EnablePostSCCAdvisorPrinting)
    PM.addPass(InlineAdvisorAnalysisPrinterPass(dbgs()));
}

PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M,
                                                ModuleAnalysisManager &MAM) {
  auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M);
  if (!IAA.tryCreate(Params, Mode,
                     {CGSCCInlineReplayFile,
                      CGSCCInlineReplayScope,
                      CGSCCInlineReplayFallback,
                      {CGSCCInlineReplayFormat}},
                     IC)) {
    M.getContext().emitError(
        "Could not setup Inlining Advisor for the requested "
        "mode and/or options");
    return PreservedAnalyses::all();
  }

  // We wrap the CGSCC pipeline in a devirtualization repeater. This will try
  // to detect when we devirtualize indirect calls and iterate the SCC passes
  // in that case to try and catch knock-on inlining or function attrs
  // opportunities. Then we add it to the module pipeline by walking the SCCs
  // in postorder (or bottom-up).
  // If MaxDevirtIterations is 0, we just don't use the devirtualization
  // wrapper.
  if (MaxDevirtIterations == 0)
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(PM)));
  else
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
        createDevirtSCCRepeatedPass(std::move(PM), MaxDevirtIterations)));

  MPM.addPass(std::move(AfterCGMPM));
  MPM.run(M, MAM);

  // Discard the InlineAdvisor, a subsequent inlining session should construct
  // its own.
  auto PA = PreservedAnalyses::all();
  if (!KeepAdvisorForPrinting)
    PA.abandon<InlineAdvisorAnalysis>();
  return PA;
}

void InlinerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<InlinerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  if (OnlyMandatory)
    OS << "<only-mandatory>";
}

void ModuleInlinerWrapperPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  // Print some info about passes added to the wrapper. This is, however,
  // incomplete, as the InlineAdvisorAnalysis part isn't included (it also
  // depends on Params and Mode).
  if (!MPM.isEmpty()) {
    MPM.printPipeline(OS, MapClassName2PassName);
    OS << ",";
  }
  OS << "cgscc(";
  if (MaxDevirtIterations != 0)
    OS << "devirt<" << MaxDevirtIterations << ">(";
  PM.printPipeline(OS, MapClassName2PassName);
  if (MaxDevirtIterations != 0)
    OS << ")";
  OS << ")";
}