1 //===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the SampleProfileLoader transformation. This pass
10 // reads a profile file generated by a sampling profiler (e.g. Linux Perf -
11 // http://perf.wiki.kernel.org/) and generates IR metadata to reflect the
12 // profile information in the given profile.
13 //
14 // This pass generates branch weight annotations on the IR:
15 //
16 // - prof: Represents branch weights. This annotation is added to branches
17 // to indicate the weights of each edge coming out of the branch.
18 // The weight of each edge is the weight of the target block for
19 // that edge. The weight of a block B is computed as the maximum
20 // number of samples found in B.
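//              For example (illustrative IR, not emitted verbatim by this
//              pass), a conditional branch whose successor blocks were
//              sampled 100 and 10 times would be annotated as:
//
//                br i1 %cmp, label %if.then, label %if.else, !prof !1
//                ...
//                !1 = !{!"branch_weights", i32 100, i32 10}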
21 //
22 //===----------------------------------------------------------------------===//
23
24 #include "llvm/Transforms/IPO/SampleProfile.h"
25 #include "llvm/ADT/ArrayRef.h"
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/ADT/DenseSet.h"
28 #include "llvm/ADT/None.h"
29 #include "llvm/ADT/PriorityQueue.h"
30 #include "llvm/ADT/SCCIterator.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringMap.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/Analysis/AssumptionCache.h"
39 #include "llvm/Analysis/CallGraph.h"
40 #include "llvm/Analysis/CallGraphSCCPass.h"
41 #include "llvm/Analysis/InlineAdvisor.h"
42 #include "llvm/Analysis/InlineCost.h"
43 #include "llvm/Analysis/LoopInfo.h"
44 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
45 #include "llvm/Analysis/PostDominators.h"
46 #include "llvm/Analysis/ProfileSummaryInfo.h"
47 #include "llvm/Analysis/ReplayInlineAdvisor.h"
48 #include "llvm/Analysis/TargetLibraryInfo.h"
49 #include "llvm/Analysis/TargetTransformInfo.h"
50 #include "llvm/IR/BasicBlock.h"
51 #include "llvm/IR/CFG.h"
52 #include "llvm/IR/DebugInfoMetadata.h"
53 #include "llvm/IR/DebugLoc.h"
54 #include "llvm/IR/DiagnosticInfo.h"
55 #include "llvm/IR/Dominators.h"
56 #include "llvm/IR/Function.h"
57 #include "llvm/IR/GlobalValue.h"
58 #include "llvm/IR/InstrTypes.h"
59 #include "llvm/IR/Instruction.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/IR/IntrinsicInst.h"
62 #include "llvm/IR/LLVMContext.h"
63 #include "llvm/IR/MDBuilder.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/PassManager.h"
66 #include "llvm/IR/ValueSymbolTable.h"
67 #include "llvm/InitializePasses.h"
68 #include "llvm/Pass.h"
69 #include "llvm/ProfileData/InstrProf.h"
70 #include "llvm/ProfileData/SampleProf.h"
71 #include "llvm/ProfileData/SampleProfReader.h"
72 #include "llvm/Support/Casting.h"
73 #include "llvm/Support/CommandLine.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/ErrorOr.h"
77 #include "llvm/Support/GenericDomTree.h"
78 #include "llvm/Support/raw_ostream.h"
79 #include "llvm/Transforms/IPO.h"
80 #include "llvm/Transforms/IPO/SampleContextTracker.h"
81 #include "llvm/Transforms/IPO/SampleProfileProbe.h"
82 #include "llvm/Transforms/Instrumentation.h"
83 #include "llvm/Transforms/Utils/CallPromotionUtils.h"
84 #include "llvm/Transforms/Utils/Cloning.h"
85 #include <algorithm>
86 #include <cassert>
87 #include <cstdint>
88 #include <functional>
89 #include <limits>
90 #include <map>
91 #include <memory>
92 #include <queue>
93 #include <string>
94 #include <system_error>
95 #include <utility>
96 #include <vector>
97
98 using namespace llvm;
99 using namespace sampleprof;
100 using ProfileCount = Function::ProfileCount;
101 #define DEBUG_TYPE "sample-profile"
102 #define CSINLINE_DEBUG DEBUG_TYPE "-inline"
103
104 STATISTIC(NumCSInlined,
105 "Number of functions inlined with context sensitive profile");
106 STATISTIC(NumCSNotInlined,
107 "Number of functions not inlined with context sensitive profile");
108 STATISTIC(NumMismatchedProfile,
109 "Number of functions with CFG mismatched profile");
110 STATISTIC(NumMatchedProfile, "Number of functions with CFG matched profile");
111 STATISTIC(NumDuplicatedInlinesite,
112 "Number of inlined callsites with a partial distribution factor");
113
114 STATISTIC(NumCSInlinedHitMinLimit,
115 "Number of functions with FDO inline stopped due to min size limit");
116 STATISTIC(NumCSInlinedHitMaxLimit,
117 "Number of functions with FDO inline stopped due to max size limit");
118 STATISTIC(
119 NumCSInlinedHitGrowthLimit,
120 "Number of functions with FDO inline stopped due to growth size limit");
121
122 // Command line option to specify the file to read samples from. This is
123 // mainly used for debugging.
124 static cl::opt<std::string> SampleProfileFile(
125 "sample-profile-file", cl::init(""), cl::value_desc("filename"),
126 cl::desc("Profile file loaded by -sample-profile"), cl::Hidden);
127
128 // The named file contains a set of transformations that may have been applied
129 // to the symbol names between the program from which the sample data was
130 // collected and the current program's symbols.
131 static cl::opt<std::string> SampleProfileRemappingFile(
132 "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"),
133 cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden);
134
135 static cl::opt<unsigned> SampleProfileMaxPropagateIterations(
136 "sample-profile-max-propagate-iterations", cl::init(100),
137 cl::desc("Maximum number of iterations to go through when propagating "
138 "sample block/edge weights through the CFG."));
139
140 static cl::opt<unsigned> SampleProfileRecordCoverage(
141 "sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"),
142 cl::desc("Emit a warning if less than N% of records in the input profile "
143 "are matched to the IR."));
144
145 static cl::opt<unsigned> SampleProfileSampleCoverage(
146 "sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"),
147 cl::desc("Emit a warning if less than N% of samples in the input profile "
148 "are matched to the IR."));
149
150 static cl::opt<bool> NoWarnSampleUnused(
151 "no-warn-sample-unused", cl::init(false), cl::Hidden,
152 cl::desc("Use this option to turn off/on warnings about function with "
153 "samples but without debug information to use those samples. "));
154
155 static cl::opt<bool> ProfileSampleAccurate(
156 "profile-sample-accurate", cl::Hidden, cl::init(false),
157 cl::desc("If the sample profile is accurate, we will mark all un-sampled "
158 "callsite and function as having 0 samples. Otherwise, treat "
159 "un-sampled callsites and functions conservatively as unknown. "));
160
161 static cl::opt<bool> ProfileAccurateForSymsInList(
162 "profile-accurate-for-symsinlist", cl::Hidden, cl::ZeroOrMore,
163 cl::init(true),
164 cl::desc("For symbols in profile symbol list, regard their profiles to "
165 "be accurate. It may be overriden by profile-sample-accurate. "));
166
167 static cl::opt<bool> ProfileMergeInlinee(
168 "sample-profile-merge-inlinee", cl::Hidden, cl::init(true),
169 cl::desc("Merge past inlinee's profile to outline version if sample "
170 "profile loader decided not to inline a call site. It will "
171 "only be enabled when top-down order of profile loading is "
172 "enabled. "));
173
174 static cl::opt<bool> ProfileTopDownLoad(
175 "sample-profile-top-down-load", cl::Hidden, cl::init(true),
176 cl::desc("Do profile annotation and inlining for functions in top-down "
177 "order of call graph during sample profile loading. It only "
178 "works for new pass manager. "));
179
180 static cl::opt<bool> UseProfileIndirectCallEdges(
181 "use-profile-indirect-call-edges", cl::init(true), cl::Hidden,
182 cl::desc("Considering indirect call samples from profile when top-down "
183 "processing functions. Only CSSPGO is supported."));
184
185 static cl::opt<bool> UseProfileTopDownOrder(
186 "use-profile-top-down-order", cl::init(false), cl::Hidden,
187 cl::desc("Process functions in one SCC in a top-down order "
188 "based on the input profile."));
189
190 static cl::opt<bool> ProfileSizeInline(
191 "sample-profile-inline-size", cl::Hidden, cl::init(false),
192 cl::desc("Inline cold call sites in profile loader if it's beneficial "
193 "for code size."));
194
195 static cl::opt<int> ProfileInlineGrowthLimit(
196 "sample-profile-inline-growth-limit", cl::Hidden, cl::init(12),
197 cl::desc("The size growth ratio limit for proirity-based sample profile "
198 "loader inlining."));
199
200 static cl::opt<int> ProfileInlineLimitMin(
201 "sample-profile-inline-limit-min", cl::Hidden, cl::init(100),
202 cl::desc("The lower bound of size growth limit for "
203 "proirity-based sample profile loader inlining."));
204
205 static cl::opt<int> ProfileInlineLimitMax(
206 "sample-profile-inline-limit-max", cl::Hidden, cl::init(10000),
207 cl::desc("The upper bound of size growth limit for "
208 "proirity-based sample profile loader inlining."));
209
210 static cl::opt<int> ProfileICPThreshold(
211 "sample-profile-icp-threshold", cl::Hidden, cl::init(5),
212 cl::desc(
213 "Relative hotness threshold for indirect "
214 "call promotion in proirity-based sample profile loader inlining."));
215
216 static cl::opt<int> SampleHotCallSiteThreshold(
217 "sample-profile-hot-inline-threshold", cl::Hidden, cl::init(3000),
218 cl::desc("Hot callsite threshold for proirity-based sample profile loader "
219 "inlining."));
220
221 static cl::opt<bool> CallsitePrioritizedInline(
222 "sample-profile-prioritized-inline", cl::Hidden, cl::ZeroOrMore,
223 cl::init(false),
224 cl::desc("Use call site prioritized inlining for sample profile loader."
225 "Currently only CSSPGO is supported."));
226
227 static cl::opt<int> SampleColdCallSiteThreshold(
228 "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45),
229 cl::desc("Threshold for inlining cold callsites"));
230
231 static cl::opt<std::string> ProfileInlineReplayFile(
232 "sample-profile-inline-replay", cl::init(""), cl::value_desc("filename"),
233 cl::desc(
234 "Optimization remarks file containing inline remarks to be replayed "
235 "by inlining from sample profile loader."),
236 cl::Hidden);
237
238 namespace {
239
240 using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>;
241 using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>;
242 using Edge = std::pair<const BasicBlock *, const BasicBlock *>;
243 using EdgeWeightMap = DenseMap<Edge, uint64_t>;
244 using BlockEdgeMap =
245 DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>;
246
247 class SampleProfileLoader;
248
249 class SampleCoverageTracker {
250 public:
251 SampleCoverageTracker(SampleProfileLoader &SPL) : SPLoader(SPL){};
252
253 bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset,
254 uint32_t Discriminator, uint64_t Samples);
255 unsigned computeCoverage(unsigned Used, unsigned Total) const;
256 unsigned countUsedRecords(const FunctionSamples *FS,
257 ProfileSummaryInfo *PSI) const;
258 unsigned countBodyRecords(const FunctionSamples *FS,
259 ProfileSummaryInfo *PSI) const;
260 uint64_t getTotalUsedSamples() const { return TotalUsedSamples; }
261 uint64_t countBodySamples(const FunctionSamples *FS,
262 ProfileSummaryInfo *PSI) const;
263
264 void clear() {
265 SampleCoverage.clear();
266 TotalUsedSamples = 0;
267 }
268
269 private:
270 using BodySampleCoverageMap = std::map<LineLocation, unsigned>;
271 using FunctionSamplesCoverageMap =
272 DenseMap<const FunctionSamples *, BodySampleCoverageMap>;
273
274 /// Coverage map for sampling records.
275 ///
276 /// This map keeps a record of sampling records that have been matched to
277 /// an IR instruction. This is used to detect some form of staleness in
278 /// profiles (see flag -sample-profile-check-coverage).
279 ///
280 /// Each entry in the map corresponds to a FunctionSamples instance. This is
281 /// another map that counts how many times the sample record at the
282 /// given location has been used.
283 FunctionSamplesCoverageMap SampleCoverage;
284
285 /// Number of samples used from the profile.
286 ///
287 /// When a sampling record is used for the first time, the samples from
288 /// that record are added to this accumulator. Coverage is later computed
289 /// based on the total number of samples available in this function and
290 /// its callsites.
291 ///
292 /// Note that this accumulator tracks samples used from a single function
293 /// and all the inlined callsites. Strictly, we should have a map of counters
294 /// keyed by FunctionSamples pointers, but these stats are cleared after
295 /// every function, so we just need to keep a single counter.
296 uint64_t TotalUsedSamples = 0;
297
298 SampleProfileLoader &SPLoader;
299 };
300
301 class GUIDToFuncNameMapper {
302 public:
303 GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader,
304 DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap)
305 : CurrentReader(Reader), CurrentModule(M),
306 CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) {
307 if (!CurrentReader.useMD5())
308 return;
309
310 for (const auto &F : CurrentModule) {
311 StringRef OrigName = F.getName();
312 CurrentGUIDToFuncNameMap.insert(
313 {Function::getGUID(OrigName), OrigName});
314
315 // Local-to-global var promotion used by optimizations like ThinLTO
316 // will rename the var and add a suffix like ".llvm.xxx" to the
317 // original local name. In the sample profile, the suffixes of function
318 // names are all stripped. Since it is possible that the mapper is
319 // built in post-thin-link phase and var promotion has been done,
320 // we need to add the substring of function name without the suffix
321 // into the GUIDToFuncNameMap.
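// For example, a local "foo" promoted and renamed to "foo.llvm.1234"
// (illustrative names) is also registered under the GUID of its canonical
// name "foo", matching the stripped names used in the sample profile.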
322 StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
323 if (CanonName != OrigName)
324 CurrentGUIDToFuncNameMap.insert(
325 {Function::getGUID(CanonName), CanonName});
326 }
327
328 // Update GUIDToFuncNameMap for each function including inlinees.
329 SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap);
330 }
331
332 ~GUIDToFuncNameMapper() {
333 if (!CurrentReader.useMD5())
334 return;
335
336 CurrentGUIDToFuncNameMap.clear();
337
338 // Reset the GUIDToFuncNameMap of each function as it is no
339 // longer valid at this point.
340 SetGUIDToFuncNameMapForAll(nullptr);
341 }
342
343 private:
344 void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) {
345 std::queue<FunctionSamples *> FSToUpdate;
346 for (auto &IFS : CurrentReader.getProfiles()) {
347 FSToUpdate.push(&IFS.second);
348 }
349
350 while (!FSToUpdate.empty()) {
351 FunctionSamples *FS = FSToUpdate.front();
352 FSToUpdate.pop();
353 FS->GUIDToFuncNameMap = Map;
354 for (const auto &ICS : FS->getCallsiteSamples()) {
355 const FunctionSamplesMap &FSMap = ICS.second;
356 for (auto &IFS : FSMap) {
357 FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second);
358 FSToUpdate.push(&FS);
359 }
360 }
361 }
362 }
363
364 SampleProfileReader &CurrentReader;
365 Module &CurrentModule;
366 DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap;
367 };
368
369 // Inline candidate used by iterative callsite prioritized inliner
370 struct InlineCandidate {
371 CallBase *CallInstr;
372 const FunctionSamples *CalleeSamples;
373 // Prorated callsite count, which will be used to guide inlining. For example,
374 // if a callsite is duplicated in LTO prelink, then in LTO postlink the two
375 // copies will get their own distribution factors and their prorated counts
376 // will be used to decide if they should be inlined independently.
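// For instance (illustrative numbers), a callsite observed with 100 samples
// and later duplicated into copies with distribution factors 0.7 and 0.3 is
// seen with prorated counts of 70 and 30 respectively.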
377 uint64_t CallsiteCount;
378 // Call site distribution factor to prorate the profile samples for a
379 // duplicated callsite. Default value is 1.0.
380 float CallsiteDistribution;
381 };
382
383 // Inline candidate comparer using call site weight
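// CandidateQueue below is a max-heap with respect to this comparer, so the
// candidate with the largest prorated callsite count is popped (and thus
// considered for inlining) first; GUIDs only serve as a deterministic
// tie breaker.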
384 struct CandidateComparer {
385 bool operator()(const InlineCandidate &LHS, const InlineCandidate &RHS) {
386 if (LHS.CallsiteCount != RHS.CallsiteCount)
387 return LHS.CallsiteCount < RHS.CallsiteCount;
388
389 // Tie breaker using GUID so we have stable/deterministic inlining order
390 assert(LHS.CalleeSamples && RHS.CalleeSamples &&
391 "Expect non-null FunctionSamples");
392 return LHS.CalleeSamples->getGUID(LHS.CalleeSamples->getName()) <
393 RHS.CalleeSamples->getGUID(RHS.CalleeSamples->getName());
394 }
395 };
396
397 using CandidateQueue =
398 PriorityQueue<InlineCandidate, std::vector<InlineCandidate>,
399 CandidateComparer>;
400
401 /// Sample profile pass.
402 ///
403 /// This pass reads profile data from the file specified by
404 /// -sample-profile-file and annotates every affected function with the
405 /// profile information found in that file.
406 class SampleProfileLoader {
407 public:
408 SampleProfileLoader(
409 StringRef Name, StringRef RemapName, ThinOrFullLTOPhase LTOPhase,
410 std::function<AssumptionCache &(Function &)> GetAssumptionCache,
411 std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo,
412 std::function<const TargetLibraryInfo &(Function &)> GetTLI)
413 : GetAC(std::move(GetAssumptionCache)),
414 GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)),
415 CoverageTracker(*this), Filename(std::string(Name)),
416 RemappingFilename(std::string(RemapName)), LTOPhase(LTOPhase) {}
417
418 bool doInitialization(Module &M, FunctionAnalysisManager *FAM = nullptr);
419 bool runOnModule(Module &M, ModuleAnalysisManager *AM,
420 ProfileSummaryInfo *_PSI, CallGraph *CG);
421
422 void dump() { Reader->dump(); }
423
424 protected:
425 friend class SampleCoverageTracker;
426
427 bool runOnFunction(Function &F, ModuleAnalysisManager *AM);
428 unsigned getFunctionLoc(Function &F);
429 bool emitAnnotations(Function &F);
430 ErrorOr<uint64_t> getInstWeight(const Instruction &I);
431 ErrorOr<uint64_t> getProbeWeight(const Instruction &I);
432 ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB);
433 const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const;
434 std::vector<const FunctionSamples *>
435 findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
436 mutable DenseMap<const DILocation *, const FunctionSamples *> DILocation2SampleMap;
437 const FunctionSamples *findFunctionSamples(const Instruction &I) const;
438 // Attempt to promote indirect call and also inline the promoted call
439 bool tryPromoteAndInlineCandidate(
440 Function &F, InlineCandidate &Candidate, uint64_t SumOrigin,
441 uint64_t &Sum, DenseSet<Instruction *> &PromotedInsns,
442 SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
443 bool inlineHotFunctions(Function &F,
444 DenseSet<GlobalValue::GUID> &InlinedGUIDs);
445 InlineCost shouldInlineCandidate(InlineCandidate &Candidate);
446 bool getInlineCandidate(InlineCandidate *NewCandidate, CallBase *CB);
447 bool
448 tryInlineCandidate(InlineCandidate &Candidate,
449 SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
450 bool
451 inlineHotFunctionsWithPriority(Function &F,
452 DenseSet<GlobalValue::GUID> &InlinedGUIDs);
453 // Inline cold/small functions in addition to hot ones
454 bool shouldInlineColdCallee(CallBase &CallInst);
455 void emitOptimizationRemarksForInlineCandidates(
456 const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
457 bool Hot);
458 void printEdgeWeight(raw_ostream &OS, Edge E);
459 void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const;
460 void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB);
461 bool computeBlockWeights(Function &F);
462 void findEquivalenceClasses(Function &F);
463 template <bool IsPostDom>
464 void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
465 DominatorTreeBase<BasicBlock, IsPostDom> *DomTree);
466
467 void propagateWeights(Function &F);
468 uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge);
469 void buildEdges(Function &F);
470 std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG);
471 void addCallGraphEdges(CallGraph &CG, const FunctionSamples &Samples);
472 void replaceCallGraphEdges(CallGraph &CG, StringMap<Function *> &SymbolMap);
473 bool propagateThroughEdges(Function &F, bool UpdateBlockCount);
474 void computeDominanceAndLoopInfo(Function &F);
475 void clearFunctionData();
476 bool callsiteIsHot(const FunctionSamples *CallsiteFS,
477 ProfileSummaryInfo *PSI);
478
479 /// Map basic blocks to their computed weights.
480 ///
481 /// The weight of a basic block is defined to be the maximum
482 /// of all the instruction weights in that block.
483 BlockWeightMap BlockWeights;
484
485 /// Map edges to their computed weights.
486 ///
487 /// Edge weights are computed by propagating basic block weights in
488 /// SampleProfile::propagateWeights.
489 EdgeWeightMap EdgeWeights;
490
491 /// Set of visited blocks during propagation.
492 SmallPtrSet<const BasicBlock *, 32> VisitedBlocks;
493
494 /// Set of visited edges during propagation.
495 SmallSet<Edge, 32> VisitedEdges;
496
497 /// Equivalence classes for block weights.
498 ///
499 /// Two blocks BB1 and BB2 are in the same equivalence class if they
500 /// dominate and post-dominate each other, and they are in the same loop
501 /// nest. When this happens, the two blocks are guaranteed to execute
502 /// the same number of times.
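/// For example, a block ending in an unconditional branch and its unique
/// successor (when that successor has no other predecessors) dominate and
/// post-dominate each other; they always execute the same number of times
/// and can therefore share a single weight.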
503 EquivalenceClassMap EquivalenceClass;
504
505 /// Map from function name to Function *. Used to find the function from
506 /// the function name. If the function name contains a suffix, an
507 /// additional entry is added to map from the stripped name to the
508 /// function, provided the mapping is one-to-one.
509 StringMap<Function *> SymbolMap;
510
511 /// Dominance, post-dominance and loop information.
512 std::unique_ptr<DominatorTree> DT;
513 std::unique_ptr<PostDominatorTree> PDT;
514 std::unique_ptr<LoopInfo> LI;
515
516 std::function<AssumptionCache &(Function &)> GetAC;
517 std::function<TargetTransformInfo &(Function &)> GetTTI;
518 std::function<const TargetLibraryInfo &(Function &)> GetTLI;
519
520 /// Predecessors for each basic block in the CFG.
521 BlockEdgeMap Predecessors;
522
523 /// Successors for each basic block in the CFG.
524 BlockEdgeMap Successors;
525
526 SampleCoverageTracker CoverageTracker;
527
528 /// Profile reader object.
529 std::unique_ptr<SampleProfileReader> Reader;
530
531 /// Profile tracker for different context.
532 std::unique_ptr<SampleContextTracker> ContextTracker;
533
534 /// Samples collected for the body of this function.
535 FunctionSamples *Samples = nullptr;
536
537 /// Name of the profile file to load.
538 std::string Filename;
539
540 /// Name of the profile remapping file to load.
541 std::string RemappingFilename;
542
543 /// Flag indicating whether the profile input loaded successfully.
544 bool ProfileIsValid = false;
545
546 /// Flag indicating whether input profile is context-sensitive
547 bool ProfileIsCS = false;
548
549 /// Flag indicating which LTO/ThinLTO phase the pass is invoked in.
550 ///
551 /// We need to know the LTO phase because, for example, in the ThinLTO
552 /// pre-link phase we should not promote indirect calls during annotation.
553 /// Instead, we mark the GUIDs that need to be annotated to the function.
554 ThinOrFullLTOPhase LTOPhase;
555
556 /// Profile Summary Info computed from sample profile.
557 ProfileSummaryInfo *PSI = nullptr;
558
559 /// Profile symbol list tells whether a function name appears in the binary
560 /// used to generate the current profile.
561 std::unique_ptr<ProfileSymbolList> PSL;
562
563 /// Total number of samples collected in this profile.
564 ///
565 /// This is the sum of all the samples collected in all the functions executed
566 /// at runtime.
567 uint64_t TotalCollectedSamples = 0;
568
569 /// Optimization Remark Emitter used to emit diagnostic remarks.
570 OptimizationRemarkEmitter *ORE = nullptr;
571
572 // Information recorded when we declined to inline a call site
573 // because we have determined it is too cold is accumulated for
574 // each callee function. Initially this is just the entry count.
575 struct NotInlinedProfileInfo {
576 uint64_t entryCount;
577 };
578 DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;
579
580 // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
581 // all the function symbols defined or declared in current module.
582 DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;
583
584 // All the Names used in FunctionSamples including outline function
585 // names, inline instance names and call target names.
586 StringSet<> NamesInProfile;
587
588 // For symbols in the profile symbol list, whether to regard their profiles
589 // as accurate. It is mainly decided by the existence of the profile symbol
590 // list and the -profile-accurate-for-symsinlist flag, but it can be
591 // overridden by -profile-sample-accurate or the profile-sample-accurate
592 // attribute.
593 bool ProfAccForSymsInList;
594
595 // External inline advisor used to replay inline decision from remarks.
596 std::unique_ptr<ReplayInlineAdvisor> ExternalInlineAdvisor;
597
598 // A pseudo probe helper to correlate the imported sample counts.
599 std::unique_ptr<PseudoProbeManager> ProbeManager;
600 };
601
602 class SampleProfileLoaderLegacyPass : public ModulePass {
603 public:
604 // Class identification, replacement for typeinfo
605 static char ID;
606
607 SampleProfileLoaderLegacyPass(
608 StringRef Name = SampleProfileFile,
609 ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
610 : ModulePass(ID), SampleLoader(
611 Name, SampleProfileRemappingFile, LTOPhase,
612 [&](Function &F) -> AssumptionCache & {
613 return ACT->getAssumptionCache(F);
614 },
615 [&](Function &F) -> TargetTransformInfo & {
616 return TTIWP->getTTI(F);
617 },
618 [&](Function &F) -> TargetLibraryInfo & {
619 return TLIWP->getTLI(F);
620 }) {
621 initializeSampleProfileLoaderLegacyPassPass(
622 *PassRegistry::getPassRegistry());
623 }
624
625 void dump() { SampleLoader.dump(); }
626
627 bool doInitialization(Module &M) override {
628 return SampleLoader.doInitialization(M);
629 }
630
631 StringRef getPassName() const override { return "Sample profile pass"; }
632 bool runOnModule(Module &M) override;
633
634 void getAnalysisUsage(AnalysisUsage &AU) const override {
635 AU.addRequired<AssumptionCacheTracker>();
636 AU.addRequired<TargetTransformInfoWrapperPass>();
637 AU.addRequired<TargetLibraryInfoWrapperPass>();
638 AU.addRequired<ProfileSummaryInfoWrapperPass>();
639 }
640
641 private:
642 SampleProfileLoader SampleLoader;
643 AssumptionCacheTracker *ACT = nullptr;
644 TargetTransformInfoWrapperPass *TTIWP = nullptr;
645 TargetLibraryInfoWrapperPass *TLIWP = nullptr;
646 };
647
648 } // end anonymous namespace
649
650 /// Return true if the given callsite is hot wrt to hot cutoff threshold.
651 ///
652 /// Functions that were inlined in the original binary will be represented
653 /// in the inline stack in the sample profile. If the profile shows that
654 /// the original inline decision was "good" (i.e., the callsite is executed
655 /// frequently), then we will recreate the inline decision and apply the
656 /// profile from the inlined callsite.
657 ///
658 /// To decide whether an inlined callsite is hot, we compare the callsite
659 /// sample count with the hot cutoff computed by ProfileSummaryInfo; the
660 /// callsite is regarded as hot if the count is above the cutoff value.
661 ///
662 /// When ProfileAccurateForSymsInList is enabled and a profile symbol list
663 /// is present, functions in the profile symbol list but without a profile
664 /// will be regarded as cold, and much less inlining will happen in the
665 /// CGSCC inlining pass. We therefore lower the hot criterion here to allow
666 /// more early inlining of warm callsites, which helps performance.
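/// For example, under the default behavior a callsite is treated as hot only
/// if its total sample count exceeds PSI's hot-count cutoff; with
/// ProfileAccurateForSymsInList, any callsite that is merely not cold (a
/// weaker criterion) already counts as hot.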
667 bool SampleProfileLoader::callsiteIsHot(const FunctionSamples *CallsiteFS,
668 ProfileSummaryInfo *PSI) {
669 if (!CallsiteFS)
670 return false; // The callsite was not inlined in the original binary.
671
672 assert(PSI && "PSI is expected to be non null");
673 uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples();
674 if (ProfAccForSymsInList)
675 return !PSI->isColdCount(CallsiteTotalSamples);
676 else
677 return PSI->isHotCount(CallsiteTotalSamples);
678 }
679
680 /// Mark as used the sample record for the given function samples at
681 /// (LineOffset, Discriminator).
682 ///
683 /// \returns true if this is the first time we mark the given record.
684 bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS,
685 uint32_t LineOffset,
686 uint32_t Discriminator,
687 uint64_t Samples) {
688 LineLocation Loc(LineOffset, Discriminator);
689 unsigned &Count = SampleCoverage[FS][Loc];
690 bool FirstTime = (++Count == 1);
691 if (FirstTime)
692 TotalUsedSamples += Samples;
693 return FirstTime;
694 }
695
696 /// Return the number of sample records that were applied from this profile.
697 ///
698 /// This count does not include records from cold inlined callsites.
699 unsigned
700 SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS,
701 ProfileSummaryInfo *PSI) const {
702 auto I = SampleCoverage.find(FS);
703
704 // The size of the coverage map for FS represents the number of records
705 // that were marked used at least once.
706 unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0;
707
708 // If there are inlined callsites in this function, count the records found
709 // in the respective bodies. However, do not bother counting callees with 0
710 // total samples; these are callees that were never invoked at runtime.
711 for (const auto &I : FS->getCallsiteSamples())
712 for (const auto &J : I.second) {
713 const FunctionSamples *CalleeSamples = &J.second;
714 if (SPLoader.callsiteIsHot(CalleeSamples, PSI))
715 Count += countUsedRecords(CalleeSamples, PSI);
716 }
717
718 return Count;
719 }
720
721 /// Return the number of sample records in the body of this profile.
722 ///
723 /// This count does not include records from cold inlined callsites.
724 unsigned
725 SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS,
726 ProfileSummaryInfo *PSI) const {
727 unsigned Count = FS->getBodySamples().size();
728
729 // Only count records in hot callsites.
730 for (const auto &I : FS->getCallsiteSamples())
731 for (const auto &J : I.second) {
732 const FunctionSamples *CalleeSamples = &J.second;
733 if (SPLoader.callsiteIsHot(CalleeSamples, PSI))
734 Count += countBodyRecords(CalleeSamples, PSI);
735 }
736
737 return Count;
738 }
739
740 /// Return the number of samples collected in the body of this profile.
741 ///
742 /// This count does not include samples from cold inlined callsites.
743 uint64_t
744 SampleCoverageTracker::countBodySamples(const FunctionSamples *FS,
745 ProfileSummaryInfo *PSI) const {
746 uint64_t Total = 0;
747 for (const auto &I : FS->getBodySamples())
748 Total += I.second.getSamples();
749
750 // Only count samples in hot callsites.
751 for (const auto &I : FS->getCallsiteSamples())
752 for (const auto &J : I.second) {
753 const FunctionSamples *CalleeSamples = &J.second;
754 if (SPLoader.callsiteIsHot(CalleeSamples, PSI))
755 Total += countBodySamples(CalleeSamples, PSI);
756 }
757
758 return Total;
759 }
760
761 /// Return the fraction of sample records used in this profile.
762 ///
763 /// The returned value is an unsigned integer in the range 0-100 indicating
764 /// the percentage of sample records that were used while applying this
765 /// profile to the associated function.
766 unsigned SampleCoverageTracker::computeCoverage(unsigned Used,
767 unsigned Total) const {
768 assert(Used <= Total &&
769 "number of used records cannot exceed the total number of records");
770 return Total > 0 ? Used * 100 / Total : 100;
771 }
772
773 /// Clear all the per-function data used to load samples and propagate weights.
774 void SampleProfileLoader::clearFunctionData() {
775 BlockWeights.clear();
776 EdgeWeights.clear();
777 VisitedBlocks.clear();
778 VisitedEdges.clear();
779 EquivalenceClass.clear();
780 DT = nullptr;
781 PDT = nullptr;
782 LI = nullptr;
783 Predecessors.clear();
784 Successors.clear();
785 CoverageTracker.clear();
786 }
787
788 #ifndef NDEBUG
789 /// Print the weight of edge \p E on stream \p OS.
790 ///
791 /// \param OS Stream to emit the output to.
792 /// \param E Edge to print.
793 void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) {
794 OS << "weight[" << E.first->getName() << "->" << E.second->getName()
795 << "]: " << EdgeWeights[E] << "\n";
796 }
797
798 /// Print the equivalence class of block \p BB on stream \p OS.
799 ///
800 /// \param OS Stream to emit the output to.
801 /// \param BB Block to print.
802 void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS,
803 const BasicBlock *BB) {
804 const BasicBlock *Equiv = EquivalenceClass[BB];
805 OS << "equivalence[" << BB->getName()
806 << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
807 }
808
809 /// Print the weight of block \p BB on stream \p OS.
810 ///
811 /// \param OS Stream to emit the output to.
812 /// \param BB Block to print.
813 void SampleProfileLoader::printBlockWeight(raw_ostream &OS,
814 const BasicBlock *BB) const {
815 const auto &I = BlockWeights.find(BB);
816 uint64_t W = (I == BlockWeights.end() ? 0 : I->second);
817 OS << "weight[" << BB->getName() << "]: " << W << "\n";
818 }
819 #endif
820
821 /// Get the weight for an instruction.
822 ///
823 /// The "weight" of an instruction \p Inst is the number of samples
824 /// collected on that instruction at runtime. To retrieve it, we
825 /// need to compute the line number of \p Inst relative to the start of its
826 /// function. We use HeaderLineno to compute the offset. We then
827 /// look up the samples collected for \p Inst using BodySamples.
828 ///
829 /// \param Inst Instruction to query.
830 ///
831 /// \returns the weight of \p Inst.
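/// For example (illustrative), if the enclosing function's header is at
/// source line 20 and \p Inst is attributed to line 27 with discriminator 0,
/// the samples are looked up at line offset 7, discriminator 0.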
832 ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
833 if (FunctionSamples::ProfileIsProbeBased)
834 return getProbeWeight(Inst);
835
836 const DebugLoc &DLoc = Inst.getDebugLoc();
837 if (!DLoc)
838 return std::error_code();
839
840 const FunctionSamples *FS = findFunctionSamples(Inst);
841 if (!FS)
842 return std::error_code();
843
844 // Ignore all intrinsics, phi nodes and branch instructions.
845 // Branch and phi node instructions usually contain debug info from sources
846 // outside of their residing basic block, so we ignore them during annotation.
847 if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
848 return std::error_code();
849
850 // If a direct call/invoke instruction is inlined in profile
851 // (findCalleeFunctionSamples returns non-empty result), but not inlined here,
852 // it means that the inlined callsite has no sample, thus the call
853 // instruction should have 0 count.
854 if (!ProfileIsCS)
855 if (const auto *CB = dyn_cast<CallBase>(&Inst))
856 if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
857 return 0;
858
859 const DILocation *DIL = DLoc;
860 uint32_t LineOffset = FunctionSamples::getOffset(DIL);
861 uint32_t Discriminator = DIL->getBaseDiscriminator();
862 ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator);
863 if (R) {
864 bool FirstMark =
865 CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get());
866 if (FirstMark) {
867 ORE->emit([&]() {
868 OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
869 Remark << "Applied " << ore::NV("NumSamples", *R);
870 Remark << " samples from profile (offset: ";
871 Remark << ore::NV("LineOffset", LineOffset);
872 if (Discriminator) {
873 Remark << ".";
874 Remark << ore::NV("Discriminator", Discriminator);
875 }
876 Remark << ")";
877 return Remark;
878 });
879 }
880 LLVM_DEBUG(dbgs() << " " << DLoc.getLine() << "."
881 << DIL->getBaseDiscriminator() << ":" << Inst
882 << " (line offset: " << LineOffset << "."
883 << DIL->getBaseDiscriminator() << " - weight: " << R.get()
884 << ")\n");
885 }
886 return R;
887 }
888
889 ErrorOr<uint64_t> SampleProfileLoader::getProbeWeight(const Instruction &Inst) {
890 assert(FunctionSamples::ProfileIsProbeBased &&
891 "Profile is not pseudo probe based");
892 Optional<PseudoProbe> Probe = extractProbe(Inst);
893 if (!Probe)
894 return std::error_code();
895
896 const FunctionSamples *FS = findFunctionSamples(Inst);
897 if (!FS)
898 return std::error_code();
899
900 // If a direct call/invoke instruction is inlined in profile
901 // (findCalleeFunctionSamples returns non-empty result), but not inlined here,
902 // it means that the inlined callsite has no sample, thus the call
903 // instruction should have 0 count.
904 if (const auto *CB = dyn_cast<CallBase>(&Inst))
905 if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
906 return 0;
907
908 const ErrorOr<uint64_t> &R = FS->findSamplesAt(Probe->Id, 0);
909 if (R) {
910 uint64_t Samples = R.get() * Probe->Factor;
911 bool FirstMark = CoverageTracker.markSamplesUsed(FS, Probe->Id, 0, Samples);
912 if (FirstMark) {
913 ORE->emit([&]() {
914 OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
915 Remark << "Applied " << ore::NV("NumSamples", Samples);
916 Remark << " samples from profile (ProbeId=";
917 Remark << ore::NV("ProbeId", Probe->Id);
918 Remark << ", Factor=";
919 Remark << ore::NV("Factor", Probe->Factor);
920 Remark << ", OriginalSamples=";
921 Remark << ore::NV("OriginalSamples", R.get());
922 Remark << ")";
923 return Remark;
924 });
925 }
926 LLVM_DEBUG(dbgs() << " " << Probe->Id << ":" << Inst
927 << " - weight: " << R.get() << " - factor: "
928 << format("%0.2f", Probe->Factor) << ")\n");
929 return Samples;
930 }
931 return R;
932 }
933
934 /// Compute the weight of a basic block.
935 ///
936 /// The weight of basic block \p BB is the maximum weight of all the
937 /// instructions in BB.
938 ///
939 /// \param BB The basic block to query.
940 ///
941 /// \returns the weight for \p BB.
942 ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) {
943 uint64_t Max = 0;
944 bool HasWeight = false;
945 for (auto &I : BB->getInstList()) {
946 const ErrorOr<uint64_t> &R = getInstWeight(I);
947 if (R) {
948 Max = std::max(Max, R.get());
949 HasWeight = true;
950 }
951 }
952 return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code();
953 }
954
955 /// Compute and store the weights of every basic block.
956 ///
957 /// This populates the BlockWeights map by computing
958 /// the weights of every basic block in the CFG.
959 ///
960 /// \param F The function to query.
961 bool SampleProfileLoader::computeBlockWeights(Function &F) {
962 bool Changed = false;
963 LLVM_DEBUG(dbgs() << "Block weights\n");
964 for (const auto &BB : F) {
965 ErrorOr<uint64_t> Weight = getBlockWeight(&BB);
966 if (Weight) {
967 BlockWeights[&BB] = Weight.get();
968 VisitedBlocks.insert(&BB);
969 Changed = true;
970 }
971 LLVM_DEBUG(printBlockWeight(dbgs(), &BB));
972 }
973
974 return Changed;
975 }
976
977 /// Get the FunctionSamples for a call instruction.
978 ///
979 /// The FunctionSamples of a call/invoke instruction \p Inst is the inlined
980 /// instance that the call instruction is calling into. It contains
981 /// all samples that reside in the inlined instance. We first find the
982 /// inlined instance the call instruction comes from, then we
983 /// traverse its children to find the callsite with the matching
984 /// location.
985 ///
986 /// \param Inst Call/Invoke instruction to query.
987 ///
988 /// \returns The FunctionSamples pointer to the inlined instance.
989 const FunctionSamples *
990 SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const {
991 const DILocation *DIL = Inst.getDebugLoc();
992 if (!DIL) {
993 return nullptr;
994 }
995
996 StringRef CalleeName;
997 if (Function *Callee = Inst.getCalledFunction())
998 CalleeName = FunctionSamples::getCanonicalFnName(*Callee);
999
1000 if (ProfileIsCS)
1001 return ContextTracker->getCalleeContextSamplesFor(Inst, CalleeName);
1002
1003 const FunctionSamples *FS = findFunctionSamples(Inst);
1004 if (FS == nullptr)
1005 return nullptr;
1006
1007 return FS->findFunctionSamplesAt(FunctionSamples::getCallSiteIdentifier(DIL),
1008 CalleeName, Reader->getRemapper());
1009 }
1010
1011 /// Returns a vector of FunctionSamples that are the indirect call targets
1012 /// of \p Inst, sorted in descending order of entry sample counts. Stores
1013 /// the total call count of the indirect call in \p Sum.
1014 std::vector<const FunctionSamples *>
1015 SampleProfileLoader::findIndirectCallFunctionSamples(
1016 const Instruction &Inst, uint64_t &Sum) const {
1017 const DILocation *DIL = Inst.getDebugLoc();
1018 std::vector<const FunctionSamples *> R;
1019
1020 if (!DIL) {
1021 return R;
1022 }
1023
1024 auto FSCompare = [](const FunctionSamples *L, const FunctionSamples *R) {
1025 assert(L && R && "Expect non-null FunctionSamples");
1026 if (L->getEntrySamples() != R->getEntrySamples())
1027 return L->getEntrySamples() > R->getEntrySamples();
1028 return FunctionSamples::getGUID(L->getName()) <
1029 FunctionSamples::getGUID(R->getName());
1030 };
1031
1032 if (ProfileIsCS) {
1033 auto CalleeSamples =
1034 ContextTracker->getIndirectCalleeContextSamplesFor(DIL);
1035 if (CalleeSamples.empty())
1036 return R;
1037
1038 // For CSSPGO, we only use the target context profile's entry count
1039 // as that already includes both inlined and non-inlined callees.
1040 Sum = 0;
1041 for (const auto *const FS : CalleeSamples) {
1042 Sum += FS->getEntrySamples();
1043 R.push_back(FS);
1044 }
1045 llvm::sort(R, FSCompare);
1046 return R;
1047 }
1048
1049 const FunctionSamples *FS = findFunctionSamples(Inst);
1050 if (FS == nullptr)
1051 return R;
1052
1053 auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
1054 auto T = FS->findCallTargetMapAt(CallSite);
1055 Sum = 0;
1056 if (T)
1057 for (const auto &T_C : T.get())
1058 Sum += T_C.second;
1059 if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(CallSite)) {
1060 if (M->empty())
1061 return R;
1062 for (const auto &NameFS : *M) {
1063 Sum += NameFS.second.getEntrySamples();
1064 R.push_back(&NameFS.second);
1065 }
1066 llvm::sort(R, FSCompare);
1067 }
1068 return R;
1069 }
1070
1071 /// Get the FunctionSamples for an instruction.
1072 ///
1073 /// The FunctionSamples of an instruction \p Inst is the inlined instance
1074 /// that the instruction comes from. We traverse the inline stack
1075 /// of that instruction, and match it with the tree nodes in the profile.
1076 ///
1077 /// \param Inst Instruction to query.
1078 ///
1079 /// \returns the FunctionSamples pointer to the inlined instance.
1080 const FunctionSamples *
1081 SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
1082 if (FunctionSamples::ProfileIsProbeBased) {
1083 Optional<PseudoProbe> Probe = extractProbe(Inst);
1084 if (!Probe)
1085 return nullptr;
1086 }
1087
1088 const DILocation *DIL = Inst.getDebugLoc();
1089 if (!DIL)
1090 return Samples;
1091
1092 auto it = DILocation2SampleMap.try_emplace(DIL,nullptr);
1093 if (it.second) {
1094 if (ProfileIsCS)
1095 it.first->second = ContextTracker->getContextSamplesFor(DIL);
1096 else
1097 it.first->second =
1098 Samples->findFunctionSamples(DIL, Reader->getRemapper());
1099 }
1100 return it.first->second;
1101 }
1102
1103 /// Attempt to promote indirect call and also inline the promoted call.
1104 ///
1105 /// \param F Caller function.
1106 /// \param Candidate ICP and inline candidate.
1107 /// \param SumOrigin Original sum of target counts for the indirect call.
1108 /// \param Sum Sum of target counts for the indirect call.
1109 /// \param PromotedInsns Map to keep track of indirect calls already processed.
1110 /// \param InlinedCallSite Output vector for new call sites exposed after
1111 /// inlining.
1112 bool SampleProfileLoader::tryPromoteAndInlineCandidate(
1113 Function &F, InlineCandidate &Candidate, uint64_t SumOrigin, uint64_t &Sum,
1114 DenseSet<Instruction *> &PromotedInsns,
1115 SmallVector<CallBase *, 8> *InlinedCallSite) {
1116 const char *Reason = "Callee function not available";
1117 // R->getValue() != &F is to prevent promoting a recursive call.
1118 // If it is a recursive call, we do not inline it as it could bloat
1119 // the code exponentially. There is a way to handle this better, e.g.
1120 // clone the caller first, and inline the cloned caller if it is
1121 // recursive. As llvm does not inline recursive calls, we will
1122 // simply ignore it instead of handling it explicitly.
1123 auto R = SymbolMap.find(Candidate.CalleeSamples->getFuncName());
1124 if (R != SymbolMap.end() && R->getValue() &&
1125 !R->getValue()->isDeclaration() && R->getValue()->getSubprogram() &&
1126 R->getValue()->hasFnAttribute("use-sample-profile") &&
1127 R->getValue() != &F &&
1128 isLegalToPromote(*Candidate.CallInstr, R->getValue(), &Reason)) {
1129 auto *DI =
1130 &pgo::promoteIndirectCall(*Candidate.CallInstr, R->getValue(),
1131 Candidate.CallsiteCount, Sum, false, ORE);
1132 if (DI) {
1133 Sum -= Candidate.CallsiteCount;
1134 // Prorate the indirect callsite distribution.
1135 // Do not update the promoted direct callsite distribution at this
1136 // point since the original distribution combined with the callee
1137 // profile will be used to prorate callsites from the callee if
1138 // inlined. Once not inlined, the direct callsite distribution should
1139 // be prorated so that it reflects the real callsite counts.
1140 setProbeDistributionFactor(*Candidate.CallInstr,
1141 Candidate.CallsiteDistribution * Sum /
1142 SumOrigin);
1143 PromotedInsns.insert(Candidate.CallInstr);
1144 Candidate.CallInstr = DI;
1145 if (isa<CallInst>(DI) || isa<InvokeInst>(DI)) {
1146 bool Inlined = tryInlineCandidate(Candidate, InlinedCallSite);
1147 if (!Inlined) {
1148 // Prorate the direct callsite distribution so that it reflects real
1149 // callsite counts.
1150 setProbeDistributionFactor(*DI, Candidate.CallsiteDistribution *
1151 Candidate.CallsiteCount /
1152 SumOrigin);
1153 }
1154 return Inlined;
1155 }
1156 }
1157 } else {
1158 LLVM_DEBUG(dbgs() << "\nFailed to promote indirect call to "
1159 << Candidate.CalleeSamples->getFuncName() << " because "
1160 << Reason << "\n");
1161 }
1162 return false;
1163 }
1164
1165 bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) {
1166 if (!ProfileSizeInline)
1167 return false;
1168
1169 Function *Callee = CallInst.getCalledFunction();
1170 if (Callee == nullptr)
1171 return false;
1172
1173 InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee),
1174 GetAC, GetTLI);
1175
1176 if (Cost.isNever())
1177 return false;
1178
1179 if (Cost.isAlways())
1180 return true;
1181
1182 return Cost.getCost() <= SampleColdCallSiteThreshold;
1183 }
1184
1185 void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
1186 const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
1187 bool Hot) {
1188 for (auto I : Candidates) {
1189 Function *CalledFunction = I->getCalledFunction();
1190 if (CalledFunction) {
1191 ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineAttempt",
1192 I->getDebugLoc(), I->getParent())
1193 << "previous inlining reattempted for "
1194 << (Hot ? "hotness: '" : "size: '")
1195 << ore::NV("Callee", CalledFunction) << "' into '"
1196 << ore::NV("Caller", &F) << "'");
1197 }
1198 }
1199 }
1200
1201 /// Iteratively inline hot callsites of a function.
1202 ///
1203 /// Iteratively traverse all callsites of the function \p F, and find if
1204 /// the corresponding inlined instance exists and is hot in the profile. If
1205 /// it is hot enough, inline the callsite and add the new callsites of the
1206 /// callee into the caller. If the call is an indirect call, first promote
1207 /// it to a direct call. Each indirect call is limited to a single target.
1208 ///
1209 /// \param F function to perform iterative inlining.
1210 /// \param InlinedGUIDs a set to be updated to include all GUIDs that are
1211 /// inlined in the profiled binary.
1212 ///
1213 /// \returns True if any inlining happened.
1214 bool SampleProfileLoader::inlineHotFunctions(
1215 Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
1216 DenseSet<Instruction *> PromotedInsns;
1217
1218 // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
1219 // Profile symbol list is ignored when profile-sample-accurate is on.
1220 assert((!ProfAccForSymsInList ||
1221 (!ProfileSampleAccurate &&
1222 !F.hasFnAttribute("profile-sample-accurate"))) &&
1223 "ProfAccForSymsInList should be false when profile-sample-accurate "
1224 "is enabled");
1225
1226 DenseMap<CallBase *, const FunctionSamples *> LocalNotInlinedCallSites;
1227 bool Changed = false;
1228 bool LocalChanged = true;
1229 while (LocalChanged) {
1230 LocalChanged = false;
1231 SmallVector<CallBase *, 10> CIS;
1232 for (auto &BB : F) {
1233 bool Hot = false;
1234 SmallVector<CallBase *, 10> AllCandidates;
1235 SmallVector<CallBase *, 10> ColdCandidates;
1236 for (auto &I : BB.getInstList()) {
1237 const FunctionSamples *FS = nullptr;
1238 if (auto *CB = dyn_cast<CallBase>(&I)) {
1239 if (!isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(*CB))) {
1240 assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
1241 "GUIDToFuncNameMap has to be populated");
1242 AllCandidates.push_back(CB);
1243 if (FS->getEntrySamples() > 0 || ProfileIsCS)
1244 LocalNotInlinedCallSites.try_emplace(CB, FS);
1245 if (callsiteIsHot(FS, PSI))
1246 Hot = true;
1247 else if (shouldInlineColdCallee(*CB))
1248 ColdCandidates.push_back(CB);
1249 }
1250 }
1251 }
1252 if (Hot || ExternalInlineAdvisor) {
1253 CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
1254 emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
1255 } else {
1256 CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
1257 emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
1258 }
1259 }
1260 for (CallBase *I : CIS) {
1261 Function *CalledFunction = I->getCalledFunction();
1262 InlineCandidate Candidate = {
1263 I,
1264 LocalNotInlinedCallSites.count(I) ? LocalNotInlinedCallSites[I]
1265 : nullptr,
1266 0 /* dummy count */, 1.0 /* dummy distribution factor */};
1267 // Do not inline recursive calls.
1268 if (CalledFunction == &F)
1269 continue;
1270 if (I->isIndirectCall()) {
1271 if (PromotedInsns.count(I))
1272 continue;
1273 uint64_t Sum;
1274 for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
1275 uint64_t SumOrigin = Sum;
1276 if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1277 FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
1278 PSI->getOrCompHotCountThreshold());
1279 continue;
1280 }
1281 if (!callsiteIsHot(FS, PSI))
1282 continue;
1283
1284 Candidate = {I, FS, FS->getEntrySamples(), 1.0};
1285 if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum,
1286 PromotedInsns)) {
1287 LocalNotInlinedCallSites.erase(I);
1288 LocalChanged = true;
1289 }
1290 }
1291 } else if (CalledFunction && CalledFunction->getSubprogram() &&
1292 !CalledFunction->isDeclaration()) {
1293 if (tryInlineCandidate(Candidate)) {
1294 LocalNotInlinedCallSites.erase(I);
1295 LocalChanged = true;
1296 }
1297 } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1298 findCalleeFunctionSamples(*I)->findInlinedFunctions(
1299 InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
1300 }
1301 }
1302 Changed |= LocalChanged;
1303 }
1304
1305 // For CS profile, the profile of a not-inlined context will be merged when
1306 // the base profile is retrieved.
1307 if (ProfileIsCS)
1308 return Changed;
1309
1310 // Accumulate not-inlined callsite information into notInlinedCallInfo.
1311 for (const auto &Pair : LocalNotInlinedCallSites) {
1312 CallBase *I = Pair.getFirst();
1313 Function *Callee = I->getCalledFunction();
1314 if (!Callee || Callee->isDeclaration())
1315 continue;
1316
1317 ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "NotInline",
1318 I->getDebugLoc(), I->getParent())
1319 << "previous inlining not repeated: '"
1320 << ore::NV("Callee", Callee) << "' into '"
1321 << ore::NV("Caller", &F) << "'");
1322
1323 ++NumCSNotInlined;
1324 const FunctionSamples *FS = Pair.getSecond();
1325 if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) {
1326 continue;
1327 }
1328
1329 if (ProfileMergeInlinee) {
1330 // A function call can be replicated by optimizations like callsite
1331 // splitting or jump threading and the replicates end up sharing the
1332 // sample nested callee profile instead of slicing the original inlinee's
1333 // profile. We want to do the merge exactly once by filtering out callee
1334 // profiles with a non-zero head sample count.
1335 if (FS->getHeadSamples() == 0) {
1336 // Use entry samples as head samples during the merge, as inlinees
1337 // don't have head samples.
1338 const_cast<FunctionSamples *>(FS)->addHeadSamples(
1339 FS->getEntrySamples());
1340
1341 // Note that we have to do the merge right after processing function.
1342 // This allows OutlineFS's profile to be used for annotation during
1343 // top-down processing of functions' annotation.
1344 FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
1345 OutlineFS->merge(*FS);
1346 }
1347 } else {
1348 auto pair =
1349 notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
1350 pair.first->second.entryCount += FS->getEntrySamples();
1351 }
1352 }
1353 return Changed;
1354 }
1355
1356 bool SampleProfileLoader::tryInlineCandidate(
1357 InlineCandidate &Candidate, SmallVector<CallBase *, 8> *InlinedCallSites) {
1358
1359 CallBase &CB = *Candidate.CallInstr;
1360 Function *CalledFunction = CB.getCalledFunction();
1361 assert(CalledFunction && "Expect a callee with definition");
1362 DebugLoc DLoc = CB.getDebugLoc();
1363 BasicBlock *BB = CB.getParent();
1364
1365 InlineCost Cost = shouldInlineCandidate(Candidate);
1366 if (Cost.isNever()) {
1367 ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB)
1368 << "incompatible inlining");
1369 return false;
1370 }
1371
1372 if (!Cost)
1373 return false;
1374
1375 InlineFunctionInfo IFI(nullptr, GetAC);
1376 if (InlineFunction(CB, IFI).isSuccess()) {
1377 // The call to InlineFunction erases I, so we can't pass it here.
1378 emitInlinedInto(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(), Cost,
1379 true, CSINLINE_DEBUG);
1380
1381 // Now populate the list of newly exposed call sites.
1382 if (InlinedCallSites) {
1383 InlinedCallSites->clear();
1384 for (auto &I : IFI.InlinedCallSites)
1385 InlinedCallSites->push_back(I);
1386 }
1387
1388 if (ProfileIsCS)
1389 ContextTracker->markContextSamplesInlined(Candidate.CalleeSamples);
1390 ++NumCSInlined;
1391
1392 // Prorate inlined probes for a duplicated inlining callsite which probably
1393 // has a distribution less than 100%. Samples for an inlinee should be
1394 // distributed among the copies of the original callsite based on each
1395 // callsite's distribution factor for counts accuracy. Note that an inlined
1396 // probe may come with its own distribution factor if it has been duplicated
1397     // in the inlinee body. The two factors are multiplied to reflect the
1398 // aggregation of duplication.
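    // For example (hypothetical numbers): a callsite copy carrying a 0.6
    // distribution factor that contains an inlined probe already scaled to
    // 0.5 ends up with a combined factor of 0.3 for that probe.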
1399 if (Candidate.CallsiteDistribution < 1) {
1400 for (auto &I : IFI.InlinedCallSites) {
1401 if (Optional<PseudoProbe> Probe = extractProbe(*I))
1402 setProbeDistributionFactor(*I, Probe->Factor *
1403 Candidate.CallsiteDistribution);
1404 }
1405 NumDuplicatedInlinesite++;
1406 }
1407
1408 return true;
1409 }
1410 return false;
1411 }
1412
1413 bool SampleProfileLoader::getInlineCandidate(InlineCandidate *NewCandidate,
1414 CallBase *CB) {
1415 assert(CB && "Expect non-null call instruction");
1416
1417 if (isa<IntrinsicInst>(CB))
1418 return false;
1419
1420   // Find the callee's profile. For an indirect call, find the hottest target profile.
1421 const FunctionSamples *CalleeSamples = findCalleeFunctionSamples(*CB);
1422 if (!CalleeSamples)
1423 return false;
1424
1425 float Factor = 1.0;
1426 if (Optional<PseudoProbe> Probe = extractProbe(*CB))
1427 Factor = Probe->Factor;
1428
1429 uint64_t CallsiteCount = 0;
1430 ErrorOr<uint64_t> Weight = getBlockWeight(CB->getParent());
1431 if (Weight)
1432 CallsiteCount = Weight.get();
1433 if (CalleeSamples)
1434 CallsiteCount = std::max(
1435 CallsiteCount, uint64_t(CalleeSamples->getEntrySamples() * Factor));
1436
1437 *NewCandidate = {CB, CalleeSamples, CallsiteCount, Factor};
1438 return true;
1439 }
1440
1441 InlineCost
1442 SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
1443 std::unique_ptr<InlineAdvice> Advice = nullptr;
1444 if (ExternalInlineAdvisor) {
1445 Advice = ExternalInlineAdvisor->getAdvice(*Candidate.CallInstr);
1446 if (!Advice->isInliningRecommended()) {
1447 Advice->recordUnattemptedInlining();
1448 return InlineCost::getNever("not previously inlined");
1449 }
1450 Advice->recordInlining();
1451 return InlineCost::getAlways("previously inlined");
1452 }
1453
1454   // Adjust the threshold based on call site hotness. Only do this for the
1455   // callsite-prioritized inliner; otherwise the cost-benefit check is done earlier.
1456 int SampleThreshold = SampleColdCallSiteThreshold;
1457 if (CallsitePrioritizedInline) {
1458 if (Candidate.CallsiteCount > PSI->getHotCountThreshold())
1459 SampleThreshold = SampleHotCallSiteThreshold;
1460 else if (!ProfileSizeInline)
1461 return InlineCost::getNever("cold callsite");
1462 }
1463
1464 Function *Callee = Candidate.CallInstr->getCalledFunction();
1465 assert(Callee && "Expect a definition for inline candidate of direct call");
1466
1467 InlineParams Params = getInlineParams();
1468 Params.ComputeFullInlineCost = true;
1469   // Checks if there is anything in the reachable portion of the callee at
1470   // this callsite that makes this inlining potentially illegal. We need to
1471   // set ComputeFullInlineCost, otherwise getInlineCost may return early
1472   // when the cost exceeds the threshold without checking all of the IR in
1473   // the callee. The actual cost does not matter because we only check
1474   // isNever() to see if it is legal to inline the callsite.
1475 InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params,
1476 GetTTI(*Callee), GetAC, GetTLI);
1477
1478 // Honor always inline and never inline from call analyzer
1479 if (Cost.isNever() || Cost.isAlways())
1480 return Cost;
1481
1482 // For old FDO inliner, we inline the call site as long as cost is not
1483 // "Never". The cost-benefit check is done earlier.
1484 if (!CallsitePrioritizedInline) {
1485 return InlineCost::get(Cost.getCost(), INT_MAX);
1486 }
1487
1488   // Otherwise only use the cost from the call analyzer, but overwrite the
1489   // threshold with the Sample PGO threshold.
1490 return InlineCost::get(Cost.getCost(), SampleThreshold);
1491 }
1492
1493 bool SampleProfileLoader::inlineHotFunctionsWithPriority(
1494 Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
1495 DenseSet<Instruction *> PromotedInsns;
1496   assert(ProfileIsCS && "Priority-based inliner only works with CSSPGO now");
1497
1498 // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
1499   // the profile symbol list is ignored when profile-sample-accurate is on.
1500 assert((!ProfAccForSymsInList ||
1501 (!ProfileSampleAccurate &&
1502 !F.hasFnAttribute("profile-sample-accurate"))) &&
1503 "ProfAccForSymsInList should be false when profile-sample-accurate "
1504 "is enabled");
1505
1506   // Populate the worklist with initial call sites from the root inliner,
1507   // along with call site weights.
1508 CandidateQueue CQueue;
1509 InlineCandidate NewCandidate;
1510 for (auto &BB : F) {
1511 for (auto &I : BB.getInstList()) {
1512 auto *CB = dyn_cast<CallBase>(&I);
1513 if (!CB)
1514 continue;
1515 if (getInlineCandidate(&NewCandidate, CB))
1516 CQueue.push(NewCandidate);
1517 }
1518 }
1519
1520   // Cap the size growth from profile-guided inlining. This is needed even
1521   // though the cost of each inline candidate already accounts for callee
1522   // size, because with top-down inlining we can still grow the caller
1523   // significantly with many smaller inlinees that each pass the cost check.
1524 assert(ProfileInlineLimitMax >= ProfileInlineLimitMin &&
1525 "Max inline size limit should not be smaller than min inline size "
1526 "limit.");
1527 unsigned SizeLimit = F.getInstructionCount() * ProfileInlineGrowthLimit;
1528 SizeLimit = std::min(SizeLimit, (unsigned)ProfileInlineLimitMax);
1529 SizeLimit = std::max(SizeLimit, (unsigned)ProfileInlineLimitMin);
1530 if (ExternalInlineAdvisor)
1531 SizeLimit = std::numeric_limits<unsigned>::max();
1532
1533 // Perform iterative BFS call site prioritized inlining
1534 bool Changed = false;
1535 while (!CQueue.empty() && F.getInstructionCount() < SizeLimit) {
1536 InlineCandidate Candidate = CQueue.top();
1537 CQueue.pop();
1538 CallBase *I = Candidate.CallInstr;
1539 Function *CalledFunction = I->getCalledFunction();
1540
1541 if (CalledFunction == &F)
1542 continue;
1543 if (I->isIndirectCall()) {
1544 if (PromotedInsns.count(I))
1545 continue;
1546 uint64_t Sum;
1547 auto CalleeSamples = findIndirectCallFunctionSamples(*I, Sum);
1548 uint64_t SumOrigin = Sum;
1549 Sum *= Candidate.CallsiteDistribution;
1550 for (const auto *FS : CalleeSamples) {
1551         // TODO: Consider disabling pre-LTO ICP for MonoLTO as well.
1552 if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1553 FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
1554 PSI->getOrCompHotCountThreshold());
1555 continue;
1556 }
1557 uint64_t EntryCountDistributed =
1558 FS->getEntrySamples() * Candidate.CallsiteDistribution;
1559         // In addition to the regular inline cost check, we also need to make sure
1560         // ICP isn't introducing excessive speculative checks even if an individual
1561         // target looks beneficial to promote and inline. That means we should
1562         // only do ICP when there is a small number of dominant targets.
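        // In other words, a target whose distributed entry count falls below
        // SumOrigin / ProfileICPThreshold stops the scan, so only targets
        // carrying at least a 1/ProfileICPThreshold share of the original
        // total are considered for promotion.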
1563 if (EntryCountDistributed < SumOrigin / ProfileICPThreshold)
1564 break;
1565 // TODO: Fix CallAnalyzer to handle all indirect calls.
1566         // For an indirect call, we don't run CallAnalyzer to get InlineCost
1567         // before actual inlining. This is because we could see two different
1568         // types from the same definition, which makes CallAnalyzer choke as
1569         // it expects matching parameter types on both the caller and callee
1570         // side. See the example in PR18962 for the triggering cases (the bug
1571         // was fixed, but we generate different types).
1572 if (!PSI->isHotCount(EntryCountDistributed))
1573 break;
1574 SmallVector<CallBase *, 8> InlinedCallSites;
1575 // Attach function profile for promoted indirect callee, and update
1576 // call site count for the promoted inline candidate too.
1577 Candidate = {I, FS, EntryCountDistributed,
1578 Candidate.CallsiteDistribution};
1579 if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum,
1580 PromotedInsns, &InlinedCallSites)) {
1581 for (auto *CB : InlinedCallSites) {
1582 if (getInlineCandidate(&NewCandidate, CB))
1583 CQueue.emplace(NewCandidate);
1584 }
1585 Changed = true;
1586 }
1587 }
1588 } else if (CalledFunction && CalledFunction->getSubprogram() &&
1589 !CalledFunction->isDeclaration()) {
1590 SmallVector<CallBase *, 8> InlinedCallSites;
1591 if (tryInlineCandidate(Candidate, &InlinedCallSites)) {
1592 for (auto *CB : InlinedCallSites) {
1593 if (getInlineCandidate(&NewCandidate, CB))
1594 CQueue.emplace(NewCandidate);
1595 }
1596 Changed = true;
1597 }
1598 } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1599 findCalleeFunctionSamples(*I)->findInlinedFunctions(
1600 InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
1601 }
1602 }
1603
1604 if (!CQueue.empty()) {
1605 if (SizeLimit == (unsigned)ProfileInlineLimitMax)
1606 ++NumCSInlinedHitMaxLimit;
1607 else if (SizeLimit == (unsigned)ProfileInlineLimitMin)
1608 ++NumCSInlinedHitMinLimit;
1609 else
1610 ++NumCSInlinedHitGrowthLimit;
1611 }
1612
1613 return Changed;
1614 }
1615
1616 /// Find equivalence classes for the given block.
1617 ///
1618 /// This finds all the blocks that are guaranteed to execute the same
1619 /// number of times as \p BB1. To do this, it traverses all the
1620 /// descendants of \p BB1 in the dominator or post-dominator tree.
1621 ///
1622 /// A block BB2 will be in the same equivalence class as \p BB1 if
1623 /// the following holds:
1624 ///
1625 /// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
1626 /// is a descendant of \p BB1 in the dominator tree, then BB2 should
1627 /// dominate BB1 in the post-dominator tree.
1628 ///
1629 /// 2- Both BB2 and \p BB1 must be in the same loop.
1630 ///
1631 /// For every block BB2 that meets those two requirements, we set BB2's
1632 /// equivalence class to \p BB1.
1633 ///
1634 /// \param BB1 Block to check.
1635 /// \param Descendants Descendants of \p BB1 in either the dom or pdom tree.
1636 /// \param DomTree Opposite dominator tree. If \p Descendants is filled
1637 /// with blocks from \p BB1's dominator tree, then
1638 /// this is the post-dominator tree, and vice versa.
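/// As a small example (hypothetical CFG): in a diamond where BB1 branches to
/// T and F, which both rejoin at BB2, BB1 dominates BB2 and BB2
/// post-dominates BB1, so BB1 and BB2 end up in the same equivalence class
/// (assuming they are in the same loop), while T and F stay in their own.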
1639 template <bool IsPostDom>
1640 void SampleProfileLoader::findEquivalencesFor(
1641 BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
1642 DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) {
1643 const BasicBlock *EC = EquivalenceClass[BB1];
1644 uint64_t Weight = BlockWeights[EC];
1645 for (const auto *BB2 : Descendants) {
1646 bool IsDomParent = DomTree->dominates(BB2, BB1);
1647 bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
1648 if (BB1 != BB2 && IsDomParent && IsInSameLoop) {
1649 EquivalenceClass[BB2] = EC;
1650 // If BB2 is visited, then the entire EC should be marked as visited.
1651 if (VisitedBlocks.count(BB2)) {
1652 VisitedBlocks.insert(EC);
1653 }
1654
1655 // If BB2 is heavier than BB1, make BB2 have the same weight
1656 // as BB1.
1657 //
1658 // Note that we don't worry about the opposite situation here
1659 // (when BB2 is lighter than BB1). We will deal with this
1660 // during the propagation phase. Right now, we just want to
1661 // make sure that BB1 has the largest weight of all the
1662 // members of its equivalence set.
1663 Weight = std::max(Weight, BlockWeights[BB2]);
1664 }
1665 }
1666 if (EC == &EC->getParent()->getEntryBlock()) {
1667 BlockWeights[EC] = Samples->getHeadSamples() + 1;
1668 } else {
1669 BlockWeights[EC] = Weight;
1670 }
1671 }
1672
1673 /// Find equivalence classes.
1674 ///
1675 /// Since samples may be missing from blocks, we can fill in the gaps by setting
1676 /// the weights of all the blocks in the same equivalence class to the same
1677 /// weight. To compute the concept of equivalence, we use dominance and loop
1678 /// information. Two blocks B1 and B2 are in the same equivalence class if B1
1679 /// dominates B2, B2 post-dominates B1 and both are in the same loop.
1680 ///
1681 /// \param F The function to query.
1682 void SampleProfileLoader::findEquivalenceClasses(Function &F) {
1683 SmallVector<BasicBlock *, 8> DominatedBBs;
1684 LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n");
1685 // Find equivalence sets based on dominance and post-dominance information.
1686 for (auto &BB : F) {
1687 BasicBlock *BB1 = &BB;
1688
1689 // Compute BB1's equivalence class once.
1690 if (EquivalenceClass.count(BB1)) {
1691 LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
1692 continue;
1693 }
1694
1695 // By default, blocks are in their own equivalence class.
1696 EquivalenceClass[BB1] = BB1;
1697
1698 // Traverse all the blocks dominated by BB1. We are looking for
1699 // every basic block BB2 such that:
1700 //
1701 // 1- BB1 dominates BB2.
1702 // 2- BB2 post-dominates BB1.
1703 // 3- BB1 and BB2 are in the same loop nest.
1704 //
1705 // If all those conditions hold, it means that BB2 is executed
1706 // as many times as BB1, so they are placed in the same equivalence
1707 // class by making BB2's equivalence class be BB1.
1708 DominatedBBs.clear();
1709 DT->getDescendants(BB1, DominatedBBs);
1710 findEquivalencesFor(BB1, DominatedBBs, PDT.get());
1711
1712 LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
1713 }
1714
1715 // Assign weights to equivalence classes.
1716 //
1717 // All the basic blocks in the same equivalence class will execute
1718 // the same number of times. Since we know that the head block in
1719 // each equivalence class has the largest weight, assign that weight
1720 // to all the blocks in that equivalence class.
1721 LLVM_DEBUG(
1722 dbgs() << "\nAssign the same weight to all blocks in the same class\n");
1723 for (auto &BI : F) {
1724 const BasicBlock *BB = &BI;
1725 const BasicBlock *EquivBB = EquivalenceClass[BB];
1726 if (BB != EquivBB)
1727 BlockWeights[BB] = BlockWeights[EquivBB];
1728 LLVM_DEBUG(printBlockWeight(dbgs(), BB));
1729 }
1730 }
1731
1732 /// Visit the given edge to decide if it has a valid weight.
1733 ///
1734 /// If \p E has not been visited before, we copy to \p UnknownEdge
1735 /// and increment the count of unknown edges.
1736 ///
1737 /// \param E Edge to visit.
1738 /// \param NumUnknownEdges Current number of unknown edges.
1739 /// \param UnknownEdge Set if E has not been visited before.
1740 ///
1741 /// \returns E's weight, if known. Otherwise, return 0.
1742 uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges,
1743 Edge *UnknownEdge) {
1744 if (!VisitedEdges.count(E)) {
1745 (*NumUnknownEdges)++;
1746 *UnknownEdge = E;
1747 return 0;
1748 }
1749
1750 return EdgeWeights[E];
1751 }
1752
1753 /// Propagate weights through incoming/outgoing edges.
1754 ///
1755 /// If the weight of a basic block is known, and there is only one edge
1756 /// with an unknown weight, we can calculate the weight of that edge.
1757 ///
1758 /// Similarly, if all the edges have a known count, we can calculate the
1759 /// count of the basic block, if needed.
1760 ///
1761 /// \param F Function to process.
1762 /// \param UpdateBlockCount Whether we should update basic block counts that
1763 ///                         have already been annotated.
1764 ///
1765 /// \returns True if new weights were assigned to edges or blocks.
1766 bool SampleProfileLoader::propagateThroughEdges(Function &F,
1767 bool UpdateBlockCount) {
1768 bool Changed = false;
1769 LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
1770 for (const auto &BI : F) {
1771 const BasicBlock *BB = &BI;
1772 const BasicBlock *EC = EquivalenceClass[BB];
1773
1774 // Visit all the predecessor and successor edges to determine
1775 // which ones have a weight assigned already. Note that it doesn't
1776 // matter that we only keep track of a single unknown edge. The
1777 // only case we are interested in handling is when only a single
1778 // edge is unknown (see setEdgeOrBlockWeight).
1779 for (unsigned i = 0; i < 2; i++) {
1780 uint64_t TotalWeight = 0;
1781 unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
1782 Edge UnknownEdge, SelfReferentialEdge, SingleEdge;
1783
1784 if (i == 0) {
1785 // First, visit all predecessor edges.
1786 NumTotalEdges = Predecessors[BB].size();
1787 for (auto *Pred : Predecessors[BB]) {
1788 Edge E = std::make_pair(Pred, BB);
1789 TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
1790 if (E.first == E.second)
1791 SelfReferentialEdge = E;
1792 }
1793 if (NumTotalEdges == 1) {
1794 SingleEdge = std::make_pair(Predecessors[BB][0], BB);
1795 }
1796 } else {
1797 // On the second round, visit all successor edges.
1798 NumTotalEdges = Successors[BB].size();
1799 for (auto *Succ : Successors[BB]) {
1800 Edge E = std::make_pair(BB, Succ);
1801 TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
1802 }
1803 if (NumTotalEdges == 1) {
1804 SingleEdge = std::make_pair(BB, Successors[BB][0]);
1805 }
1806 }
1807
1808 // After visiting all the edges, there are three cases that we
1809 // can handle immediately:
1810 //
1811 // - All the edge weights are known (i.e., NumUnknownEdges == 0).
1812 // In this case, we simply check that the sum of all the edges
1813 // is the same as BB's weight. If not, we change BB's weight
1814 // to match. Additionally, if BB had not been visited before,
1815 // we mark it visited.
1816 //
1817 // - Only one edge is unknown and BB has already been visited.
1818       //   In this case, we can compute the weight of the edge by
1819       //   subtracting the sum of all the known edge weights from the
1820       //   block weight. If the known edges weigh more than BB, then the
1821       //   weight of the last remaining edge is set to zero.
1822 //
1823 // - There exists a self-referential edge and the weight of BB is
1824       //   known. In this case, the edge weight can be derived from BB's weight.
1825 // We add up all the other known edges and set the weight on
1826 // the self-referential edge as we did in the previous case.
1827 //
1828 // In any other case, we must continue iterating. Eventually,
1829 // all edges will get a weight, or iteration will stop when
1830 // it reaches SampleProfileMaxPropagateIterations.
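      // As a concrete illustration of the second case (hypothetical numbers):
      // if BB's weight is 100 and its outgoing edges have known weights
      // summing to 60 with one edge unknown, the unknown edge is assigned 40;
      // if the known edges already sum to 120, the unknown edge is clamped to 0.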
1831 if (NumUnknownEdges <= 1) {
1832 uint64_t &BBWeight = BlockWeights[EC];
1833 if (NumUnknownEdges == 0) {
1834 if (!VisitedBlocks.count(EC)) {
1835 // If we already know the weight of all edges, the weight of the
1836 // basic block can be computed. It should be no larger than the sum
1837 // of all edge weights.
1838 if (TotalWeight > BBWeight) {
1839 BBWeight = TotalWeight;
1840 Changed = true;
1841 LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
1842 << " known. Set weight for block: ";
1843 printBlockWeight(dbgs(), BB););
1844 }
1845 } else if (NumTotalEdges == 1 &&
1846 EdgeWeights[SingleEdge] < BlockWeights[EC]) {
1847 // If there is only one edge for the visited basic block, use the
1848 // block weight to adjust edge weight if edge weight is smaller.
1849 EdgeWeights[SingleEdge] = BlockWeights[EC];
1850 Changed = true;
1851 }
1852 } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
1853 // If there is a single unknown edge and the block has been
1854 // visited, then we can compute E's weight.
1855 if (BBWeight >= TotalWeight)
1856 EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
1857 else
1858 EdgeWeights[UnknownEdge] = 0;
1859 const BasicBlock *OtherEC;
1860 if (i == 0)
1861 OtherEC = EquivalenceClass[UnknownEdge.first];
1862 else
1863 OtherEC = EquivalenceClass[UnknownEdge.second];
1864 // Edge weights should never exceed the BB weights it connects.
1865 if (VisitedBlocks.count(OtherEC) &&
1866 EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
1867 EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
1868 VisitedEdges.insert(UnknownEdge);
1869 Changed = true;
1870 LLVM_DEBUG(dbgs() << "Set weight for edge: ";
1871 printEdgeWeight(dbgs(), UnknownEdge));
1872 }
1873 } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
1874       // If a block weighs 0, all its in/out edges should weigh 0.
1875 if (i == 0) {
1876 for (auto *Pred : Predecessors[BB]) {
1877 Edge E = std::make_pair(Pred, BB);
1878 EdgeWeights[E] = 0;
1879 VisitedEdges.insert(E);
1880 }
1881 } else {
1882 for (auto *Succ : Successors[BB]) {
1883 Edge E = std::make_pair(BB, Succ);
1884 EdgeWeights[E] = 0;
1885 VisitedEdges.insert(E);
1886 }
1887 }
1888 } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
1889 uint64_t &BBWeight = BlockWeights[BB];
1890 // We have a self-referential edge and the weight of BB is known.
1891 if (BBWeight >= TotalWeight)
1892 EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
1893 else
1894 EdgeWeights[SelfReferentialEdge] = 0;
1895 VisitedEdges.insert(SelfReferentialEdge);
1896 Changed = true;
1897 LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
1898 printEdgeWeight(dbgs(), SelfReferentialEdge));
1899 }
1900 if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
1901 BlockWeights[EC] = TotalWeight;
1902 VisitedBlocks.insert(EC);
1903 Changed = true;
1904 }
1905 }
1906 }
1907
1908 return Changed;
1909 }
1910
1911 /// Build in/out edge lists for each basic block in the CFG.
1912 ///
1913 /// We are interested in unique edges. If a block B1 has multiple
1914 /// edges to another block B2, we only add a single B1->B2 edge.
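/// For example, a switch whose cases all jump to the same destination block
/// contributes only a single edge to that destination here.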
1915 void SampleProfileLoader::buildEdges(Function &F) {
1916 for (auto &BI : F) {
1917 BasicBlock *B1 = &BI;
1918
1919 // Add predecessors for B1.
1920 SmallPtrSet<BasicBlock *, 16> Visited;
1921 if (!Predecessors[B1].empty())
1922 llvm_unreachable("Found a stale predecessors list in a basic block.");
1923 for (pred_iterator PI = pred_begin(B1), PE = pred_end(B1); PI != PE; ++PI) {
1924 BasicBlock *B2 = *PI;
1925 if (Visited.insert(B2).second)
1926 Predecessors[B1].push_back(B2);
1927 }
1928
1929 // Add successors for B1.
1930 Visited.clear();
1931 if (!Successors[B1].empty())
1932 llvm_unreachable("Found a stale successors list in a basic block.");
1933 for (succ_iterator SI = succ_begin(B1), SE = succ_end(B1); SI != SE; ++SI) {
1934 BasicBlock *B2 = *SI;
1935 if (Visited.insert(B2).second)
1936 Successors[B1].push_back(B2);
1937 }
1938 }
1939 }
1940
1941 /// Returns the sorted CallTargetMap \p M by count in descending order.
1942 static SmallVector<InstrProfValueData, 2> GetSortedValueDataFromCallTargets(
1943     const SampleRecord::CallTargetMap &M) {
1944 SmallVector<InstrProfValueData, 2> R;
1945 for (const auto &I : SampleRecord::SortCallTargets(M)) {
1946 R.emplace_back(InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
1947 }
1948 return R;
1949 }
1950
1951 /// Propagate weights into edges
1952 ///
1953 /// The following rules are applied to every block BB in the CFG:
1954 ///
1955 /// - If BB has a single predecessor/successor, then the weight
1956 /// of that edge is the weight of the block.
1957 ///
1958 /// - If all incoming or outgoing edges are known except one, and the
1959 /// weight of the block is already known, the weight of the unknown
1960 /// edge will be the weight of the block minus the sum of all the known
1961 /// edges. If the sum of all the known edges is larger than BB's weight,
1962 /// we set the unknown edge weight to zero.
1963 ///
1964 /// - If there is a self-referential edge, and the weight of the block is
1965 /// known, the weight for that edge is set to the weight of the block
1966 /// minus the weight of the other incoming edges to that block (if
1967 /// known).
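///
/// As a hypothetical example of the first rule: if a block with weight 50
/// has a single successor, the edge to that successor is also assigned
/// weight 50.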
1968 void SampleProfileLoader::propagateWeights(Function &F) {
1969 bool Changed = true;
1970 unsigned I = 0;
1971
1972 // If BB weight is larger than its corresponding loop's header BB weight,
1973 // use the BB weight to replace the loop header BB weight.
1974 for (auto &BI : F) {
1975 BasicBlock *BB = &BI;
1976 Loop *L = LI->getLoopFor(BB);
1977 if (!L) {
1978 continue;
1979 }
1980 BasicBlock *Header = L->getHeader();
1981 if (Header && BlockWeights[BB] > BlockWeights[Header]) {
1982 BlockWeights[Header] = BlockWeights[BB];
1983 }
1984 }
1985
1986 // Before propagation starts, build, for each block, a list of
1987 // unique predecessors and successors. This is necessary to handle
1988 // identical edges in multiway branches. Since we visit all blocks and all
1989 // edges of the CFG, it is cleaner to build these lists once at the start
1990 // of the pass.
1991 buildEdges(F);
1992
1993 // Propagate until we converge or we go past the iteration limit.
1994 while (Changed && I++ < SampleProfileMaxPropagateIterations) {
1995 Changed = propagateThroughEdges(F, false);
1996 }
1997
1998   // The first propagation pass propagates BB counts from annotated BBs to
1999   // unknown BBs. The second pass resets edge weights and uses all BB weights
2000   // to propagate edge weights.
2001 VisitedEdges.clear();
2002 Changed = true;
2003 while (Changed && I++ < SampleProfileMaxPropagateIterations) {
2004 Changed = propagateThroughEdges(F, false);
2005 }
2006
2007   // The third propagation pass allows adjusting annotated BB weights that
2008   // are obviously wrong.
2009 Changed = true;
2010 while (Changed && I++ < SampleProfileMaxPropagateIterations) {
2011 Changed = propagateThroughEdges(F, true);
2012 }
2013
2014 // Generate MD_prof metadata for every branch instruction using the
2015 // edge weights computed during propagation.
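  // The emitted metadata uses the standard branch_weights form; e.g. a two-way
  // branch whose successor edges were computed as 70 and 30 ends up with a
  // node roughly like !{!"branch_weights", i32 71, i32 31} (the +1 offset
  // applied below avoids zero weights).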
2016 LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
2017 LLVMContext &Ctx = F.getContext();
2018 MDBuilder MDB(Ctx);
2019 for (auto &BI : F) {
2020 BasicBlock *BB = &BI;
2021
2022 if (BlockWeights[BB]) {
2023 for (auto &I : BB->getInstList()) {
2024 if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
2025 continue;
2026 if (!cast<CallBase>(I).getCalledFunction()) {
2027 const DebugLoc &DLoc = I.getDebugLoc();
2028 if (!DLoc)
2029 continue;
2030 const DILocation *DIL = DLoc;
2031 const FunctionSamples *FS = findFunctionSamples(I);
2032 if (!FS)
2033 continue;
2034 auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
2035 auto T = FS->findCallTargetMapAt(CallSite);
2036 if (!T || T.get().empty())
2037 continue;
2038 // Prorate the callsite counts to reflect what is already done to the
2039           // callsite, such as ICP or callsite cloning.
2040 if (FunctionSamples::ProfileIsProbeBased) {
2041 if (Optional<PseudoProbe> Probe = extractProbe(I)) {
2042 if (Probe->Factor < 1)
2043 T = SampleRecord::adjustCallTargets(T.get(), Probe->Factor);
2044 }
2045 }
2046 SmallVector<InstrProfValueData, 2> SortedCallTargets =
2047 GetSortedValueDataFromCallTargets(T.get());
2048 uint64_t Sum;
2049 findIndirectCallFunctionSamples(I, Sum);
2050 annotateValueSite(*I.getParent()->getParent()->getParent(), I,
2051 SortedCallTargets, Sum, IPVK_IndirectCallTarget,
2052 SortedCallTargets.size());
2053 } else if (!isa<IntrinsicInst>(&I)) {
2054 I.setMetadata(LLVMContext::MD_prof,
2055 MDB.createBranchWeights(
2056 {static_cast<uint32_t>(BlockWeights[BB])}));
2057 }
2058 }
2059 }
2060 Instruction *TI = BB->getTerminator();
2061 if (TI->getNumSuccessors() == 1)
2062 continue;
2063 if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
2064 continue;
2065
2066 DebugLoc BranchLoc = TI->getDebugLoc();
2067 LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
2068 << ((BranchLoc) ? Twine(BranchLoc.getLine())
2069 : Twine("<UNKNOWN LOCATION>"))
2070 << ".\n");
2071 SmallVector<uint32_t, 4> Weights;
2072 uint32_t MaxWeight = 0;
2073 Instruction *MaxDestInst;
2074 for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
2075 BasicBlock *Succ = TI->getSuccessor(I);
2076 Edge E = std::make_pair(BB, Succ);
2077 uint64_t Weight = EdgeWeights[E];
2078 LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
2079 // Use uint32_t saturated arithmetic to adjust the incoming weights,
2080 // if needed. Sample counts in profiles are 64-bit unsigned values,
2081 // but internally branch weights are expressed as 32-bit values.
2082 if (Weight > std::numeric_limits<uint32_t>::max()) {
2083 LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
2084 Weight = std::numeric_limits<uint32_t>::max();
2085 }
2086 // Weight is added by one to avoid propagation errors introduced by
2087 // 0 weights.
2088 Weights.push_back(static_cast<uint32_t>(Weight + 1));
2089 if (Weight != 0) {
2090 if (Weight > MaxWeight) {
2091 MaxWeight = Weight;
2092 MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime();
2093 }
2094 }
2095 }
2096
2097 uint64_t TempWeight;
2098 // Only set weights if there is at least one non-zero weight.
2099 // In any other case, let the analyzer set weights.
2100 // Do not set weights if the weights are present. In ThinLTO, the profile
2101 // annotation is done twice. If the first annotation already set the
2102 // weights, the second pass does not need to set it.
2103 if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) {
2104 LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
2105 TI->setMetadata(LLVMContext::MD_prof,
2106 MDB.createBranchWeights(Weights));
2107 ORE->emit([&]() {
2108 return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
2109 << "most popular destination for conditional branches at "
2110 << ore::NV("CondBranchesLoc", BranchLoc);
2111 });
2112 } else {
2113 LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
2114 }
2115 }
2116 }
2117
2118 /// Get the line number for the function header.
2119 ///
2120 /// This looks up function \p F in the current compilation unit and
2121 /// retrieves the line number where the function is defined. This is
2122 /// line 0 for all the samples read from the profile file. Every line
2123 /// number is relative to this line.
2124 ///
2125 /// \param F Function object to query.
2126 ///
2127 /// \returns the line number where \p F is defined. If it returns 0,
2128 /// it means that there is no debug information available for \p F.
2129 unsigned SampleProfileLoader::getFunctionLoc(Function &F) {
2130 if (DISubprogram *S = F.getSubprogram())
2131 return S->getLine();
2132
2133 if (NoWarnSampleUnused)
2134 return 0;
2135
2136 // If the start of \p F is missing, emit a diagnostic to inform the user
2137 // about the missed opportunity.
2138 F.getContext().diagnose(DiagnosticInfoSampleProfile(
2139 "No debug information found in function " + F.getName() +
2140 ": Function profile not used",
2141 DS_Warning));
2142 return 0;
2143 }
2144
2145 void SampleProfileLoader::computeDominanceAndLoopInfo(Function &F) {
2146 DT.reset(new DominatorTree);
2147 DT->recalculate(F);
2148
2149 PDT.reset(new PostDominatorTree(F));
2150
2151 LI.reset(new LoopInfo);
2152 LI->analyze(*DT);
2153 }
2154
2155 /// Generate branch weight metadata for all branches in \p F.
2156 ///
2157 /// Branch weights are computed out of instruction samples using a
2158 /// propagation heuristic. Propagation proceeds in 3 phases:
2159 ///
2160 /// 1- Assignment of block weights. All the basic blocks in the function
2161 ///    are initially assigned the same weight as their most frequently
2162 /// executed instruction.
2163 ///
2164 /// 2- Creation of equivalence classes. Since samples may be missing from
2165 /// blocks, we can fill in the gaps by setting the weights of all the
2166 /// blocks in the same equivalence class to the same weight. To compute
2167 /// the concept of equivalence, we use dominance and loop information.
2168 /// Two blocks B1 and B2 are in the same equivalence class if B1
2169 /// dominates B2, B2 post-dominates B1 and both are in the same loop.
2170 ///
2171 /// 3- Propagation of block weights into edges. This uses a simple
2172 /// propagation heuristic. The following rules are applied to every
2173 /// block BB in the CFG:
2174 ///
2175 /// - If BB has a single predecessor/successor, then the weight
2176 /// of that edge is the weight of the block.
2177 ///
2178 /// - If all the edges are known except one, and the weight of the
2179 /// block is already known, the weight of the unknown edge will
2180 /// be the weight of the block minus the sum of all the known
2181 /// edges. If the sum of all the known edges is larger than BB's weight,
2182 /// we set the unknown edge weight to zero.
2183 ///
2184 /// - If there is a self-referential edge, and the weight of the block is
2185 /// known, the weight for that edge is set to the weight of the block
2186 /// minus the weight of the other incoming edges to that block (if
2187 /// known).
2188 ///
2189 /// Since this propagation is not guaranteed to finalize for every CFG, we
2190 /// only allow it to proceed for a limited number of iterations (controlled
2191 /// by -sample-profile-max-propagate-iterations).
2192 ///
2193 /// FIXME: Try to replace this propagation heuristic with a scheme
2194 /// that is guaranteed to finalize. A work-list approach similar to
2195 /// the standard value propagation algorithm used by SSA-CCP might
2196 /// work here.
2197 ///
2198 /// Once all the branch weights are computed, we emit the MD_prof
2199 /// metadata on BB using the computed values for each of its branches.
2200 ///
2201 /// \param F The function to query.
2202 ///
2203 /// \returns true if \p F was modified. Returns false, otherwise.
2204 bool SampleProfileLoader::emitAnnotations(Function &F) {
2205 bool Changed = false;
2206
2207 if (FunctionSamples::ProfileIsProbeBased) {
2208 if (!ProbeManager->profileIsValid(F, *Samples)) {
2209 LLVM_DEBUG(
2210 dbgs() << "Profile is invalid due to CFG mismatch for Function "
2211 << F.getName());
2212 ++NumMismatchedProfile;
2213 return false;
2214 }
2215 ++NumMatchedProfile;
2216 } else {
2217 if (getFunctionLoc(F) == 0)
2218 return false;
2219
2220 LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
2221 << F.getName() << ": " << getFunctionLoc(F) << "\n");
2222 }
2223
2224 DenseSet<GlobalValue::GUID> InlinedGUIDs;
2225 if (ProfileIsCS && CallsitePrioritizedInline)
2226 Changed |= inlineHotFunctionsWithPriority(F, InlinedGUIDs);
2227 else
2228 Changed |= inlineHotFunctions(F, InlinedGUIDs);
2229
2230 // Compute basic block weights.
2231 Changed |= computeBlockWeights(F);
2232
2233 if (Changed) {
2234 // Add an entry count to the function using the samples gathered at the
2235 // function entry.
2236 // Sets the GUIDs that are inlined in the profiled binary. This is used
2237     // for ThinLink to perform correct liveness analysis, and also makes the IR
2238 // match the profiled binary before annotation.
2239 F.setEntryCount(
2240 ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real),
2241 &InlinedGUIDs);
2242
2243 // Compute dominance and loop info needed for propagation.
2244 computeDominanceAndLoopInfo(F);
2245
2246 // Find equivalence classes.
2247 findEquivalenceClasses(F);
2248
2249 // Propagate weights to all edges.
2250 propagateWeights(F);
2251 }
2252
2253 // If coverage checking was requested, compute it now.
2254 if (SampleProfileRecordCoverage) {
2255 unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
2256 unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
2257 unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2258 if (Coverage < SampleProfileRecordCoverage) {
2259 F.getContext().diagnose(DiagnosticInfoSampleProfile(
2260 F.getSubprogram()->getFilename(), getFunctionLoc(F),
2261 Twine(Used) + " of " + Twine(Total) + " available profile records (" +
2262 Twine(Coverage) + "%) were applied",
2263 DS_Warning));
2264 }
2265 }
2266
2267 if (SampleProfileSampleCoverage) {
2268 uint64_t Used = CoverageTracker.getTotalUsedSamples();
2269 uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
2270 unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2271 if (Coverage < SampleProfileSampleCoverage) {
2272 F.getContext().diagnose(DiagnosticInfoSampleProfile(
2273 F.getSubprogram()->getFilename(), getFunctionLoc(F),
2274 Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
2275 Twine(Coverage) + "%) were applied",
2276 DS_Warning));
2277 }
2278 }
2279 return Changed;
2280 }
2281
2282 char SampleProfileLoaderLegacyPass::ID = 0;
2283
2284 INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
2285 "Sample Profile loader", false, false)
2286 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2287 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
2288 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2289 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
2290 INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
2291 "Sample Profile loader", false, false)
2292
2293 // Add inlined profile call edges to the call graph.
2294 void SampleProfileLoader::addCallGraphEdges(CallGraph &CG,
2295 const FunctionSamples &Samples) {
2296 Function *Caller = SymbolMap.lookup(Samples.getFuncName());
2297 if (!Caller || Caller->isDeclaration())
2298 return;
2299
2300   // Skip non-inlined call edges, which are not important here since top-down
2301   // inlining for a non-CS profile is about getting more precise profile
2302   // matching, not about enabling more inlining.
2303
2304 for (const auto &CallsiteSamples : Samples.getCallsiteSamples()) {
2305 for (const auto &InlinedSamples : CallsiteSamples.second) {
2306 Function *Callee = SymbolMap.lookup(InlinedSamples.first);
2307 if (Callee && !Callee->isDeclaration())
2308 CG[Caller]->addCalledFunction(nullptr, CG[Callee]);
2309 addCallGraphEdges(CG, InlinedSamples.second);
2310 }
2311 }
2312 }
2313
2314 // Replace call graph edges with dynamic call edges from the profile.
2315 void SampleProfileLoader::replaceCallGraphEdges(
2316 CallGraph &CG, StringMap<Function *> &SymbolMap) {
2317 // Remove static call edges from the call graph except for the ones from the
2318 // root which make the call graph connected.
2319 for (const auto &Node : CG)
2320 if (Node.second.get() != CG.getExternalCallingNode())
2321 Node.second->removeAllCalledFunctions();
2322
2323 // Add profile call edges to the call graph.
2324 if (ProfileIsCS) {
2325 ContextTracker->addCallGraphEdges(CG, SymbolMap);
2326 } else {
2327 for (const auto &Samples : Reader->getProfiles())
2328 addCallGraphEdges(CG, Samples.second);
2329 }
2330 }
2331
2332 std::vector<Function *>
2333 SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
2334 std::vector<Function *> FunctionOrderList;
2335 FunctionOrderList.reserve(M.size());
2336
2337 if (!ProfileTopDownLoad || CG == nullptr) {
2338 if (ProfileMergeInlinee) {
2339       // Disable ProfileMergeInlinee if the profile is not loaded in top-down
2340       // order, because the profile for a function may be used for the profile
2341       // annotation of its outline copy before the profiles of its non-inlined
2342       // inline instances have been merged, which is not how ProfileMergeInlinee
2343       // is supposed to work.
2344 ProfileMergeInlinee = false;
2345 }
2346
2347 for (Function &F : M)
2348 if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
2349 FunctionOrderList.push_back(&F);
2350 return FunctionOrderList;
2351 }
2352
2353 assert(&CG->getModule() == &M);
2354
2355 // Add indirect call edges from profile to augment the static call graph.
2356 // Functions will be processed in a top-down order defined by the static call
2357 // graph. Adjusting the order by considering indirect call edges from the
2358 // profile (which don't exist in the static call graph) can enable the
2359 // inlining of indirect call targets by processing the caller before them.
2360 // TODO: enable this for non-CS profile and fix the counts returning logic to
2361   // have full support for indirect calls.
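  // For example (hypothetical), if the context profile shows a hot indirect
  // call in caller A resolving to target B, an A->B edge is added so that A is
  // processed before B and the promoted call can still be inlined top-down.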
2362 if (UseProfileIndirectCallEdges && ProfileIsCS) {
2363 for (auto &Entry : *CG) {
2364 const auto *F = Entry.first;
2365 if (!F || F->isDeclaration() || !F->hasFnAttribute("use-sample-profile"))
2366 continue;
2367 auto &AllContexts = ContextTracker->getAllContextSamplesFor(F->getName());
2368 if (AllContexts.empty())
2369 continue;
2370
2371 for (const auto &BB : *F) {
2372 for (const auto &I : BB.getInstList()) {
2373 const auto *CB = dyn_cast<CallBase>(&I);
2374 if (!CB || !CB->isIndirectCall())
2375 continue;
2376 const DebugLoc &DLoc = I.getDebugLoc();
2377 if (!DLoc)
2378 continue;
2379 auto CallSite = FunctionSamples::getCallSiteIdentifier(DLoc);
2380 for (FunctionSamples *Samples : AllContexts) {
2381 if (auto CallTargets = Samples->findCallTargetMapAt(CallSite)) {
2382 for (const auto &Target : CallTargets.get()) {
2383 Function *Callee = SymbolMap.lookup(Target.first());
2384 if (Callee && !Callee->isDeclaration())
2385 Entry.second->addCalledFunction(nullptr, (*CG)[Callee]);
2386 }
2387 }
2388 }
2389 }
2390 }
2391 }
2392 }
2393
2394   // Compute a top-down order from the profile; it is used later to sort
2395   // functions within an SCC. The static processing order computed for an
2396   // SCC may not reflect the call contexts in the context-sensitive profile,
2397   // which may cause potential inlining to be overlooked. The function order
2398   // within an SCC is adjusted to the profile's top-down order to favor inlining.
2399 DenseMap<Function *, uint64_t> ProfileOrderMap;
2400 if (UseProfileTopDownOrder ||
2401 (ProfileIsCS && !UseProfileTopDownOrder.getNumOccurrences())) {
2402 // Create a static call graph. The call edges are not important since they
2403 // will be replaced by dynamic edges from the profile.
2404 CallGraph ProfileCG(M);
2405 replaceCallGraphEdges(ProfileCG, SymbolMap);
2406 scc_iterator<CallGraph *> CGI = scc_begin(&ProfileCG);
2407 uint64_t I = 0;
2408 while (!CGI.isAtEnd()) {
2409 for (CallGraphNode *Node : *CGI) {
2410 if (auto *F = Node->getFunction())
2411 ProfileOrderMap[F] = ++I;
2412 }
2413 ++CGI;
2414 }
2415 }
2416
2417 scc_iterator<CallGraph *> CGI = scc_begin(CG);
2418 while (!CGI.isAtEnd()) {
2419 uint64_t Start = FunctionOrderList.size();
2420 for (CallGraphNode *Node : *CGI) {
2421 auto *F = Node->getFunction();
2422 if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
2423 FunctionOrderList.push_back(F);
2424 }
2425
2426 // Sort nodes in SCC based on the profile top-down order.
2427 if (!ProfileOrderMap.empty()) {
2428 std::stable_sort(FunctionOrderList.begin() + Start,
2429 FunctionOrderList.end(),
2430 [&ProfileOrderMap](Function *Left, Function *Right) {
2431 return ProfileOrderMap[Left] < ProfileOrderMap[Right];
2432 });
2433 }
2434
2435 ++CGI;
2436 }
2437
2438 LLVM_DEBUG({
2439 dbgs() << "Function processing order:\n";
2440 for (auto F : reverse(FunctionOrderList)) {
2441 dbgs() << F->getName() << "\n";
2442 }
2443 });
2444
2445 std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
2446 return FunctionOrderList;
2447 }
2448
2449 bool SampleProfileLoader::doInitialization(Module &M,
2450 FunctionAnalysisManager *FAM) {
2451 auto &Ctx = M.getContext();
2452
2453 auto ReaderOrErr =
2454 SampleProfileReader::create(Filename, Ctx, RemappingFilename);
2455 if (std::error_code EC = ReaderOrErr.getError()) {
2456 std::string Msg = "Could not open profile: " + EC.message();
2457 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
2458 return false;
2459 }
2460 Reader = std::move(ReaderOrErr.get());
2461 Reader->setSkipFlatProf(LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink);
2462 Reader->collectFuncsFrom(M);
2463 if (std::error_code EC = Reader->read()) {
2464 std::string Msg = "profile reading failed: " + EC.message();
2465 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
2466 return false;
2467 }
2468
2469 PSL = Reader->getProfileSymbolList();
2470
2471 // While profile-sample-accurate is on, ignore symbol list.
2472 ProfAccForSymsInList =
2473 ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
2474 if (ProfAccForSymsInList) {
2475 NamesInProfile.clear();
2476 if (auto NameTable = Reader->getNameTable())
2477 NamesInProfile.insert(NameTable->begin(), NameTable->end());
2478 }
2479
2480 if (FAM && !ProfileInlineReplayFile.empty()) {
2481 ExternalInlineAdvisor = std::make_unique<ReplayInlineAdvisor>(
2482 M, *FAM, Ctx, /*OriginalAdvisor=*/nullptr, ProfileInlineReplayFile,
2483 /*EmitRemarks=*/false);
2484 if (!ExternalInlineAdvisor->areReplayRemarksLoaded())
2485 ExternalInlineAdvisor.reset();
2486 }
2487
2488 // Apply tweaks if context-sensitive profile is available.
2489 if (Reader->profileIsCS()) {
2490 ProfileIsCS = true;
2491 FunctionSamples::ProfileIsCS = true;
2492
2493     // Enable the priority-based inliner and size inlining by default for CSSPGO.
2494 if (!ProfileSizeInline.getNumOccurrences())
2495 ProfileSizeInline = true;
2496 if (!CallsitePrioritizedInline.getNumOccurrences())
2497 CallsitePrioritizedInline = true;
2498
2499     // Tracker for profiles under different contexts.
2500 ContextTracker =
2501 std::make_unique<SampleContextTracker>(Reader->getProfiles());
2502 }
2503
2504 // Load pseudo probe descriptors for probe-based function samples.
2505 if (Reader->profileIsProbeBased()) {
2506 ProbeManager = std::make_unique<PseudoProbeManager>(M);
2507 if (!ProbeManager->moduleIsProbed(M)) {
2508 const char *Msg =
2509 "Pseudo-probe-based profile requires SampleProfileProbePass";
2510 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
2511 return false;
2512 }
2513 }
2514
2515 return true;
2516 }
2517
2518 ModulePass *llvm::createSampleProfileLoaderPass() {
2519 return new SampleProfileLoaderLegacyPass();
2520 }
2521
2522 ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
2523 return new SampleProfileLoaderLegacyPass(Name);
2524 }
2525
2526 bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
2527 ProfileSummaryInfo *_PSI, CallGraph *CG) {
2528 GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);
2529
2530 PSI = _PSI;
2531 if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
2532 M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
2533 ProfileSummary::PSK_Sample);
2534 PSI->refresh();
2535 }
2536 // Compute the total number of samples collected in this profile.
2537 for (const auto &I : Reader->getProfiles())
2538 TotalCollectedSamples += I.second.getTotalSamples();
2539
2540 auto Remapper = Reader->getRemapper();
2541 // Populate the symbol map.
2542 for (const auto &N_F : M.getValueSymbolTable()) {
2543 StringRef OrigName = N_F.getKey();
2544 Function *F = dyn_cast<Function>(N_F.getValue());
2545 if (F == nullptr)
2546 continue;
2547 SymbolMap[OrigName] = F;
2548 auto pos = OrigName.find('.');
2549 if (pos != StringRef::npos) {
2550 StringRef NewName = OrigName.substr(0, pos);
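      // For example, a suffixed symbol such as "foo.llvm.1234" (a hypothetical
      // promoted-local name) is also registered under the stripped name "foo"
      // that the profile uses.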
2551 auto r = SymbolMap.insert(std::make_pair(NewName, F));
2552       // Failing to insert means there is already an entry in SymbolMap,
2553       // i.e. multiple functions are mapped to the same stripped name. In
2554       // this case of a name conflict, set the value to nullptr to avoid
2555       // confusion.
2556 if (!r.second)
2557 r.first->second = nullptr;
2558 OrigName = NewName;
2559 }
2560 // Insert the remapped names into SymbolMap.
2561 if (Remapper) {
2562 if (auto MapName = Remapper->lookUpNameInProfile(OrigName)) {
2563 if (*MapName == OrigName)
2564 continue;
2565 SymbolMap.insert(std::make_pair(*MapName, F));
2566 }
2567 }
2568 }
2569
2570 bool retval = false;
2571 for (auto F : buildFunctionOrder(M, CG)) {
2572 assert(!F->isDeclaration());
2573 clearFunctionData();
2574 retval |= runOnFunction(*F, AM);
2575 }
2576
2577 // Account for cold calls not inlined....
2578 if (!ProfileIsCS)
2579 for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
2580 notInlinedCallInfo)
2581 updateProfileCallee(pair.first, pair.second.entryCount);
2582
2583 return retval;
2584 }
2585
2586 bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
2587 ACT = &getAnalysis<AssumptionCacheTracker>();
2588 TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
2589 TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>();
2590 ProfileSummaryInfo *PSI =
2591 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2592 return SampleLoader.runOnModule(M, nullptr, PSI, nullptr);
2593 }
2594
2595 bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {
2596 LLVM_DEBUG(dbgs() << "\n\nProcessing Function " << F.getName() << "\n");
2597 DILocation2SampleMap.clear();
2598 // By default the entry count is initialized to -1, which will be treated
2599 // conservatively by getEntryCount as the same as unknown (None). This is
2600   // to avoid newly added code being treated as cold. If we have samples,
2601 // this will be overwritten in emitAnnotations.
2602 uint64_t initialEntryCount = -1;
2603
2604 ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
2605 if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
2606     // Initialize all the function entry counts to 0. This means all the
2607     // functions without a profile will be regarded as cold.
2608 initialEntryCount = 0;
2609     // profile-sample-accurate is a user assertion which has a higher precedence
2610     // than the symbol list. When profile-sample-accurate is on, ignore the symbol list.
2611 ProfAccForSymsInList = false;
2612 }
2613
2614   // PSL -- the profile symbol list includes all the symbols in the sampled
2615   // binary. If ProfileAccurateForSymsInList is enabled, PSL is used to
2616   // treat old functions without samples as cold, without having to worry
2617   // about new and hot functions being mistakenly treated as cold.
2618 if (ProfAccForSymsInList) {
2619 // Initialize the entry count to 0 for functions in the list.
2620 if (PSL->contains(F.getName()))
2621 initialEntryCount = 0;
2622
2623     // A function in the symbol list but without samples will be regarded as
2624 // cold. To minimize the potential negative performance impact it could
2625 // have, we want to be a little conservative here saying if a function
2626 // shows up in the profile, no matter as outline function, inline instance
2627 // or call targets, treat the function as not being cold. This will handle
2628 // the cases such as most callsites of a function are inlined in sampled
2629 // binary but not inlined in current build (because of source code drift,
2630 // imprecise debug information, or the callsites are all cold individually
2631 // but not cold accumulatively...), so the outline function showing up as
2632 // cold in sampled binary will actually not be cold after current build.
2633 StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
2634 if (NamesInProfile.count(CanonName))
2635 initialEntryCount = -1;
2636 }
2637
2638 // Initialize entry count when the function has no existing entry
2639 // count value.
2640 if (!F.getEntryCount().hasValue())
2641 F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
2642 std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
2643 if (AM) {
2644 auto &FAM =
2645 AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
2646 .getManager();
2647 ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2648 } else {
2649 OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2650 ORE = OwnedORE.get();
2651 }
2652
2653 if (ProfileIsCS)
2654 Samples = ContextTracker->getBaseSamplesFor(F);
2655 else
2656 Samples = Reader->getSamplesFor(F);
2657
2658 if (Samples && !Samples->empty())
2659 return emitAnnotations(F);
2660 return false;
2661 }
2662
2663 PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
2664 ModuleAnalysisManager &AM) {
2665 FunctionAnalysisManager &FAM =
2666 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
2667
2668 auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
2669 return FAM.getResult<AssumptionAnalysis>(F);
2670 };
2671 auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
2672 return FAM.getResult<TargetIRAnalysis>(F);
2673 };
2674 auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
2675 return FAM.getResult<TargetLibraryAnalysis>(F);
2676 };
2677
2678 SampleProfileLoader SampleLoader(
2679 ProfileFileName.empty() ? SampleProfileFile : ProfileFileName,
2680 ProfileRemappingFileName.empty() ? SampleProfileRemappingFile
2681 : ProfileRemappingFileName,
2682 LTOPhase, GetAssumptionCache, GetTTI, GetTLI);
2683
2684 if (!SampleLoader.doInitialization(M, &FAM))
2685 return PreservedAnalyses::all();
2686
2687 ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M);
2688 CallGraph &CG = AM.getResult<CallGraphAnalysis>(M);
2689 if (!SampleLoader.runOnModule(M, &AM, PSI, &CG))
2690 return PreservedAnalyses::all();
2691
2692 return PreservedAnalyses::none();
2693 }
2694