1 //===- FunctionAttrs.cpp - Pass which marks functions attributes ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file implements interprocedural passes which walk the
11 /// call-graph deducing and/or propagating function attributes.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/IPO/FunctionAttrs.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/SCCIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AssumptionCache.h"
26 #include "llvm/Analysis/BasicAliasAnalysis.h"
27 #include "llvm/Analysis/CFG.h"
28 #include "llvm/Analysis/CGSCCPassManager.h"
29 #include "llvm/Analysis/CallGraph.h"
30 #include "llvm/Analysis/CallGraphSCCPass.h"
31 #include "llvm/Analysis/CaptureTracking.h"
32 #include "llvm/Analysis/LazyCallGraph.h"
33 #include "llvm/Analysis/MemoryBuiltins.h"
34 #include "llvm/Analysis/MemoryLocation.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/IR/Argument.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/BasicBlock.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/Function.h"
42 #include "llvm/IR/InstIterator.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Metadata.h"
48 #include "llvm/IR/PassManager.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/Use.h"
51 #include "llvm/IR/User.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/InitializePasses.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Compiler.h"
58 #include "llvm/Support/Debug.h"
59 #include "llvm/Support/ErrorHandling.h"
60 #include "llvm/Support/raw_ostream.h"
61 #include "llvm/Transforms/IPO.h"
62 #include "llvm/Transforms/Utils/Local.h"
63 #include <cassert>
64 #include <iterator>
65 #include <map>
66 #include <vector>
67 
68 using namespace llvm;
69 
70 #define DEBUG_TYPE "function-attrs"
71 
// Statistics counting how often each attribute deduction in this file fires
// (function-level attributes first, then per-argument ones).
STATISTIC(NumReadNone, "Number of functions marked readnone");
STATISTIC(NumReadOnly, "Number of functions marked readonly");
STATISTIC(NumWriteOnly, "Number of functions marked writeonly");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumReturned, "Number of arguments marked returned");
STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
STATISTIC(NumWriteOnlyArg, "Number of arguments marked writeonly");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");
STATISTIC(NumNoUnwind, "Number of functions marked as nounwind");
STATISTIC(NumNoFree, "Number of functions marked as nofree");
STATISTIC(NumWillReturn, "Number of functions marked as willreturn");
STATISTIC(NumNoSync, "Number of functions marked as nosync");

// Counters for the summary-based propagation done during the ThinLTO thin
// link (see thinLTOPropagateFunctionAttrs below).
STATISTIC(NumThinLinkNoRecurse,
          "Number of functions marked as norecurse during thinlink");
STATISTIC(NumThinLinkNoUnwind,
          "Number of functions marked as nounwind during thinlink");
92 
// Command-line knobs, mainly for testing and for disabling individual
// inferences when debugging miscompiles.

// On by default: callsite nonnull argument attributes are hoisted to the
// caller's arguments (see addArgumentAttrsFromCallsites).
static cl::opt<bool> EnableNonnullArgPropagation(
    "enable-nonnull-arg-prop", cl::init(true), cl::Hidden,
    cl::desc("Try to propagate nonnull argument attributes from callsites to "
             "caller functions."));

static cl::opt<bool> DisableNoUnwindInference(
    "disable-nounwind-inference", cl::Hidden,
    cl::desc("Stop inferring nounwind attribute during function-attrs pass"));

static cl::opt<bool> DisableNoFreeInference(
    "disable-nofree-inference", cl::Hidden,
    cl::desc("Stop inferring nofree attribute during function-attrs pass"));

// Note: thin-link propagation is disabled by default (init(true)).
static cl::opt<bool> DisableThinLTOPropagation(
    "disable-thinlto-funcattrs", cl::init(true), cl::Hidden,
    cl::desc("Don't propagate function-attrs in thinLTO"));
109 
namespace {

/// The set of functions in the call-graph SCC currently being processed.
/// A SetVector is used so membership tests are fast while iteration order
/// stays deterministic.
using SCCNodeSet = SmallSetVector<Function *, 8>;

} // end anonymous namespace
115 
116 /// Returns the memory access attribute for function F using AAR for AA results,
117 /// where SCCNodes is the current SCC.
118 ///
119 /// If ThisBody is true, this function may examine the function body and will
120 /// return a result pertaining to this copy of the function. If it is false, the
121 /// result will be based only on AA results for the function declaration; it
122 /// will be assumed that some other (perhaps less optimized) version of the
123 /// function may be selected at link time.
static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
                                                  AAResults &AAR,
                                                  const SCCNodeSet &SCCNodes) {
  FunctionModRefBehavior MRB = AAR.getModRefBehavior(&F);
  if (MRB == FMRB_DoesNotAccessMemory)
    // Already perfect!
    return MAK_ReadNone;

  if (!ThisBody) {
    // No body may be examined: classify purely from what alias analysis can
    // prove about the declaration, since a different (possibly less
    // optimized) definition may be chosen at link time.
    if (AliasAnalysis::onlyReadsMemory(MRB))
      return MAK_ReadOnly;

    if (AliasAnalysis::onlyWritesMemory(MRB))
      return MAK_WriteOnly;

    // Conservatively assume it reads and writes to memory.
    return MAK_MayWrite;
  }

  // Scan the function body for instructions that may read or write memory.
  bool ReadsMemory = false;
  bool WritesMemory = false;
  for (Instruction &I : instructions(F)) {
    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
    if (auto *Call = dyn_cast<CallBase>(&I)) {
      // Ignore calls to functions in the same SCC, as long as the call sites
      // don't have operand bundles.  Calls with operand bundles are allowed to
      // have memory effects not described by the memory effects of the call
      // target.
      if (!Call->hasOperandBundles() && Call->getCalledFunction() &&
          SCCNodes.count(Call->getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(Call);
      ModRefInfo MRI = createModRefInfo(MRB);

      // If the call doesn't access memory, we're done.
      if (isNoModRef(MRI))
        continue;

      // A pseudo probe call shouldn't change any function attribute since it
      // doesn't translate to a real instruction. It comes with a memory access
      // tag to prevent itself being removed by optimizations and not block
      // other instructions being optimized.
      if (isa<PseudoProbeInst>(I))
        continue;

      if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
        // The call could access any memory. If that includes writes, note it.
        if (isModSet(MRI))
          WritesMemory = true;
        // If it reads, note it.
        if (isRefSet(MRI))
          ReadsMemory = true;
        continue;
      }

      // The call only touches memory reachable from its pointer arguments.
      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (const Use &U : Call->args()) {
        const Value *Arg = U;
        if (!Arg->getType()->isPtrOrPtrVectorTy())
          continue;

        MemoryLocation Loc =
            MemoryLocation::getBeforeOrAfter(Arg, I.getAAMetadata());

        // Skip accesses to local or constant memory as they don't impact the
        // externally visible mod/ref behavior.
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;

        if (isModSet(MRI))
          // Writes non-local memory.
          WritesMemory = true;
        if (isRefSet(MRI))
          // Ok, it reads non-local memory.
          ReadsMemory = true;
      }
      continue;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      // Ignore non-volatile loads from local memory. (Atomic is okay here.)
      if (!LI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(LI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
      // Ignore non-volatile stores to local memory. (Atomic is okay here.)
      if (!SI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(SI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (VAArgInst *VI = dyn_cast<VAArgInst>(&I)) {
      // Ignore vaargs on local memory.
      MemoryLocation Loc = MemoryLocation::get(VI);
      if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
        continue;
    }

    // Any remaining instructions need to be taken seriously!  Check if they
    // read or write memory.
    //
    // Writes memory, remember that.
    WritesMemory |= I.mayWriteToMemory();

    // If this instruction may read memory, remember that.
    ReadsMemory |= I.mayReadFromMemory();
  }

  // Fold the two flags into the four-valued result.
  if (WritesMemory) {
    if (!ReadsMemory)
      return MAK_WriteOnly;
    else
      return MAK_MayWrite;
  }

  return ReadsMemory ? MAK_ReadOnly : MAK_ReadNone;
}
244 
/// Public entry point: classify \p F's memory access by examining its body.
/// Passes an empty SCC set, so no call is skipped as intra-SCC.
MemoryAccessKind llvm::computeFunctionBodyMemoryAccess(Function &F,
                                                       AAResults &AAR) {
  return checkFunctionMemoryAccess(F, /*ThisBody=*/true, AAR, {});
}
249 
250 /// Deduce readonly/readnone attributes for the SCC.
251 template <typename AARGetterT>
252 static void addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter,
253                          SmallSet<Function *, 8> &Changed) {
254   // Check if any of the functions in the SCC read or write memory.  If they
255   // write memory then they can't be marked readnone or readonly.
256   bool ReadsMemory = false;
257   bool WritesMemory = false;
258   for (Function *F : SCCNodes) {
259     // Call the callable parameter to look up AA results for this function.
260     AAResults &AAR = AARGetter(*F);
261 
262     // Non-exact function definitions may not be selected at link time, and an
263     // alternative version that writes to memory may be selected.  See the
264     // comment on GlobalValue::isDefinitionExact for more details.
265     switch (checkFunctionMemoryAccess(*F, F->hasExactDefinition(),
266                                       AAR, SCCNodes)) {
267     case MAK_MayWrite:
268       return;
269     case MAK_ReadOnly:
270       ReadsMemory = true;
271       break;
272     case MAK_WriteOnly:
273       WritesMemory = true;
274       break;
275     case MAK_ReadNone:
276       // Nothing to do!
277       break;
278     }
279   }
280 
281   // If the SCC contains both functions that read and functions that write, then
282   // we cannot add readonly attributes.
283   if (ReadsMemory && WritesMemory)
284     return;
285 
286   // Success!  Functions in this SCC do not access memory, or only read memory.
287   // Give them the appropriate attribute.
288 
289   for (Function *F : SCCNodes) {
290     if (F->doesNotAccessMemory())
291       // Already perfect!
292       continue;
293 
294     if (F->onlyReadsMemory() && ReadsMemory)
295       // No change.
296       continue;
297 
298     if (F->onlyWritesMemory() && WritesMemory)
299       continue;
300 
301     Changed.insert(F);
302 
303     // Clear out any existing attributes.
304     AttributeMask AttrsToRemove;
305     AttrsToRemove.addAttribute(Attribute::ReadOnly);
306     AttrsToRemove.addAttribute(Attribute::ReadNone);
307     AttrsToRemove.addAttribute(Attribute::WriteOnly);
308 
309     if (!WritesMemory && !ReadsMemory) {
310       // Clear out any "access range attributes" if readnone was deduced.
311       AttrsToRemove.addAttribute(Attribute::ArgMemOnly);
312       AttrsToRemove.addAttribute(Attribute::InaccessibleMemOnly);
313       AttrsToRemove.addAttribute(Attribute::InaccessibleMemOrArgMemOnly);
314     }
315     F->removeFnAttrs(AttrsToRemove);
316 
317     // Add in the new attribute.
318     if (WritesMemory && !ReadsMemory)
319       F->addFnAttr(Attribute::WriteOnly);
320     else
321       F->addFnAttr(ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);
322 
323     if (WritesMemory && !ReadsMemory)
324       ++NumWriteOnly;
325     else if (ReadsMemory)
326       ++NumReadOnly;
327     else
328       ++NumReadNone;
329   }
330 }
331 
332 // Compute definitive function attributes for a function taking into account
333 // prevailing definitions and linkage types
static FunctionSummary *calculatePrevailingSummary(
    ValueInfo VI,
    DenseMap<ValueInfo, FunctionSummary *> &CachedPrevailingSummary,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        IsPrevailing) {

  // Memoized; note that conservative (nullptr) results are cached as well.
  if (CachedPrevailingSummary.count(VI))
    return CachedPrevailingSummary[VI];

  /// At this point, prevailing symbols have been resolved. The following leads
  /// to returning a conservative result:
  /// - Multiple instances with local linkage. Normally local linkage would be
  ///   unique per module
  ///   as the GUID includes the module path. We could have a guid alias if
  ///   there wasn't any distinguishing path when each file was compiled, but
  ///   that should be rare so we'll punt on those.

  /// These next 2 cases should not happen and will assert:
  /// - Multiple instances with external linkage. This should be caught in
  ///   symbol resolution
  /// - Non-existent FunctionSummary for Aliasee. This presents a hole in our
  ///   knowledge meaning we have to go conservative.

  /// Otherwise, we calculate attributes for a function as:
  ///   1. If we have a local linkage, take its attributes. If there's somehow
  ///      multiple, bail and go conservative.
  ///   2. If we have an external/WeakODR/LinkOnceODR linkage check that it is
  ///      prevailing, take its attributes.
  ///   3. If we have a Weak/LinkOnce linkage the copies can have semantic
  ///      differences. However, if the prevailing copy is known it will be used
  ///      so take its attributes. If the prevailing copy is in a native file
  ///      all IR copies will be dead and propagation will go conservative.
  ///   4. AvailableExternally summaries without a prevailing copy are known to
  ///      occur in a couple of circumstances:
  ///      a. An internal function gets imported due to its caller getting
  ///         imported, it becomes AvailableExternally but no prevailing
  ///         definition exists. Because it has to get imported along with its
  ///         caller the attributes will be captured by propagating on its
  ///         caller.
  ///      b. C++11 [temp.explicit]p10 can generate AvailableExternally
  ///         definitions of explicitly instanced template declarations
  ///         for inlining which are ultimately dropped from the TU. Since this
  ///         is localized to the TU the attributes will have already made it to
  ///         the callers.
  ///      These are edge cases and already captured by their callers so we
  ///      ignore these for now. If they become relevant to optimize in the
  ///      future this can be revisited.
  ///   5. Otherwise, go conservative.

  // Seed the cache with the conservative answer so early returns below are
  // also remembered.
  CachedPrevailingSummary[VI] = nullptr;
  FunctionSummary *Local = nullptr;
  FunctionSummary *Prevailing = nullptr;

  for (const auto &GVS : VI.getSummaryList()) {
    if (!GVS->isLive())
      continue;

    FunctionSummary *FS = dyn_cast<FunctionSummary>(GVS->getBaseObject());
    // Virtual and Unknown (e.g. indirect) calls require going conservative
    if (!FS || FS->fflags().HasUnknownCall)
      return nullptr;

    const auto &Linkage = GVS->linkage();
    if (GlobalValue::isLocalLinkage(Linkage)) {
      if (Local) {
        // Case "multiple locals" from the comment above: bail conservatively.
        LLVM_DEBUG(
            dbgs()
            << "ThinLTO FunctionAttrs: Multiple Local Linkage, bailing on "
               "function "
            << VI.name() << " from " << FS->modulePath() << ". Previous module "
            << Local->modulePath() << "\n");
        return nullptr;
      }
      Local = FS;
    } else if (GlobalValue::isExternalLinkage(Linkage)) {
      assert(IsPrevailing(VI.getGUID(), GVS.get()));
      Prevailing = FS;
      break;
    } else if (GlobalValue::isWeakODRLinkage(Linkage) ||
               GlobalValue::isLinkOnceODRLinkage(Linkage) ||
               GlobalValue::isWeakAnyLinkage(Linkage) ||
               GlobalValue::isLinkOnceAnyLinkage(Linkage)) {
      if (IsPrevailing(VI.getGUID(), GVS.get())) {
        Prevailing = FS;
        break;
      }
    } else if (GlobalValue::isAvailableExternallyLinkage(Linkage)) {
      // TODO: Handle these cases if they become meaningful
      continue;
    }
  }

  // At most one of Local/Prevailing may be set: external/weak summaries are
  // only reached when no local copy was seen first, and vice versa.
  if (Local) {
    assert(!Prevailing);
    CachedPrevailingSummary[VI] = Local;
  } else if (Prevailing) {
    assert(!Local);
    CachedPrevailingSummary[VI] = Prevailing;
  }

  return CachedPrevailingSummary[VI];
}
436 
bool llvm::thinLTOPropagateFunctionAttrs(
    ModuleSummaryIndex &Index,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        IsPrevailing) {
  // TODO: implement addNoAliasAttrs once
  // there's more information about the return type in the summary
  if (DisableThinLTOPropagation)
    return false;

  DenseMap<ValueInfo, FunctionSummary *> CachedPrevailingSummary;
  bool Changed = false;

  // Infer norecurse/nounwind for a single summary-graph SCC, recording the
  // result on every function summary in it. Bails (plain return) as soon as
  // any member or callee lacks a usable prevailing summary.
  auto PropagateAttributes = [&](std::vector<ValueInfo> &SCCNodes) {
    // Assume we can propagate unless we discover otherwise.
    FunctionSummary::FFlags InferredFlags;
    // norecurse is only ever attempted for singleton SCCs.
    InferredFlags.NoRecurse = (SCCNodes.size() == 1);
    InferredFlags.NoUnwind = true;

    for (auto &V : SCCNodes) {
      FunctionSummary *CallerSummary =
          calculatePrevailingSummary(V, CachedPrevailingSummary, IsPrevailing);

      // Function summaries can fail to contain information such as declarations
      if (!CallerSummary)
        return;

      if (CallerSummary->fflags().MayThrow)
        InferredFlags.NoUnwind = false;

      for (const auto &Callee : CallerSummary->calls()) {
        FunctionSummary *CalleeSummary = calculatePrevailingSummary(
            Callee.first, CachedPrevailingSummary, IsPrevailing);

        if (!CalleeSummary)
          return;

        if (!CalleeSummary->fflags().NoRecurse)
          InferredFlags.NoRecurse = false;

        if (!CalleeSummary->fflags().NoUnwind)
          InferredFlags.NoUnwind = false;

        // Both candidate flags refuted: no point scanning further callees.
        if (!InferredFlags.NoUnwind && !InferredFlags.NoRecurse)
          break;
      }
    }

    // Record whichever flags survived on every summary of every SCC member.
    if (InferredFlags.NoUnwind || InferredFlags.NoRecurse) {
      Changed = true;
      for (auto &V : SCCNodes) {
        if (InferredFlags.NoRecurse) {
          LLVM_DEBUG(dbgs() << "ThinLTO FunctionAttrs: Propagated NoRecurse to "
                            << V.name() << "\n");
          ++NumThinLinkNoRecurse;
        }

        if (InferredFlags.NoUnwind) {
          LLVM_DEBUG(dbgs() << "ThinLTO FunctionAttrs: Propagated NoUnwind to "
                            << V.name() << "\n");
          ++NumThinLinkNoUnwind;
        }

        for (auto &S : V.getSummaryList()) {
          if (auto *FS = dyn_cast<FunctionSummary>(S.get())) {
            if (InferredFlags.NoRecurse)
              FS->setNoRecurse();

            if (InferredFlags.NoUnwind)
              FS->setNoUnwind();
          }
        }
      }
    }
  };

  // Call propagation functions on each SCC in the Index
  for (scc_iterator<ModuleSummaryIndex *> I = scc_begin(&Index); !I.isAtEnd();
       ++I) {
    std::vector<ValueInfo> Nodes(*I);
    PropagateAttributes(Nodes);
  }
  return Changed;
}
520 
521 namespace {
522 
/// For a given pointer Argument, this retains a list of Arguments of functions
/// in the same SCC that the pointer data flows into. We use this to build an
/// SCC of the arguments.
struct ArgumentGraphNode {
  // The argument this node represents; nullptr only for the graph's
  // synthetic root (see ArgumentGraph below).
  Argument *Definition;

  // Argument nodes that this argument's pointer value flows into.
  SmallVector<ArgumentGraphNode *, 4> Uses;
};
530 
/// A graph over the pointer arguments of the SCC's functions, with an edge
/// from argument A to argument B when A's pointer data flows into B. Walked
/// via scc_iterator (see the GraphTraits specializations below).
class ArgumentGraph {
  // We store pointers to ArgumentGraphNode objects, so it's important that
  // they not move around upon insert.
  using ArgumentMapTy = std::map<Argument *, ArgumentGraphNode>;

  ArgumentMapTy ArgumentMap;

  // There is no root node for the argument graph, in fact:
  //   void f(int *x, int *y) { if (...) f(x, y); }
  // is an example where the graph is disconnected. The SCCIterator requires a
  // single entry point, so we maintain a fake ("synthetic") root node that
  // uses every node. Because the graph is directed and nothing points into
  // the root, it will not participate in any SCCs (except for its own).
  ArgumentGraphNode SyntheticRoot;

public:
  ArgumentGraph() { SyntheticRoot.Definition = nullptr; }

  using iterator = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  // Iteration covers every node, via the synthetic root's use list.
  iterator begin() { return SyntheticRoot.Uses.begin(); }
  iterator end() { return SyntheticRoot.Uses.end(); }
  ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }

  // Returns the node for A, creating it on first access. Note: every call
  // appends the node to the synthetic root's use list, so repeated lookups
  // of the same argument add duplicate root edges.
  ArgumentGraphNode *operator[](Argument *A) {
    ArgumentGraphNode &Node = ArgumentMap[A];
    Node.Definition = A;
    SyntheticRoot.Uses.push_back(&Node);
    return &Node;
  }
};
562 
563 /// This tracker checks whether callees are in the SCC, and if so it does not
564 /// consider that a capture, instead adding it to the "Uses" list and
565 /// continuing with the analysis.
566 struct ArgumentUsesTracker : public CaptureTracker {
567   ArgumentUsesTracker(const SCCNodeSet &SCCNodes) : SCCNodes(SCCNodes) {}
568 
569   void tooManyUses() override { Captured = true; }
570 
571   bool captured(const Use *U) override {
572     CallBase *CB = dyn_cast<CallBase>(U->getUser());
573     if (!CB) {
574       Captured = true;
575       return true;
576     }
577 
578     Function *F = CB->getCalledFunction();
579     if (!F || !F->hasExactDefinition() || !SCCNodes.count(F)) {
580       Captured = true;
581       return true;
582     }
583 
584     assert(!CB->isCallee(U) && "callee operand reported captured?");
585     const unsigned UseIndex = CB->getDataOperandNo(U);
586     if (UseIndex >= CB->arg_size()) {
587       // Data operand, but not a argument operand -- must be a bundle operand
588       assert(CB->hasOperandBundles() && "Must be!");
589 
590       // CaptureTracking told us that we're being captured by an operand bundle
591       // use.  In this case it does not matter if the callee is within our SCC
592       // or not -- we've been captured in some unknown way, and we have to be
593       // conservative.
594       Captured = true;
595       return true;
596     }
597 
598     if (UseIndex >= F->arg_size()) {
599       assert(F->isVarArg() && "More params than args in non-varargs call");
600       Captured = true;
601       return true;
602     }
603 
604     Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
605     return false;
606   }
607 
608   // True only if certainly captured (used outside our SCC).
609   bool Captured = false;
610 
611   // Uses within our SCC.
612   SmallVector<Argument *, 4> Uses;
613 
614   const SCCNodeSet &SCCNodes;
615 };
616 
617 } // end anonymous namespace
618 
namespace llvm {

/// GraphTraits adapter so generic graph algorithms (in particular
/// scc_iterator) can walk an argument node's "Uses" edges as children.
template <> struct GraphTraits<ArgumentGraphNode *> {
  using NodeRef = ArgumentGraphNode *;
  using ChildIteratorType = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  static NodeRef getEntryNode(NodeRef A) { return A; }
  static ChildIteratorType child_begin(NodeRef N) { return N->Uses.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Uses.end(); }
};

/// GraphTraits for a whole ArgumentGraph: entry is the synthetic root, whose
/// use list reaches every node in the graph.
template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeRef getEntryNode(ArgumentGraph *AG) { return AG->getEntryNode(); }

  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }

  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};

} // end namespace llvm
642 
/// Returns Attribute::None, Attribute::ReadOnly, Attribute::ReadNone, or
/// Attribute::WriteOnly, describing how the memory pointed to by argument
/// \p A is accessed. \p SCCNodes is the set of arguments speculatively
/// assumed to share the attribute; uses flowing into those arguments via
/// calls inside the SCC are not counted as accesses here.
static Attribute::AttrKind
determinePointerAccessAttrs(Argument *A,
                            const SmallPtrSet<Argument *, 8> &SCCNodes) {
  SmallVector<Use *, 32> Worklist;
  SmallPtrSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr() || A->hasPreallocatedAttr())
    return Attribute::None;

  bool IsRead = false;
  bool IsWrite = false;

  // Seed the worklist with every direct use of the argument.
  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    if (IsWrite && IsRead)
      // No point in searching further..
      return Attribute::None;

    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      CallBase &CB = cast<CallBase>(*I);
      if (CB.isCallee(U)) {
        IsRead = true;
        // Note that indirect calls do not capture, see comment in
        // CaptureTracking for context
        continue;
      }

      // Given we've explicitly handled the callee operand above, what's left
      // must be a data operand (e.g. argument or operand bundle)
      const unsigned UseIndex = CB.getDataOperandNo(U);

      if (!CB.doesNotCapture(UseIndex)) {
        if (!CB.onlyReadsMemory())
          // If the callee can save a copy into other memory, then simply
          // scanning uses of the call is insufficient.  We have no way
          // of tracking copies of the pointer through memory to see
          // if a reloaded copy is written to, thus we must give up.
          return Attribute::None;
        // Push users for processing once we finish this one
        if (!I->getType()->isVoidTy())
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      }

      if (CB.doesNotAccessMemory())
        continue;

      if (Function *F = CB.getCalledFunction())
        if (CB.isArgOperand(U) && UseIndex < F->arg_size() &&
            SCCNodes.count(F->getArg(UseIndex)))
          // This is an argument which is part of the speculative SCC.  Note
          // that only operands corresponding to formal arguments of the callee
          // can participate in the speculation.
          break;

      // The accessors used on call site here do the right thing for calls and
      // invokes with operand bundles.
      if (CB.doesNotAccessMemory(UseIndex)) {
        /* nop */
      } else if (CB.onlyReadsMemory() || CB.onlyReadsMemory(UseIndex)) {
        IsRead = true;
      } else if (CB.hasFnAttr(Attribute::WriteOnly) ||
                 CB.dataOperandHasImpliedAttr(UseIndex, Attribute::WriteOnly)) {
        IsWrite = true;
      } else {
        return Attribute::None;
      }
      break;
    }

    case Instruction::Load:
      // A volatile load has side effects beyond what readonly can be relied
      // upon.
      if (cast<LoadInst>(I)->isVolatile())
        return Attribute::None;

      IsRead = true;
      break;

    case Instruction::Store:
      if (cast<StoreInst>(I)->getValueOperand() == *U)
        // untrackable capture
        return Attribute::None;

      // A volatile store has side effects beyond what writeonly can be relied
      // upon.
      if (cast<StoreInst>(I)->isVolatile())
        return Attribute::None;

      IsWrite = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      // Comparing or returning the pointer does not access its pointee.
      break;

    default:
      return Attribute::None;
    }
  }

  if (IsWrite && IsRead)
    return Attribute::None;
  else if (IsRead)
    return Attribute::ReadOnly;
  else if (IsWrite)
    return Attribute::WriteOnly;
  else
    return Attribute::ReadNone;
}
776 
777 /// Deduce returned attributes for the SCC.
778 static void addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes,
779                                      SmallSet<Function *, 8> &Changed) {
780   // Check each function in turn, determining if an argument is always returned.
781   for (Function *F : SCCNodes) {
782     // We can infer and propagate function attributes only when we know that the
783     // definition we'll get at link time is *exactly* the definition we see now.
784     // For more details, see GlobalValue::mayBeDerefined.
785     if (!F->hasExactDefinition())
786       continue;
787 
788     if (F->getReturnType()->isVoidTy())
789       continue;
790 
791     // There is nothing to do if an argument is already marked as 'returned'.
792     if (llvm::any_of(F->args(),
793                      [](const Argument &Arg) { return Arg.hasReturnedAttr(); }))
794       continue;
795 
796     auto FindRetArg = [&]() -> Value * {
797       Value *RetArg = nullptr;
798       for (BasicBlock &BB : *F)
799         if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
800           // Note that stripPointerCasts should look through functions with
801           // returned arguments.
802           Value *RetVal = Ret->getReturnValue()->stripPointerCasts();
803           if (!isa<Argument>(RetVal) || RetVal->getType() != F->getReturnType())
804             return nullptr;
805 
806           if (!RetArg)
807             RetArg = RetVal;
808           else if (RetArg != RetVal)
809             return nullptr;
810         }
811 
812       return RetArg;
813     };
814 
815     if (Value *RetArg = FindRetArg()) {
816       auto *A = cast<Argument>(RetArg);
817       A->addAttr(Attribute::Returned);
818       ++NumReturned;
819       Changed.insert(F);
820     }
821   }
822 }
823 
824 /// If a callsite has arguments that are also arguments to the parent function,
825 /// try to propagate attributes from the callsite's arguments to the parent's
826 /// arguments. This may be important because inlining can cause information loss
827 /// when attribute knowledge disappears with the inlined call.
828 static bool addArgumentAttrsFromCallsites(Function &F) {
829   if (!EnableNonnullArgPropagation)
830     return false;
831 
832   bool Changed = false;
833 
834   // For an argument attribute to transfer from a callsite to the parent, the
835   // call must be guaranteed to execute every time the parent is called.
836   // Conservatively, just check for calls in the entry block that are guaranteed
837   // to execute.
838   // TODO: This could be enhanced by testing if the callsite post-dominates the
839   // entry block or by doing simple forward walks or backward walks to the
840   // callsite.
841   BasicBlock &Entry = F.getEntryBlock();
842   for (Instruction &I : Entry) {
843     if (auto *CB = dyn_cast<CallBase>(&I)) {
844       if (auto *CalledFunc = CB->getCalledFunction()) {
845         for (auto &CSArg : CalledFunc->args()) {
846           if (!CSArg.hasNonNullAttr(/* AllowUndefOrPoison */ false))
847             continue;
848 
849           // If the non-null callsite argument operand is an argument to 'F'
850           // (the caller) and the call is guaranteed to execute, then the value
851           // must be non-null throughout 'F'.
852           auto *FArg = dyn_cast<Argument>(CB->getArgOperand(CSArg.getArgNo()));
853           if (FArg && !FArg->hasNonNullAttr()) {
854             FArg->addAttr(Attribute::NonNull);
855             Changed = true;
856           }
857         }
858       }
859     }
860     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
861       break;
862   }
863 
864   return Changed;
865 }
866 
867 static bool addAccessAttr(Argument *A, Attribute::AttrKind R) {
868   assert((R == Attribute::ReadOnly || R == Attribute::ReadNone ||
869           R == Attribute::WriteOnly)
870          && "Must be an access attribute.");
871   assert(A && "Argument must not be null.");
872 
873   // If the argument already has the attribute, nothing needs to be done.
874   if (A->hasAttribute(R))
875       return false;
876 
877   // Otherwise, remove potentially conflicting attribute, add the new one,
878   // and update statistics.
879   A->removeAttr(Attribute::WriteOnly);
880   A->removeAttr(Attribute::ReadOnly);
881   A->removeAttr(Attribute::ReadNone);
882   A->addAttr(R);
883   if (R == Attribute::ReadOnly)
884     ++NumReadOnlyArg;
885   else if (R == Attribute::WriteOnly)
886     ++NumWriteOnlyArg;
887   else
888     ++NumReadNoneArg;
889   return true;
890 }
891 
/// Deduce nocapture (and, where possible, readonly/readnone/writeonly)
/// attributes for pointer arguments of the functions in the SCC.
///
/// Arguments that can only be decided by looking at other arguments in the
/// same call-graph SCC are deferred into an ArgumentGraph; an SCC
/// decomposition over that graph then resolves them together.
///
/// \param SCCNodes functions of the current call-graph SCC
/// \param Changed receives every function whose attributes were modified
static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
                             SmallSet<Function *, 8> &Changed) {
  // Records argument-to-argument flows that could not be decided
  // function-locally (ArgumentGraph is declared earlier in this file).
  ArgumentGraph AG;

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    if (addArgumentAttrsFromCallsites(*F))
      Changed.insert(F);

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
    if (F->onlyReadsMemory() && F->doesNotThrow() &&
        F->getReturnType()->isVoidTy()) {
      for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
           ++A) {
        if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
          A->addAttr(Attribute::NoCapture);
          ++NumNoCapture;
          Changed.insert(F);
        }
      }
      continue;
    }

    for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
         ++A) {
      // Only pointer arguments can be captured or carry access attributes.
      if (!A->getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      if (!A->hasNoCaptureAttr()) {
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&*A, &Tracker);
        if (!Tracker.Captured) {
          if (Tracker.Uses.empty()) {
            // If it's trivially not captured, mark it nocapture now.
            A->addAttr(Attribute::NoCapture);
            ++NumNoCapture;
            Changed.insert(F);
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&*A];
            for (Argument *Use : Tracker.Uses) {
              Node->Uses.push_back(AG[Use]);
              if (Use != &*A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A->onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone/writeonly without doing
        // an SCC? Note that we don't allow any calls at all here, or else our
        // result will be dependent on the iteration order through the
        // functions in the SCC.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerAccessAttrs(&*A, Self);
        if (R != Attribute::None)
          if (addAccessAttr(A, R))
            Changed.insert(F);
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already been
  // made.  If the definition doesn't have a 'nocapture' attribute by now, it
  // captures.

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // A singleton SCC whose only use is itself corresponds to direct
      // self-recursion, which cannot capture.
      // eg. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        A->addAttr(Attribute::NoCapture);
        ++NumNoCapture;
        Changed.insert(A->getParent());

        // Infer the access attributes given the new nocapture one
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerAccessAttrs(&*A, Self);
        if (R != Attribute::None)
          addAccessAttr(A, R);
      }
      continue;
    }

    bool SCCCaptured = false;
    // A node with an empty Uses list was solved during the function-local
    // scan above; if it still lacks nocapture, the whole argument-SCC must be
    // treated as captured.
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *Node = *I;
      if (Node->Uses.empty()) {
        if (!Node->Definition->hasNoCaptureAttr())
          SCCCaptured = true;
      }
    }
    if (SCCCaptured)
      continue;

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC.  Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (ArgumentGraphNode *I : ArgumentSCC) {
      ArgumentSCCNodes.insert(I->Definition);
    }

    // An argument that flows to an argument outside this SCC which is not
    // nocapture makes the entire SCC captured.
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *N = *I;
      for (ArgumentGraphNode *Use : N->Uses) {
        Argument *A = Use->Definition;
        if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
          continue;
        SCCCaptured = true;
        break;
      }
    }
    if (SCCCaptured)
      continue;

    // No capture anywhere in the argument-SCC: mark every member nocapture.
    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      A->addAttr(Attribute::NoCapture);
      ++NumNoCapture;
      Changed.insert(A->getParent());
    }

    // We also want to compute readonly/readnone/writeonly. With a small number
    // of false negatives, we can assume that any pointer which is captured
    // isn't going to be provably readonly or readnone, since by definition
    // we can't analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary memory.
    // Also, a readonly/readnone pointer may be returned, but returning a
    // pointer is capturing it.

    // Combine two access kinds into the weakest kind satisfying both;
    // ReadNone is the identity element, and two differing non-ReadNone kinds
    // meet to None (no attribute can be inferred).
    auto meetAccessAttr = [](Attribute::AttrKind A, Attribute::AttrKind B) {
      if (A == B)
        return A;
      if (A == Attribute::ReadNone)
        return B;
      if (B == Attribute::ReadNone)
        return A;
      return Attribute::None;
    };

    Attribute::AttrKind AccessAttr = Attribute::ReadNone;
    for (unsigned i = 0, e = ArgumentSCC.size();
         i != e && AccessAttr != Attribute::None; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      Attribute::AttrKind K = determinePointerAccessAttrs(A, ArgumentSCCNodes);
      AccessAttr = meetAccessAttr(AccessAttr, K);
    }

    if (AccessAttr != Attribute::None) {
      for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
        Argument *A = ArgumentSCC[i]->Definition;
        if (addAccessAttr(A, AccessAttr))
          Changed.insert(A->getParent());
      }
    }
  }
}
1076 
/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
///
/// \param SCCNodes calls to functions in this set are treated optimistically,
///        since their noalias-return status is being deduced together with
///        \p F's.
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  // Worklist of values that may flow into a return; SmallSetVector
  // de-duplicates, so each value is examined at most once.
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  // Note: iterate by index because the loop body may grow the set.
  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      // Only null and undef constants are compatible with malloc-like
      // behavior; any other constant aliases caller-visible memory.
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    // Returning an argument returns a caller-visible pointer.
    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallBase &CB = cast<CallBase>(*RVI);
        if (CB.hasRetAttr(Attribute::NoAlias))
          break;
        // A call into the SCC is optimistically assumed noalias-returning.
        if (CB.getCalledFunction() && SCCNodes.count(CB.getCalledFunction()))
          break;
        // Any other call falls through to the failure case below.
        LLVM_FALLTHROUGH;
      }
      default:
        return false; // Did not come from an allocation.
      }

    // The allocation itself must not be captured (returning it is fine:
    // the first 'false' disables ReturnCaptures).
    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
      return false;
  }

  return true;
}
1143 
1144 /// Deduce noalias attributes for the SCC.
1145 static void addNoAliasAttrs(const SCCNodeSet &SCCNodes,
1146                             SmallSet<Function *, 8> &Changed) {
1147   // Check each function in turn, determining which functions return noalias
1148   // pointers.
1149   for (Function *F : SCCNodes) {
1150     // Already noalias.
1151     if (F->returnDoesNotAlias())
1152       continue;
1153 
1154     // We can infer and propagate function attributes only when we know that the
1155     // definition we'll get at link time is *exactly* the definition we see now.
1156     // For more details, see GlobalValue::mayBeDerefined.
1157     if (!F->hasExactDefinition())
1158       return;
1159 
1160     // We annotate noalias return values, which are only applicable to
1161     // pointer types.
1162     if (!F->getReturnType()->isPointerTy())
1163       continue;
1164 
1165     if (!isFunctionMallocLike(F, SCCNodes))
1166       return;
1167   }
1168 
1169   for (Function *F : SCCNodes) {
1170     if (F->returnDoesNotAlias() ||
1171         !F->getReturnType()->isPointerTy())
1172       continue;
1173 
1174     F->setReturnDoesNotAlias();
1175     ++NumNoAlias;
1176     Changed.insert(F);
1177   }
1178 }
1179 
1180 /// Tests whether this function is known to not return null.
1181 ///
1182 /// Requires that the function returns a pointer.
1183 ///
1184 /// Returns true if it believes the function will not return a null, and sets
1185 /// \p Speculative based on whether the returned conclusion is a speculative
1186 /// conclusion due to SCC calls.
1187 static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
1188                             bool &Speculative) {
1189   assert(F->getReturnType()->isPointerTy() &&
1190          "nonnull only meaningful on pointer types");
1191   Speculative = false;
1192 
1193   SmallSetVector<Value *, 8> FlowsToReturn;
1194   for (BasicBlock &BB : *F)
1195     if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
1196       FlowsToReturn.insert(Ret->getReturnValue());
1197 
1198   auto &DL = F->getParent()->getDataLayout();
1199 
1200   for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
1201     Value *RetVal = FlowsToReturn[i];
1202 
1203     // If this value is locally known to be non-null, we're good
1204     if (isKnownNonZero(RetVal, DL))
1205       continue;
1206 
1207     // Otherwise, we need to look upwards since we can't make any local
1208     // conclusions.
1209     Instruction *RVI = dyn_cast<Instruction>(RetVal);
1210     if (!RVI)
1211       return false;
1212     switch (RVI->getOpcode()) {
1213     // Extend the analysis by looking upwards.
1214     case Instruction::BitCast:
1215     case Instruction::GetElementPtr:
1216     case Instruction::AddrSpaceCast:
1217       FlowsToReturn.insert(RVI->getOperand(0));
1218       continue;
1219     case Instruction::Select: {
1220       SelectInst *SI = cast<SelectInst>(RVI);
1221       FlowsToReturn.insert(SI->getTrueValue());
1222       FlowsToReturn.insert(SI->getFalseValue());
1223       continue;
1224     }
1225     case Instruction::PHI: {
1226       PHINode *PN = cast<PHINode>(RVI);
1227       for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1228         FlowsToReturn.insert(PN->getIncomingValue(i));
1229       continue;
1230     }
1231     case Instruction::Call:
1232     case Instruction::Invoke: {
1233       CallBase &CB = cast<CallBase>(*RVI);
1234       Function *Callee = CB.getCalledFunction();
1235       // A call to a node within the SCC is assumed to return null until
1236       // proven otherwise
1237       if (Callee && SCCNodes.count(Callee)) {
1238         Speculative = true;
1239         continue;
1240       }
1241       return false;
1242     }
1243     default:
1244       return false; // Unknown source, may be null
1245     };
1246     llvm_unreachable("should have either continued or returned");
1247   }
1248 
1249   return true;
1250 }
1251 
1252 /// Deduce nonnull attributes for the SCC.
1253 static void addNonNullAttrs(const SCCNodeSet &SCCNodes,
1254                             SmallSet<Function *, 8> &Changed) {
1255   // Speculative that all functions in the SCC return only nonnull
1256   // pointers.  We may refute this as we analyze functions.
1257   bool SCCReturnsNonNull = true;
1258 
1259   // Check each function in turn, determining which functions return nonnull
1260   // pointers.
1261   for (Function *F : SCCNodes) {
1262     // Already nonnull.
1263     if (F->getAttributes().hasRetAttr(Attribute::NonNull))
1264       continue;
1265 
1266     // We can infer and propagate function attributes only when we know that the
1267     // definition we'll get at link time is *exactly* the definition we see now.
1268     // For more details, see GlobalValue::mayBeDerefined.
1269     if (!F->hasExactDefinition())
1270       return;
1271 
1272     // We annotate nonnull return values, which are only applicable to
1273     // pointer types.
1274     if (!F->getReturnType()->isPointerTy())
1275       continue;
1276 
1277     bool Speculative = false;
1278     if (isReturnNonNull(F, SCCNodes, Speculative)) {
1279       if (!Speculative) {
1280         // Mark the function eagerly since we may discover a function
1281         // which prevents us from speculating about the entire SCC
1282         LLVM_DEBUG(dbgs() << "Eagerly marking " << F->getName()
1283                           << " as nonnull\n");
1284         F->addRetAttr(Attribute::NonNull);
1285         ++NumNonNullReturn;
1286         Changed.insert(F);
1287       }
1288       continue;
1289     }
1290     // At least one function returns something which could be null, can't
1291     // speculate any more.
1292     SCCReturnsNonNull = false;
1293   }
1294 
1295   if (SCCReturnsNonNull) {
1296     for (Function *F : SCCNodes) {
1297       if (F->getAttributes().hasRetAttr(Attribute::NonNull) ||
1298           !F->getReturnType()->isPointerTy())
1299         continue;
1300 
1301       LLVM_DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
1302       F->addRetAttr(Attribute::NonNull);
1303       ++NumNonNullReturn;
1304       Changed.insert(F);
1305     }
1306   }
1307 }
1308 
1309 namespace {
1310 
1311 /// Collects a set of attribute inference requests and performs them all in one
1312 /// go on a single SCC Node. Inference involves scanning function bodies
1313 /// looking for instructions that violate attribute assumptions.
1314 /// As soon as all the bodies are fine we are free to set the attribute.
1315 /// Customization of inference for individual attributes is performed by
1316 /// providing a handful of predicates for each attribute.
1317 class AttributeInferer {
1318 public:
1319   /// Describes a request for inference of a single attribute.
1320   struct InferenceDescriptor {
1321 
1322     /// Returns true if this function does not have to be handled.
1323     /// General intent for this predicate is to provide an optimization
1324     /// for functions that do not need this attribute inference at all
1325     /// (say, for functions that already have the attribute).
1326     std::function<bool(const Function &)> SkipFunction;
1327 
1328     /// Returns true if this instruction violates attribute assumptions.
1329     std::function<bool(Instruction &)> InstrBreaksAttribute;
1330 
1331     /// Sets the inferred attribute for this function.
1332     std::function<void(Function &)> SetAttribute;
1333 
1334     /// Attribute we derive.
1335     Attribute::AttrKind AKind;
1336 
1337     /// If true, only "exact" definitions can be used to infer this attribute.
1338     /// See GlobalValue::isDefinitionExact.
1339     bool RequiresExactDefinition;
1340 
1341     InferenceDescriptor(Attribute::AttrKind AK,
1342                         std::function<bool(const Function &)> SkipFunc,
1343                         std::function<bool(Instruction &)> InstrScan,
1344                         std::function<void(Function &)> SetAttr,
1345                         bool ReqExactDef)
1346         : SkipFunction(SkipFunc), InstrBreaksAttribute(InstrScan),
1347           SetAttribute(SetAttr), AKind(AK),
1348           RequiresExactDefinition(ReqExactDef) {}
1349   };
1350 
1351 private:
1352   SmallVector<InferenceDescriptor, 4> InferenceDescriptors;
1353 
1354 public:
1355   void registerAttrInference(InferenceDescriptor AttrInference) {
1356     InferenceDescriptors.push_back(AttrInference);
1357   }
1358 
1359   void run(const SCCNodeSet &SCCNodes, SmallSet<Function *, 8> &Changed);
1360 };
1361 
/// Perform all the requested attribute inference actions according to the
/// attribute predicates stored before.
///
/// Each registered descriptor is an SCC-wide assumption: the moment one
/// function (or one instruction) violates it, the attribute is dropped for
/// the entire SCC. Attributes that survive the scan are forced on every
/// non-skipped function at the end.
void AttributeInferer::run(const SCCNodeSet &SCCNodes,
                           SmallSet<Function *, 8> &Changed) {
  // Work on a copy: descriptors invalidated for this SCC get erased from it.
  SmallVector<InferenceDescriptor, 4> InferInSCC = InferenceDescriptors;
  // Go through all the functions in SCC and check corresponding attribute
  // assumptions for each of them. Attributes that are invalid for this SCC
  // will be removed from InferInSCC.
  for (Function *F : SCCNodes) {

    // No attributes whose assumptions are still valid - done.
    if (InferInSCC.empty())
      return;

    // Check if our attributes ever need scanning/can be scanned.
    llvm::erase_if(InferInSCC, [F](const InferenceDescriptor &ID) {
      // Skipped functions are exempt from scanning but can still receive the
      // attribute in the final loop below.
      if (ID.SkipFunction(*F))
        return false;

      // Remove from further inference (invalidate) when visiting a function
      // that has no instructions to scan/has an unsuitable definition.
      return F->isDeclaration() ||
             (ID.RequiresExactDefinition && !F->hasExactDefinition());
    });

    // For each attribute still in InferInSCC that doesn't explicitly skip F,
    // set up the F instructions scan to verify assumptions of the attribute.
    SmallVector<InferenceDescriptor, 4> InferInThisFunc;
    llvm::copy_if(
        InferInSCC, std::back_inserter(InferInThisFunc),
        [F](const InferenceDescriptor &ID) { return !ID.SkipFunction(*F); });

    if (InferInThisFunc.empty())
      continue;

    // Start instruction scan.
    for (Instruction &I : instructions(*F)) {
      llvm::erase_if(InferInThisFunc, [&](const InferenceDescriptor &ID) {
        if (!ID.InstrBreaksAttribute(I))
          return false;
        // Remove attribute from further inference on any other functions
        // because attribute assumptions have just been violated.
        llvm::erase_if(InferInSCC, [&ID](const InferenceDescriptor &D) {
          return D.AKind == ID.AKind;
        });
        // Remove attribute from the rest of current instruction scan.
        return true;
      });

      // All remaining attributes invalidated for this function - stop early.
      if (InferInThisFunc.empty())
        break;
    }
  }

  if (InferInSCC.empty())
    return;

  for (Function *F : SCCNodes)
    // At this point InferInSCC contains only attributes whose assumptions
    // held for every scanned function. Hence we just go and force each such
    // attribute on all non-skipped functions.
    for (auto &ID : InferInSCC) {
      if (ID.SkipFunction(*F))
        continue;
      Changed.insert(F);
      ID.SetAttribute(*F);
    }
}
1431 
/// Aggregate handed back by the SCC-node collection step.
struct SCCNodesResult {
  // Functions of the current SCC considered for attribute inference.
  SCCNodeSet SCCNodes;
  // NOTE(review): presumably set when some call in the SCC has an unresolvable
  // callee -- the producer is outside this view; confirm against the function
  // that builds this struct.
  bool HasUnknownCall;
};
1436 
1437 } // end anonymous namespace
1438 
1439 /// Helper for non-Convergent inference predicate InstrBreaksAttribute.
1440 static bool InstrBreaksNonConvergent(Instruction &I,
1441                                      const SCCNodeSet &SCCNodes) {
1442   const CallBase *CB = dyn_cast<CallBase>(&I);
1443   // Breaks non-convergent assumption if CS is a convergent call to a function
1444   // not in the SCC.
1445   return CB && CB->isConvergent() &&
1446          !SCCNodes.contains(CB->getCalledFunction());
1447 }
1448 
1449 /// Helper for NoUnwind inference predicate InstrBreaksAttribute.
1450 static bool InstrBreaksNonThrowing(Instruction &I, const SCCNodeSet &SCCNodes) {
1451   if (!I.mayThrow())
1452     return false;
1453   if (const auto *CI = dyn_cast<CallInst>(&I)) {
1454     if (Function *Callee = CI->getCalledFunction()) {
1455       // I is a may-throw call to a function inside our SCC. This doesn't
1456       // invalidate our current working assumption that the SCC is no-throw; we
1457       // just have to scan that other function.
1458       if (SCCNodes.contains(Callee))
1459         return false;
1460     }
1461   }
1462   return true;
1463 }
1464 
1465 /// Helper for NoFree inference predicate InstrBreaksAttribute.
1466 static bool InstrBreaksNoFree(Instruction &I, const SCCNodeSet &SCCNodes) {
1467   CallBase *CB = dyn_cast<CallBase>(&I);
1468   if (!CB)
1469     return false;
1470 
1471   if (CB->hasFnAttr(Attribute::NoFree))
1472     return false;
1473 
1474   // Speculatively assume in SCC.
1475   if (Function *Callee = CB->getCalledFunction())
1476     if (SCCNodes.contains(Callee))
1477       return false;
1478 
1479   return true;
1480 }
1481 
1482 /// Attempt to remove convergent function attribute when possible.
1483 ///
1484 /// Returns true if any changes to function attributes were made.
1485 static void inferConvergent(const SCCNodeSet &SCCNodes,
1486                             SmallSet<Function *, 8> &Changed) {
1487   AttributeInferer AI;
1488 
1489   // Request to remove the convergent attribute from all functions in the SCC
1490   // if every callsite within the SCC is not convergent (except for calls
1491   // to functions within the SCC).
1492   // Note: Removal of the attr from the callsites will happen in
1493   // InstCombineCalls separately.
1494   AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
1495       Attribute::Convergent,
1496       // Skip non-convergent functions.
1497       [](const Function &F) { return !F.isConvergent(); },
1498       // Instructions that break non-convergent assumption.
1499       [SCCNodes](Instruction &I) {
1500         return InstrBreaksNonConvergent(I, SCCNodes);
1501       },
1502       [](Function &F) {
1503         LLVM_DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName()
1504                           << "\n");
1505         F.setNotConvergent();
1506       },
1507       /* RequiresExactDefinition= */ false});
1508   // Perform all the requested attribute inference actions.
1509   AI.run(SCCNodes, Changed);
1510 }
1511 
/// Infer attributes from all functions in the SCC by scanning every
/// instruction for compliance to the attribute assumptions.
/// Currently it does:
///   - addition of NoUnwind attribute
///   - addition of NoFree attribute
///
/// \param SCCNodes the current SCC
/// \param Changed receives every function whose attributes were modified
static void inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes,
                                         SmallSet<Function *, 8> &Changed) {
  AttributeInferer AI;

  if (!DisableNoUnwindInference)
    // Request to infer nounwind attribute for all the functions in the SCC if
    // every callsite within the SCC is not throwing (except for calls to
    // functions within the SCC). Note that nounwind attribute suffers from
    // derefinement - results may change depending on how functions are
    // optimized. Thus it can be inferred only from exact definitions.
    AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
        Attribute::NoUnwind,
        // Skip non-throwing functions.
        [](const Function &F) { return F.doesNotThrow(); },
        // Instructions that break non-throwing assumption.
        [&SCCNodes](Instruction &I) {
          return InstrBreaksNonThrowing(I, SCCNodes);
        },
        [](Function &F) {
          LLVM_DEBUG(dbgs()
                     << "Adding nounwind attr to fn " << F.getName() << "\n");
          F.setDoesNotThrow();
          ++NumNoUnwind;
        },
        /* RequiresExactDefinition= */ true});

  if (!DisableNoFreeInference)
    // Request to infer nofree attribute for all the functions in the SCC if
    // every callsite within the SCC does not directly or indirectly free
    // memory (except for calls to functions within the SCC). Note that nofree
    // attribute suffers from derefinement - results may change depending on
    // how functions are optimized. Thus it can be inferred only from exact
    // definitions.
    AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
        Attribute::NoFree,
        // Skip functions known not to free memory.
        [](const Function &F) { return F.doesNotFreeMemory(); },
        // Instructions that break non-deallocating assumption.
        [&SCCNodes](Instruction &I) {
          return InstrBreaksNoFree(I, SCCNodes);
        },
        [](Function &F) {
          LLVM_DEBUG(dbgs()
                     << "Adding nofree attr to fn " << F.getName() << "\n");
          F.setDoesNotFreeMemory();
          ++NumNoFree;
        },
        /* RequiresExactDefinition= */ true});

  // Perform all the requested attribute inference actions.
  AI.run(SCCNodes, Changed);
}
1570 
1571 static void addNoRecurseAttrs(const SCCNodeSet &SCCNodes,
1572                               SmallSet<Function *, 8> &Changed) {
1573   // Try and identify functions that do not recurse.
1574 
1575   // If the SCC contains multiple nodes we know for sure there is recursion.
1576   if (SCCNodes.size() != 1)
1577     return;
1578 
1579   Function *F = *SCCNodes.begin();
1580   if (!F || !F->hasExactDefinition() || F->doesNotRecurse())
1581     return;
1582 
1583   // If all of the calls in F are identifiable and are to norecurse functions, F
1584   // is norecurse. This check also detects self-recursion as F is not currently
1585   // marked norecurse, so any called from F to F will not be marked norecurse.
1586   for (auto &BB : *F)
1587     for (auto &I : BB.instructionsWithoutDebug())
1588       if (auto *CB = dyn_cast<CallBase>(&I)) {
1589         Function *Callee = CB->getCalledFunction();
1590         if (!Callee || Callee == F || !Callee->doesNotRecurse())
1591           // Function calls a potentially recursive function.
1592           return;
1593       }
1594 
1595   // Every call was to a non-recursive function other than this function, and
1596   // we have no indirect recursion as the SCC size is one. This function cannot
1597   // recurse.
1598   F->setDoesNotRecurse();
1599   ++NumNoRecurse;
1600   Changed.insert(F);
1601 }
1602 
1603 static bool instructionDoesNotReturn(Instruction &I) {
1604   if (auto *CB = dyn_cast<CallBase>(&I))
1605     return CB->hasFnAttr(Attribute::NoReturn);
1606   return false;
1607 }
1608 
1609 // A basic block can only return if it terminates with a ReturnInst and does not
1610 // contain calls to noreturn functions.
1611 static bool basicBlockCanReturn(BasicBlock &BB) {
1612   if (!isa<ReturnInst>(BB.getTerminator()))
1613     return false;
1614   return none_of(BB, instructionDoesNotReturn);
1615 }
1616 
1617 // FIXME: this doesn't handle recursion.
1618 static bool canReturn(Function &F) {
1619   SmallVector<BasicBlock *, 16> Worklist;
1620   SmallPtrSet<BasicBlock *, 16> Visited;
1621 
1622   Visited.insert(&F.front());
1623   Worklist.push_back(&F.front());
1624 
1625   do {
1626     BasicBlock *BB = Worklist.pop_back_val();
1627     if (basicBlockCanReturn(*BB))
1628       return true;
1629     for (BasicBlock *Succ : successors(BB))
1630       if (Visited.insert(Succ).second)
1631         Worklist.push_back(Succ);
1632   } while (!Worklist.empty());
1633 
1634   return false;
1635 }
1636 
1637 // Set the noreturn function attribute if possible.
1638 static void addNoReturnAttrs(const SCCNodeSet &SCCNodes,
1639                              SmallSet<Function *, 8> &Changed) {
1640   for (Function *F : SCCNodes) {
1641     if (!F || !F->hasExactDefinition() || F->hasFnAttribute(Attribute::Naked) ||
1642         F->doesNotReturn())
1643       continue;
1644 
1645     if (!canReturn(*F)) {
1646       F->setDoesNotReturn();
1647       Changed.insert(F);
1648     }
1649   }
1650 }
1651 
1652 static bool functionWillReturn(const Function &F) {
1653   // We can infer and propagate function attributes only when we know that the
1654   // definition we'll get at link time is *exactly* the definition we see now.
1655   // For more details, see GlobalValue::mayBeDerefined.
1656   if (!F.hasExactDefinition())
1657     return false;
1658 
1659   // Must-progress function without side-effects must return.
1660   if (F.mustProgress() && F.onlyReadsMemory())
1661     return true;
1662 
1663   // Can only analyze functions with a definition.
1664   if (F.isDeclaration())
1665     return false;
1666 
1667   // Functions with loops require more sophisticated analysis, as the loop
1668   // may be infinite. For now, don't try to handle them.
1669   SmallVector<std::pair<const BasicBlock *, const BasicBlock *>> Backedges;
1670   FindFunctionBackedges(F, Backedges);
1671   if (!Backedges.empty())
1672     return false;
1673 
1674   // If there are no loops, then the function is willreturn if all calls in
1675   // it are willreturn.
1676   return all_of(instructions(F), [](const Instruction &I) {
1677     return I.willReturn();
1678   });
1679 }
1680 
1681 // Set the willreturn function attribute if possible.
1682 static void addWillReturn(const SCCNodeSet &SCCNodes,
1683                           SmallSet<Function *, 8> &Changed) {
1684   for (Function *F : SCCNodes) {
1685     if (!F || F->willReturn() || !functionWillReturn(*F))
1686       continue;
1687 
1688     F->setWillReturn();
1689     NumWillReturn++;
1690     Changed.insert(F);
1691   }
1692 }
1693 
1694 // Return true if this is an atomic which has an ordering stronger than
1695 // unordered.  Note that this is different than the predicate we use in
1696 // Attributor.  Here we chose to be conservative and consider monotonic
1697 // operations potentially synchronizing.  We generally don't do much with
1698 // monotonic operations, so this is simply risk reduction.
1699 static bool isOrderedAtomic(Instruction *I) {
1700   if (!I->isAtomic())
1701     return false;
1702 
1703   if (auto *FI = dyn_cast<FenceInst>(I))
1704     // All legal orderings for fence are stronger than monotonic.
1705     return FI->getSyncScopeID() != SyncScope::SingleThread;
1706   else if (isa<AtomicCmpXchgInst>(I) || isa<AtomicRMWInst>(I))
1707     return true;
1708   else if (auto *SI = dyn_cast<StoreInst>(I))
1709     return !SI->isUnordered();
1710   else if (auto *LI = dyn_cast<LoadInst>(I))
1711     return !LI->isUnordered();
1712   else {
1713     llvm_unreachable("unknown atomic instruction?");
1714   }
1715 }
1716 
1717 static bool InstrBreaksNoSync(Instruction &I, const SCCNodeSet &SCCNodes) {
1718   // Volatile may synchronize
1719   if (I.isVolatile())
1720     return true;
1721 
1722   // An ordered atomic may synchronize.  (See comment about on monotonic.)
1723   if (isOrderedAtomic(&I))
1724     return true;
1725 
1726   auto *CB = dyn_cast<CallBase>(&I);
1727   if (!CB)
1728     // Non call site cases covered by the two checks above
1729     return false;
1730 
1731   if (CB->hasFnAttr(Attribute::NoSync))
1732     return false;
1733 
1734   // Non volatile memset/memcpy/memmoves are nosync
1735   // NOTE: Only intrinsics with volatile flags should be handled here.  All
1736   // others should be marked in Intrinsics.td.
1737   if (auto *MI = dyn_cast<MemIntrinsic>(&I))
1738     if (!MI->isVolatile())
1739       return false;
1740 
1741   // Speculatively assume in SCC.
1742   if (Function *Callee = CB->getCalledFunction())
1743     if (SCCNodes.contains(Callee))
1744       return false;
1745 
1746   return true;
1747 }
1748 
1749 // Infer the nosync attribute.
1750 static void addNoSyncAttr(const SCCNodeSet &SCCNodes,
1751                           SmallSet<Function *, 8> &Changed) {
1752   AttributeInferer AI;
1753   AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
1754       Attribute::NoSync,
1755       // Skip already marked functions.
1756       [](const Function &F) { return F.hasNoSync(); },
1757       // Instructions that break nosync assumption.
1758       [&SCCNodes](Instruction &I) {
1759         return InstrBreaksNoSync(I, SCCNodes);
1760       },
1761       [](Function &F) {
1762         LLVM_DEBUG(dbgs()
1763                    << "Adding nosync attr to fn " << F.getName() << "\n");
1764         F.setNoSync();
1765         ++NumNoSync;
1766       },
1767       /* RequiresExactDefinition= */ true});
1768   AI.run(SCCNodes, Changed);
1769 }
1770 
1771 static SCCNodesResult createSCCNodeSet(ArrayRef<Function *> Functions) {
1772   SCCNodesResult Res;
1773   Res.HasUnknownCall = false;
1774   for (Function *F : Functions) {
1775     if (!F || F->hasOptNone() || F->hasFnAttribute(Attribute::Naked) ||
1776         F->isPresplitCoroutine()) {
1777       // Treat any function we're trying not to optimize as if it were an
1778       // indirect call and omit it from the node set used below.
1779       Res.HasUnknownCall = true;
1780       continue;
1781     }
1782     // Track whether any functions in this SCC have an unknown call edge.
1783     // Note: if this is ever a performance hit, we can common it with
1784     // subsequent routines which also do scans over the instructions of the
1785     // function.
1786     if (!Res.HasUnknownCall) {
1787       for (Instruction &I : instructions(*F)) {
1788         if (auto *CB = dyn_cast<CallBase>(&I)) {
1789           if (!CB->getCalledFunction()) {
1790             Res.HasUnknownCall = true;
1791             break;
1792           }
1793         }
1794       }
1795     }
1796     Res.SCCNodes.insert(F);
1797   }
1798   return Res;
1799 }
1800 
/// Run every SCC-local attribute inference step over \p Functions and return
/// the set of functions whose attributes were changed. \p AARGetter supplies
/// per-function alias analysis results to the steps that need them.
template <typename AARGetterT>
static SmallSet<Function *, 8>
deriveAttrsInPostOrder(ArrayRef<Function *> Functions, AARGetterT &&AARGetter) {
  SCCNodesResult Nodes = createSCCNodeSet(Functions);

  // Bail if the SCC only contains optnone functions.
  if (Nodes.SCCNodes.empty())
    return {};

  SmallSet<Function *, 8> Changed;

  // Inferences that are valid even when the SCC has unknown call edges.
  addArgumentReturnedAttrs(Nodes.SCCNodes, Changed);
  addReadAttrs(Nodes.SCCNodes, AARGetter, Changed);
  addArgumentAttrs(Nodes.SCCNodes, Changed);
  inferConvergent(Nodes.SCCNodes, Changed);
  addNoReturnAttrs(Nodes.SCCNodes, Changed);
  addWillReturn(Nodes.SCCNodes, Changed);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
  if (!Nodes.HasUnknownCall) {
    addNoAliasAttrs(Nodes.SCCNodes, Changed);
    addNonNullAttrs(Nodes.SCCNodes, Changed);
    inferAttrsFromFunctionBodies(Nodes.SCCNodes, Changed);
    addNoRecurseAttrs(Nodes.SCCNodes, Changed);
  }

  addNoSyncAttr(Nodes.SCCNodes, Changed);

  // Finally, infer the maximal set of attributes from the ones we've inferred
  // above.  This is handling the cases where one attribute on a signature
  // implies another, but for implementation reasons the inference rule for
  // the later is missing (or simply less sophisticated).
  for (Function *F : Nodes.SCCNodes)
    if (F)
      if (inferAttributesFromOthers(*F))
        Changed.insert(F);

  return Changed;
}
1841 
// New-pass-manager entry point: derive attributes for one CGSCC, then
// invalidate only the analyses that could have been affected.
PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
                                                  CGSCCAnalysisManager &AM,
                                                  LazyCallGraph &CG,
                                                  CGSCCUpdateResult &) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // We pass a lambda into functions to wire them up to the analysis manager
  // for getting function analyses.
  auto AARGetter = [&](Function &F) -> AAResults & {
    return FAM.getResult<AAManager>(F);
  };

  // Flatten the SCC into the plain function list the inference code expects.
  SmallVector<Function *, 8> Functions;
  for (LazyCallGraph::Node &N : C) {
    Functions.push_back(&N.getFunction());
  }

  auto ChangedFunctions = deriveAttrsInPostOrder(Functions, AARGetter);
  if (ChangedFunctions.empty())
    return PreservedAnalyses::all();

  // Invalidate analyses for modified functions so that we don't have to
  // invalidate all analyses for all functions in this SCC.
  PreservedAnalyses FuncPA;
  // We haven't changed the CFG for modified functions.
  FuncPA.preserveSet<CFGAnalyses>();
  for (Function *Changed : ChangedFunctions) {
    FAM.invalidate(*Changed, FuncPA);
    // Also invalidate any direct callers of changed functions since analyses
    // may care about attributes of direct callees. For example, MemorySSA cares
    // about whether or not a call's callee modifies memory and queries that
    // through function attributes.
    for (auto *U : Changed->users()) {
      if (auto *Call = dyn_cast<CallBase>(U)) {
        if (Call->getCalledFunction() == Changed)
          FAM.invalidate(*Call->getFunction(), FuncPA);
      }
    }
  }

  PreservedAnalyses PA;
  // We have not added or removed functions.
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  // We already invalidated all relevant function analyses above.
  PA.preserveSet<AllAnalysesOn<Function>>();
  return PA;
}
1890 
1891 namespace {
1892 
/// Legacy pass manager wrapper around the post-order function-attrs
/// deduction; the actual work happens in runImpl/deriveAttrsInPostOrder.
struct PostOrderFunctionAttrsLegacyPass : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  PostOrderFunctionAttrsLegacyPass() : CallGraphSCCPass(ID) {
    initializePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  // Derive attributes for one call-graph SCC; returns true if anything
  // changed.
  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Attribute changes never modify the CFG.
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    // Pull in the requirements of the configured alias-analysis pipeline.
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};
1911 
1912 } // end anonymous namespace
1913 
char PostOrderFunctionAttrsLegacyPass::ID = 0;
// Register the legacy pass (command-line name "function-attrs") together
// with the analyses it depends on.
INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrsLegacyPass, "function-attrs",
                      "Deduce function attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "function-attrs",
                    "Deduce function attributes", false, false)

// Factory used by the legacy pass-manager builder.
Pass *llvm::createPostOrderFunctionAttrsLegacyPass() {
  return new PostOrderFunctionAttrsLegacyPass();
}
1925 
1926 template <typename AARGetterT>
1927 static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {
1928   SmallVector<Function *, 8> Functions;
1929   for (CallGraphNode *I : SCC) {
1930     Functions.push_back(I->getFunction());
1931   }
1932 
1933   return !deriveAttrsInPostOrder(Functions, AARGetter).empty();
1934 }
1935 
1936 bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
1937   if (skipSCC(SCC))
1938     return false;
1939   return runImpl(SCC, LegacyAARGetter(*this));
1940 }
1941 
1942 namespace {
1943 
/// Legacy pass manager wrapper for the top-down (reverse post-order)
/// norecurse deduction implemented in deduceFunctionAttributeInRPO.
struct ReversePostOrderFunctionAttrsLegacyPass : public ModulePass {
  // Pass identification, replacement for typeid
  static char ID;

  ReversePostOrderFunctionAttrsLegacyPass() : ModulePass(ID) {
    initializeReversePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  // Deduce norecurse over the whole module; returns true if anything
  // changed.
  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Only attributes change; the CFG and call graph stay intact.
    AU.setPreservesCFG();
    AU.addRequired<CallGraphWrapperPass>();
    AU.addPreserved<CallGraphWrapperPass>();
  }
};
1961 
1962 } // end anonymous namespace
1963 
char ReversePostOrderFunctionAttrsLegacyPass::ID = 0;

// Register the legacy pass (command-line name "rpo-function-attrs") together
// with its call-graph dependency.
INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrsLegacyPass,
                      "rpo-function-attrs", "Deduce function attributes in RPO",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ReversePostOrderFunctionAttrsLegacyPass,
                    "rpo-function-attrs", "Deduce function attributes in RPO",
                    false, false)

// Factory used by the legacy pass-manager builder.
Pass *llvm::createReversePostOrderFunctionAttrsPass() {
  return new ReversePostOrderFunctionAttrsLegacyPass();
}
1977 
1978 static bool addNoRecurseAttrsTopDown(Function &F) {
1979   // We check the preconditions for the function prior to calling this to avoid
1980   // the cost of building up a reversible post-order list. We assert them here
1981   // to make sure none of the invariants this relies on were violated.
1982   assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
1983   assert(!F.doesNotRecurse() &&
1984          "This function has already been deduced as norecurs!");
1985   assert(F.hasInternalLinkage() &&
1986          "Can only do top-down deduction for internal linkage functions!");
1987 
1988   // If F is internal and all of its uses are calls from a non-recursive
1989   // functions, then none of its calls could in fact recurse without going
1990   // through a function marked norecurse, and so we can mark this function too
1991   // as norecurse. Note that the uses must actually be calls -- otherwise
1992   // a pointer to this function could be returned from a norecurse function but
1993   // this function could be recursively (indirectly) called. Note that this
1994   // also detects if F is directly recursive as F is not yet marked as
1995   // a norecurse function.
1996   for (auto *U : F.users()) {
1997     auto *I = dyn_cast<Instruction>(U);
1998     if (!I)
1999       return false;
2000     CallBase *CB = dyn_cast<CallBase>(I);
2001     if (!CB || !CB->getParent()->getParent()->doesNotRecurse())
2002       return false;
2003   }
2004   F.setDoesNotRecurse();
2005   ++NumNoRecurse;
2006   return true;
2007 }
2008 
2009 static bool deduceFunctionAttributeInRPO(Module &M, CallGraph &CG) {
2010   // We only have a post-order SCC traversal (because SCCs are inherently
2011   // discovered in post-order), so we accumulate them in a vector and then walk
2012   // it in reverse. This is simpler than using the RPO iterator infrastructure
2013   // because we need to combine SCC detection and the PO walk of the call
2014   // graph. We can also cheat egregiously because we're primarily interested in
2015   // synthesizing norecurse and so we can only save the singular SCCs as SCCs
2016   // with multiple functions in them will clearly be recursive.
2017   SmallVector<Function *, 16> Worklist;
2018   for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
2019     if (I->size() != 1)
2020       continue;
2021 
2022     Function *F = I->front()->getFunction();
2023     if (F && !F->isDeclaration() && !F->doesNotRecurse() &&
2024         F->hasInternalLinkage())
2025       Worklist.push_back(F);
2026   }
2027 
2028   bool Changed = false;
2029   for (auto *F : llvm::reverse(Worklist))
2030     Changed |= addNoRecurseAttrsTopDown(*F);
2031 
2032   return Changed;
2033 }
2034 
2035 bool ReversePostOrderFunctionAttrsLegacyPass::runOnModule(Module &M) {
2036   if (skipModule(M))
2037     return false;
2038 
2039   auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2040 
2041   return deduceFunctionAttributeInRPO(M, CG);
2042 }
2043 
2044 PreservedAnalyses
2045 ReversePostOrderFunctionAttrsPass::run(Module &M, ModuleAnalysisManager &AM) {
2046   auto &CG = AM.getResult<CallGraphAnalysis>(M);
2047 
2048   if (!deduceFunctionAttributeInRPO(M, CG))
2049     return PreservedAnalyses::all();
2050 
2051   PreservedAnalyses PA;
2052   PA.preserve<CallGraphAnalysis>();
2053   return PA;
2054 }
2055