1 //===- FunctionAttrs.cpp - Pass which marks functions attributes ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file implements interprocedural passes which walk the
11 /// call-graph deducing and/or propagating function attributes.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/IPO/FunctionAttrs.h"
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/Analysis/AssumptionCache.h"
24 #include "llvm/Analysis/BasicAliasAnalysis.h"
25 #include "llvm/Analysis/CGSCCPassManager.h"
26 #include "llvm/Analysis/CallGraph.h"
27 #include "llvm/Analysis/CallGraphSCCPass.h"
28 #include "llvm/Analysis/CaptureTracking.h"
29 #include "llvm/Analysis/LazyCallGraph.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/MemoryLocation.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Argument.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/Constant.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/InstIterator.h"
40 #include "llvm/IR/InstrTypes.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/IntrinsicInst.h"
44 #include "llvm/IR/Metadata.h"
45 #include "llvm/IR/PassManager.h"
46 #include "llvm/IR/Type.h"
47 #include "llvm/IR/Use.h"
48 #include "llvm/IR/User.h"
49 #include "llvm/IR/Value.h"
50 #include "llvm/InitializePasses.h"
51 #include "llvm/Pass.h"
52 #include "llvm/Support/Casting.h"
53 #include "llvm/Support/CommandLine.h"
54 #include "llvm/Support/Compiler.h"
55 #include "llvm/Support/Debug.h"
56 #include "llvm/Support/ErrorHandling.h"
57 #include "llvm/Support/raw_ostream.h"
58 #include "llvm/Transforms/IPO.h"
59 #include <cassert>
60 #include <iterator>
61 #include <map>
62 #include <vector>
63 
64 using namespace llvm;
65 
66 #define DEBUG_TYPE "functionattrs"
67 
// Statistics: how many attributes each deduction below managed to infer.
STATISTIC(NumReadNone, "Number of functions marked readnone");
STATISTIC(NumReadOnly, "Number of functions marked readonly");
STATISTIC(NumWriteOnly, "Number of functions marked writeonly");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumReturned, "Number of arguments marked returned");
STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");
STATISTIC(NumNoUnwind, "Number of functions marked as nounwind");
STATISTIC(NumNoFree, "Number of functions marked as nofree");

// Command-line knobs for this pass.  Nonnull propagation is on by default;
// the two "disable" flags below default to off (i.e. inference enabled).
static cl::opt<bool> EnableNonnullArgPropagation(
    "enable-nonnull-arg-prop", cl::init(true), cl::Hidden,
    cl::desc("Try to propagate nonnull argument attributes from callsites to "
             "caller functions."));

static cl::opt<bool> DisableNoUnwindInference(
    "disable-nounwind-inference", cl::Hidden,
    cl::desc("Stop inferring nounwind attribute during function-attrs pass"));

static cl::opt<bool> DisableNoFreeInference(
    "disable-nofree-inference", cl::Hidden,
    cl::desc("Stop inferring nofree attribute during function-attrs pass"));

namespace {

// The set of functions in the SCC currently being processed.  SetVector keeps
// a deterministic iteration order while allowing O(1) membership tests.
using SCCNodeSet = SmallSetVector<Function *, 8>;

} // end anonymous namespace
99 
/// Returns the memory access attribute for function F using AAR for AA results,
/// where SCCNodes is the current SCC.
///
/// If ThisBody is true, this function may examine the function body and will
/// return a result pertaining to this copy of the function. If it is false, the
/// result will be based only on AA results for the function declaration; it
/// will be assumed that some other (perhaps less optimized) version of the
/// function may be selected at link time.
///
/// \returns one of MAK_ReadNone, MAK_ReadOnly, MAK_WriteOnly or MAK_MayWrite,
/// describing the externally visible memory behavior of \p F.
static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
                                                  AAResults &AAR,
                                                  const SCCNodeSet &SCCNodes) {
  FunctionModRefBehavior MRB = AAR.getModRefBehavior(&F);
  if (MRB == FMRB_DoesNotAccessMemory)
    // Already perfect!
    return MAK_ReadNone;

  if (!ThisBody) {
    // No body to inspect: classify purely from what AA knows about the
    // declaration.
    if (AliasAnalysis::onlyReadsMemory(MRB))
      return MAK_ReadOnly;

    if (AliasAnalysis::doesNotReadMemory(MRB))
      return MAK_WriteOnly;

    // Conservatively assume it reads and writes to memory.
    return MAK_MayWrite;
  }

  // Scan the function body for instructions that may read or write memory.
  bool ReadsMemory = false;
  bool WritesMemory = false;
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;

    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
    if (auto *Call = dyn_cast<CallBase>(I)) {
      // Ignore calls to functions in the same SCC, as long as the call sites
      // don't have operand bundles.  Calls with operand bundles are allowed to
      // have memory effects not described by the memory effects of the call
      // target.
      if (!Call->hasOperandBundles() && Call->getCalledFunction() &&
          SCCNodes.count(Call->getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(Call);
      ModRefInfo MRI = createModRefInfo(MRB);

      // If the call doesn't access memory, we're done.
      if (isNoModRef(MRI))
        continue;

      if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
        // The call could access any memory. If that includes writes, note it.
        if (isModSet(MRI))
          WritesMemory = true;
        // If it reads, note it.
        if (isRefSet(MRI))
          ReadsMemory = true;
        continue;
      }

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (auto CI = Call->arg_begin(), CE = Call->arg_end(); CI != CE; ++CI) {
        Value *Arg = *CI;
        // Only pointer (or vector-of-pointer) operands can name memory.
        if (!Arg->getType()->isPtrOrPtrVectorTy())
          continue;

        AAMDNodes AAInfo;
        I->getAAMetadata(AAInfo);
        // Size is unknown: the callee may dereference any extent through Arg.
        MemoryLocation Loc(Arg, LocationSize::unknown(), AAInfo);

        // Skip accesses to local or constant memory as they don't impact the
        // externally visible mod/ref behavior.
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;

        if (isModSet(MRI))
          // Writes non-local memory.
          WritesMemory = true;
        if (isRefSet(MRI))
          // Ok, it reads non-local memory.
          ReadsMemory = true;
      }
      continue;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // Ignore non-volatile loads from local memory. (Atomic is okay here.)
      if (!LI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(LI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Ignore non-volatile stores to local memory. (Atomic is okay here.)
      if (!SI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(SI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) {
      // Ignore vaargs on local memory.
      MemoryLocation Loc = MemoryLocation::get(VI);
      if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
        continue;
    }

    // Any remaining instructions need to be taken seriously!  Check if they
    // read or write memory.
    //
    // Writes memory, remember that.
    WritesMemory |= I->mayWriteToMemory();

    // If this instruction may read memory, remember that.
    ReadsMemory |= I->mayReadFromMemory();
  }

  // Fold the two flags into the four-valued result.
  if (WritesMemory) {
    if (!ReadsMemory)
      return MAK_WriteOnly;
    else
      return MAK_MayWrite;
  }

  return ReadsMemory ? MAK_ReadOnly : MAK_ReadNone;
}
224 
225 MemoryAccessKind llvm::computeFunctionBodyMemoryAccess(Function &F,
226                                                        AAResults &AAR) {
227   return checkFunctionMemoryAccess(F, /*ThisBody=*/true, AAR, {});
228 }
229 
/// Deduce readonly/readnone attributes for the SCC.
///
/// The classification is computed for the SCC as a whole: a single function
/// that writes (and reads) memory blocks the deduction for every member,
/// because the members may call each other.
/// \returns true if any function attribute was changed.
template <typename AARGetterT>
static bool addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter) {
  // Check if any of the functions in the SCC read or write memory.  If they
  // write memory then they can't be marked readnone or readonly.
  bool ReadsMemory = false;
  bool WritesMemory = false;
  for (Function *F : SCCNodes) {
    // Call the callable parameter to look up AA results for this function.
    AAResults &AAR = AARGetter(*F);

    // Non-exact function definitions may not be selected at link time, and an
    // alternative version that writes to memory may be selected.  See the
    // comment on GlobalValue::isDefinitionExact for more details.
    switch (checkFunctionMemoryAccess(*F, F->hasExactDefinition(),
                                      AAR, SCCNodes)) {
    case MAK_MayWrite:
      // One member both reads and writes arbitrary memory: nothing can be
      // deduced for any member of the SCC.
      return false;
    case MAK_ReadOnly:
      ReadsMemory = true;
      break;
    case MAK_WriteOnly:
      WritesMemory = true;
      break;
    case MAK_ReadNone:
      // Nothing to do!
      break;
    }
  }

  // If the SCC contains both functions that read and functions that write, then
  // we cannot add readonly attributes.
  if (ReadsMemory && WritesMemory)
    return false;

  // Success!  Functions in this SCC do not access memory, or only read memory.
  // Give them the appropriate attribute.
  bool MadeChange = false;

  for (Function *F : SCCNodes) {
    if (F->doesNotAccessMemory())
      // Already perfect!
      continue;

    if (F->onlyReadsMemory() && ReadsMemory)
      // No change.
      continue;

    // Already writeonly and that is what we would deduce: leave it alone.
    if (F->doesNotReadMemory() && WritesMemory)
      continue;

    MadeChange = true;

    // Clear out any existing attributes.
    F->removeFnAttr(Attribute::ReadOnly);
    F->removeFnAttr(Attribute::ReadNone);
    F->removeFnAttr(Attribute::WriteOnly);

    if (!WritesMemory && !ReadsMemory) {
      // Clear out any "access range attributes" if readnone was deduced.
      // readnone subsumes them, and keeping them would be inconsistent.
      F->removeFnAttr(Attribute::ArgMemOnly);
      F->removeFnAttr(Attribute::InaccessibleMemOnly);
      F->removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
    }

    // Add in the new attribute.
    if (WritesMemory && !ReadsMemory)
      F->addFnAttr(Attribute::WriteOnly);
    else
      F->addFnAttr(ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);

    // Keep the statistics in sync with what was just added.
    if (WritesMemory && !ReadsMemory)
      ++NumWriteOnly;
    else if (ReadsMemory)
      ++NumReadOnly;
    else
      ++NumReadNone;
  }

  return MadeChange;
}
311 
namespace {

/// For a given pointer Argument, this retains a list of Arguments of functions
/// in the same SCC that the pointer data flows into. We use this to build an
/// SCC of the arguments.
struct ArgumentGraphNode {
  // The argument this node stands for; null only for the synthetic root.
  Argument *Definition;
  // Arguments (of functions in the same SCC) this argument flows into.
  SmallVector<ArgumentGraphNode *, 4> Uses;
};

class ArgumentGraph {
  // We store pointers to ArgumentGraphNode objects, so it's important that
  // that they not move around upon insert.
  using ArgumentMapTy = std::map<Argument *, ArgumentGraphNode>;

  ArgumentMapTy ArgumentMap;

  // There is no root node for the argument graph, in fact:
  //   void f(int *x, int *y) { if (...) f(x, y); }
  // is an example where the graph is disconnected. The SCCIterator requires a
  // single entry point, so we maintain a fake ("synthetic") root node that
  // uses every node. Because the graph is directed and nothing points into
  // the root, it will not participate in any SCCs (except for its own).
  ArgumentGraphNode SyntheticRoot;

public:
  ArgumentGraph() { SyntheticRoot.Definition = nullptr; }

  using iterator = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  // Iterating the graph visits the synthetic root's use list, i.e. every
  // node handed out by operator[] below.
  iterator begin() { return SyntheticRoot.Uses.begin(); }
  iterator end() { return SyntheticRoot.Uses.end(); }
  ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }

  // Get-or-create the node for argument A.  NOTE(review): every call appends
  // the node to the synthetic root's Uses, so repeated lookups of the same
  // Argument make iteration visit that node more than once — confirm callers
  // tolerate duplicates.
  ArgumentGraphNode *operator[](Argument *A) {
    ArgumentGraphNode &Node = ArgumentMap[A];
    Node.Definition = A;
    SyntheticRoot.Uses.push_back(&Node);
    return &Node;
  }
};

/// This tracker checks whether callees are in the SCC, and if so it does not
/// consider that a capture, instead adding it to the "Uses" list and
/// continuing with the analysis.
struct ArgumentUsesTracker : public CaptureTracker {
  ArgumentUsesTracker(const SCCNodeSet &SCCNodes) : SCCNodes(SCCNodes) {}

  // Capture tracking gave up scanning: conservatively treat as captured.
  void tooManyUses() override { Captured = true; }

  // Called for each use that capture tracking considers a capture.  Returning
  // true stops the walk (a definitive capture); returning false lets it
  // continue after we record the corresponding callee argument in Uses.
  bool captured(const Use *U) override {
    CallBase *CB = dyn_cast<CallBase>(U->getUser());
    if (!CB) {
      // Captured by a non-call user (store, return, ...).
      Captured = true;
      return true;
    }

    Function *F = CB->getCalledFunction();
    if (!F || !F->hasExactDefinition() || !SCCNodes.count(F)) {
      // Indirect call, derefinable definition, or a callee outside our SCC:
      // nothing optimistic can be assumed.
      Captured = true;
      return true;
    }

    // Note: the callee and the two successor blocks *follow* the argument
    // operands.  This means there is no need to adjust UseIndex to account for
    // these.

    unsigned UseIndex =
        std::distance(const_cast<const Use *>(CB->arg_begin()), U);

    assert(UseIndex < CB->data_operands_size() &&
           "Indirect function calls should have been filtered above!");

    if (UseIndex >= CB->getNumArgOperands()) {
      // Data operand, but not a argument operand -- must be a bundle operand
      assert(CB->hasOperandBundles() && "Must be!");

      // CaptureTracking told us that we're being captured by an operand bundle
      // use.  In this case it does not matter if the callee is within our SCC
      // or not -- we've been captured in some unknown way, and we have to be
      // conservative.
      Captured = true;
      return true;
    }

    if (UseIndex >= F->arg_size()) {
      // Passed through the varargs ellipsis: no formal argument to follow.
      assert(F->isVarArg() && "More params than args in non-varargs call");
      Captured = true;
      return true;
    }

    // Record the callee's formal argument that receives the pointer; the
    // Argument-SCC analysis will decide later whether it escapes from there.
    Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
    return false;
  }

  // True only if certainly captured (used outside our SCC).
  bool Captured = false;

  // Uses within our SCC.
  SmallVector<Argument *, 4> Uses;

  const SCCNodeSet &SCCNodes;
};

} // end anonymous namespace
417 
namespace llvm {

// GraphTraits adapters so scc_iterator can walk the argument graph: a node's
// children are the arguments its pointer value flows into.
template <> struct GraphTraits<ArgumentGraphNode *> {
  using NodeRef = ArgumentGraphNode *;
  using ChildIteratorType = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  static NodeRef getEntryNode(NodeRef A) { return A; }
  static ChildIteratorType child_begin(NodeRef N) { return N->Uses.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Uses.end(); }
};

// Whole-graph traits: entry is the synthetic root, whose children enumerate
// every node in the graph.
template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeRef getEntryNode(ArgumentGraph *AG) { return AG->getEntryNode(); }

  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }

  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};

} // end namespace llvm
441 
/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
///
/// Walks all transitive uses of argument \p A with a worklist.  Uses flowing
/// into arguments contained in \p SCCNodes are optimistically ignored (they
/// are being solved simultaneously); any other use that may write, or that
/// escapes analysis, yields Attribute::None.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {
  SmallVector<Use *, 32> Worklist;
  SmallPtrSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr() || A->hasPreallocatedAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  // Seed the worklist with the argument's direct uses.
  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      // Assume the pointer may be captured by the call's return value; a void
      // call cannot forward it that way.
      bool Captures = true;

      if (I->getType()->isVoidTy())
        Captures = false;

      // If the call may return (a value derived from) the pointer, the uses of
      // the call result must be explored too.
      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallBase &CB = cast<CallBase>(*I);
      if (CB.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CB.getCalledFunction();
      if (!F) {
        // Indirect call: nothing is known about the callee beyond callsite
        // attributes.
        if (CB.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands.  This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CB.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the call site is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CB.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CB.getNumArgOperands();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        // Passed through the varargs ellipsis: give up.
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CB.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow corresponding
      // to a operand bundle use, these cannot participate in the optimistic SCC
      // analysis.  Instead, we model the operand bundle uses as arguments in
      // call to a function external to the SCC.
      if (IsOperandBundleUse ||
          !SCCNodes.count(&*std::next(F->arg_begin(), UseIndex))) {

        // The accessors used on call site here do the right thing for calls and
        // invokes with operand bundles.

        if (!CB.onlyReadsMemory() && !CB.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CB.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      // A volatile load has side effects beyond what readonly can be relied
      // upon.
      if (cast<LoadInst>(I)->isVolatile())
        return Attribute::None;

      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      // Comparing or returning the pointer neither reads nor writes through it.
      break;

    default:
      // Unknown user: assume the worst.
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}
569 
570 /// Deduce returned attributes for the SCC.
571 static bool addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes) {
572   bool Changed = false;
573 
574   // Check each function in turn, determining if an argument is always returned.
575   for (Function *F : SCCNodes) {
576     // We can infer and propagate function attributes only when we know that the
577     // definition we'll get at link time is *exactly* the definition we see now.
578     // For more details, see GlobalValue::mayBeDerefined.
579     if (!F->hasExactDefinition())
580       continue;
581 
582     if (F->getReturnType()->isVoidTy())
583       continue;
584 
585     // There is nothing to do if an argument is already marked as 'returned'.
586     if (llvm::any_of(F->args(),
587                      [](const Argument &Arg) { return Arg.hasReturnedAttr(); }))
588       continue;
589 
590     auto FindRetArg = [&]() -> Value * {
591       Value *RetArg = nullptr;
592       for (BasicBlock &BB : *F)
593         if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
594           // Note that stripPointerCasts should look through functions with
595           // returned arguments.
596           Value *RetVal = Ret->getReturnValue()->stripPointerCasts();
597           if (!isa<Argument>(RetVal) || RetVal->getType() != F->getReturnType())
598             return nullptr;
599 
600           if (!RetArg)
601             RetArg = RetVal;
602           else if (RetArg != RetVal)
603             return nullptr;
604         }
605 
606       return RetArg;
607     };
608 
609     if (Value *RetArg = FindRetArg()) {
610       auto *A = cast<Argument>(RetArg);
611       A->addAttr(Attribute::Returned);
612       ++NumReturned;
613       Changed = true;
614     }
615   }
616 
617   return Changed;
618 }
619 
620 /// If a callsite has arguments that are also arguments to the parent function,
621 /// try to propagate attributes from the callsite's arguments to the parent's
622 /// arguments. This may be important because inlining can cause information loss
623 /// when attribute knowledge disappears with the inlined call.
624 static bool addArgumentAttrsFromCallsites(Function &F) {
625   if (!EnableNonnullArgPropagation)
626     return false;
627 
628   bool Changed = false;
629 
630   // For an argument attribute to transfer from a callsite to the parent, the
631   // call must be guaranteed to execute every time the parent is called.
632   // Conservatively, just check for calls in the entry block that are guaranteed
633   // to execute.
634   // TODO: This could be enhanced by testing if the callsite post-dominates the
635   // entry block or by doing simple forward walks or backward walks to the
636   // callsite.
637   BasicBlock &Entry = F.getEntryBlock();
638   for (Instruction &I : Entry) {
639     if (auto *CB = dyn_cast<CallBase>(&I)) {
640       if (auto *CalledFunc = CB->getCalledFunction()) {
641         for (auto &CSArg : CalledFunc->args()) {
642           if (!CSArg.hasNonNullAttr())
643             continue;
644 
645           // If the non-null callsite argument operand is an argument to 'F'
646           // (the caller) and the call is guaranteed to execute, then the value
647           // must be non-null throughout 'F'.
648           auto *FArg = dyn_cast<Argument>(CB->getArgOperand(CSArg.getArgNo()));
649           if (FArg && !FArg->hasNonNullAttr()) {
650             FArg->addAttr(Attribute::NonNull);
651             Changed = true;
652           }
653         }
654       }
655     }
656     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
657       break;
658   }
659 
660   return Changed;
661 }
662 
663 static bool addReadAttr(Argument *A, Attribute::AttrKind R) {
664   assert((R == Attribute::ReadOnly || R == Attribute::ReadNone)
665          && "Must be a Read attribute.");
666   assert(A && "Argument must not be null.");
667 
668   // If the argument already has the attribute, nothing needs to be done.
669   if (A->hasAttribute(R))
670       return false;
671 
672   // Otherwise, remove potentially conflicting attribute, add the new one,
673   // and update statistics.
674   A->removeAttr(Attribute::WriteOnly);
675   A->removeAttr(Attribute::ReadOnly);
676   A->removeAttr(Attribute::ReadNone);
677   A->addAttr(R);
678   R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
679   return true;
680 }
681 
682 /// Deduce nocapture attributes for the SCC.
683 static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
684   bool Changed = false;
685 
686   ArgumentGraph AG;
687 
688   // Check each function in turn, determining which pointer arguments are not
689   // captured.
690   for (Function *F : SCCNodes) {
691     // We can infer and propagate function attributes only when we know that the
692     // definition we'll get at link time is *exactly* the definition we see now.
693     // For more details, see GlobalValue::mayBeDerefined.
694     if (!F->hasExactDefinition())
695       continue;
696 
697     Changed |= addArgumentAttrsFromCallsites(*F);
698 
699     // Functions that are readonly (or readnone) and nounwind and don't return
700     // a value can't capture arguments. Don't analyze them.
701     if (F->onlyReadsMemory() && F->doesNotThrow() &&
702         F->getReturnType()->isVoidTy()) {
703       for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
704            ++A) {
705         if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
706           A->addAttr(Attribute::NoCapture);
707           ++NumNoCapture;
708           Changed = true;
709         }
710       }
711       continue;
712     }
713 
714     for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
715          ++A) {
716       if (!A->getType()->isPointerTy())
717         continue;
718       bool HasNonLocalUses = false;
719       if (!A->hasNoCaptureAttr()) {
720         ArgumentUsesTracker Tracker(SCCNodes);
721         PointerMayBeCaptured(&*A, &Tracker);
722         if (!Tracker.Captured) {
723           if (Tracker.Uses.empty()) {
724             // If it's trivially not captured, mark it nocapture now.
725             A->addAttr(Attribute::NoCapture);
726             ++NumNoCapture;
727             Changed = true;
728           } else {
729             // If it's not trivially captured and not trivially not captured,
730             // then it must be calling into another function in our SCC. Save
731             // its particulars for Argument-SCC analysis later.
732             ArgumentGraphNode *Node = AG[&*A];
733             for (Argument *Use : Tracker.Uses) {
734               Node->Uses.push_back(AG[Use]);
735               if (Use != &*A)
736                 HasNonLocalUses = true;
737             }
738           }
739         }
740         // Otherwise, it's captured. Don't bother doing SCC analysis on it.
741       }
742       if (!HasNonLocalUses && !A->onlyReadsMemory()) {
743         // Can we determine that it's readonly/readnone without doing an SCC?
744         // Note that we don't allow any calls at all here, or else our result
745         // will be dependent on the iteration order through the functions in the
746         // SCC.
747         SmallPtrSet<Argument *, 8> Self;
748         Self.insert(&*A);
749         Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self);
750         if (R != Attribute::None)
751           Changed = addReadAttr(A, R);
752       }
753     }
754   }
755 
756   // The graph we've collected is partial because we stopped scanning for
757   // argument uses once we solved the argument trivially. These partial nodes
758   // show up as ArgumentGraphNode objects with an empty Uses list, and for
759   // these nodes the final decision about whether they capture has already been
760   // made.  If the definition doesn't have a 'nocapture' attribute by now, it
761   // captures.
762 
763   for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
764     const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
765     if (ArgumentSCC.size() == 1) {
766       if (!ArgumentSCC[0]->Definition)
767         continue; // synthetic root node
768 
769       // eg. "void f(int* x) { if (...) f(x); }"
770       if (ArgumentSCC[0]->Uses.size() == 1 &&
771           ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
772         Argument *A = ArgumentSCC[0]->Definition;
773         A->addAttr(Attribute::NoCapture);
774         ++NumNoCapture;
775         Changed = true;
776       }
777       continue;
778     }
779 
780     bool SCCCaptured = false;
781     for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
782          I != E && !SCCCaptured; ++I) {
783       ArgumentGraphNode *Node = *I;
784       if (Node->Uses.empty()) {
785         if (!Node->Definition->hasNoCaptureAttr())
786           SCCCaptured = true;
787       }
788     }
789     if (SCCCaptured)
790       continue;
791 
792     SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
793     // Fill ArgumentSCCNodes with the elements of the ArgumentSCC.  Used for
794     // quickly looking up whether a given Argument is in this ArgumentSCC.
795     for (ArgumentGraphNode *I : ArgumentSCC) {
796       ArgumentSCCNodes.insert(I->Definition);
797     }
798 
799     for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
800          I != E && !SCCCaptured; ++I) {
801       ArgumentGraphNode *N = *I;
802       for (ArgumentGraphNode *Use : N->Uses) {
803         Argument *A = Use->Definition;
804         if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
805           continue;
806         SCCCaptured = true;
807         break;
808       }
809     }
810     if (SCCCaptured)
811       continue;
812 
813     for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
814       Argument *A = ArgumentSCC[i]->Definition;
815       A->addAttr(Attribute::NoCapture);
816       ++NumNoCapture;
817       Changed = true;
818     }
819 
820     // We also want to compute readonly/readnone. With a small number of false
821     // negatives, we can assume that any pointer which is captured isn't going
822     // to be provably readonly or readnone, since by definition we can't
823     // analyze all uses of a captured pointer.
824     //
825     // The false negatives happen when the pointer is captured by a function
826     // that promises readonly/readnone behaviour on the pointer, then the
827     // pointer's lifetime ends before anything that writes to arbitrary memory.
828     // Also, a readonly/readnone pointer may be returned, but returning a
829     // pointer is capturing it.
830 
831     Attribute::AttrKind ReadAttr = Attribute::ReadNone;
832     for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
833       Argument *A = ArgumentSCC[i]->Definition;
834       Attribute::AttrKind K = determinePointerReadAttrs(A, ArgumentSCCNodes);
835       if (K == Attribute::ReadNone)
836         continue;
837       if (K == Attribute::ReadOnly) {
838         ReadAttr = Attribute::ReadOnly;
839         continue;
840       }
841       ReadAttr = K;
842       break;
843     }
844 
845     if (ReadAttr != Attribute::None) {
846       for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
847         Argument *A = ArgumentSCC[i]->Definition;
848         Changed = addReadAttr(A, ReadAttr);
849       }
850     }
851   }
852 
853   return Changed;
854 }
855 
/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  // Worklist of values that flow into F's return instructions. Seeded with
  // the immediate return operands, then grown as we walk upwards through
  // casts, GEPs, selects and phis.
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  // Index-based loop on purpose: FlowsToReturn grows while we iterate, and
  // SmallSetVector keeps indices of already-inserted elements stable.
  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      // Returning null or undef is compatible with malloc-like behaviour;
      // any other constant (e.g. a global's address) is visible to callers.
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    // Returning an argument returns a pointer the caller already holds.
    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallBase &CB = cast<CallBase>(*RVI);
        // A call already known to return noalias is an acceptable source,
        // as is a call into this SCC (assumed malloc-like until refuted).
        if (CB.hasRetAttr(Attribute::NoAlias))
          break;
        if (CB.getCalledFunction() && SCCNodes.count(CB.getCalledFunction()))
          break;
        LLVM_FALLTHROUGH;
      }
      default:
        return false; // Did not come from an allocation.
      }

    // The value is allocation-derived, but if the pointer is captured it may
    // still be visible to the caller through another route.
    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
      return false;
  }

  return true;
}
922 
923 /// Deduce noalias attributes for the SCC.
924 static bool addNoAliasAttrs(const SCCNodeSet &SCCNodes) {
925   // Check each function in turn, determining which functions return noalias
926   // pointers.
927   for (Function *F : SCCNodes) {
928     // Already noalias.
929     if (F->returnDoesNotAlias())
930       continue;
931 
932     // We can infer and propagate function attributes only when we know that the
933     // definition we'll get at link time is *exactly* the definition we see now.
934     // For more details, see GlobalValue::mayBeDerefined.
935     if (!F->hasExactDefinition())
936       return false;
937 
938     // We annotate noalias return values, which are only applicable to
939     // pointer types.
940     if (!F->getReturnType()->isPointerTy())
941       continue;
942 
943     if (!isFunctionMallocLike(F, SCCNodes))
944       return false;
945   }
946 
947   bool MadeChange = false;
948   for (Function *F : SCCNodes) {
949     if (F->returnDoesNotAlias() ||
950         !F->getReturnType()->isPointerTy())
951       continue;
952 
953     F->setReturnDoesNotAlias();
954     ++NumNoAlias;
955     MadeChange = true;
956   }
957 
958   return MadeChange;
959 }
960 
961 /// Tests whether this function is known to not return null.
962 ///
963 /// Requires that the function returns a pointer.
964 ///
965 /// Returns true if it believes the function will not return a null, and sets
966 /// \p Speculative based on whether the returned conclusion is a speculative
967 /// conclusion due to SCC calls.
968 static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
969                             bool &Speculative) {
970   assert(F->getReturnType()->isPointerTy() &&
971          "nonnull only meaningful on pointer types");
972   Speculative = false;
973 
974   SmallSetVector<Value *, 8> FlowsToReturn;
975   for (BasicBlock &BB : *F)
976     if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
977       FlowsToReturn.insert(Ret->getReturnValue());
978 
979   auto &DL = F->getParent()->getDataLayout();
980 
981   for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
982     Value *RetVal = FlowsToReturn[i];
983 
984     // If this value is locally known to be non-null, we're good
985     if (isKnownNonZero(RetVal, DL))
986       continue;
987 
988     // Otherwise, we need to look upwards since we can't make any local
989     // conclusions.
990     Instruction *RVI = dyn_cast<Instruction>(RetVal);
991     if (!RVI)
992       return false;
993     switch (RVI->getOpcode()) {
994     // Extend the analysis by looking upwards.
995     case Instruction::BitCast:
996     case Instruction::GetElementPtr:
997     case Instruction::AddrSpaceCast:
998       FlowsToReturn.insert(RVI->getOperand(0));
999       continue;
1000     case Instruction::Select: {
1001       SelectInst *SI = cast<SelectInst>(RVI);
1002       FlowsToReturn.insert(SI->getTrueValue());
1003       FlowsToReturn.insert(SI->getFalseValue());
1004       continue;
1005     }
1006     case Instruction::PHI: {
1007       PHINode *PN = cast<PHINode>(RVI);
1008       for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1009         FlowsToReturn.insert(PN->getIncomingValue(i));
1010       continue;
1011     }
1012     case Instruction::Call:
1013     case Instruction::Invoke: {
1014       CallBase &CB = cast<CallBase>(*RVI);
1015       Function *Callee = CB.getCalledFunction();
1016       // A call to a node within the SCC is assumed to return null until
1017       // proven otherwise
1018       if (Callee && SCCNodes.count(Callee)) {
1019         Speculative = true;
1020         continue;
1021       }
1022       return false;
1023     }
1024     default:
1025       return false; // Unknown source, may be null
1026     };
1027     llvm_unreachable("should have either continued or returned");
1028   }
1029 
1030   return true;
1031 }
1032 
1033 /// Deduce nonnull attributes for the SCC.
1034 static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) {
1035   // Speculative that all functions in the SCC return only nonnull
1036   // pointers.  We may refute this as we analyze functions.
1037   bool SCCReturnsNonNull = true;
1038 
1039   bool MadeChange = false;
1040 
1041   // Check each function in turn, determining which functions return nonnull
1042   // pointers.
1043   for (Function *F : SCCNodes) {
1044     // Already nonnull.
1045     if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1046                                         Attribute::NonNull))
1047       continue;
1048 
1049     // We can infer and propagate function attributes only when we know that the
1050     // definition we'll get at link time is *exactly* the definition we see now.
1051     // For more details, see GlobalValue::mayBeDerefined.
1052     if (!F->hasExactDefinition())
1053       return false;
1054 
1055     // We annotate nonnull return values, which are only applicable to
1056     // pointer types.
1057     if (!F->getReturnType()->isPointerTy())
1058       continue;
1059 
1060     bool Speculative = false;
1061     if (isReturnNonNull(F, SCCNodes, Speculative)) {
1062       if (!Speculative) {
1063         // Mark the function eagerly since we may discover a function
1064         // which prevents us from speculating about the entire SCC
1065         LLVM_DEBUG(dbgs() << "Eagerly marking " << F->getName()
1066                           << " as nonnull\n");
1067         F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
1068         ++NumNonNullReturn;
1069         MadeChange = true;
1070       }
1071       continue;
1072     }
1073     // At least one function returns something which could be null, can't
1074     // speculate any more.
1075     SCCReturnsNonNull = false;
1076   }
1077 
1078   if (SCCReturnsNonNull) {
1079     for (Function *F : SCCNodes) {
1080       if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1081                                           Attribute::NonNull) ||
1082           !F->getReturnType()->isPointerTy())
1083         continue;
1084 
1085       LLVM_DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
1086       F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
1087       ++NumNonNullReturn;
1088       MadeChange = true;
1089     }
1090   }
1091 
1092   return MadeChange;
1093 }
1094 
namespace {

/// Collects a set of attribute inference requests and performs them all in one
/// go on a single SCC Node. Inference involves scanning function bodies
/// looking for instructions that violate attribute assumptions.
/// As soon as all the bodies are fine we are free to set the attribute.
/// Customization of inference for individual attributes is performed by
/// providing a handful of predicates for each attribute.
class AttributeInferer {
public:
  /// Describes a request for inference of a single attribute.
  struct InferenceDescriptor {

    /// Returns true if this function does not have to be handled.
    /// General intent for this predicate is to provide an optimization
    /// for functions that do not need this attribute inference at all
    /// (say, for functions that already have the attribute).
    std::function<bool(const Function &)> SkipFunction;

    /// Returns true if this instruction violates attribute assumptions.
    std::function<bool(Instruction &)> InstrBreaksAttribute;

    /// Sets the inferred attribute for this function.
    std::function<void(Function &)> SetAttribute;

    /// Attribute we derive.
    Attribute::AttrKind AKind;

    /// If true, only "exact" definitions can be used to infer this attribute.
    /// See GlobalValue::isDefinitionExact.
    bool RequiresExactDefinition;

    InferenceDescriptor(Attribute::AttrKind AK,
                        std::function<bool(const Function &)> SkipFunc,
                        std::function<bool(Instruction &)> InstrScan,
                        std::function<void(Function &)> SetAttr,
                        bool ReqExactDef)
        : SkipFunction(SkipFunc), InstrBreaksAttribute(InstrScan),
          SetAttribute(SetAttr), AKind(AK),
          RequiresExactDefinition(ReqExactDef) {}
  };

private:
  /// The inference requests registered so far; run() executes all of them.
  SmallVector<InferenceDescriptor, 4> InferenceDescriptors;

public:
  /// Schedules \p AttrInference to be performed by the next run() call.
  void registerAttrInference(InferenceDescriptor AttrInference) {
    InferenceDescriptors.push_back(AttrInference);
  }

  /// Runs every registered inference over \p SCCNodes; returns true if any
  /// attribute was set on any function.
  bool run(const SCCNodeSet &SCCNodes);
};
1147 
/// Perform all the requested attribute inference actions according to the
/// attribute predicates stored before.
bool AttributeInferer::run(const SCCNodeSet &SCCNodes) {
  // Work on a copy of the registered descriptors: entries are erased from
  // InferInSCC as soon as any function in the SCC violates their assumptions.
  SmallVector<InferenceDescriptor, 4> InferInSCC = InferenceDescriptors;
  // Go through all the functions in SCC and check corresponding attribute
  // assumptions for each of them. Attributes that are invalid for this SCC
  // will be removed from InferInSCC.
  for (Function *F : SCCNodes) {

    // No attributes whose assumptions are still valid - done.
    if (InferInSCC.empty())
      return false;

    // Check if our attributes ever need scanning/can be scanned.
    llvm::erase_if(InferInSCC, [F](const InferenceDescriptor &ID) {
      if (ID.SkipFunction(*F))
        return false;

      // Remove from further inference (invalidate) when visiting a function
      // that has no instructions to scan/has an unsuitable definition.
      return F->isDeclaration() ||
             (ID.RequiresExactDefinition && !F->hasExactDefinition());
    });

    // For each attribute still in InferInSCC that doesn't explicitly skip F,
    // set up the F instructions scan to verify assumptions of the attribute.
    SmallVector<InferenceDescriptor, 4> InferInThisFunc;
    llvm::copy_if(
        InferInSCC, std::back_inserter(InferInThisFunc),
        [F](const InferenceDescriptor &ID) { return !ID.SkipFunction(*F); });

    if (InferInThisFunc.empty())
      continue;

    // Start instruction scan.
    for (Instruction &I : instructions(*F)) {
      llvm::erase_if(InferInThisFunc, [&](const InferenceDescriptor &ID) {
        if (!ID.InstrBreaksAttribute(I))
          return false;
        // Remove attribute from further inference on any other functions
        // because attribute assumptions have just been violated.
        // Note: this mutates InferInSCC while we are iterating the separate
        // copy InferInThisFunc, which is safe.
        llvm::erase_if(InferInSCC, [&ID](const InferenceDescriptor &D) {
          return D.AKind == ID.AKind;
        });
        // Remove attribute from the rest of current instruction scan.
        return true;
      });

      // Every attribute has been ruled out for this function - stop early.
      if (InferInThisFunc.empty())
        break;
    }
  }

  if (InferInSCC.empty())
    return false;

  bool Changed = false;
  for (Function *F : SCCNodes)
    // At this point InferInSCC contains only attributes whose assumptions
    // held for every function in the SCC: each function was either
    //   - explicitly skipped from scan/inference, or
    //   - verified to have no instructions that break attribute assumptions.
    // Hence we just go and force the attribute for all non-skipped functions.
    for (auto &ID : InferInSCC) {
      if (ID.SkipFunction(*F))
        continue;
      Changed = true;
      ID.SetAttribute(*F);
    }
  return Changed;
}
1218 
1219 } // end anonymous namespace
1220 
1221 /// Helper for non-Convergent inference predicate InstrBreaksAttribute.
1222 static bool InstrBreaksNonConvergent(Instruction &I,
1223                                      const SCCNodeSet &SCCNodes) {
1224   const CallBase *CB = dyn_cast<CallBase>(&I);
1225   // Breaks non-convergent assumption if CS is a convergent call to a function
1226   // not in the SCC.
1227   return CB && CB->isConvergent() &&
1228          SCCNodes.count(CB->getCalledFunction()) == 0;
1229 }
1230 
1231 /// Helper for NoUnwind inference predicate InstrBreaksAttribute.
1232 static bool InstrBreaksNonThrowing(Instruction &I, const SCCNodeSet &SCCNodes) {
1233   if (!I.mayThrow())
1234     return false;
1235   if (const auto *CI = dyn_cast<CallInst>(&I)) {
1236     if (Function *Callee = CI->getCalledFunction()) {
1237       // I is a may-throw call to a function inside our SCC. This doesn't
1238       // invalidate our current working assumption that the SCC is no-throw; we
1239       // just have to scan that other function.
1240       if (SCCNodes.count(Callee) > 0)
1241         return false;
1242     }
1243   }
1244   return true;
1245 }
1246 
1247 /// Helper for NoFree inference predicate InstrBreaksAttribute.
1248 static bool InstrBreaksNoFree(Instruction &I, const SCCNodeSet &SCCNodes) {
1249   CallBase *CB = dyn_cast<CallBase>(&I);
1250   if (!CB)
1251     return false;
1252 
1253   Function *Callee = CB->getCalledFunction();
1254   if (!Callee)
1255     return true;
1256 
1257   if (Callee->doesNotFreeMemory())
1258     return false;
1259 
1260   if (SCCNodes.count(Callee) > 0)
1261     return false;
1262 
1263   return true;
1264 }
1265 
1266 /// Infer attributes from all functions in the SCC by scanning every
1267 /// instruction for compliance to the attribute assumptions. Currently it
1268 /// does:
1269 ///   - removal of Convergent attribute
1270 ///   - addition of NoUnwind attribute
1271 ///
1272 /// Returns true if any changes to function attributes were made.
1273 static bool inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes) {
1274 
1275   AttributeInferer AI;
1276 
1277   // Request to remove the convergent attribute from all functions in the SCC
1278   // if every callsite within the SCC is not convergent (except for calls
1279   // to functions within the SCC).
1280   // Note: Removal of the attr from the callsites will happen in
1281   // InstCombineCalls separately.
1282   AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
1283       Attribute::Convergent,
1284       // Skip non-convergent functions.
1285       [](const Function &F) { return !F.isConvergent(); },
1286       // Instructions that break non-convergent assumption.
1287       [SCCNodes](Instruction &I) {
1288         return InstrBreaksNonConvergent(I, SCCNodes);
1289       },
1290       [](Function &F) {
1291         LLVM_DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName()
1292                           << "\n");
1293         F.setNotConvergent();
1294       },
1295       /* RequiresExactDefinition= */ false});
1296 
1297   if (!DisableNoUnwindInference)
1298     // Request to infer nounwind attribute for all the functions in the SCC if
1299     // every callsite within the SCC is not throwing (except for calls to
1300     // functions within the SCC). Note that nounwind attribute suffers from
1301     // derefinement - results may change depending on how functions are
1302     // optimized. Thus it can be inferred only from exact definitions.
1303     AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
1304         Attribute::NoUnwind,
1305         // Skip non-throwing functions.
1306         [](const Function &F) { return F.doesNotThrow(); },
1307         // Instructions that break non-throwing assumption.
1308         [&SCCNodes](Instruction &I) {
1309           return InstrBreaksNonThrowing(I, SCCNodes);
1310         },
1311         [](Function &F) {
1312           LLVM_DEBUG(dbgs()
1313                      << "Adding nounwind attr to fn " << F.getName() << "\n");
1314           F.setDoesNotThrow();
1315           ++NumNoUnwind;
1316         },
1317         /* RequiresExactDefinition= */ true});
1318 
1319   if (!DisableNoFreeInference)
1320     // Request to infer nofree attribute for all the functions in the SCC if
1321     // every callsite within the SCC does not directly or indirectly free
1322     // memory (except for calls to functions within the SCC). Note that nofree
1323     // attribute suffers from derefinement - results may change depending on
1324     // how functions are optimized. Thus it can be inferred only from exact
1325     // definitions.
1326     AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
1327         Attribute::NoFree,
1328         // Skip functions known not to free memory.
1329         [](const Function &F) { return F.doesNotFreeMemory(); },
1330         // Instructions that break non-deallocating assumption.
1331         [&SCCNodes](Instruction &I) {
1332           return InstrBreaksNoFree(I, SCCNodes);
1333         },
1334         [](Function &F) {
1335           LLVM_DEBUG(dbgs()
1336                      << "Adding nofree attr to fn " << F.getName() << "\n");
1337           F.setDoesNotFreeMemory();
1338           ++NumNoFree;
1339         },
1340         /* RequiresExactDefinition= */ true});
1341 
1342   // Perform all the requested attribute inference actions.
1343   return AI.run(SCCNodes);
1344 }
1345 
1346 static bool setDoesNotRecurse(Function &F) {
1347   if (F.doesNotRecurse())
1348     return false;
1349   F.setDoesNotRecurse();
1350   ++NumNoRecurse;
1351   return true;
1352 }
1353 
1354 static bool addNoRecurseAttrs(const SCCNodeSet &SCCNodes) {
1355   // Try and identify functions that do not recurse.
1356 
1357   // If the SCC contains multiple nodes we know for sure there is recursion.
1358   if (SCCNodes.size() != 1)
1359     return false;
1360 
1361   Function *F = *SCCNodes.begin();
1362   if (!F || !F->hasExactDefinition() || F->doesNotRecurse())
1363     return false;
1364 
1365   // If all of the calls in F are identifiable and are to norecurse functions, F
1366   // is norecurse. This check also detects self-recursion as F is not currently
1367   // marked norecurse, so any called from F to F will not be marked norecurse.
1368   for (auto &BB : *F)
1369     for (auto &I : BB.instructionsWithoutDebug())
1370       if (auto *CB = dyn_cast<CallBase>(&I)) {
1371         Function *Callee = CB->getCalledFunction();
1372         if (!Callee || Callee == F || !Callee->doesNotRecurse())
1373           // Function calls a potentially recursive function.
1374           return false;
1375       }
1376 
1377   // Every call was to a non-recursive function other than this function, and
1378   // we have no indirect recursion as the SCC size is one. This function cannot
1379   // recurse.
1380   return setDoesNotRecurse(*F);
1381 }
1382 
1383 template <typename AARGetterT>
1384 static bool deriveAttrsInPostOrder(SCCNodeSet &SCCNodes,
1385                                    AARGetterT &&AARGetter,
1386                                    bool HasUnknownCall) {
1387   bool Changed = false;
1388 
1389   // Bail if the SCC only contains optnone functions.
1390   if (SCCNodes.empty())
1391     return Changed;
1392 
1393   Changed |= addArgumentReturnedAttrs(SCCNodes);
1394   Changed |= addReadAttrs(SCCNodes, AARGetter);
1395   Changed |= addArgumentAttrs(SCCNodes);
1396 
1397   // If we have no external nodes participating in the SCC, we can deduce some
1398   // more precise attributes as well.
1399   if (!HasUnknownCall) {
1400     Changed |= addNoAliasAttrs(SCCNodes);
1401     Changed |= addNonNullAttrs(SCCNodes);
1402     Changed |= inferAttrsFromFunctionBodies(SCCNodes);
1403     Changed |= addNoRecurseAttrs(SCCNodes);
1404   }
1405 
1406   return Changed;
1407 }
1408 
1409 PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
1410                                                   CGSCCAnalysisManager &AM,
1411                                                   LazyCallGraph &CG,
1412                                                   CGSCCUpdateResult &) {
1413   FunctionAnalysisManager &FAM =
1414       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
1415 
1416   // We pass a lambda into functions to wire them up to the analysis manager
1417   // for getting function analyses.
1418   auto AARGetter = [&](Function &F) -> AAResults & {
1419     return FAM.getResult<AAManager>(F);
1420   };
1421 
1422   // Fill SCCNodes with the elements of the SCC. Also track whether there are
1423   // any external or opt-none nodes that will prevent us from optimizing any
1424   // part of the SCC.
1425   SCCNodeSet SCCNodes;
1426   bool HasUnknownCall = false;
1427   for (LazyCallGraph::Node &N : C) {
1428     Function &F = N.getFunction();
1429     if (F.hasOptNone() || F.hasFnAttribute(Attribute::Naked)) {
1430       // Treat any function we're trying not to optimize as if it were an
1431       // indirect call and omit it from the node set used below.
1432       HasUnknownCall = true;
1433       continue;
1434     }
1435     // Track whether any functions in this SCC have an unknown call edge.
1436     // Note: if this is ever a performance hit, we can common it with
1437     // subsequent routines which also do scans over the instructions of the
1438     // function.
1439     if (!HasUnknownCall)
1440       for (Instruction &I : instructions(F))
1441         if (auto *CB = dyn_cast<CallBase>(&I))
1442           if (!CB->getCalledFunction()) {
1443             HasUnknownCall = true;
1444             break;
1445           }
1446 
1447     SCCNodes.insert(&F);
1448   }
1449 
1450   if (deriveAttrsInPostOrder(SCCNodes, AARGetter, HasUnknownCall))
1451     return PreservedAnalyses::none();
1452 
1453   return PreservedAnalyses::all();
1454 }
1455 
namespace {

/// Legacy pass-manager wrapper for the post-order function attribute
/// deduction (the actual work happens in runImpl / deriveAttrsInPostOrder).
struct PostOrderFunctionAttrsLegacyPass : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  PostOrderFunctionAttrsLegacyPass() : CallGraphSCCPass(ID) {
    initializePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  /// Deduce attributes for one call-graph SCC; SCCs are visited in
  /// post-order by the CallGraphSCCPass driver.
  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Only attributes are modified; every function's CFG is left intact.
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
1478 
char PostOrderFunctionAttrsLegacyPass::ID = 0;
// Registration boilerplate: "functionattrs" is the command-line name used by
// opt; the two trailing flags mark the pass as neither CFG-only nor an
// analysis.
INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                      "Deduce function attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                    "Deduce function attributes", false, false)

/// Factory used when building legacy pass-manager pipelines.
Pass *llvm::createPostOrderFunctionAttrsLegacyPass() {
  return new PostOrderFunctionAttrsLegacyPass();
}
1490 
1491 template <typename AARGetterT>
1492 static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {
1493 
1494   // Fill SCCNodes with the elements of the SCC. Used for quickly looking up
1495   // whether a given CallGraphNode is in this SCC. Also track whether there are
1496   // any external or opt-none nodes that will prevent us from optimizing any
1497   // part of the SCC.
1498   SCCNodeSet SCCNodes;
1499   bool ExternalNode = false;
1500   for (CallGraphNode *I : SCC) {
1501     Function *F = I->getFunction();
1502     if (!F || F->hasOptNone() || F->hasFnAttribute(Attribute::Naked)) {
1503       // External node or function we're trying not to optimize - we both avoid
1504       // transform them and avoid leveraging information they provide.
1505       ExternalNode = true;
1506       continue;
1507     }
1508 
1509     SCCNodes.insert(F);
1510   }
1511 
1512   return deriveAttrsInPostOrder(SCCNodes, AARGetter, ExternalNode);
1513 }
1514 
1515 bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
1516   if (skipSCC(SCC))
1517     return false;
1518   return runImpl(SCC, LegacyAARGetter(*this));
1519 }
1520 
namespace {

/// Legacy pass-manager wrapper for the top-down (reverse post-order)
/// norecurse deduction implemented in deduceFunctionAttributeInRPO.
struct ReversePostOrderFunctionAttrsLegacyPass : public ModulePass {
  // Pass identification, replacement for typeid
  static char ID;

  ReversePostOrderFunctionAttrsLegacyPass() : ModulePass(ID) {
    initializeReversePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  /// Runs the RPO deduction over the whole module.
  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Only attributes change; both the CFGs and the call graph survive.
    AU.setPreservesCFG();
    AU.addRequired<CallGraphWrapperPass>();
    AU.addPreserved<CallGraphWrapperPass>();
  }
};

} // end anonymous namespace
1542 
char ReversePostOrderFunctionAttrsLegacyPass::ID = 0;

// Registration boilerplate: "rpo-functionattrs" is the command-line name used
// by opt; the two trailing flags mark the pass as neither CFG-only nor an
// analysis.
INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrsLegacyPass, "rpo-functionattrs",
                      "Deduce function attributes in RPO", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ReversePostOrderFunctionAttrsLegacyPass, "rpo-functionattrs",
                    "Deduce function attributes in RPO", false, false)

/// Factory used when building legacy pass-manager pipelines.
Pass *llvm::createReversePostOrderFunctionAttrsPass() {
  return new ReversePostOrderFunctionAttrsLegacyPass();
}
1554 
1555 static bool addNoRecurseAttrsTopDown(Function &F) {
1556   // We check the preconditions for the function prior to calling this to avoid
1557   // the cost of building up a reversible post-order list. We assert them here
1558   // to make sure none of the invariants this relies on were violated.
1559   assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
1560   assert(!F.doesNotRecurse() &&
1561          "This function has already been deduced as norecurs!");
1562   assert(F.hasInternalLinkage() &&
1563          "Can only do top-down deduction for internal linkage functions!");
1564 
1565   // If F is internal and all of its uses are calls from a non-recursive
1566   // functions, then none of its calls could in fact recurse without going
1567   // through a function marked norecurse, and so we can mark this function too
1568   // as norecurse. Note that the uses must actually be calls -- otherwise
1569   // a pointer to this function could be returned from a norecurse function but
1570   // this function could be recursively (indirectly) called. Note that this
1571   // also detects if F is directly recursive as F is not yet marked as
1572   // a norecurse function.
1573   for (auto *U : F.users()) {
1574     auto *I = dyn_cast<Instruction>(U);
1575     if (!I)
1576       return false;
1577     CallBase *CB = dyn_cast<CallBase>(I);
1578     if (!CB || !CB->getParent()->getParent()->doesNotRecurse())
1579       return false;
1580   }
1581   return setDoesNotRecurse(F);
1582 }
1583 
1584 static bool deduceFunctionAttributeInRPO(Module &M, CallGraph &CG) {
1585   // We only have a post-order SCC traversal (because SCCs are inherently
1586   // discovered in post-order), so we accumulate them in a vector and then walk
1587   // it in reverse. This is simpler than using the RPO iterator infrastructure
1588   // because we need to combine SCC detection and the PO walk of the call
1589   // graph. We can also cheat egregiously because we're primarily interested in
1590   // synthesizing norecurse and so we can only save the singular SCCs as SCCs
1591   // with multiple functions in them will clearly be recursive.
1592   SmallVector<Function *, 16> Worklist;
1593   for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
1594     if (I->size() != 1)
1595       continue;
1596 
1597     Function *F = I->front()->getFunction();
1598     if (F && !F->isDeclaration() && !F->doesNotRecurse() &&
1599         F->hasInternalLinkage())
1600       Worklist.push_back(F);
1601   }
1602 
1603   bool Changed = false;
1604   for (auto *F : llvm::reverse(Worklist))
1605     Changed |= addNoRecurseAttrsTopDown(*F);
1606 
1607   return Changed;
1608 }
1609 
1610 bool ReversePostOrderFunctionAttrsLegacyPass::runOnModule(Module &M) {
1611   if (skipModule(M))
1612     return false;
1613 
1614   auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
1615 
1616   return deduceFunctionAttributeInRPO(M, CG);
1617 }
1618 
1619 PreservedAnalyses
1620 ReversePostOrderFunctionAttrsPass::run(Module &M, ModuleAnalysisManager &AM) {
1621   auto &CG = AM.getResult<CallGraphAnalysis>(M);
1622 
1623   if (!deduceFunctionAttributeInRPO(M, CG))
1624     return PreservedAnalyses::all();
1625 
1626   PreservedAnalyses PA;
1627   PA.preserve<CallGraphAnalysis>();
1628   return PA;
1629 }
1630