//===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Implements a lazy call graph analysis and related passes for the new pass
/// manager.
///
/// NB: This is *not* a traditional call graph! It is a graph which models both
/// the current calls and potential calls. As a consequence there are many
/// edges in this call graph that do not correspond to a 'call' or 'invoke'
/// instruction.
///
/// The primary use case of this graph analysis is to facilitate iterating
/// across the functions of a module in ways that ensure all callees are
/// visited prior to a caller (given any SCC constraints), or vice versa. As
/// such it is particularly well suited to organizing CGSCC optimizations such
/// as inlining, outlining, argument promotion, etc. That is its primary use
/// case and motivates the design. It may not be appropriate for other
/// purposes. The use graph of functions or some other conservative analysis of
/// call instructions may be interesting for optimizations and subsequent
/// analyses which don't work in the context of an overly specified
/// potential-call-edge graph.
///
/// To understand the specific rules and nature of this call graph analysis,
/// see the documentation of the \c LazyCallGraph below.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
#define LLVM_ANALYSIS_LAZYCALLGRAPH_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <string>
#include <utility>

namespace llvm {

class Module;
class Value;

/// A lazily constructed view of the call graph of a module.
///
/// With the edges of this graph, the motivating constraint that we are
/// attempting to maintain is that function-local optimization, CGSCC-local
/// optimizations, and optimizations transforming a pair of functions connected
/// by an edge in the graph, do not invalidate a bottom-up traversal of the SCC
/// DAG. That is, no optimizations will delete, remove, or add an edge such
/// that functions already visited in a bottom-up order of the SCC DAG are no
/// longer valid to have visited, or such that functions not yet visited in
/// a bottom-up order of the SCC DAG are not required to have already been
/// visited.
///
/// Within this constraint, the desire is to minimize the merge points of the
/// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
/// in the SCC DAG, the more independence there is in optimizing within it.
/// There is a strong desire to enable parallelization of optimizations over
/// the call graph, and both limited fanout and merge points will (artificially
/// in some cases) limit the scaling of such an effort.
///
/// To this end, the graph represents both direct call edges and any potential
/// resolution of an indirect call edge. Another way to think about it is that
/// it represents both the direct call edges and any direct call edges that
/// might be formed through static optimizations. Specifically, it considers
/// taking the address of a function to be an edge in the call graph because
/// this might be forwarded to become a direct call by some subsequent
/// function-local optimization. The result is that the graph closely follows
/// the use-def edges for functions. Walking "up" the graph can be done by
/// looking at all of the uses of a function.
///
/// The roots of the call graph are the external functions and functions
/// escaped into global variables. Those functions can be called from outside
/// of the module or via unknowable means in the IR -- we may not be able to
/// form even a potential call edge from a function body which may dynamically
/// load the function and call it.
///
/// This analysis still requires updates to remain valid after optimizations
/// which could potentially change the set of potential callees. The
/// constraints it operates under only make the traversal order remain valid.
///
/// The entire analysis must be re-computed if full interprocedural
/// optimizations run at any point. For example, globalopt completely
/// invalidates the information in this analysis.
///
/// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
/// it from the existing CallGraph. At some point, it is expected that this
/// will be the only call graph and it will be renamed accordingly.
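///
/// A rough usage sketch for a bottom-up walk (assuming a \c Module `M` and a
/// TLI getter `GetTLI` are in scope; `visit` is a placeholder, not part of
/// this API):
///
/// \code
///   LazyCallGraph CG(M, GetTLI);
///   CG.buildRefSCCs();
///   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
///     for (LazyCallGraph::SCC &C : RC)
///       for (LazyCallGraph::Node &N : C)
///         visit(N.getFunction()); // Callees are seen before callers.
/// \endcode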
class LazyCallGraph {
public:
  class Node;
  class EdgeSequence;
  class SCC;
  class RefSCC;
  class edge_iterator;
  class call_edge_iterator;

  /// A class used to represent edges in the call graph.
  ///
  /// The lazy call graph models both *call* edges and *reference* edges. Call
  /// edges are much what you would expect, and exist when there is a 'call' or
  /// 'invoke' instruction calling some function. Reference edges are also
  /// tracked alongside these, and exist whenever any instruction (transitively
  /// through its operands) references a function. All call edges are
  /// inherently reference edges, and so the reference graph forms a superset
  /// of the formal call graph.
  ///
  /// All of these forms of edges are fundamentally represented as outgoing
  /// edges. The edges are stored in the source node and point at the target
  /// node. This allows the edge structure itself to be a very compact data
  /// structure: essentially a tagged pointer.
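  ///
  /// As a brief sketch, client code typically checks that an edge is non-null
  /// and a call before treating it as one (`E` and `examineCall` below are
  /// illustrative only):
  ///
  /// \code
  ///   if (E && E.isCall())
  ///     examineCall(E.getFunction());
  /// \endcode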
  class Edge {
  public:
    /// The kind of edge in the graph.
    enum Kind : bool { Ref = false, Call = true };

    Edge();
    explicit Edge(Node &N, Kind K);

    /// Test whether the edge is null.
    ///
    /// This happens when an edge has been deleted. We leave the edge objects
    /// around but clear them.
    explicit operator bool() const;

    /// Returns the \c Kind of the edge.
    Kind getKind() const;

    /// Test whether the edge represents a direct call to a function.
    ///
    /// This requires that the edge is not null.
    bool isCall() const;

    /// Get the call graph node referenced by this edge.
    ///
    /// This requires that the edge is not null.
    Node &getNode() const;

    /// Get the function referenced by this edge.
    ///
    /// This requires that the edge is not null.
    Function &getFunction() const;

  private:
    friend class LazyCallGraph::EdgeSequence;
    friend class LazyCallGraph::RefSCC;

    PointerIntPair<Node *, 1, Kind> Value;

    void setKind(Kind K) { Value.setInt(K); }
  };

  /// The edge sequence object.
  ///
  /// This typically exists entirely within the node but is exposed as
  /// a separate type because a node doesn't initially have edges. An explicit
  /// population step is required to produce this sequence at first and it is
  /// then cached in the node. It is also used to represent edges entering the
  /// graph from outside the module to model the graph's roots.
  ///
  /// The sequence itself is both iterable and indexable. The indexes remain
  /// stable even as the sequence mutates (including removal).
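  ///
  /// A small sketch of iterating a node's edges (where `N` is a reference to
  /// an already-populated \c Node and `use` is a placeholder):
  ///
  /// \code
  ///   for (LazyCallGraph::Edge &E : *N)         // All ref and call edges.
  ///     use(E.getNode());
  ///   for (LazyCallGraph::Edge &E : N->calls()) // Only the call edges.
  ///     use(E.getFunction());
  /// \endcode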
  class EdgeSequence {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;
    friend class LazyCallGraph::RefSCC;

    using VectorT = SmallVector<Edge, 4>;
    using VectorImplT = SmallVectorImpl<Edge>;

  public:
    /// An iterator used for the edges to both entry nodes and child nodes.
    class iterator
        : public iterator_adaptor_base<iterator, VectorImplT::iterator,
                                       std::forward_iterator_tag> {
      friend class LazyCallGraph;
      friend class LazyCallGraph::Node;

      VectorImplT::iterator E;

      // Build the iterator for a specific position in the edge list.
      iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
          : iterator_adaptor_base(BaseI), E(E) {
        while (I != E && !*I)
          ++I;
      }

    public:
      iterator() = default;

      using iterator_adaptor_base::operator++;
      iterator &operator++() {
        do {
          ++I;
        } while (I != E && !*I);
        return *this;
      }
    };

    /// An iterator over specifically call edges.
    ///
    /// This has the same iteration properties as the \c iterator, but
    /// restricts itself to edges which represent actual calls.
    class call_iterator
        : public iterator_adaptor_base<call_iterator, VectorImplT::iterator,
                                       std::forward_iterator_tag> {
      friend class LazyCallGraph;
      friend class LazyCallGraph::Node;

      VectorImplT::iterator E;

      /// Advance the iterator to the next valid, call edge.
      void advanceToNextEdge() {
        while (I != E && (!*I || !I->isCall()))
          ++I;
      }

      // Build the iterator for a specific position in the edge list.
      call_iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
          : iterator_adaptor_base(BaseI), E(E) {
        advanceToNextEdge();
      }

    public:
      call_iterator() = default;

      using iterator_adaptor_base::operator++;
      call_iterator &operator++() {
        ++I;
        advanceToNextEdge();
        return *this;
      }
    };

    iterator begin() { return iterator(Edges.begin(), Edges.end()); }
    iterator end() { return iterator(Edges.end(), Edges.end()); }

    Edge &operator[](Node &N) {
      assert(EdgeIndexMap.find(&N) != EdgeIndexMap.end() && "No such edge!");
      auto &E = Edges[EdgeIndexMap.find(&N)->second];
      assert(E && "Dead or null edge!");
      return E;
    }

    Edge *lookup(Node &N) {
      auto EI = EdgeIndexMap.find(&N);
      if (EI == EdgeIndexMap.end())
        return nullptr;
      auto &E = Edges[EI->second];
      return E ? &E : nullptr;
    }

    call_iterator call_begin() {
      return call_iterator(Edges.begin(), Edges.end());
    }
    call_iterator call_end() { return call_iterator(Edges.end(), Edges.end()); }

    iterator_range<call_iterator> calls() {
      return make_range(call_begin(), call_end());
    }

    bool empty() {
      for (auto &E : Edges)
        if (E)
          return false;

      return true;
    }

  private:
    VectorT Edges;
    DenseMap<Node *, int> EdgeIndexMap;

    EdgeSequence() = default;

    /// Internal helper to insert an edge to a node.
    void insertEdgeInternal(Node &ChildN, Edge::Kind EK);

    /// Internal helper to change an edge kind.
    void setEdgeKind(Node &ChildN, Edge::Kind EK);

    /// Internal helper to remove the edge to the given function.
    bool removeEdgeInternal(Node &ChildN);
  };

  /// A node in the call graph.
  ///
  /// This represents a single node. Its primary roles are to cache the list of
  /// callees, de-duplicate and provide fast testing of whether a function is
  /// a callee, and facilitate iteration of child nodes in the graph.
  ///
  /// The node works much like an optional in order to lazily populate the
  /// edges of each node. Until populated, there are no edges. Once populated,
  /// you can access the edges by dereferencing the node or using the `->`
  /// operator as if the node was an `Optional<EdgeSequence>`.
  class Node {
    friend class LazyCallGraph;
    friend class LazyCallGraph::RefSCC;

  public:
    LazyCallGraph &getGraph() const { return *G; }

    Function &getFunction() const { return *F; }

    StringRef getName() const { return F->getName(); }

    /// Equality is defined as address equality.
    bool operator==(const Node &N) const { return this == &N; }
    bool operator!=(const Node &N) const { return !operator==(N); }

    /// Tests whether the node has been populated with edges.
    bool isPopulated() const { return Edges.hasValue(); }

    /// Tests whether this is actually a dead node and no longer valid.
    ///
    /// Users rarely interact with nodes in this state and other methods are
    /// invalid. This is used to model a node in an edge list where the
    /// function has been completely removed.
    bool isDead() const {
      assert(!G == !F &&
             "Both graph and function pointers should be null or non-null.");
      return !G;
    }

    // We allow accessing the edges by dereferencing or using the arrow
    // operator, essentially wrapping the internal optional.
    EdgeSequence &operator*() const {
      // Rip const off because the node itself isn't changing here.
      return const_cast<EdgeSequence &>(*Edges);
    }
    EdgeSequence *operator->() const { return &**this; }

    /// Populate the edges of this node if necessary.
    ///
    /// The first time this is called it will populate the edges for this node
    /// in the graph. It does this by scanning the underlying function, so once
    /// this is done, any changes to that function must be explicitly reflected
    /// in updates to the graph.
    ///
    /// \returns the populated \c EdgeSequence to simplify walking it.
    ///
    /// This will not update or re-scan anything if called repeatedly. Instead,
    /// the edge sequence is cached and returned immediately on subsequent
    /// calls.
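    ///
    /// A brief sketch of the intended use (assuming a graph `CG` and function
    /// `F` are in scope; `process` is a placeholder):
    ///
    /// \code
    ///   LazyCallGraph::Node &N = CG.get(F);
    ///   for (LazyCallGraph::Edge &E : N.populate())
    ///     process(E);
    /// \endcode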
    EdgeSequence &populate() {
      if (Edges)
        return *Edges;

      return populateSlow();
    }

  private:
    LazyCallGraph *G;
    Function *F;

    // We provide for the DFS numbering and Tarjan walk lowlink numbers to be
    // stored directly within the node. These are both '-1' when nodes are part
    // of an SCC (or RefSCC), or '0' when not yet reached in a DFS walk.
    int DFSNumber = 0;
    int LowLink = 0;

    Optional<EdgeSequence> Edges;

    /// Basic constructor implements the scanning of F into Edges and
    /// EdgeIndexMap.
    Node(LazyCallGraph &G, Function &F) : G(&G), F(&F) {}

    /// Implementation of the scan when populating.
    EdgeSequence &populateSlow();

    /// Internal helper to directly replace the function with a new one.
    ///
    /// This is used to facilitate transformations which need to replace the
    /// formal Function object but directly move the body and users from one to
    /// the other.
    void replaceFunction(Function &NewF);

    void clear() { Edges.reset(); }

    /// Print the name of this node's function.
    friend raw_ostream &operator<<(raw_ostream &OS, const Node &N) {
      return OS << N.F->getName();
    }

    /// Dump the name of this node's function to stderr.
    void dump() const;
  };

  /// An SCC of the call graph.
  ///
  /// This represents a Strongly Connected Component of the direct call graph
  /// -- ignoring indirect calls and function references. It stores this as
  /// a collection of call graph nodes. While the order of nodes in the SCC is
  /// stable, it is not any particular order.
  ///
  /// The SCCs are nested within a \c RefSCC, see below for details about that
  /// outer structure. SCCs do not support mutation of the call graph, that
  /// must be done through the containing \c RefSCC in order to fully reason
  /// about the ordering and connections of the graph.
  class SCC {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;

    RefSCC *OuterRefSCC;
    SmallVector<Node *, 1> Nodes;

    template <typename NodeRangeT>
    SCC(RefSCC &OuterRefSCC, NodeRangeT &&Nodes)
        : OuterRefSCC(&OuterRefSCC), Nodes(std::forward<NodeRangeT>(Nodes)) {}

    void clear() {
      OuterRefSCC = nullptr;
      Nodes.clear();
    }

    /// Print a short description useful for debugging or logging.
    ///
    /// We print the function names in the SCC wrapped in '()'s and skipping
    /// the middle functions if there are a large number.
    //
    // Note: this is defined inline to dodge issues with GCC's interpretation
    // of enclosing namespaces for friend function declarations.
    friend raw_ostream &operator<<(raw_ostream &OS, const SCC &C) {
      OS << '(';
      int i = 0;
      for (LazyCallGraph::Node &N : C) {
        if (i > 0)
          OS << ", ";
        // Elide the inner elements if there are too many.
        if (i > 8) {
          OS << "..., " << *C.Nodes.back();
          break;
        }
        OS << N;
        ++i;
      }
      OS << ')';
      return OS;
    }

    /// Dump a short description of this SCC to stderr.
    void dump() const;

#ifndef NDEBUG
    /// Verify invariants about the SCC.
    ///
    /// This will attempt to validate all of the basic invariants within an
    /// SCC, but not that it is a strongly connected component per-se. Primarily
    /// useful while building and updating the graph to check that basic
    /// properties are in place rather than having inexplicable crashes later.
    void verify();
#endif

  public:
    using iterator = pointee_iterator<SmallVectorImpl<Node *>::const_iterator>;

    iterator begin() const { return Nodes.begin(); }
    iterator end() const { return Nodes.end(); }

    int size() const { return Nodes.size(); }

    RefSCC &getOuterRefSCC() const { return *OuterRefSCC; }

    /// Test if this SCC is a parent of \a C.
    ///
    /// Note that this is linear in the number of edges departing the current
    /// SCC.
    bool isParentOf(const SCC &C) const;

    /// Test if this SCC is an ancestor of \a C.
    ///
    /// Note that in the worst case this is linear in the number of edges
    /// departing the current SCC and every SCC in the entire graph reachable
    /// from this SCC. Thus this very well may walk every edge in the entire
    /// call graph! Do not call this in a tight loop!
    bool isAncestorOf(const SCC &C) const;

    /// Test if this SCC is a child of \a C.
    ///
    /// See the comments for \c isParentOf for detailed notes about the
    /// complexity of this routine.
    bool isChildOf(const SCC &C) const { return C.isParentOf(*this); }

    /// Test if this SCC is a descendant of \a C.
    ///
    /// See the comments for \c isParentOf for detailed notes about the
    /// complexity of this routine.
    bool isDescendantOf(const SCC &C) const { return C.isAncestorOf(*this); }

    /// Provide a short name by printing this SCC to a std::string.
    ///
    /// This copes with the fact that we don't have a name per-se for an SCC
    /// while still making the use of this in debugging and logging useful.
    std::string getName() const {
      std::string Name;
      raw_string_ostream OS(Name);
      OS << *this;
      OS.flush();
      return Name;
    }
  };

  /// A RefSCC of the call graph.
  ///
  /// This models a Strongly Connected Component of function reference edges in
  /// the call graph. As opposed to actual SCCs, these can be used to scope
  /// subgraphs of the module which are independent from other subgraphs of the
  /// module because they do not reference it in any way. This is also the unit
  /// where we do mutation of the graph in order to restrict mutations to those
  /// which don't violate this independence.
  ///
  /// A RefSCC contains a DAG of actual SCCs. All the nodes within the RefSCC
  /// are necessarily within some actual SCC that nests within it. Since
  /// a direct call *is* a reference, there will always be at least one RefSCC
  /// around any SCC.
  class RefSCC {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;

    LazyCallGraph *G;

    /// A postorder list of the inner SCCs.
    SmallVector<SCC *, 4> SCCs;

    /// A map from SCC to index in the postorder list.
    SmallDenseMap<SCC *, int, 4> SCCIndices;

    /// Fast-path constructor. RefSCCs should instead be constructed by calling
    /// formRefSCCFast on the graph itself.
    RefSCC(LazyCallGraph &G);

    void clear() {
      SCCs.clear();
      SCCIndices.clear();
    }

    /// Print a short description useful for debugging or logging.
    ///
    /// We print the SCCs wrapped in '[]'s and skipping the middle SCCs if
    /// there are a large number.
    //
    // Note: this is defined inline to dodge issues with GCC's interpretation
    // of enclosing namespaces for friend function declarations.
    friend raw_ostream &operator<<(raw_ostream &OS, const RefSCC &RC) {
      OS << '[';
      int i = 0;
      for (LazyCallGraph::SCC &C : RC) {
        if (i > 0)
          OS << ", ";
        // Elide the inner elements if there are too many.
        if (i > 4) {
          OS << "..., " << *RC.SCCs.back();
          break;
        }
        OS << C;
        ++i;
      }
      OS << ']';
      return OS;
    }

    /// Dump a short description of this RefSCC to stderr.
    void dump() const;

#ifndef NDEBUG
    /// Verify invariants about the RefSCC and all its SCCs.
    ///
    /// This will attempt to validate all of the invariants *within* the
    /// RefSCC, but not that it is a strongly connected component of the larger
    /// graph. This makes it useful even when partially through an update.
    ///
    /// Invariants checked:
    /// - SCCs and their indices match.
    /// - The SCCs list is in fact in post-order.
    void verify();
#endif

  public:
    using iterator = pointee_iterator<SmallVectorImpl<SCC *>::const_iterator>;
    using range = iterator_range<iterator>;
    using parent_iterator =
        pointee_iterator<SmallPtrSetImpl<RefSCC *>::const_iterator>;

    iterator begin() const { return SCCs.begin(); }
    iterator end() const { return SCCs.end(); }

    ssize_t size() const { return SCCs.size(); }

    SCC &operator[](int Idx) { return *SCCs[Idx]; }

    iterator find(SCC &C) const {
      return SCCs.begin() + SCCIndices.find(&C)->second;
    }

    /// Test if this RefSCC is a parent of \a RC.
    ///
    /// CAUTION: This method walks every edge in the \c RefSCC, it can be very
    /// expensive.
    bool isParentOf(const RefSCC &RC) const;

    /// Test if this RefSCC is an ancestor of \a RC.
    ///
    /// CAUTION: This method walks the directed graph of edges as far as
    /// necessary to find a possible path to the argument. In the worst case
    /// this may walk the entire graph and can be extremely expensive.
    bool isAncestorOf(const RefSCC &RC) const;

    /// Test if this RefSCC is a child of \a RC.
    ///
    /// CAUTION: This method walks every edge in the argument \c RefSCC, it can
    /// be very expensive.
    bool isChildOf(const RefSCC &RC) const { return RC.isParentOf(*this); }

    /// Test if this RefSCC is a descendant of \a RC.
    ///
    /// CAUTION: This method walks the directed graph of edges as far as
    /// necessary to find a possible path from the argument. In the worst case
    /// this may walk the entire graph and can be extremely expensive.
    bool isDescendantOf(const RefSCC &RC) const {
      return RC.isAncestorOf(*this);
    }

    /// Provide a short name by printing this RefSCC to a std::string.
    ///
    /// This copes with the fact that we don't have a name per-se for a RefSCC
    /// while still making the use of this in debugging and logging useful.
    std::string getName() const {
      std::string Name;
      raw_string_ostream OS(Name);
      OS << *this;
      OS.flush();
      return Name;
    }

    ///@{
    /// \name Mutation API
    ///
    /// These methods provide the core API for updating the call graph in the
    /// presence of (potentially still in-flight) DFS-found RefSCCs and SCCs.
    ///
    /// Note that these methods sometimes have complex runtimes, so be careful
    /// how you call them.

    /// Make an existing internal ref edge into a call edge.
    ///
    /// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
    /// If that happens, the optional callback \p MergeCB will be invoked (if
    /// provided) on the SCCs being merged away prior to actually performing
    /// the merge. Note that this will never include the target SCC as that
    /// will be the SCC that functions are merged into to resolve the cycle.
    /// Once this function returns, these merged SCCs are not in a valid state
    /// but the pointers will remain valid until destruction of the parent
    /// graph instance for the purpose of clearing cached information. As
    /// a convenience, this function also returns 'true' if a cycle was formed
    /// and some SCCs were merged away.
    ///
    /// After this operation, both SourceN's SCC and TargetN's SCC may move
    /// position within this RefSCC's postorder list. Any SCCs merged are
    /// merged into the TargetN's SCC in order to preserve reachability analyses
    /// which took place on that SCC.
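    ///
    /// A hedged sketch of typical use, where cached data for the merged SCCs
    /// is dropped via the callback (`RC`, `SourceN`, `TargetN`, and
    /// `invalidateCacheFor` are illustrative names):
    ///
    /// \code
    ///   RC.switchInternalEdgeToCall(
    ///       SourceN, TargetN, [&](ArrayRef<LazyCallGraph::SCC *> Merged) {
    ///         for (LazyCallGraph::SCC *C : Merged)
    ///           invalidateCacheFor(*C);
    ///       });
    /// \endcode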
    bool switchInternalEdgeToCall(
        Node &SourceN, Node &TargetN,
        function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});

    /// Make an existing internal call edge between separate SCCs into a ref
    /// edge.
    ///
    /// If SourceN and TargetN are in separate SCCs within this RefSCC, changing
    /// the call edge between them to a ref edge is a trivial operation that
    /// does not require any structural changes to the call graph.
    void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);

    /// Make an existing internal call edge within a single SCC into a ref
    /// edge.
    ///
    /// Since SourceN and TargetN are part of a single SCC, this SCC may be
    /// split up due to breaking a cycle in the call edges that formed it. If
    /// that happens, then this routine will insert new SCCs into the postorder
    /// list *before* the SCC of TargetN (previously the SCC of both). This
    /// preserves postorder as the TargetN can reach all of the other nodes by
    /// definition of previously being in a single SCC formed by the cycle from
    /// SourceN to TargetN.
    ///
    /// The newly added SCCs are added *immediately* and contiguously
    /// prior to the TargetN SCC, and the returned range covers exactly these
    /// new SCCs in the RefSCC's postorder sequence. You can directly iterate
    /// the returned range to observe all of the new SCCs in postorder.
    ///
    /// Note that if SourceN and TargetN are in separate SCCs, the simpler
    /// routine `switchTrivialInternalEdgeToRef` should be used instead.
    iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
                                                     Node &TargetN);

    /// Make an existing outgoing ref edge into a call edge.
    ///
    /// Note that this is trivial as there are no cyclic impacts and there
    /// remains a reference edge.
    void switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN);

    /// Make an existing outgoing call edge into a ref edge.
    ///
    /// This is trivial as there are no cyclic impacts and there remains
    /// a reference edge.
    void switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN);

    /// Insert a ref edge from one node in this RefSCC to another in this
    /// RefSCC.
    ///
    /// This is always a trivial operation as it doesn't change any part of the
    /// graph structure besides connecting the two nodes.
    ///
    /// Note that we don't support directly inserting internal *call* edges
    /// because that could change the graph structure and requires returning
    /// information about what became invalid. As a consequence, the pattern
    /// should be to first insert the necessary ref edge, and then to switch it
    /// to a call edge if needed and handle any invalidation that results. See
    /// the \c switchInternalEdgeToCall routine for details.
    void insertInternalRefEdge(Node &SourceN, Node &TargetN);

    /// Insert an edge whose parent is in this RefSCC and child is in some
    /// child RefSCC.
    ///
    /// There must be an existing path from the \p SourceN to the \p TargetN.
    /// This operation is inexpensive and does not change the set of SCCs and
    /// RefSCCs in the graph.
    void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);

    /// Insert an edge whose source is in a descendant RefSCC and target is in
    /// this RefSCC.
    ///
    /// There must be an existing path from the target to the source in this
    /// case.
    ///
    /// NB! This has the potential to be a very expensive function. It
    /// inherently forms a cycle in the prior RefSCC DAG and we have to merge
    /// RefSCCs to resolve that cycle. But finding all of the RefSCCs which
    /// participate in the cycle can in the worst case require traversing every
    /// RefSCC in the graph. Every attempt is made to avoid that, but passes
    /// must still exercise caution calling this routine repeatedly.
    ///
    /// Also note that this can only insert ref edges. In order to insert
    /// a call edge, first insert a ref edge and then switch it to a call edge.
    /// These are intentionally kept as separate interfaces because each step
    /// of the operation invalidates a different set of data structures.
    ///
    /// This returns all the RefSCCs which were merged into this RefSCC
    /// (the target's). This allows callers to invalidate any cached
    /// information.
    ///
    /// FIXME: We could possibly optimize this quite a bit for cases where the
    /// caller and callee are very nearby in the graph. See comments in the
    /// implementation for details, but that use case might impact users.
    SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
                                                   Node &TargetN);

    /// Remove an edge whose source is in this RefSCC and target is *not*.
    ///
    /// This removes an inter-RefSCC edge. All inter-RefSCC edges originating
    /// from this SCC have been fully explored by any in-flight DFS graph
    /// formation, so this is always safe to call once you have the source
    /// RefSCC.
    ///
    /// This operation does not change the cyclic structure of the graph and so
    /// is very inexpensive. It may change the connectivity graph of the SCCs
    /// though, so be careful calling this while iterating over them.
    void removeOutgoingEdge(Node &SourceN, Node &TargetN);

    /// Remove a list of ref edges which are entirely within this RefSCC.
    ///
    /// Both the \a SourceN and all of the \a TargetNs must be within this
    /// RefSCC. Removing these edges may break cycles that form this RefSCC and
    /// thus this operation may change the RefSCC graph significantly. In
    /// particular, this operation will re-form new RefSCCs based on the
    /// remaining connectivity of the graph. The following invariants are
    /// guaranteed to hold after calling this method:
    ///
    /// 1) If a ref-cycle remains after removal, it leaves this RefSCC intact
    ///    and in the graph. No new RefSCCs are built.
    /// 2) Otherwise, this RefSCC will be dead after this call and no longer in
    ///    the graph or the postorder traversal of the call graph. Any iterator
    ///    pointing at this RefSCC will become invalid.
    /// 3) All newly formed RefSCCs will be returned and the order of the
    ///    RefSCCs returned will be a valid postorder traversal of the new
    ///    RefSCCs.
    /// 4) No RefSCC other than this RefSCC has its member set changed (this is
    ///    inherent in the definition of removing such an edge).
    ///
    /// These invariants are very important to ensure that we can build
    /// optimization pipelines on top of the CGSCC pass manager which
    /// intelligently update the RefSCC graph without invalidating other parts
    /// of the RefSCC graph.
    ///
    /// Note that we provide no routine to remove a *call* edge. Instead, you
    /// must first switch it to a ref edge using \c switchInternalEdgeToRef.
    /// This split API is intentional as each of these two steps can invalidate
    /// a different aspect of the graph structure and needs to have the
    /// invalidation handled independently.
    ///
    /// The runtime complexity of this method is, in the worst case, O(V+E)
    /// where V is the number of nodes in this RefSCC and E is the number of
    /// edges leaving the nodes in this RefSCC. Note that E includes both edges
    /// within this RefSCC and edges from this RefSCC to child RefSCCs. Some
    /// effort has been made to minimize the overhead of common cases such as
    /// self-edges and edge removals which result in a spanning tree with no
    /// more cycles.
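    ///
    /// A minimal sketch of handling the result, walking any newly formed
    /// RefSCCs in their returned postorder (`RC`, `SourceN`, `TargetN`, and
    /// `visit` are illustrative names):
    ///
    /// \code
    ///   for (LazyCallGraph::RefSCC *NewRC :
    ///        RC.removeInternalRefEdge(SourceN, {&TargetN}))
    ///     visit(*NewRC);
    /// \endcode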
    SmallVector<RefSCC *, 1> removeInternalRefEdge(Node &SourceN,
                                                   ArrayRef<Node *> TargetNs);

    /// A convenience wrapper around the above to handle trivial cases of
    /// inserting a new call edge.
    ///
    /// This is trivial whenever the target is in the same SCC as the source or
    /// the edge is an outgoing edge to some descendant SCC. In these cases
    /// there is no change to the cyclic structure of SCCs or RefSCCs.
    ///
    /// To further make calling this convenient, it also handles inserting
    /// already existing edges.
    void insertTrivialCallEdge(Node &SourceN, Node &TargetN);

    /// A convenience wrapper around the above to handle trivial cases of
    /// inserting a new ref edge.
    ///
    /// This is trivial whenever the target is in the same RefSCC as the source
    /// or the edge is an outgoing edge to some descendant RefSCC. In these
    /// cases there is no change to the cyclic structure of the RefSCCs.
    ///
    /// To further make calling this convenient, it also handles inserting
    /// already existing edges.
    void insertTrivialRefEdge(Node &SourceN, Node &TargetN);

    /// Directly replace a node's function with a new function.
    ///
    /// This should be used when moving the body and users of a function to
    /// a new formal function object but not otherwise changing the call graph
    /// structure in any way.
    ///
    /// It requires that the old function in the provided node have zero uses
    /// and the new function must have calls and references to it establishing
    /// an equivalent graph.
    void replaceNodeFunction(Node &N, Function &NewF);

    ///@}
  };

  /// A post-order depth-first RefSCC iterator over the call graph.
  ///
  /// This iterator walks the cached post-order sequence of RefSCCs. However,
  /// it trades stability for flexibility. It is restricted to a forward
  /// iterator but will survive mutations which insert new RefSCCs and continue
  /// to point to the same RefSCC even if it moves in the post-order sequence.
  class postorder_ref_scc_iterator
      : public iterator_facade_base<postorder_ref_scc_iterator,
                                    std::forward_iterator_tag, RefSCC> {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;

    /// Nonce type to select the constructor for the end iterator.
    struct IsAtEndT {};

    LazyCallGraph *G;
    RefSCC *RC = nullptr;

    /// Build the begin iterator for a node.
    postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {}

    /// Build the end iterator for a node. This is selected purely by overload.
    postorder_ref_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/) : G(&G) {}

    /// Get the post-order RefSCC at the given index of the postorder walk,
    /// populating it if necessary.
    static RefSCC *getRC(LazyCallGraph &G, int Index) {
      if (Index == (int)G.PostOrderRefSCCs.size())
        // We're at the end.
        return nullptr;

      return G.PostOrderRefSCCs[Index];
    }

  public:
    bool operator==(const postorder_ref_scc_iterator &Arg) const {
      return G == Arg.G && RC == Arg.RC;
    }

    reference operator*() const { return *RC; }

    using iterator_facade_base::operator++;
    postorder_ref_scc_iterator &operator++() {
      assert(RC && "Cannot increment the end iterator!");
      RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
      return *this;
    }
  };

  /// Construct a graph for the given module.
  ///
  /// This sets up the graph and computes all of the entry points of the graph.
  /// No function definitions are scanned until their nodes in the graph are
  /// requested during traversal.
  LazyCallGraph(Module &M,
                function_ref<TargetLibraryInfo &(Function &)> GetTLI);

  LazyCallGraph(LazyCallGraph &&G);
  LazyCallGraph &operator=(LazyCallGraph &&RHS);

  bool invalidate(Module &, const PreservedAnalyses &PA,
                  ModuleAnalysisManager::Invalidator &);

  EdgeSequence::iterator begin() { return EntryEdges.begin(); }
  EdgeSequence::iterator end() { return EntryEdges.end(); }

  void buildRefSCCs();

  postorder_ref_scc_iterator postorder_ref_scc_begin() {
    if (!EntryEdges.empty())
      assert(!PostOrderRefSCCs.empty() &&
             "Must form RefSCCs before iterating them!");
    return postorder_ref_scc_iterator(*this);
  }
  postorder_ref_scc_iterator postorder_ref_scc_end() {
    if (!EntryEdges.empty())
      assert(!PostOrderRefSCCs.empty() &&
             "Must form RefSCCs before iterating them!");
    return postorder_ref_scc_iterator(*this,
                                      postorder_ref_scc_iterator::IsAtEndT());
  }

  iterator_range<postorder_ref_scc_iterator> postorder_ref_sccs() {
    return make_range(postorder_ref_scc_begin(), postorder_ref_scc_end());
  }

  /// Lookup a function in the graph which has already been scanned and added.
  Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }

  /// Lookup a function's SCC in the graph.
  ///
  /// \returns null if the function hasn't been assigned an SCC via the RefSCC
  /// iterator walk.
  SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }

  /// Lookup a function's RefSCC in the graph.
  ///
  /// \returns null if the function hasn't been assigned a RefSCC via the
  /// RefSCC iterator walk.
  RefSCC *lookupRefSCC(Node &N) const {
    if (SCC *C = lookupSCC(N))
      return &C->getOuterRefSCC();

    return nullptr;
  }

  /// Get a graph node for a given function, scanning it to populate the graph
  /// data as necessary.
  Node &get(Function &F) {
    Node *&N = NodeMap[&F];
    if (N)
      return *N;

    return insertInto(F, N);
  }

  /// Get the sequence of known and defined library functions.
  ///
  /// These functions, because they are known to LLVM, can have calls
  /// introduced out of thin air from arbitrary IR.
  ArrayRef<Function *> getLibFunctions() const {
    return LibFunctions.getArrayRef();
  }

  /// Test whether a function is a known and defined library function tracked by
  /// the call graph.
  ///
  /// Because these functions are known to LLVM they are specially modeled in
  /// the call graph and even when all IR-level references have been removed
  /// remain active and reachable.
  bool isLibFunction(Function &F) const { return LibFunctions.count(&F); }

  ///@{
  /// \name Pre-SCC Mutation API
  ///
  /// These methods are only valid to call prior to forming any SCCs for this
  /// call graph. They can be used to update the core node-graph during
  /// a node-based inorder traversal that precedes any SCC-based traversal.
  ///
  /// Once you begin manipulating a call graph's SCCs, most mutation of the
  /// graph must be performed via a RefSCC method. There are some exceptions
  /// below.

  /// Update the call graph after inserting a new edge.
  void insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);

  /// Update the call graph after inserting a new edge.
  void insertEdge(Function &Source, Function &Target, Edge::Kind EK) {
    return insertEdge(get(Source), get(Target), EK);
  }

  /// Update the call graph after deleting an edge.
  void removeEdge(Node &SourceN, Node &TargetN);

  /// Update the call graph after deleting an edge.
  void removeEdge(Function &Source, Function &Target) {
    return removeEdge(get(Source), get(Target));
  }

  ///@}

  ///@{
  /// \name General Mutation API
  ///
  /// There are a very limited set of mutations allowed on the graph as a whole
  /// once SCCs have started to be formed. These routines have strict contracts
  /// but may be called at any point.

  /// Remove a dead function from the call graph (typically to delete it).
  ///
  /// Note that the function must have an empty use list, and the call graph
  /// must be up-to-date prior to calling this. That means it is by itself in
  /// a maximal SCC which is by itself in a maximal RefSCC, etc. No structural
  /// changes result from calling this routine other than potentially removing
  /// entry points into the call graph.
  ///
  /// If SCC formation has begun, this function must not be part of the current
  /// DFS in order to call this safely. Typically, the function will have been
  /// fully visited by the DFS prior to calling this routine.
  void removeDeadFunction(Function &F);

  /// Add a new function split/outlined from an existing function.
  ///
  /// The new function may only reference other functions that the original
  /// function did.
  ///
  /// The original function must reference (either directly or indirectly) the
  /// new function.
  ///
  /// The new function may also reference the original function.
  /// It may end up in a parent SCC in the case that the original function's
  /// edge to the new function is a ref edge, and the edge back is a call edge.
  void addSplitFunction(Function &OriginalFunction, Function &NewFunction);

  /// Add new ref-recursive functions split/outlined from an existing function.
  ///
  /// The new functions may only reference other functions that the original
  /// function did. The new functions may reference (not call) the original
  /// function.
  ///
  /// The original function must reference (not call) all new functions.
  /// All new functions must reference (not call) each other.
  void addSplitRefRecursiveFunctions(Function &OriginalFunction,
                                     ArrayRef<Function *> NewFunctions);

  ///@}

  ///@{
  /// \name Static helpers for code doing updates to the call graph.
  ///
  /// These helpers are used to implement parts of the call graph but are also
  /// useful to code doing updates or otherwise wanting to walk the IR in the
  /// same patterns as when we build the call graph.

  /// Recursively visits the defined functions whose address is reachable from
  /// every constant in the \p Worklist.
  ///
  /// Doesn't recurse through any constants already in the \p Visited set, and
  /// updates that set with every constant visited.
  ///
  /// For each defined function, calls \p Callback with that function.
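  ///
  /// For illustration, a sketch that seeds the worklist from a global
  /// variable's initializer (assuming a \c GlobalVariable `GV` with an
  /// initializer; `handle` is a placeholder):
  ///
  /// \code
  ///   SmallVector<Constant *, 16> Worklist;
  ///   SmallPtrSet<Constant *, 16> Visited;
  ///   if (Visited.insert(GV.getInitializer()).second)
  ///     Worklist.push_back(GV.getInitializer());
  ///   LazyCallGraph::visitReferences(Worklist, Visited,
  ///                                  [](Function &F) { handle(F); });
  /// \endcode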
  template <typename CallbackT>
  static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
                              SmallPtrSetImpl<Constant *> &Visited,
                              CallbackT Callback) {
    while (!Worklist.empty()) {
      Constant *C = Worklist.pop_back_val();

      if (Function *F = dyn_cast<Function>(C)) {
        if (!F->isDeclaration())
          Callback(*F);
        continue;
      }

      // The blockaddress constant expression is a weird special case; we can't
      // generically walk its operands the way we do for all other constants.
      if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
        // If we've already visited the function referred to by the block
        // address, we don't need to revisit it.
        if (Visited.count(BA->getFunction()))
          continue;

        // If all of the blockaddress' users are instructions within the
        // referred to function, we don't need to insert a cycle.
        if (llvm::all_of(BA->users(), [&](User *U) {
              if (Instruction *I = dyn_cast<Instruction>(U))
                return I->getFunction() == BA->getFunction();
              return false;
            }))
          continue;

        // Otherwise we should go visit the referred to function.
        Visited.insert(BA->getFunction());
        Worklist.push_back(BA->getFunction());
        continue;
      }

      for (Value *Op : C->operand_values())
        if (Visited.insert(cast<Constant>(Op)).second)
          Worklist.push_back(cast<Constant>(Op));
    }
  }

  ///@}

private:
  using node_stack_iterator = SmallVectorImpl<Node *>::reverse_iterator;
  using node_stack_range = iterator_range<node_stack_iterator>;

  /// Allocator that holds all the call graph nodes.
  SpecificBumpPtrAllocator<Node> BPA;

  /// Maps function->node for fast lookup.
  DenseMap<const Function *, Node *> NodeMap;

  /// The entry edges into the graph.
  ///
  /// These edges are from "external" sources. Put another way, they
  /// escape at the module scope.
  EdgeSequence EntryEdges;

  /// Allocator that holds all the call graph SCCs.
  SpecificBumpPtrAllocator<SCC> SCCBPA;

  /// Maps Node -> SCC for fast lookup.
  DenseMap<Node *, SCC *> SCCMap;

  /// Allocator that holds all the call graph RefSCCs.
  SpecificBumpPtrAllocator<RefSCC> RefSCCBPA;

  /// The post-order sequence of RefSCCs.
  ///
  /// This list is lazily formed the first time we walk the graph.
  SmallVector<RefSCC *, 16> PostOrderRefSCCs;

  /// A map from RefSCC to the index for it in the postorder sequence of
  /// RefSCCs.
  DenseMap<RefSCC *, int> RefSCCIndices;

  /// Defined functions that are also known library functions which the
  /// optimizer can reason about and therefore might introduce calls to out of
  /// thin air.
  SmallSetVector<Function *, 4> LibFunctions;

  /// Helper to insert a new function, with an already looked-up entry in
  /// the NodeMap.
  Node &insertInto(Function &F, Node *&MappedN);

  /// Helper to initialize a new node created outside of creating SCCs and add
  /// it to the NodeMap if necessary. For example, useful when a function is
  /// split.
  Node &initNode(Function &F);

  /// Helper to update pointers back to the graph object during moves.
  void updateGraphPtrs();

  /// Allocates an SCC and constructs it using the graph allocator.
  ///
  /// The arguments are forwarded to the constructor.
  template <typename... Ts> SCC *createSCC(Ts &&... Args) {
    return new (SCCBPA.Allocate()) SCC(std::forward<Ts>(Args)...);
  }

  /// Allocates a RefSCC and constructs it using the graph allocator.
  ///
  /// The arguments are forwarded to the constructor.
  template <typename... Ts> RefSCC *createRefSCC(Ts &&... Args) {
    return new (RefSCCBPA.Allocate()) RefSCC(std::forward<Ts>(Args)...);
  }

  /// Common logic for building SCCs from a sequence of roots.
  ///
  /// This is a very generic implementation of the depth-first walk and SCC
  /// formation algorithm. It uses a generic sequence of roots and generic
  /// callbacks for each step. This is designed to be used to implement both
  /// the RefSCC formation and SCC formation with shared logic.
  ///
  /// Currently this is a relatively naive implementation of Tarjan's DFS
  /// algorithm to form the SCCs.
  ///
  /// FIXME: We should consider newer variants such as Nuutila.
  template <typename RootsT, typename GetBeginT, typename GetEndT,
            typename GetNodeT, typename FormSCCCallbackT>
  static void buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
                               GetEndT &&GetEnd, GetNodeT &&GetNode,
                               FormSCCCallbackT &&FormSCC);

  /// Build the SCCs for a RefSCC out of a list of nodes.
  void buildSCCs(RefSCC &RC, node_stack_range Nodes);

  /// Get the index of a RefSCC within the postorder traversal.
  ///
  /// Requires that this RefSCC is a valid one in the (perhaps partial)
  /// postorder traversed part of the graph.
  int getRefSCCIndex(RefSCC &RC) {
    auto IndexIt = RefSCCIndices.find(&RC);
    assert(IndexIt != RefSCCIndices.end() && "RefSCC doesn't have an index!");
    assert(PostOrderRefSCCs[IndexIt->second] == &RC &&
           "Index does not point back at RC!");
    return IndexIt->second;
  }
};

inline LazyCallGraph::Edge::Edge() : Value() {}
inline LazyCallGraph::Edge::Edge(Node &N, Kind K) : Value(&N, K) {}

inline LazyCallGraph::Edge::operator bool() const {
  return Value.getPointer() && !Value.getPointer()->isDead();
}

inline LazyCallGraph::Edge::Kind LazyCallGraph::Edge::getKind() const {
  assert(*this && "Queried a null edge!");
  return Value.getInt();
}

inline bool LazyCallGraph::Edge::isCall() const {
  assert(*this && "Queried a null edge!");
  return getKind() == Call;
}

inline LazyCallGraph::Node &LazyCallGraph::Edge::getNode() const {
  assert(*this && "Queried a null edge!");
  return *Value.getPointer();
}

inline Function &LazyCallGraph::Edge::getFunction() const {
  assert(*this && "Queried a null edge!");
  return getNode().getFunction();
}

// Provide GraphTraits specializations for call graphs.
template <> struct GraphTraits<LazyCallGraph::Node *> {
  using NodeRef = LazyCallGraph::Node *;
  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
};
template <> struct GraphTraits<LazyCallGraph *> {
  using NodeRef = LazyCallGraph::Node *;
  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
};

/// An analysis pass which computes the call graph for a module.
class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
  friend AnalysisInfoMixin<LazyCallGraphAnalysis>;

  static AnalysisKey Key;

public:
  /// Inform generic clients of the result type.
  using Result = LazyCallGraph;

  /// Compute the \c LazyCallGraph for the module \c M.
  ///
  /// This just builds the set of entry points to the call graph. The rest is
  /// built lazily as it is walked.
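  ///
  /// A minimal sketch of retrieving the result from a module pass using
  /// standard new-pass-manager usage (`MyPass` is illustrative):
  ///
  /// \code
  ///   PreservedAnalyses MyPass::run(Module &M, ModuleAnalysisManager &AM) {
  ///     LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
  ///     CG.buildRefSCCs();
  ///     // ... walk CG.postorder_ref_sccs() ...
  ///   }
  /// \endcode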
  LazyCallGraph run(Module &M, ModuleAnalysisManager &AM) {
    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
    auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
      return FAM.getResult<TargetLibraryAnalysis>(F);
    };
    return LazyCallGraph(M, GetTLI);
  }
};

/// A pass which prints the call graph to a \c raw_ostream.
///
/// This is primarily useful for testing the analysis.
class LazyCallGraphPrinterPass
    : public PassInfoMixin<LazyCallGraphPrinterPass> {
  raw_ostream &OS;

public:
  explicit LazyCallGraphPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// A pass which prints the call graph as a DOT file to a \c raw_ostream.
///
/// This is primarily useful for visualization purposes.
class LazyCallGraphDOTPrinterPass
    : public PassInfoMixin<LazyCallGraphDOTPrinterPass> {
  raw_ostream &OS;

public:
  explicit LazyCallGraphDOTPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_LAZYCALLGRAPH_H