//===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Implements a lazy call graph analysis and related passes for the new pass
/// manager.
///
/// NB: This is *not* a traditional call graph! It is a graph which models both
/// the current calls and potential calls. As a consequence there are many
/// edges in this call graph that do not correspond to a 'call' or 'invoke'
/// instruction.
///
/// The primary use case of this graph analysis is to facilitate iterating
/// across the functions of a module in ways that ensure all callees are
/// visited prior to a caller (given any SCC constraints), or vice versa. As
/// such it is particularly well suited to organizing CGSCC optimizations such
/// as inlining, outlining, argument promotion, etc. That is its primary use
/// case and motivates the design. It may not be appropriate for other
/// purposes. The use graph of functions or some other conservative analysis of
/// call instructions may be interesting for optimizations and subsequent
/// analyses which don't work in the context of an overly specified
/// potential-call-edge graph.
///
/// To understand the specific rules and nature of this call graph analysis,
/// see the documentation of the \c LazyCallGraph below.
///
//===----------------------------------------------------------------------===//
33
34 #ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
35 #define LLVM_ANALYSIS_LAZYCALLGRAPH_H
36
37 #include "llvm/ADT/ArrayRef.h"
38 #include "llvm/ADT/DenseMap.h"
39 #include "llvm/ADT/Optional.h"
40 #include "llvm/ADT/PointerIntPair.h"
41 #include "llvm/ADT/STLExtras.h"
42 #include "llvm/ADT/SetVector.h"
43 #include "llvm/ADT/SmallPtrSet.h"
44 #include "llvm/ADT/SmallVector.h"
45 #include "llvm/ADT/StringRef.h"
46 #include "llvm/ADT/iterator.h"
47 #include "llvm/ADT/iterator_range.h"
48 #include "llvm/Analysis/TargetLibraryInfo.h"
49 #include "llvm/IR/Constant.h"
50 #include "llvm/IR/Constants.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/PassManager.h"
53 #include "llvm/Support/Allocator.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include <cassert>
57 #include <iterator>
58 #include <string>
59 #include <utility>
60
61 namespace llvm {
62
63 class Module;
64 class Value;
65
/// A lazily constructed view of the call graph of a module.
///
/// With the edges of this graph, the motivating constraint that we are
/// attempting to maintain is that function-local optimization, CGSCC-local
/// optimizations, and optimizations transforming a pair of functions connected
/// by an edge in the graph, do not invalidate a bottom-up traversal of the SCC
/// DAG. That is, no optimizations will delete, remove, or add an edge such
/// that functions already visited in a bottom-up order of the SCC DAG are no
/// longer valid to have visited, or such that functions not yet visited in
/// a bottom-up order of the SCC DAG are not required to have already been
/// visited.
///
/// Within this constraint, the desire is to minimize the merge points of the
/// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
/// in the SCC DAG, the more independence there is in optimizing within it.
/// There is a strong desire to enable parallelization of optimizations over
/// the call graph, and both limited fanout and merge points will (artificially
/// in some cases) limit the scaling of such an effort.
///
/// To this end, the graph represents both direct and any potential resolution
/// to an indirect call edge. Another way to think about it is that it
/// represents both the direct call edges and any direct call edges that might
/// be formed through static optimizations. Specifically, it considers taking
/// the address of a function to be an edge in the call graph because this
/// might be forwarded to become a direct call by some subsequent
/// function-local optimization. The result is that the graph closely follows
/// the use-def edges for functions. Walking "up" the graph can be done by
/// looking at all of the uses of a function.
///
/// The roots of the call graph are the external functions and functions
/// escaped into global variables. Those functions can be called from outside
/// of the module or via unknowable means in the IR -- we may not be able to
/// form even a potential call edge from a function body which may dynamically
/// load the function and call it.
///
/// This analysis still requires updates to remain valid after optimizations
/// which could potentially change the set of potential callees. The
/// constraints it operates under only make the traversal order remain valid.
///
/// The entire analysis must be re-computed if full interprocedural
/// optimizations run at any point. For example, globalopt completely
/// invalidates the information in this analysis.
///
/// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
/// it from the existing CallGraph. At some point, it is expected that this
/// will be the only call graph and it will be renamed accordingly.
112 class LazyCallGraph {
113 public:
114 class Node;
115 class EdgeSequence;
116 class SCC;
117 class RefSCC;
118
119 /// A class used to represent edges in the call graph.
120 ///
121 /// The lazy call graph models both *call* edges and *reference* edges. Call
122 /// edges are much what you would expect, and exist when there is a 'call' or
123 /// 'invoke' instruction of some function. Reference edges are also tracked
124 /// along side these, and exist whenever any instruction (transitively
125 /// through its operands) references a function. All call edges are
126 /// inherently reference edges, and so the reference graph forms a superset
127 /// of the formal call graph.
128 ///
129 /// All of these forms of edges are fundamentally represented as outgoing
130 /// edges. The edges are stored in the source node and point at the target
131 /// node. This allows the edge structure itself to be a very compact data
132 /// structure: essentially a tagged pointer.
133 class Edge {
134 public:
135 /// The kind of edge in the graph.
136 enum Kind : bool { Ref = false, Call = true };
137
138 Edge();
139 explicit Edge(Node &N, Kind K);
140
141 /// Test whether the edge is null.
142 ///
143 /// This happens when an edge has been deleted. We leave the edge objects
144 /// around but clear them.
145 explicit operator bool() const;
146
147 /// Returnss the \c Kind of the edge.
148 Kind getKind() const;
149
150 /// Test whether the edge represents a direct call to a function.
151 ///
152 /// This requires that the edge is not null.
153 bool isCall() const;
154
155 /// Get the call graph node referenced by this edge.
156 ///
157 /// This requires that the edge is not null.
158 Node &getNode() const;
159
160 /// Get the function referenced by this edge.
161 ///
162 /// This requires that the edge is not null.
163 Function &getFunction() const;
164
165 private:
166 friend class LazyCallGraph::EdgeSequence;
167 friend class LazyCallGraph::RefSCC;
168
169 PointerIntPair<Node *, 1, Kind> Value;
170
setKind(Kind K)171 void setKind(Kind K) { Value.setInt(K); }
172 };
173
174 /// The edge sequence object.
175 ///
176 /// This typically exists entirely within the node but is exposed as
177 /// a separate type because a node doesn't initially have edges. An explicit
178 /// population step is required to produce this sequence at first and it is
179 /// then cached in the node. It is also used to represent edges entering the
180 /// graph from outside the module to model the graph's roots.
181 ///
182 /// The sequence itself both iterable and indexable. The indexes remain
183 /// stable even as the sequence mutates (including removal).
184 class EdgeSequence {
185 friend class LazyCallGraph;
186 friend class LazyCallGraph::Node;
187 friend class LazyCallGraph::RefSCC;
188
189 using VectorT = SmallVector<Edge, 4>;
190 using VectorImplT = SmallVectorImpl<Edge>;
191
192 public:
193 /// An iterator used for the edges to both entry nodes and child nodes.
194 class iterator
195 : public iterator_adaptor_base<iterator, VectorImplT::iterator,
196 std::forward_iterator_tag> {
197 friend class LazyCallGraph;
198 friend class LazyCallGraph::Node;
199
200 VectorImplT::iterator E;
201
202 // Build the iterator for a specific position in the edge list.
iterator(VectorImplT::iterator BaseI,VectorImplT::iterator E)203 iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
204 : iterator_adaptor_base(BaseI), E(E) {
205 while (I != E && !*I)
206 ++I;
207 }
208
209 public:
210 iterator() = default;
211
212 using iterator_adaptor_base::operator++;
213 iterator &operator++() {
214 do {
215 ++I;
216 } while (I != E && !*I);
217 return *this;
218 }
219 };
220
221 /// An iterator over specifically call edges.
222 ///
223 /// This has the same iteration properties as the \c iterator, but
224 /// restricts itself to edges which represent actual calls.
225 class call_iterator
226 : public iterator_adaptor_base<call_iterator, VectorImplT::iterator,
227 std::forward_iterator_tag> {
228 friend class LazyCallGraph;
229 friend class LazyCallGraph::Node;
230
231 VectorImplT::iterator E;
232
233 /// Advance the iterator to the next valid, call edge.
advanceToNextEdge()234 void advanceToNextEdge() {
235 while (I != E && (!*I || !I->isCall()))
236 ++I;
237 }
238
239 // Build the iterator for a specific position in the edge list.
call_iterator(VectorImplT::iterator BaseI,VectorImplT::iterator E)240 call_iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
241 : iterator_adaptor_base(BaseI), E(E) {
242 advanceToNextEdge();
243 }
244
245 public:
246 call_iterator() = default;
247
248 using iterator_adaptor_base::operator++;
249 call_iterator &operator++() {
250 ++I;
251 advanceToNextEdge();
252 return *this;
253 }
254 };
255
begin()256 iterator begin() { return iterator(Edges.begin(), Edges.end()); }
end()257 iterator end() { return iterator(Edges.end(), Edges.end()); }
258
259 Edge &operator[](Node &N) {
260 assert(EdgeIndexMap.find(&N) != EdgeIndexMap.end() && "No such edge!");
261 auto &E = Edges[EdgeIndexMap.find(&N)->second];
262 assert(E && "Dead or null edge!");
263 return E;
264 }
265
lookup(Node & N)266 Edge *lookup(Node &N) {
267 auto EI = EdgeIndexMap.find(&N);
268 if (EI == EdgeIndexMap.end())
269 return nullptr;
270 auto &E = Edges[EI->second];
271 return E ? &E : nullptr;
272 }
273
call_begin()274 call_iterator call_begin() {
275 return call_iterator(Edges.begin(), Edges.end());
276 }
call_end()277 call_iterator call_end() { return call_iterator(Edges.end(), Edges.end()); }
278
calls()279 iterator_range<call_iterator> calls() {
280 return make_range(call_begin(), call_end());
281 }
282
empty()283 bool empty() {
284 for (auto &E : Edges)
285 if (E)
286 return false;
287
288 return true;
289 }
290
291 private:
292 VectorT Edges;
293 DenseMap<Node *, int> EdgeIndexMap;
294
295 EdgeSequence() = default;
296
297 /// Internal helper to insert an edge to a node.
298 void insertEdgeInternal(Node &ChildN, Edge::Kind EK);
299
300 /// Internal helper to change an edge kind.
301 void setEdgeKind(Node &ChildN, Edge::Kind EK);
302
303 /// Internal helper to remove the edge to the given function.
304 bool removeEdgeInternal(Node &ChildN);
305 };
306
307 /// A node in the call graph.
308 ///
309 /// This represents a single node. It's primary roles are to cache the list of
310 /// callees, de-duplicate and provide fast testing of whether a function is
311 /// a callee, and facilitate iteration of child nodes in the graph.
312 ///
313 /// The node works much like an optional in order to lazily populate the
314 /// edges of each node. Until populated, there are no edges. Once populated,
315 /// you can access the edges by dereferencing the node or using the `->`
316 /// operator as if the node was an `Optional<EdgeSequence>`.
317 class Node {
318 friend class LazyCallGraph;
319 friend class LazyCallGraph::RefSCC;
320
321 public:
getGraph()322 LazyCallGraph &getGraph() const { return *G; }
323
getFunction()324 Function &getFunction() const { return *F; }
325
getName()326 StringRef getName() const { return F->getName(); }
327
328 /// Equality is defined as address equality.
329 bool operator==(const Node &N) const { return this == &N; }
330 bool operator!=(const Node &N) const { return !operator==(N); }
331
332 /// Tests whether the node has been populated with edges.
isPopulated()333 bool isPopulated() const { return Edges.hasValue(); }
334
335 /// Tests whether this is actually a dead node and no longer valid.
336 ///
337 /// Users rarely interact with nodes in this state and other methods are
338 /// invalid. This is used to model a node in an edge list where the
339 /// function has been completely removed.
isDead()340 bool isDead() const {
341 assert(!G == !F &&
342 "Both graph and function pointers should be null or non-null.");
343 return !G;
344 }
345
346 // We allow accessing the edges by dereferencing or using the arrow
347 // operator, essentially wrapping the internal optional.
348 EdgeSequence &operator*() const {
349 // Rip const off because the node itself isn't changing here.
350 return const_cast<EdgeSequence &>(*Edges);
351 }
352 EdgeSequence *operator->() const { return &**this; }
353
354 /// Populate the edges of this node if necessary.
355 ///
356 /// The first time this is called it will populate the edges for this node
357 /// in the graph. It does this by scanning the underlying function, so once
358 /// this is done, any changes to that function must be explicitly reflected
359 /// in updates to the graph.
360 ///
361 /// \returns the populated \c EdgeSequence to simplify walking it.
362 ///
363 /// This will not update or re-scan anything if called repeatedly. Instead,
364 /// the edge sequence is cached and returned immediately on subsequent
365 /// calls.
populate()366 EdgeSequence &populate() {
367 if (Edges)
368 return *Edges;
369
370 return populateSlow();
371 }
372
373 private:
374 LazyCallGraph *G;
375 Function *F;
376
377 // We provide for the DFS numbering and Tarjan walk lowlink numbers to be
378 // stored directly within the node. These are both '-1' when nodes are part
379 // of an SCC (or RefSCC), or '0' when not yet reached in a DFS walk.
380 int DFSNumber = 0;
381 int LowLink = 0;
382
383 Optional<EdgeSequence> Edges;
384
385 /// Basic constructor implements the scanning of F into Edges and
386 /// EdgeIndexMap.
Node(LazyCallGraph & G,Function & F)387 Node(LazyCallGraph &G, Function &F) : G(&G), F(&F) {}
388
389 /// Implementation of the scan when populating.
390 EdgeSequence &populateSlow();
391
392 /// Internal helper to directly replace the function with a new one.
393 ///
394 /// This is used to facilitate tranfsormations which need to replace the
395 /// formal Function object but directly move the body and users from one to
396 /// the other.
397 void replaceFunction(Function &NewF);
398
clear()399 void clear() { Edges.reset(); }
400
401 /// Print the name of this node's function.
402 friend raw_ostream &operator<<(raw_ostream &OS, const Node &N) {
403 return OS << N.F->getName();
404 }
405
406 /// Dump the name of this node's function to stderr.
407 void dump() const;
408 };
409
410 /// An SCC of the call graph.
411 ///
412 /// This represents a Strongly Connected Component of the direct call graph
413 /// -- ignoring indirect calls and function references. It stores this as
414 /// a collection of call graph nodes. While the order of nodes in the SCC is
415 /// stable, it is not any particular order.
416 ///
417 /// The SCCs are nested within a \c RefSCC, see below for details about that
418 /// outer structure. SCCs do not support mutation of the call graph, that
419 /// must be done through the containing \c RefSCC in order to fully reason
420 /// about the ordering and connections of the graph.
421 class SCC {
422 friend class LazyCallGraph;
423 friend class LazyCallGraph::Node;
424
425 RefSCC *OuterRefSCC;
426 SmallVector<Node *, 1> Nodes;
427
428 template <typename NodeRangeT>
SCC(RefSCC & OuterRefSCC,NodeRangeT && Nodes)429 SCC(RefSCC &OuterRefSCC, NodeRangeT &&Nodes)
430 : OuterRefSCC(&OuterRefSCC), Nodes(std::forward<NodeRangeT>(Nodes)) {}
431
clear()432 void clear() {
433 OuterRefSCC = nullptr;
434 Nodes.clear();
435 }
436
437 /// Print a short descrtiption useful for debugging or logging.
438 ///
439 /// We print the function names in the SCC wrapped in '()'s and skipping
440 /// the middle functions if there are a large number.
441 //
442 // Note: this is defined inline to dodge issues with GCC's interpretation
443 // of enclosing namespaces for friend function declarations.
444 friend raw_ostream &operator<<(raw_ostream &OS, const SCC &C) {
445 OS << '(';
446 int i = 0;
447 for (LazyCallGraph::Node &N : C) {
448 if (i > 0)
449 OS << ", ";
450 // Elide the inner elements if there are too many.
451 if (i > 8) {
452 OS << "..., " << *C.Nodes.back();
453 break;
454 }
455 OS << N;
456 ++i;
457 }
458 OS << ')';
459 return OS;
460 }
461
462 /// Dump a short description of this SCC to stderr.
463 void dump() const;
464
465 #ifndef NDEBUG
466 /// Verify invariants about the SCC.
467 ///
468 /// This will attempt to validate all of the basic invariants within an
469 /// SCC, but not that it is a strongly connected componet per-se. Primarily
470 /// useful while building and updating the graph to check that basic
471 /// properties are in place rather than having inexplicable crashes later.
472 void verify();
473 #endif
474
475 public:
476 using iterator = pointee_iterator<SmallVectorImpl<Node *>::const_iterator>;
477
begin()478 iterator begin() const { return Nodes.begin(); }
end()479 iterator end() const { return Nodes.end(); }
480
size()481 int size() const { return Nodes.size(); }
482
getOuterRefSCC()483 RefSCC &getOuterRefSCC() const { return *OuterRefSCC; }
484
485 /// Test if this SCC is a parent of \a C.
486 ///
487 /// Note that this is linear in the number of edges departing the current
488 /// SCC.
489 bool isParentOf(const SCC &C) const;
490
491 /// Test if this SCC is an ancestor of \a C.
492 ///
493 /// Note that in the worst case this is linear in the number of edges
494 /// departing the current SCC and every SCC in the entire graph reachable
495 /// from this SCC. Thus this very well may walk every edge in the entire
496 /// call graph! Do not call this in a tight loop!
497 bool isAncestorOf(const SCC &C) const;
498
499 /// Test if this SCC is a child of \a C.
500 ///
501 /// See the comments for \c isParentOf for detailed notes about the
502 /// complexity of this routine.
isChildOf(const SCC & C)503 bool isChildOf(const SCC &C) const { return C.isParentOf(*this); }
504
505 /// Test if this SCC is a descendant of \a C.
506 ///
507 /// See the comments for \c isParentOf for detailed notes about the
508 /// complexity of this routine.
isDescendantOf(const SCC & C)509 bool isDescendantOf(const SCC &C) const { return C.isAncestorOf(*this); }
510
511 /// Provide a short name by printing this SCC to a std::string.
512 ///
513 /// This copes with the fact that we don't have a name per-se for an SCC
514 /// while still making the use of this in debugging and logging useful.
getName()515 std::string getName() const {
516 std::string Name;
517 raw_string_ostream OS(Name);
518 OS << *this;
519 OS.flush();
520 return Name;
521 }
522 };
523
/// A RefSCC of the call graph.
///
/// This models a Strongly Connected Component of function reference edges in
/// the call graph. As opposed to actual SCCs, these can be used to scope
/// subgraphs of the module which are independent from other subgraphs of the
/// module because they do not reference it in any way. This is also the unit
/// where we do mutation of the graph in order to restrict mutations to those
/// which don't violate this independence.
///
/// A RefSCC contains a DAG of actual SCCs. All the nodes within the RefSCC
/// are necessarily within some actual SCC that nests within it. Since
/// a direct call *is* a reference, there will always be at least one RefSCC
/// around any SCC.
537 class RefSCC {
538 friend class LazyCallGraph;
539 friend class LazyCallGraph::Node;
540
541 LazyCallGraph *G;
542
543 /// A postorder list of the inner SCCs.
544 SmallVector<SCC *, 4> SCCs;
545
546 /// A map from SCC to index in the postorder list.
547 SmallDenseMap<SCC *, int, 4> SCCIndices;
548
549 /// Fast-path constructor. RefSCCs should instead be constructed by calling
550 /// formRefSCCFast on the graph itself.
551 RefSCC(LazyCallGraph &G);
552
clear()553 void clear() {
554 SCCs.clear();
555 SCCIndices.clear();
556 }
557
558 /// Print a short description useful for debugging or logging.
559 ///
560 /// We print the SCCs wrapped in '[]'s and skipping the middle SCCs if
561 /// there are a large number.
562 //
563 // Note: this is defined inline to dodge issues with GCC's interpretation
564 // of enclosing namespaces for friend function declarations.
565 friend raw_ostream &operator<<(raw_ostream &OS, const RefSCC &RC) {
566 OS << '[';
567 int i = 0;
568 for (LazyCallGraph::SCC &C : RC) {
569 if (i > 0)
570 OS << ", ";
571 // Elide the inner elements if there are too many.
572 if (i > 4) {
573 OS << "..., " << *RC.SCCs.back();
574 break;
575 }
576 OS << C;
577 ++i;
578 }
579 OS << ']';
580 return OS;
581 }
582
583 /// Dump a short description of this RefSCC to stderr.
584 void dump() const;
585
586 #ifndef NDEBUG
587 /// Verify invariants about the RefSCC and all its SCCs.
588 ///
589 /// This will attempt to validate all of the invariants *within* the
590 /// RefSCC, but not that it is a strongly connected component of the larger
591 /// graph. This makes it useful even when partially through an update.
592 ///
593 /// Invariants checked:
594 /// - SCCs and their indices match.
595 /// - The SCCs list is in fact in post-order.
596 void verify();
597 #endif
598
599 public:
600 using iterator = pointee_iterator<SmallVectorImpl<SCC *>::const_iterator>;
601 using range = iterator_range<iterator>;
602 using parent_iterator =
603 pointee_iterator<SmallPtrSetImpl<RefSCC *>::const_iterator>;
604
begin()605 iterator begin() const { return SCCs.begin(); }
end()606 iterator end() const { return SCCs.end(); }
607
size()608 ssize_t size() const { return SCCs.size(); }
609
610 SCC &operator[](int Idx) { return *SCCs[Idx]; }
611
find(SCC & C)612 iterator find(SCC &C) const {
613 return SCCs.begin() + SCCIndices.find(&C)->second;
614 }
615
616 /// Test if this RefSCC is a parent of \a RC.
617 ///
618 /// CAUTION: This method walks every edge in the \c RefSCC, it can be very
619 /// expensive.
620 bool isParentOf(const RefSCC &RC) const;
621
622 /// Test if this RefSCC is an ancestor of \a RC.
623 ///
624 /// CAUTION: This method walks the directed graph of edges as far as
625 /// necessary to find a possible path to the argument. In the worst case
626 /// this may walk the entire graph and can be extremely expensive.
627 bool isAncestorOf(const RefSCC &RC) const;
628
629 /// Test if this RefSCC is a child of \a RC.
630 ///
631 /// CAUTION: This method walks every edge in the argument \c RefSCC, it can
632 /// be very expensive.
isChildOf(const RefSCC & RC)633 bool isChildOf(const RefSCC &RC) const { return RC.isParentOf(*this); }
634
635 /// Test if this RefSCC is a descendant of \a RC.
636 ///
637 /// CAUTION: This method walks the directed graph of edges as far as
638 /// necessary to find a possible path from the argument. In the worst case
639 /// this may walk the entire graph and can be extremely expensive.
isDescendantOf(const RefSCC & RC)640 bool isDescendantOf(const RefSCC &RC) const {
641 return RC.isAncestorOf(*this);
642 }
643
644 /// Provide a short name by printing this RefSCC to a std::string.
645 ///
646 /// This copes with the fact that we don't have a name per-se for an RefSCC
647 /// while still making the use of this in debugging and logging useful.
getName()648 std::string getName() const {
649 std::string Name;
650 raw_string_ostream OS(Name);
651 OS << *this;
652 OS.flush();
653 return Name;
654 }
655
656 ///@{
657 /// \name Mutation API
658 ///
659 /// These methods provide the core API for updating the call graph in the
660 /// presence of (potentially still in-flight) DFS-found RefSCCs and SCCs.
661 ///
662 /// Note that these methods sometimes have complex runtimes, so be careful
663 /// how you call them.
664
665 /// Make an existing internal ref edge into a call edge.
666 ///
667 /// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
668 /// If that happens, the optional callback \p MergedCB will be invoked (if
669 /// provided) on the SCCs being merged away prior to actually performing
670 /// the merge. Note that this will never include the target SCC as that
671 /// will be the SCC functions are merged into to resolve the cycle. Once
672 /// this function returns, these merged SCCs are not in a valid state but
673 /// the pointers will remain valid until destruction of the parent graph
674 /// instance for the purpose of clearing cached information. This function
675 /// also returns 'true' if a cycle was formed and some SCCs merged away as
676 /// a convenience.
677 ///
678 /// After this operation, both SourceN's SCC and TargetN's SCC may move
679 /// position within this RefSCC's postorder list. Any SCCs merged are
680 /// merged into the TargetN's SCC in order to preserve reachability analyses
681 /// which took place on that SCC.
682 bool switchInternalEdgeToCall(
683 Node &SourceN, Node &TargetN,
684 function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});
685
686 /// Make an existing internal call edge between separate SCCs into a ref
687 /// edge.
688 ///
689 /// If SourceN and TargetN in separate SCCs within this RefSCC, changing
690 /// the call edge between them to a ref edge is a trivial operation that
691 /// does not require any structural changes to the call graph.
692 void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);
693
694 /// Make an existing internal call edge within a single SCC into a ref
695 /// edge.
696 ///
697 /// Since SourceN and TargetN are part of a single SCC, this SCC may be
698 /// split up due to breaking a cycle in the call edges that formed it. If
699 /// that happens, then this routine will insert new SCCs into the postorder
700 /// list *before* the SCC of TargetN (previously the SCC of both). This
701 /// preserves postorder as the TargetN can reach all of the other nodes by
702 /// definition of previously being in a single SCC formed by the cycle from
703 /// SourceN to TargetN.
704 ///
705 /// The newly added SCCs are added *immediately* and contiguously
706 /// prior to the TargetN SCC and return the range covering the new SCCs in
707 /// the RefSCC's postorder sequence. You can directly iterate the returned
708 /// range to observe all of the new SCCs in postorder.
709 ///
710 /// Note that if SourceN and TargetN are in separate SCCs, the simpler
711 /// routine `switchTrivialInternalEdgeToRef` should be used instead.
712 iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
713 Node &TargetN);
714
715 /// Make an existing outgoing ref edge into a call edge.
716 ///
717 /// Note that this is trivial as there are no cyclic impacts and there
718 /// remains a reference edge.
719 void switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN);
720
721 /// Make an existing outgoing call edge into a ref edge.
722 ///
723 /// This is trivial as there are no cyclic impacts and there remains
724 /// a reference edge.
725 void switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN);
726
727 /// Insert a ref edge from one node in this RefSCC to another in this
728 /// RefSCC.
729 ///
730 /// This is always a trivial operation as it doesn't change any part of the
731 /// graph structure besides connecting the two nodes.
732 ///
733 /// Note that we don't support directly inserting internal *call* edges
734 /// because that could change the graph structure and requires returning
735 /// information about what became invalid. As a consequence, the pattern
736 /// should be to first insert the necessary ref edge, and then to switch it
737 /// to a call edge if needed and handle any invalidation that results. See
738 /// the \c switchInternalEdgeToCall routine for details.
739 void insertInternalRefEdge(Node &SourceN, Node &TargetN);
740
741 /// Insert an edge whose parent is in this RefSCC and child is in some
742 /// child RefSCC.
743 ///
744 /// There must be an existing path from the \p SourceN to the \p TargetN.
745 /// This operation is inexpensive and does not change the set of SCCs and
746 /// RefSCCs in the graph.
747 void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
748
749 /// Insert an edge whose source is in a descendant RefSCC and target is in
750 /// this RefSCC.
751 ///
752 /// There must be an existing path from the target to the source in this
753 /// case.
754 ///
755 /// NB! This is has the potential to be a very expensive function. It
756 /// inherently forms a cycle in the prior RefSCC DAG and we have to merge
757 /// RefSCCs to resolve that cycle. But finding all of the RefSCCs which
758 /// participate in the cycle can in the worst case require traversing every
759 /// RefSCC in the graph. Every attempt is made to avoid that, but passes
760 /// must still exercise caution calling this routine repeatedly.
761 ///
    /// Also note that this can only insert ref edges. In order to insert
    /// a call edge, first insert a ref edge and then switch it to a call edge.
    /// These are intentionally kept as separate interfaces because each step
    /// of the operation invalidates a different set of data structures.
    ///
    /// This returns all the RefSCCs which were merged into this RefSCC
    /// (the target's). This allows callers to invalidate any cached
    /// information.
    ///
    /// FIXME: We could possibly optimize this quite a bit for cases where the
    /// caller and callee are very nearby in the graph. See comments in the
    /// implementation for details, but that use case might impact users.
    SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
                                                   Node &TargetN);

    /// Remove an edge whose source is in this RefSCC and target is *not*.
    ///
    /// This removes an inter-RefSCC edge. All inter-RefSCC edges originating
    /// from this SCC have been fully explored by any in-flight DFS graph
    /// formation, so this is always safe to call once you have the source
    /// RefSCC.
    ///
    /// This operation does not change the cyclic structure of the graph and so
    /// is very inexpensive. It may change the connectivity graph of the SCCs
    /// though, so be careful calling this while iterating over them.
    void removeOutgoingEdge(Node &SourceN, Node &TargetN);

    /// Remove a list of ref edges which are entirely within this RefSCC.
    ///
    /// Both the \a SourceN and all of the \a TargetNs must be within this
    /// RefSCC. Removing these edges may break cycles that form this RefSCC and
    /// thus this operation may change the RefSCC graph significantly. In
    /// particular, this operation will re-form new RefSCCs based on the
    /// remaining connectivity of the graph. The following invariants are
    /// guaranteed to hold after calling this method:
    ///
    /// 1) If a ref-cycle remains after removal, it leaves this RefSCC intact
    ///    and in the graph. No new RefSCCs are built.
    /// 2) Otherwise, this RefSCC will be dead after this call and no longer in
    ///    the graph or the postorder traversal of the call graph. Any iterator
    ///    pointing at this RefSCC will become invalid.
    /// 3) All newly formed RefSCCs will be returned and the order of the
    ///    RefSCCs returned will be a valid postorder traversal of the new
    ///    RefSCCs.
    /// 4) No RefSCC other than this RefSCC has its member set changed (this is
    ///    inherent in the definition of removing such an edge).
    ///
    /// These invariants are very important to ensure that we can build
    /// optimization pipelines on top of the CGSCC pass manager which
    /// intelligently update the RefSCC graph without invalidating other parts
    /// of the RefSCC graph.
    ///
    /// Note that we provide no routine to remove a *call* edge. Instead, you
    /// must first switch it to a ref edge using \c switchInternalEdgeToRef.
    /// This split API is intentional as each of these two steps can invalidate
    /// a different aspect of the graph structure and needs to have the
    /// invalidation handled independently.
    ///
    /// The runtime complexity of this method is, in the worst case, O(V+E)
    /// where V is the number of nodes in this RefSCC and E is the number of
    /// edges leaving the nodes in this RefSCC. Note that E includes both edges
    /// within this RefSCC and edges from this RefSCC to child RefSCCs. Some
    /// effort has been made to minimize the overhead of common cases such as
    /// self-edges and edge removals which result in a spanning tree with no
    /// more cycles.
    SmallVector<RefSCC *, 1> removeInternalRefEdge(Node &SourceN,
                                                   ArrayRef<Node *> TargetNs);
829
    /// A convenience wrapper around the insertion methods above to handle
    /// trivial cases of inserting a new call edge.
    ///
    /// This is trivial whenever the target is in the same SCC as the source or
    /// the edge is an outgoing edge to some descendant SCC. In these cases
    /// there is no change to the cyclic structure of SCCs or RefSCCs.
    ///
    /// To further make calling this convenient, it also handles inserting
    /// already existing edges.
    void insertTrivialCallEdge(Node &SourceN, Node &TargetN);

    /// A convenience wrapper around the insertion methods above to handle
    /// trivial cases of inserting a new ref edge.
    ///
    /// This is trivial whenever the target is in the same RefSCC as the source
    /// or the edge is an outgoing edge to some descendant RefSCC. In these
    /// cases there is no change to the cyclic structure of the RefSCCs.
    ///
    /// To further make calling this convenient, it also handles inserting
    /// already existing edges.
    void insertTrivialRefEdge(Node &SourceN, Node &TargetN);

    /// Directly replace a node's function with a new function.
    ///
    /// This should be used when moving the body and users of a function to
    /// a new formal function object but not otherwise changing the call graph
    /// structure in any way.
    ///
    /// It requires that the old function in the provided node have zero uses
    /// and the new function must have calls and references to it establishing
    /// an equivalent graph.
    void replaceNodeFunction(Node &N, Function &NewF);

    ///@}
  };
865
866 /// A post-order depth-first RefSCC iterator over the call graph.
867 ///
868 /// This iterator walks the cached post-order sequence of RefSCCs. However,
869 /// it trades stability for flexibility. It is restricted to a forward
870 /// iterator but will survive mutations which insert new RefSCCs and continue
871 /// to point to the same RefSCC even if it moves in the post-order sequence.
872 class postorder_ref_scc_iterator
873 : public iterator_facade_base<postorder_ref_scc_iterator,
874 std::forward_iterator_tag, RefSCC> {
875 friend class LazyCallGraph;
876 friend class LazyCallGraph::Node;
877
878 /// Nonce type to select the constructor for the end iterator.
879 struct IsAtEndT {};
880
881 LazyCallGraph *G;
882 RefSCC *RC = nullptr;
883
884 /// Build the begin iterator for a node.
postorder_ref_scc_iterator(LazyCallGraph & G)885 postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {}
886
887 /// Build the end iterator for a node. This is selected purely by overload.
postorder_ref_scc_iterator(LazyCallGraph & G,IsAtEndT)888 postorder_ref_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/) : G(&G) {}
889
890 /// Get the post-order RefSCC at the given index of the postorder walk,
891 /// populating it if necessary.
getRC(LazyCallGraph & G,int Index)892 static RefSCC *getRC(LazyCallGraph &G, int Index) {
893 if (Index == (int)G.PostOrderRefSCCs.size())
894 // We're at the end.
895 return nullptr;
896
897 return G.PostOrderRefSCCs[Index];
898 }
899
900 public:
901 bool operator==(const postorder_ref_scc_iterator &Arg) const {
902 return G == Arg.G && RC == Arg.RC;
903 }
904
905 reference operator*() const { return *RC; }
906
907 using iterator_facade_base::operator++;
908 postorder_ref_scc_iterator &operator++() {
909 assert(RC && "Cannot increment the end iterator!");
910 RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
911 return *this;
912 }
913 };
914
  /// Construct a graph for the given module.
  ///
  /// This sets up the graph and computes all of the entry points of the graph.
  /// No function definitions are scanned until their nodes in the graph are
  /// requested during traversal.
  LazyCallGraph(Module &M,
                function_ref<TargetLibraryInfo &(Function &)> GetTLI);

  /// Moves are user-provided (not defaulted) because the graph holds pointers
  /// back to itself that must be re-targeted; see \c updateGraphPtrs.
  LazyCallGraph(LazyCallGraph &&G);
  LazyCallGraph &operator=(LazyCallGraph &&RHS);

  /// Analysis-manager hook: reports whether this result is invalidated by the
  /// given set of preserved analyses. Defined out of line.
  bool invalidate(Module &, const PreservedAnalyses &PA,
                  ModuleAnalysisManager::Invalidator &);

  /// Iterate over the graph's entry edges (edges from "external" sources).
  EdgeSequence::iterator begin() { return EntryEdges.begin(); }
  EdgeSequence::iterator end() { return EntryEdges.end(); }

  /// Form all of the RefSCCs for this graph; required before any post-order
  /// RefSCC iteration (see the asserts in \c postorder_ref_scc_begin).
  void buildRefSCCs();
933
postorder_ref_scc_begin()934 postorder_ref_scc_iterator postorder_ref_scc_begin() {
935 if (!EntryEdges.empty())
936 assert(!PostOrderRefSCCs.empty() &&
937 "Must form RefSCCs before iterating them!");
938 return postorder_ref_scc_iterator(*this);
939 }
postorder_ref_scc_end()940 postorder_ref_scc_iterator postorder_ref_scc_end() {
941 if (!EntryEdges.empty())
942 assert(!PostOrderRefSCCs.empty() &&
943 "Must form RefSCCs before iterating them!");
944 return postorder_ref_scc_iterator(*this,
945 postorder_ref_scc_iterator::IsAtEndT());
946 }
947
postorder_ref_sccs()948 iterator_range<postorder_ref_scc_iterator> postorder_ref_sccs() {
949 return make_range(postorder_ref_scc_begin(), postorder_ref_scc_end());
950 }
951
  /// Lookup a function in the graph which has already been scanned and added.
  ///
  /// \returns null if the function has not yet been added to the graph.
  Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }

  /// Lookup a function's SCC in the graph.
  ///
  /// \returns null if the function hasn't been assigned an SCC via the RefSCC
  /// iterator walk.
  SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }
960
961 /// Lookup a function's RefSCC in the graph.
962 ///
963 /// \returns null if the function hasn't been assigned a RefSCC via the
964 /// RefSCC iterator walk.
lookupRefSCC(Node & N)965 RefSCC *lookupRefSCC(Node &N) const {
966 if (SCC *C = lookupSCC(N))
967 return &C->getOuterRefSCC();
968
969 return nullptr;
970 }
971
972 /// Get a graph node for a given function, scanning it to populate the graph
973 /// data as necessary.
get(Function & F)974 Node &get(Function &F) {
975 Node *&N = NodeMap[&F];
976 if (N)
977 return *N;
978
979 return insertInto(F, N);
980 }
981
  /// Get the sequence of known and defined library functions.
  ///
  /// These functions, because they are known to LLVM, can have calls
  /// introduced out of thin air from arbitrary IR.
  ///
  /// The returned view is backed by this graph's storage and is invalidated
  /// if the graph is destroyed or moved.
  ArrayRef<Function *> getLibFunctions() const {
    return LibFunctions.getArrayRef();
  }

  /// Test whether a function is a known and defined library function tracked by
  /// the call graph.
  ///
  /// Because these functions are known to LLVM they are specially modeled in
  /// the call graph and even when all IR-level references have been removed
  /// remain active and reachable.
  bool isLibFunction(Function &F) const { return LibFunctions.count(&F); }
997
998 ///@{
999 /// \name Pre-SCC Mutation API
1000 ///
1001 /// These methods are only valid to call prior to forming any SCCs for this
1002 /// call graph. They can be used to update the core node-graph during
1003 /// a node-based inorder traversal that precedes any SCC-based traversal.
1004 ///
1005 /// Once you begin manipulating a call graph's SCCs, most mutation of the
1006 /// graph must be performed via a RefSCC method. There are some exceptions
1007 /// below.
1008
1009 /// Update the call graph after inserting a new edge.
1010 void insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
1011
1012 /// Update the call graph after inserting a new edge.
insertEdge(Function & Source,Function & Target,Edge::Kind EK)1013 void insertEdge(Function &Source, Function &Target, Edge::Kind EK) {
1014 return insertEdge(get(Source), get(Target), EK);
1015 }
1016
1017 /// Update the call graph after deleting an edge.
1018 void removeEdge(Node &SourceN, Node &TargetN);
1019
1020 /// Update the call graph after deleting an edge.
removeEdge(Function & Source,Function & Target)1021 void removeEdge(Function &Source, Function &Target) {
1022 return removeEdge(get(Source), get(Target));
1023 }
1024
1025 ///@}
1026
  ///@{
  /// \name General Mutation API
  ///
  /// There are a very limited set of mutations allowed on the graph as a whole
  /// once SCCs have started to be formed. These routines have strict contracts
  /// but may be called at any point.

  /// Remove a dead function from the call graph (typically to delete it).
  ///
  /// Note that the function must have an empty use list (no remaining
  /// references to it), and the call graph must be up-to-date prior to
  /// calling this. That means it is by itself in a maximal SCC which is by
  /// itself in a maximal RefSCC, etc. No structural changes result from
  /// calling this routine other than potentially removing entry points into
  /// the call graph.
  ///
  /// If SCC formation has begun, this function must not be part of the current
  /// DFS in order to call this safely. Typically, the function will have been
  /// fully visited by the DFS prior to calling this routine.
  void removeDeadFunction(Function &F);

  /// Add a new function split/outlined from an existing function.
  ///
  /// The new function may only reference other functions that the original
  /// function did.
  ///
  /// The original function must reference (either directly or indirectly) the
  /// new function.
  ///
  /// The new function may also reference the original function.
  /// It may end up in a parent SCC in the case that the original function's
  /// edge to the new function is a ref edge, and the edge back is a call edge.
  void addSplitFunction(Function &OriginalFunction, Function &NewFunction);

  /// Add new ref-recursive functions split/outlined from an existing function.
  ///
  /// The new functions may only reference other functions that the original
  /// function did. The new functions may reference (not call) the original
  /// function.
  ///
  /// The original function must reference (not call) all new functions.
  /// All new functions must reference (not call) each other.
  void addSplitRefRecursiveFunctions(Function &OriginalFunction,
                                     ArrayRef<Function *> NewFunctions);

  ///@}
1072
1073 ///@{
1074 /// \name Static helpers for code doing updates to the call graph.
1075 ///
1076 /// These helpers are used to implement parts of the call graph but are also
1077 /// useful to code doing updates or otherwise wanting to walk the IR in the
1078 /// same patterns as when we build the call graph.
1079
1080 /// Recursively visits the defined functions whose address is reachable from
1081 /// every constant in the \p Worklist.
1082 ///
1083 /// Doesn't recurse through any constants already in the \p Visited set, and
1084 /// updates that set with every constant visited.
1085 ///
1086 /// For each defined function, calls \p Callback with that function.
1087 template <typename CallbackT>
visitReferences(SmallVectorImpl<Constant * > & Worklist,SmallPtrSetImpl<Constant * > & Visited,CallbackT Callback)1088 static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
1089 SmallPtrSetImpl<Constant *> &Visited,
1090 CallbackT Callback) {
1091 while (!Worklist.empty()) {
1092 Constant *C = Worklist.pop_back_val();
1093
1094 if (Function *F = dyn_cast<Function>(C)) {
1095 if (!F->isDeclaration())
1096 Callback(*F);
1097 continue;
1098 }
1099
1100 // The blockaddress constant expression is a weird special case, we can't
1101 // generically walk its operands the way we do for all other constants.
1102 if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
1103 // If we've already visited the function referred to by the block
1104 // address, we don't need to revisit it.
1105 if (Visited.count(BA->getFunction()))
1106 continue;
1107
1108 // If all of the blockaddress' users are instructions within the
1109 // referred to function, we don't need to insert a cycle.
1110 if (llvm::all_of(BA->users(), [&](User *U) {
1111 if (Instruction *I = dyn_cast<Instruction>(U))
1112 return I->getFunction() == BA->getFunction();
1113 return false;
1114 }))
1115 continue;
1116
1117 // Otherwise we should go visit the referred to function.
1118 Visited.insert(BA->getFunction());
1119 Worklist.push_back(BA->getFunction());
1120 continue;
1121 }
1122
1123 for (Value *Op : C->operand_values())
1124 if (Visited.insert(cast<Constant>(Op)).second)
1125 Worklist.push_back(cast<Constant>(Op));
1126 }
1127 }
1128
1129 ///@}
1130
private:
  using node_stack_iterator = SmallVectorImpl<Node *>::reverse_iterator;
  using node_stack_range = iterator_range<node_stack_iterator>;

  /// Bump-pointer allocator that holds all the call graph nodes; their
  /// storage lives as long as the graph.
  SpecificBumpPtrAllocator<Node> BPA;

  /// Maps function->node for fast lookup.
  DenseMap<const Function *, Node *> NodeMap;

  /// The entry edges into the graph.
  ///
  /// These edges are from "external" sources. Put another way, they
  /// escape at the module scope.
  EdgeSequence EntryEdges;

  /// Bump-pointer allocator that holds all the call graph SCCs.
  SpecificBumpPtrAllocator<SCC> SCCBPA;

  /// Maps Node -> SCC for fast lookup.
  DenseMap<Node *, SCC *> SCCMap;

  /// Bump-pointer allocator that holds all the call graph RefSCCs.
  SpecificBumpPtrAllocator<RefSCC> RefSCCBPA;

  /// The post-order sequence of RefSCCs.
  ///
  /// This list is lazily formed the first time we walk the graph.
  SmallVector<RefSCC *, 16> PostOrderRefSCCs;

  /// A map from RefSCC to the index for it in the postorder sequence of
  /// RefSCCs (i.e. its position within \c PostOrderRefSCCs).
  DenseMap<RefSCC *, int> RefSCCIndices;

  /// Defined functions that are also known library functions which the
  /// optimizer can reason about and therefore might introduce calls to out of
  /// thin air.
  SmallSetVector<Function *, 4> LibFunctions;

  /// Helper to insert a new function, with an already looked-up entry in
  /// the NodeMap.
  Node &insertInto(Function &F, Node *&MappedN);

  /// Helper to initialize a new node created outside of creating SCCs and add
  /// it to the NodeMap if necessary. For example, useful when a function is
  /// split.
  Node &initNode(Function &F);

  /// Helper to update pointers back to the graph object during moves.
  void updateGraphPtrs();
1181
1182 /// Allocates an SCC and constructs it using the graph allocator.
1183 ///
1184 /// The arguments are forwarded to the constructor.
createSCC(Ts &&...Args)1185 template <typename... Ts> SCC *createSCC(Ts &&... Args) {
1186 return new (SCCBPA.Allocate()) SCC(std::forward<Ts>(Args)...);
1187 }
1188
1189 /// Allocates a RefSCC and constructs it using the graph allocator.
1190 ///
1191 /// The arguments are forwarded to the constructor.
createRefSCC(Ts &&...Args)1192 template <typename... Ts> RefSCC *createRefSCC(Ts &&... Args) {
1193 return new (RefSCCBPA.Allocate()) RefSCC(std::forward<Ts>(Args)...);
1194 }
1195
1196 /// Common logic for building SCCs from a sequence of roots.
1197 ///
1198 /// This is a very generic implementation of the depth-first walk and SCC
1199 /// formation algorithm. It uses a generic sequence of roots and generic
1200 /// callbacks for each step. This is designed to be used to implement both
1201 /// the RefSCC formation and SCC formation with shared logic.
1202 ///
1203 /// Currently this is a relatively naive implementation of Tarjan's DFS
1204 /// algorithm to form the SCCs.
1205 ///
1206 /// FIXME: We should consider newer variants such as Nuutila.
1207 template <typename RootsT, typename GetBeginT, typename GetEndT,
1208 typename GetNodeT, typename FormSCCCallbackT>
1209 static void buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
1210 GetEndT &&GetEnd, GetNodeT &&GetNode,
1211 FormSCCCallbackT &&FormSCC);
1212
1213 /// Build the SCCs for a RefSCC out of a list of nodes.
1214 void buildSCCs(RefSCC &RC, node_stack_range Nodes);
1215
1216 /// Get the index of a RefSCC within the postorder traversal.
1217 ///
1218 /// Requires that this RefSCC is a valid one in the (perhaps partial)
1219 /// postorder traversed part of the graph.
getRefSCCIndex(RefSCC & RC)1220 int getRefSCCIndex(RefSCC &RC) {
1221 auto IndexIt = RefSCCIndices.find(&RC);
1222 assert(IndexIt != RefSCCIndices.end() && "RefSCC doesn't have an index!");
1223 assert(PostOrderRefSCCs[IndexIt->second] == &RC &&
1224 "Index does not point back at RC!");
1225 return IndexIt->second;
1226 }
1227 };
1228
/// Construct a null (absent) edge.
inline LazyCallGraph::Edge::Edge() : Value() {}
/// Construct an edge of kind \p K targeting node \p N.
inline LazyCallGraph::Edge::Edge(Node &N, Kind K) : Value(&N, K) {}
1231
1232 inline LazyCallGraph::Edge::operator bool() const {
1233 return Value.getPointer() && !Value.getPointer()->isDead();
1234 }
1235
/// Get the kind of this edge; asserts that the edge is non-null.
inline LazyCallGraph::Edge::Kind LazyCallGraph::Edge::getKind() const {
  assert(*this && "Queried a null edge!");
  return Value.getInt();
}

/// Test whether this (non-null) edge is a call edge.
inline bool LazyCallGraph::Edge::isCall() const {
  assert(*this && "Queried a null edge!");
  return getKind() == Call;
}

/// Get the target node of this (non-null) edge.
inline LazyCallGraph::Node &LazyCallGraph::Edge::getNode() const {
  assert(*this && "Queried a null edge!");
  return *Value.getPointer();
}

/// Get the function underlying the target node of this (non-null) edge.
inline Function &LazyCallGraph::Edge::getFunction() const {
  assert(*this && "Queried a null edge!");
  return getNode().getFunction();
}
1255
// Provide GraphTraits specializations for call graphs so generic graph
// iteration utilities can walk a LazyCallGraph. Children of a node are the
// targets of its current edge sequence.
template <> struct GraphTraits<LazyCallGraph::Node *> {
  using NodeRef = LazyCallGraph::Node *;
  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
};
template <> struct GraphTraits<LazyCallGraph *> {
  using NodeRef = LazyCallGraph::Node *;
  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
};
1273
1274 /// An analysis pass which computes the call graph for a module.
1275 class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
1276 friend AnalysisInfoMixin<LazyCallGraphAnalysis>;
1277
1278 static AnalysisKey Key;
1279
1280 public:
1281 /// Inform generic clients of the result type.
1282 using Result = LazyCallGraph;
1283
1284 /// Compute the \c LazyCallGraph for the module \c M.
1285 ///
1286 /// This just builds the set of entry points to the call graph. The rest is
1287 /// built lazily as it is walked.
1288 LazyCallGraph run(Module &M, ModuleAnalysisManager &AM) {
1289 FunctionAnalysisManager &FAM =
1290 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
1291 auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
1292 return FAM.getResult<TargetLibraryAnalysis>(F);
1293 };
1294 return LazyCallGraph(M, GetTLI);
1295 }
1296 };
1297
/// A pass which prints the call graph to a \c raw_ostream.
///
/// This is primarily useful for testing the analysis.
class LazyCallGraphPrinterPass
    : public PassInfoMixin<LazyCallGraphPrinterPass> {
  // Destination stream, held by reference; it must outlive the pass.
  raw_ostream &OS;

public:
  explicit LazyCallGraphPrinterPass(raw_ostream &OS);

  /// Print the call graph of \p M to the configured stream.
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
1310
/// A pass which prints the call graph as a DOT file to a \c raw_ostream.
///
/// This is primarily useful for visualization purposes.
class LazyCallGraphDOTPrinterPass
    : public PassInfoMixin<LazyCallGraphDOTPrinterPass> {
  // Destination stream, held by reference; it must outlive the pass.
  raw_ostream &OS;

public:
  explicit LazyCallGraphDOTPrinterPass(raw_ostream &OS);

  /// Emit the call graph of \p M in DOT format to the configured stream.
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
1323
1324 } // end namespace llvm
1325
1326 #endif // LLVM_ANALYSIS_LAZYCALLGRAPH_H
1327