1//===- LazyCallGraph.cpp - Analysis of a Module's call graph --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "llvm/Analysis/LazyCallGraph.h"
10
11#include "llvm/ADT/ArrayRef.h"
12#include "llvm/ADT/STLExtras.h"
13#include "llvm/ADT/Sequence.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/Function.h"
20#include "llvm/IR/GlobalVariable.h"
21#include "llvm/IR/InstIterator.h"
22#include "llvm/IR/Instruction.h"
23#include "llvm/IR/Module.h"
24#include "llvm/IR/PassManager.h"
27#include "llvm/Support/Debug.h"
30#include <algorithm>
31
32#ifdef EXPENSIVE_CHECKS
33#include "llvm/ADT/ScopeExit.h"
34#endif
35
36using namespace llvm;
37
38#define DEBUG_TYPE "lcg"
39
40void LazyCallGraph::EdgeSequence::insertEdgeInternal(Node &TargetN,
41 Edge::Kind EK) {
42 EdgeIndexMap.try_emplace(&TargetN, Edges.size());
43 Edges.emplace_back(TargetN, EK);
44}
45
46void LazyCallGraph::EdgeSequence::setEdgeKind(Node &TargetN, Edge::Kind EK) {
47 Edges[EdgeIndexMap.find(&TargetN)->second].setKind(EK);
48}
49
50bool LazyCallGraph::EdgeSequence::removeEdgeInternal(Node &TargetN) {
51 auto IndexMapI = EdgeIndexMap.find(&TargetN);
52 if (IndexMapI == EdgeIndexMap.end())
53 return false;
54
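  // Clear the edge in place rather than erasing it from the vector so that
  // the indices recorded in EdgeIndexMap for the remaining edges stay valid.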
55 Edges[IndexMapI->second] = Edge();
56 EdgeIndexMap.erase(IndexMapI);
57 return true;
58}
59
60static void addEdge(SmallVectorImpl<LazyCallGraph::Edge> &Edges,
61 DenseMap<LazyCallGraph::Node *, int> &EdgeIndexMap,
62 LazyCallGraph::Node &N, LazyCallGraph::Edge::Kind EK) {
63 if (!EdgeIndexMap.try_emplace(&N, Edges.size()).second)
64 return;
65
66 LLVM_DEBUG(dbgs() << " Added callable function: " << N.getName() << "\n");
67 Edges.emplace_back(LazyCallGraph::Edge(N, EK));
68}
69
70LazyCallGraph::EdgeSequence &LazyCallGraph::Node::populateSlow() {
71 assert(!Edges && "Must not have already populated the edges for this node!");
72
73 LLVM_DEBUG(dbgs() << " Adding functions called by '" << getName()
74 << "' to the graph.\n");
75
76 Edges = EdgeSequence();
77
78 SmallVector<Constant *, 16> Worklist;
79 SmallPtrSet<Function *, 4> Callees;
80 SmallPtrSet<Constant *, 16> Visited;
81
82 // Find all the potential call graph edges in this function. We track both
83 // actual call edges and indirect references to functions. The direct calls
84 // are trivially added, but to accumulate the latter we walk the instructions
85 // and add every operand which is a constant to the worklist to process
86 // afterward.
87 //
88 // Note that we consider *any* function with a definition to be a viable
89 // edge. Even if the function's definition is subject to replacement by
90 // some other module (say, a weak definition) there may still be
91 // optimizations which essentially speculate based on the definition and
92 // a way to check that the specific definition is in fact the one being
93 // used. For example, this could be done by moving the weak definition to
94 // a strong (internal) definition and making the weak definition be an
95 // alias. Then a test of the address of the weak function against the new
96 // strong definition's address would be an effective way to determine the
97 // safety of optimizing a direct call edge.
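  // For example, a function whose address is only taken through a constant
  // expression operand of some instruction never shows up as a direct call
  // above, but the constant walk below still records it as a reference edge.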
98 for (BasicBlock &BB : *F)
99 for (Instruction &I : BB) {
100 if (auto *CB = dyn_cast<CallBase>(&I))
101 if (Function *Callee = CB->getCalledFunction())
102 if (!Callee->isDeclaration())
103 if (Callees.insert(Callee).second) {
104 Visited.insert(Callee);
105 addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*Callee),
106 LazyCallGraph::Edge::Call);
107 }
108
109 for (Value *Op : I.operand_values())
110 if (Constant *C = dyn_cast<Constant>(Op))
111 if (Visited.insert(C).second)
112 Worklist.push_back(C);
113 }
114
115 // We've collected all the constant (and thus potentially function or
116 // function containing) operands to all the instructions in the function.
117 // Process them (recursively) collecting every function found.
118 visitReferences(Worklist, Visited, [&](Function &F) {
119 addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(F),
120 LazyCallGraph::Edge::Ref);
121 });
122
123 // Add implicit reference edges to any defined libcall functions (if we
124 // haven't found an explicit edge).
125 for (auto *F : G->LibFunctions)
126 if (!Visited.count(F))
127 addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*F),
128 LazyCallGraph::Edge::Ref);
129
130 return *Edges;
131}
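// Note that population is lazy: Node::populate() (defined in LazyCallGraph.h)
// returns the cached EdgeSequence when one exists and only falls back to this
// slow path the first time a node's edges are requested.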
132
133void LazyCallGraph::Node::replaceFunction(Function &NewF) {
134 assert(F != &NewF && "Must not replace a function with itself!");
135 F = &NewF;
136}
137
138#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
139LLVM_DUMP_METHOD void LazyCallGraph::Node::dump() const {
140 dbgs() << *this << '\n';
141}
142#endif
143
144static bool isKnownLibFunction(Function &F, TargetLibraryInfo &TLI) {
145 LibFunc LF;
146
147 // Either this is a normal library function or a "vectorizable"
148 // function. Not using the VFDatabase here because this query
149 // is related only to libraries handled via the TLI.
150 return TLI.getLibFunc(F, LF) ||
151 TLI.isKnownVectorFunctionInLibrary(F.getName());
152}
153
154LazyCallGraph::LazyCallGraph(
155 Module &M, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
156 LLVM_DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
157 << "\n");
158 for (Function &F : M) {
159 if (F.isDeclaration())
160 continue;
161 // If this function is a known lib function to LLVM then we want to
162 // synthesize reference edges to it to model the fact that LLVM can turn
163 // arbitrary code into a library function call.
164 if (isKnownLibFunction(F, GetTLI(F)))
165 LibFunctions.insert(&F);
166
167 if (F.hasLocalLinkage())
168 continue;
169
170 // External linkage defined functions have edges to them from other
171 // modules.
172 LLVM_DEBUG(dbgs() << " Adding '" << F.getName()
173 << "' to entry set of the graph.\n");
174 addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F), Edge::Ref);
175 }
176
177 // Externally visible aliases of internal functions are also viable entry
178 // edges to the module.
179 for (auto &A : M.aliases()) {
180 if (A.hasLocalLinkage())
181 continue;
182 if (Function* F = dyn_cast<Function>(A.getAliasee())) {
183 LLVM_DEBUG(dbgs() << " Adding '" << F->getName()
184 << "' with alias '" << A.getName()
185 << "' to entry set of the graph.\n");
186 addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(*F), Edge::Ref);
187 }
188 }
189
190 // Now add entry nodes for functions reachable via initializers to globals.
191 SmallVector<Constant *, 16> Worklist;
192 SmallPtrSet<Constant *, 16> Visited;
193 for (GlobalVariable &GV : M.globals())
194 if (GV.hasInitializer())
195 if (Visited.insert(GV.getInitializer()).second)
196 Worklist.push_back(GV.getInitializer());
197
198 LLVM_DEBUG(
199 dbgs() << " Adding functions referenced by global initializers to the "
200 "entry set.\n");
201 visitReferences(Worklist, Visited, [&](Function &F) {
202 addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F),
203 LazyCallGraph::Edge::Ref);
204 });
205}
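// A minimal construction sketch (illustrative only; in-tree users normally
// obtain the graph through LazyCallGraphAnalysis via a ModuleAnalysisManager,
// and the TLI objects below are local to this example):
//
//   TargetLibraryInfoImpl TLII(Triple(M.getTargetTriple()));
//   TargetLibraryInfo TLI(TLII);
//   LazyCallGraph CG(M, [&](Function &) -> TargetLibraryInfo & { return TLI; });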
206
207LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
208 : BPA(std::move(G.BPA)), NodeMap(std::move(G.NodeMap)),
209 EntryEdges(std::move(G.EntryEdges)), SCCBPA(std::move(G.SCCBPA)),
210 SCCMap(std::move(G.SCCMap)), LibFunctions(std::move(G.LibFunctions)) {
211 updateGraphPtrs();
212}
213
214bool LazyCallGraph::invalidate(Module &, const PreservedAnalyses &PA,
215 ModuleAnalysisManager::Invalidator &) {
216 // Check whether this analysis, or all analyses on the module, have been
217 // preserved.
218 auto PAC = PA.getChecker<llvm::LazyCallGraphAnalysis>();
219 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Module>>());
220}
221
222LazyCallGraph &LazyCallGraph::operator=(LazyCallGraph &&G) {
223 BPA = std::move(G.BPA);
224 NodeMap = std::move(G.NodeMap);
225 EntryEdges = std::move(G.EntryEdges);
226 SCCBPA = std::move(G.SCCBPA);
227 SCCMap = std::move(G.SCCMap);
228 LibFunctions = std::move(G.LibFunctions);
229 updateGraphPtrs();
230 return *this;
231}
232
233#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
234LLVM_DUMP_METHOD void LazyCallGraph::SCC::dump() const {
235 dbgs() << *this << '\n';
236}
237#endif
238
239#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
240void LazyCallGraph::SCC::verify() {
241 assert(OuterRefSCC && "Can't have a null RefSCC!");
242 assert(!Nodes.empty() && "Can't have an empty SCC!");
243
244 for (Node *N : Nodes) {
245 assert(N && "Can't have a null node!");
246 assert(OuterRefSCC->G->lookupSCC(*N) == this &&
247 "Node does not map to this SCC!");
248 assert(N->DFSNumber == -1 &&
249 "Must set DFS numbers to -1 when adding a node to an SCC!");
250 assert(N->LowLink == -1 &&
251 "Must set low link to -1 when adding a node to an SCC!");
252 for (Edge &E : **N)
253 assert(E.getNode().isPopulated() && "Can't have an unpopulated node!");
254
255#ifdef EXPENSIVE_CHECKS
256 // Verify that all nodes in this SCC can reach all other nodes.
257 SmallVector<Node *, 4> Worklist;
258 SmallPtrSet<Node *, 4> Visited;
259 Worklist.push_back(N);
260 while (!Worklist.empty()) {
261 Node *VisitingNode = Worklist.pop_back_val();
262 if (!Visited.insert(VisitingNode).second)
263 continue;
264 for (Edge &E : (*VisitingNode)->calls())
265 Worklist.push_back(&E.getNode());
266 }
267 for (Node *NodeToVisit : Nodes) {
268 assert(Visited.contains(NodeToVisit) &&
269 "Cannot reach all nodes within SCC");
270 }
271#endif
272 }
273}
274#endif
275
276bool LazyCallGraph::SCC::isParentOf(const SCC &C) const {
277 if (this == &C)
278 return false;
279
280 for (Node &N : *this)
281 for (Edge &E : N->calls())
282 if (OuterRefSCC->G->lookupSCC(E.getNode()) == &C)
283 return true;
284
285 // No edges found.
286 return false;
287}
288
289bool LazyCallGraph::SCC::isAncestorOf(const SCC &TargetC) const {
290 if (this == &TargetC)
291 return false;
292
293 LazyCallGraph &G = *OuterRefSCC->G;
294
295 // Start with this SCC.
296 SmallPtrSet<const SCC *, 16> Visited = {this};
297 SmallVector<const SCC *, 16> Worklist = {this};
298
299 // Walk down the graph until we run out of edges or find a path to TargetC.
300 do {
301 const SCC &C = *Worklist.pop_back_val();
302 for (Node &N : C)
303 for (Edge &E : N->calls()) {
304 SCC *CalleeC = G.lookupSCC(E.getNode());
305 if (!CalleeC)
306 continue;
307
308 // If the callee's SCC is the TargetC, we're done.
309 if (CalleeC == &TargetC)
310 return true;
311
312 // If this is the first time we've reached this SCC, put it on the
313 // worklist to recurse through.
314 if (Visited.insert(CalleeC).second)
315 Worklist.push_back(CalleeC);
316 }
317 } while (!Worklist.empty());
318
319 // No paths found.
320 return false;
321}
322
323LazyCallGraph::RefSCC::RefSCC(LazyCallGraph &G) : G(&G) {}
324
325#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
326LLVM_DUMP_METHOD void LazyCallGraph::RefSCC::dump() const {
327 dbgs() << *this << '\n';
328}
329#endif
330
331#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
332void LazyCallGraph::RefSCC::verify() {
333 assert(G && "Can't have a null graph!");
334 assert(!SCCs.empty() && "Can't have an empty SCC!");
335
336 // Verify basic properties of the SCCs.
337 SmallPtrSet<SCC *, 4> SCCSet;
338 for (SCC *C : SCCs) {
339 assert(C && "Can't have a null SCC!");
340 C->verify();
341 assert(&C->getOuterRefSCC() == this &&
342 "SCC doesn't think it is inside this RefSCC!");
343 bool Inserted = SCCSet.insert(C).second;
344 assert(Inserted && "Found a duplicate SCC!");
345 auto IndexIt = SCCIndices.find(C);
346 assert(IndexIt != SCCIndices.end() &&
347 "Found an SCC that doesn't have an index!");
348 }
349
350 // Check that our indices map correctly.
351 for (auto [C, I] : SCCIndices) {
352 assert(C && "Can't have a null SCC in the indices!");
353 assert(SCCSet.count(C) && "Found an index for an SCC not in the RefSCC!");
354 assert(SCCs[I] == C && "Index doesn't point to SCC!");
355 }
356
357 // Check that the SCCs are in fact in post-order.
358 for (int I = 0, Size = SCCs.size(); I < Size; ++I) {
359 SCC &SourceSCC = *SCCs[I];
360 for (Node &N : SourceSCC)
361 for (Edge &E : *N) {
362 if (!E.isCall())
363 continue;
364 SCC &TargetSCC = *G->lookupSCC(E.getNode());
365 if (&TargetSCC.getOuterRefSCC() == this) {
366 assert(SCCIndices.find(&TargetSCC)->second <= I &&
367 "Edge between SCCs violates post-order relationship.");
368 continue;
369 }
370 }
371 }
372
373#ifdef EXPENSIVE_CHECKS
374 // Verify that all nodes in this RefSCC can reach all other nodes.
375 SmallVector<Node *> Nodes;
376 for (SCC *C : SCCs) {
377 for (Node &N : *C)
378 Nodes.push_back(&N);
379 }
380 for (Node *N : Nodes) {
381 SmallVector<Node *, 4> Worklist;
382 SmallPtrSet<Node *, 4> Visited;
383 Worklist.push_back(N);
384 while (!Worklist.empty()) {
385 Node *VisitingNode = Worklist.pop_back_val();
386 if (!Visited.insert(VisitingNode).second)
387 continue;
388 for (Edge &E : **VisitingNode)
389 Worklist.push_back(&E.getNode());
390 }
391 for (Node *NodeToVisit : Nodes) {
392 assert(Visited.contains(NodeToVisit) &&
393 "Cannot reach all nodes within RefSCC");
394 }
395 }
396#endif
397}
398#endif
399
400bool LazyCallGraph::RefSCC::isParentOf(const RefSCC &RC) const {
401 if (&RC == this)
402 return false;
403
404 // Search all edges to see if this is a parent.
405 for (SCC &C : *this)
406 for (Node &N : C)
407 for (Edge &E : *N)
408 if (G->lookupRefSCC(E.getNode()) == &RC)
409 return true;
410
411 return false;
412}
413
414bool LazyCallGraph::RefSCC::isAncestorOf(const RefSCC &RC) const {
415 if (&RC == this)
416 return false;
417
418 // For each descendant of this RefSCC, see if one of its children is the
419 // argument. If not, add that descendant to the worklist and continue
420 // searching.
421 SmallVector<const RefSCC *, 4> Worklist;
422 SmallPtrSet<const RefSCC *, 4> Visited;
423 Worklist.push_back(this);
424 Visited.insert(this);
425 do {
426 const RefSCC &DescendantRC = *Worklist.pop_back_val();
427 for (SCC &C : DescendantRC)
428 for (Node &N : C)
429 for (Edge &E : *N) {
430 auto *ChildRC = G->lookupRefSCC(E.getNode());
431 if (ChildRC == &RC)
432 return true;
433 if (!ChildRC || !Visited.insert(ChildRC).second)
434 continue;
435 Worklist.push_back(ChildRC);
436 }
437 } while (!Worklist.empty());
438
439 return false;
440}
441
442/// Generic helper that updates a postorder sequence of SCCs for a potentially
443/// cycle-introducing edge insertion.
444///
445/// A postorder sequence of SCCs of a directed graph has one fundamental
446/// property: all edges in the DAG of SCCs point "up" the sequence. That is,
447/// all edges in the SCC DAG point to prior SCCs in the sequence.
448///
449/// This routine both updates a postorder sequence and uses that sequence to
450/// compute the set of SCCs connected into a cycle. It should only be called to
451/// insert a "downward" edge which will require changing the sequence to
452/// restore it to a postorder.
453///
454/// When inserting an edge from an earlier SCC to a later SCC in some postorder
455/// sequence, all of the SCCs which may be impacted are in the closed range of
456/// those two within the postorder sequence. The algorithm used here to restore
457/// the state is as follows:
458///
459/// 1) Starting from the source SCC, construct a set of SCCs which reach the
460/// source SCC consisting of just the source SCC. Then scan toward the
461/// target SCC in postorder and for each SCC, if it has an edge to an SCC
462/// in the set, add it to the set. Otherwise, the source SCC is not
463/// a successor, move it in the postorder sequence to immediately before
464/// the source SCC, shifting the source SCC and all SCCs in the set one
465/// position toward the target SCC. Stop scanning after processing the
466/// target SCC.
467/// 2) If the source SCC is now past the target SCC in the postorder sequence,
468/// and thus the new edge will flow toward the start, we are done.
469/// 3) Otherwise, starting from the target SCC, walk all edges which reach an
470/// SCC between the source and the target, and add them to the set of
471/// connected SCCs, then recurse through them. Once a complete set of the
472/// SCCs the target connects to is known, hoist the remaining SCCs between
473/// the source and the target to be above the target. Note that there is no
474/// need to process the source SCC, it is already known to connect.
475/// 4) At this point, all of the SCCs in the closed range between the source
476/// SCC and the target SCC in the postorder sequence are connected,
477/// including the target SCC and the source SCC. Inserting the edge from
478/// the source SCC to the target SCC will form a cycle out of precisely
479/// these SCCs. Thus we can merge all of the SCCs in this closed range into
480/// a single SCC.
481///
482/// This process has various important properties:
483/// - Only mutates the SCCs when adding the edge actually changes the SCC
484/// structure.
485/// - Never mutates SCCs which are unaffected by the change.
486/// - Updates the postorder sequence to correctly satisfy the postorder
487/// constraint after the edge is inserted.
488/// - Only reorders SCCs in the closed postorder sequence from the source to
489/// the target, so easy to bound how much has changed even in the ordering.
490/// - Big-O is the number of edges in the closed postorder range of SCCs from
491/// source to target.
492///
493/// This helper routine, in addition to updating the postorder sequence itself
494/// will also update a map from SCCs to indices within that sequence.
495///
496/// The sequence and the map must operate on pointers to the SCC type.
497///
498/// Two callbacks must be provided. The first computes the subset of SCCs in
499/// the postorder closed range from the source to the target which connect to
500/// the source SCC via some (transitive) set of edges. The second computes the
501/// subset of the same range which the target SCC connects to via some
502/// (transitive) set of edges. Both callbacks should populate the set argument
503/// provided.
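///
/// Illustration: given the postorder sequence [S, X, Y, T] and a new edge from
/// S to T, the scan in step (1) keeps between S and T only the SCCs that
/// transitively reach S. If X has an edge to S but Y does not reach S, Y is
/// shifted to just before S, giving [Y, S, X, T]. If T in turn reaches X (and
/// thus S), then S, X and T form the new cycle; the helper returns the range
/// covering S and X, which the caller then merges into T.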
504template <typename SCCT, typename PostorderSequenceT, typename SCCIndexMapT,
505 typename ComputeSourceConnectedSetCallableT,
506 typename ComputeTargetConnectedSetCallableT>
507static iterator_range<typename PostorderSequenceT::iterator>
508updatePostorderSequenceForEdgeInsertion(
509 SCCT &SourceSCC, SCCT &TargetSCC, PostorderSequenceT &SCCs,
510 SCCIndexMapT &SCCIndices,
511 ComputeSourceConnectedSetCallableT ComputeSourceConnectedSet,
512 ComputeTargetConnectedSetCallableT ComputeTargetConnectedSet) {
513 int SourceIdx = SCCIndices[&SourceSCC];
514 int TargetIdx = SCCIndices[&TargetSCC];
515 assert(SourceIdx < TargetIdx && "Cannot have equal indices here!");
516
517 SmallPtrSet<SCCT *, 4> ConnectedSet;
518
519 // Compute the SCCs which (transitively) reach the source.
520 ComputeSourceConnectedSet(ConnectedSet);
521
522 // Partition the SCCs in this part of the post-order sequence so only SCCs
523 // connecting to the source remain between it and the target. This is
524 // a benign partition as it preserves postorder.
525 auto SourceI = std::stable_partition(
526 SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx + 1,
527 [&ConnectedSet](SCCT *C) { return !ConnectedSet.count(C); });
528 for (int I = SourceIdx, E = TargetIdx + 1; I < E; ++I)
529 SCCIndices.find(SCCs[I])->second = I;
530
531 // If the target doesn't connect to the source, then we've corrected the
532 // post-order and there are no cycles formed.
533 if (!ConnectedSet.count(&TargetSCC)) {
534 assert(SourceI > (SCCs.begin() + SourceIdx) &&
535 "Must have moved the source to fix the post-order.");
536 assert(*std::prev(SourceI) == &TargetSCC &&
537 "Last SCC to move should have bene the target.");
538
539 // Return an empty range at the target SCC indicating there is nothing to
540 // merge.
541 return make_range(std::prev(SourceI), std::prev(SourceI));
542 }
543
544 assert(SCCs[TargetIdx] == &TargetSCC &&
545 "Should not have moved target if connected!");
546 SourceIdx = SourceI - SCCs.begin();
547 assert(SCCs[SourceIdx] == &SourceSCC &&
548 "Bad updated index computation for the source SCC!");
549
550 // See whether there are any remaining intervening SCCs between the source
551 // and target. If so, we need to make sure they all are reachable from the
552 // target.
553 if (SourceIdx + 1 < TargetIdx) {
554 ConnectedSet.clear();
555 ComputeTargetConnectedSet(ConnectedSet);
556
557 // Partition SCCs so that only SCCs reached from the target remain between
558 // the source and the target. This preserves postorder.
559 auto TargetI = std::stable_partition(
560 SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1,
561 [&ConnectedSet](SCCT *C) { return ConnectedSet.count(C); });
562 for (int I = SourceIdx + 1, E = TargetIdx + 1; I < E; ++I)
563 SCCIndices.find(SCCs[I])->second = I;
564 TargetIdx = std::prev(TargetI) - SCCs.begin();
565 assert(SCCs[TargetIdx] == &TargetSCC &&
566 "Should always end with the target!");
567 }
568
569 // At this point, we know that connecting source to target forms a cycle
570 // because target connects back to source, and we know that all the SCCs
571 // between the source and target in the postorder sequence participate in that
572 // cycle.
573 return make_range(SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx);
574}
575
576bool LazyCallGraph::RefSCC::switchInternalEdgeToCall(
577 Node &SourceN, Node &TargetN,
578 function_ref<void(ArrayRef<SCC *> MergeSCCs)> MergeCB) {
579 assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
580 SmallVector<SCC *, 1> DeletedSCCs;
581
582#ifdef EXPENSIVE_CHECKS
583 verify();
584 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
585#endif
586
587 SCC &SourceSCC = *G->lookupSCC(SourceN);
588 SCC &TargetSCC = *G->lookupSCC(TargetN);
589
590 // If the two nodes are already part of the same SCC, we're also done as
591 // we've just added more connectivity.
592 if (&SourceSCC == &TargetSCC) {
593 SourceN->setEdgeKind(TargetN, Edge::Call);
594 return false; // No new cycle.
595 }
596
597 // At this point we leverage the postorder list of SCCs to detect when the
598 // insertion of an edge changes the SCC structure in any way.
599 //
600 // First and foremost, we can eliminate the need for any changes when the
601 // edge is toward the beginning of the postorder sequence because all edges
602 // flow in that direction already. Thus adding a new one cannot form a cycle.
603 int SourceIdx = SCCIndices[&SourceSCC];
604 int TargetIdx = SCCIndices[&TargetSCC];
605 if (TargetIdx < SourceIdx) {
606 SourceN->setEdgeKind(TargetN, Edge::Call);
607 return false; // No new cycle.
608 }
609
610 // Compute the SCCs which (transitively) reach the source.
611 auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
612#ifdef EXPENSIVE_CHECKS
613 // Check that the RefSCC is still valid before computing this as the
614 // results will be nonsensical if we've broken its invariants.
615 verify();
616#endif
617 ConnectedSet.insert(&SourceSCC);
618 auto IsConnected = [&](SCC &C) {
619 for (Node &N : C)
620 for (Edge &E : N->calls())
621 if (ConnectedSet.count(G->lookupSCC(E.getNode())))
622 return true;
623
624 return false;
625 };
626
627 for (SCC *C :
628 make_range(SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1))
629 if (IsConnected(*C))
630 ConnectedSet.insert(C);
631 };
632
633 // Use a normal worklist to find which SCCs the target connects to. We still
634 // bound the search based on the range in the postorder list we care about,
635 // but because this is forward connectivity we just "recurse" through the
636 // edges.
637 auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
638#ifdef EXPENSIVE_CHECKS
639 // Check that the RefSCC is still valid before computing this as the
640 // results will be nonsensical if we've broken its invariants.
641 verify();
642#endif
643 ConnectedSet.insert(&TargetSCC);
644 SmallVector<SCC *, 4> Worklist;
645 Worklist.push_back(&TargetSCC);
646 do {
647 SCC &C = *Worklist.pop_back_val();
648 for (Node &N : C)
649 for (Edge &E : *N) {
650 if (!E.isCall())
651 continue;
652 SCC &EdgeC = *G->lookupSCC(E.getNode());
653 if (&EdgeC.getOuterRefSCC() != this)
654 // Not in this RefSCC...
655 continue;
656 if (SCCIndices.find(&EdgeC)->second <= SourceIdx)
657 // Not in the postorder sequence between source and target.
658 continue;
659
660 if (ConnectedSet.insert(&EdgeC).second)
661 Worklist.push_back(&EdgeC);
662 }
663 } while (!Worklist.empty());
664 };
665
666 // Use a generic helper to update the postorder sequence of SCCs and return
667 // a range of any SCCs connected into a cycle by inserting this edge. This
668 // routine will also take care of updating the indices into the postorder
669 // sequence.
670 auto MergeRange = updatePostorderSequenceForEdgeInsertion(
671 SourceSCC, TargetSCC, SCCs, SCCIndices, ComputeSourceConnectedSet,
672 ComputeTargetConnectedSet);
673
674 // Run the user's callback on the merged SCCs before we actually merge them.
675 if (MergeCB)
676 MergeCB(ArrayRef(MergeRange.begin(), MergeRange.end()));
677
678 // If the merge range is empty, then adding the edge didn't actually form any
679 // new cycles. We're done.
680 if (MergeRange.empty()) {
681 // Now that the SCC structure is finalized, flip the kind to call.
682 SourceN->setEdgeKind(TargetN, Edge::Call);
683 return false; // No new cycle.
684 }
685
686#ifdef EXPENSIVE_CHECKS
687 // Before merging, check that the RefSCC remains valid after all the
688 // postorder updates.
689 verify();
690#endif
691
692 // Otherwise we need to merge all the SCCs in the cycle into a single result
693 // SCC.
694 //
695 // NB: We merge into the target because all of these functions were already
696 // reachable from the target, meaning any SCC-wide properties deduced about it
697 // other than the set of functions within it will not have changed.
698 for (SCC *C : MergeRange) {
699 assert(C != &TargetSCC &&
700 "We merge *into* the target and shouldn't process it here!");
701 SCCIndices.erase(C);
702 TargetSCC.Nodes.append(C->Nodes.begin(), C->Nodes.end());
703 for (Node *N : C->Nodes)
704 G->SCCMap[N] = &TargetSCC;
705 C->clear();
706 DeletedSCCs.push_back(C);
707 }
708
709 // Erase the merged SCCs from the list and update the indices of the
710 // remaining SCCs.
711 int IndexOffset = MergeRange.end() - MergeRange.begin();
712 auto EraseEnd = SCCs.erase(MergeRange.begin(), MergeRange.end());
713 for (SCC *C : make_range(EraseEnd, SCCs.end()))
714 SCCIndices[C] -= IndexOffset;
715
716 // Now that the SCC structure is finalized, flip the kind to call.
717 SourceN->setEdgeKind(TargetN, Edge::Call);
718
719 // And we're done, but we did form a new cycle.
720 return true;
721}
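// Typical use (a sketch, not something this file itself asserts): the CGSCC
// update logic calls this when a pass turns an existing reference into a
// direct call, e.g. after an indirect call is resolved to a known callee in
// the same RefSCC; the boolean result tells the caller whether SCCs were
// merged, and MergeCB gives it a chance to invalidate data for the merged SCCs.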
722
723void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN,
724 Node &TargetN) {
725 assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
726
727#ifdef EXPENSIVE_CHECKS
728 verify();
729 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
730#endif
731
732 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
733 assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
734 assert(G->lookupSCC(SourceN) != G->lookupSCC(TargetN) &&
735 "Source and Target must be in separate SCCs for this to be trivial!");
736
737 // Set the edge kind.
738 SourceN->setEdgeKind(TargetN, Edge::Ref);
739}
740
741iterator_range<LazyCallGraph::RefSCC::iterator>
742LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
743 assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
744
745#ifdef EXPENSIVE_CHECKS
746 verify();
747 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
748#endif
749
750 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
751 assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
752
753 SCC &TargetSCC = *G->lookupSCC(TargetN);
754 assert(G->lookupSCC(SourceN) == &TargetSCC && "Source and Target must be in "
755 "the same SCC to require the "
756 "full CG update.");
757
758 // Set the edge kind.
759 SourceN->setEdgeKind(TargetN, Edge::Ref);
760
761 // Otherwise we are removing a call edge from a single SCC. This may break
762 // the cycle. In order to compute the new set of SCCs, we need to do a small
763 // DFS over the nodes within the SCC to form any sub-cycles that remain as
764 // distinct SCCs and compute a postorder over the resulting SCCs.
765 //
766 // However, we specially handle the target node. The target node is known to
767 // reach all other nodes in the original SCC by definition. This means that
768 // we want the old SCC to be replaced with an SCC containing that node as it
769 // will be the root of whatever SCC DAG results from the DFS. Assumptions
770 // about an SCC such as the set of functions called will continue to hold,
771 // etc.
772
773 SCC &OldSCC = TargetSCC;
774 SmallVector<std::pair<Node *, EdgeSequence::call_iterator>, 16> DFSStack;
775 SmallVector<Node *, 16> PendingSCCStack;
776 SmallVector<SCC *, 4> NewSCCs;
777
778 // Prepare the nodes for a fresh DFS.
779 SmallVector<Node *, 16> Worklist;
780 Worklist.swap(OldSCC.Nodes);
781 for (Node *N : Worklist) {
782 N->DFSNumber = N->LowLink = 0;
783 G->SCCMap.erase(N);
784 }
785
786 // Force the target node to be in the old SCC. This also enables us to take
787 // a very significant short-cut in the standard Tarjan walk to re-form SCCs
788 // below: whenever we build an edge that reaches the target node, we know
789 // that the target node eventually connects back to all other nodes in our
790 // walk. As a consequence, we can detect and handle participants in that
791 // cycle without walking all the edges that form this connection, and instead
792 // by relying on the fundamental guarantee coming into this operation (all
793 // nodes are reachable from the target due to previously forming an SCC).
794 TargetN.DFSNumber = TargetN.LowLink = -1;
795 OldSCC.Nodes.push_back(&TargetN);
796 G->SCCMap[&TargetN] = &OldSCC;
797
798 // Scan down the stack and DFS across the call edges.
799 for (Node *RootN : Worklist) {
800 assert(DFSStack.empty() &&
801 "Cannot begin a new root with a non-empty DFS stack!");
802 assert(PendingSCCStack.empty() &&
803 "Cannot begin a new root with pending nodes for an SCC!");
804
805 // Skip any nodes we've already reached in the DFS.
806 if (RootN->DFSNumber != 0) {
807 assert(RootN->DFSNumber == -1 &&
808 "Shouldn't have any mid-DFS root nodes!");
809 continue;
810 }
811
812 RootN->DFSNumber = RootN->LowLink = 1;
813 int NextDFSNumber = 2;
814
815 DFSStack.emplace_back(RootN, (*RootN)->call_begin());
816 do {
817 auto [N, I] = DFSStack.pop_back_val();
818 auto E = (*N)->call_end();
819 while (I != E) {
820 Node &ChildN = I->getNode();
821 if (ChildN.DFSNumber == 0) {
822 // We haven't yet visited this child, so descend, pushing the current
823 // node onto the stack.
824 DFSStack.emplace_back(N, I);
825
826 assert(!G->SCCMap.count(&ChildN) &&
827 "Found a node with 0 DFS number but already in an SCC!");
828 ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
829 N = &ChildN;
830 I = (*N)->call_begin();
831 E = (*N)->call_end();
832 continue;
833 }
834
835 // Check for the child already being part of some component.
836 if (ChildN.DFSNumber == -1) {
837 if (G->lookupSCC(ChildN) == &OldSCC) {
838 // If the child is part of the old SCC, we know that it can reach
839 // every other node, so we have formed a cycle. Pull the entire DFS
840 // and pending stacks into it. See the comment above about setting
841 // up the old SCC for why we do this.
842 int OldSize = OldSCC.size();
843 OldSCC.Nodes.push_back(N);
844 OldSCC.Nodes.append(PendingSCCStack.begin(), PendingSCCStack.end());
845 PendingSCCStack.clear();
846 while (!DFSStack.empty())
847 OldSCC.Nodes.push_back(DFSStack.pop_back_val().first);
848 for (Node &N : drop_begin(OldSCC, OldSize)) {
849 N.DFSNumber = N.LowLink = -1;
850 G->SCCMap[&N] = &OldSCC;
851 }
852 N = nullptr;
853 break;
854 }
855
856 // If the child has already been added to some child component, it
857 // couldn't impact the low-link of this parent because it isn't
858 // connected, and thus its low-link isn't relevant so skip it.
859 ++I;
860 continue;
861 }
862
863 // Track the lowest linked child as the lowest link for this node.
864 assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
865 if (ChildN.LowLink < N->LowLink)
866 N->LowLink = ChildN.LowLink;
867
868 // Move to the next edge.
869 ++I;
870 }
871 if (!N)
872 // Cleared the DFS early, start another round.
873 break;
874
875 // We've finished processing N and its descendants, put it on our pending
876 // SCC stack to eventually get merged into an SCC of nodes.
877 PendingSCCStack.push_back(N);
878
879 // If this node is linked to some lower entry, continue walking up the
880 // stack.
881 if (N->LowLink != N->DFSNumber)
882 continue;
883
884 // Otherwise, we've completed an SCC. Append it to our post order list of
885 // SCCs.
886 int RootDFSNumber = N->DFSNumber;
887 // Find the range of the node stack by walking down until we pass the
888 // root DFS number.
889 auto SCCNodes = make_range(
890 PendingSCCStack.rbegin(),
891 find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
892 return N->DFSNumber < RootDFSNumber;
893 }));
894
895 // Form a new SCC out of these nodes and then clear them off our pending
896 // stack.
897 NewSCCs.push_back(G->createSCC(*this, SCCNodes));
898 for (Node &N : *NewSCCs.back()) {
899 N.DFSNumber = N.LowLink = -1;
900 G->SCCMap[&N] = NewSCCs.back();
901 }
902 PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
903 } while (!DFSStack.empty());
904 }
905
906 // Insert the remaining SCCs before the old one. The old SCC can reach all
907 // other SCCs we form because it contains the target node of the removed edge
908 // of the old SCC. This means that we will have edges into all the new SCCs,
909 // which means the old one must come last for postorder.
910 int OldIdx = SCCIndices[&OldSCC];
911 SCCs.insert(SCCs.begin() + OldIdx, NewSCCs.begin(), NewSCCs.end());
912
913 // Update the mapping from SCC* to index to use the new SCC*s, and remove the
914 // old SCC from the mapping.
915 for (int Idx = OldIdx, Size = SCCs.size(); Idx < Size; ++Idx)
916 SCCIndices[SCCs[Idx]] = Idx;
917
918 return make_range(SCCs.begin() + OldIdx,
919 SCCs.begin() + OldIdx + NewSCCs.size());
920}
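// Note: the returned range covers only the newly split-off SCCs, in postorder;
// the original SCC, which still contains the target node, now follows them in
// the sequence and is deliberately not part of the returned range.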
921
922void LazyCallGraph::RefSCC::switchOutgoingEdgeToCall(Node &SourceN,
923 Node &TargetN) {
924 assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
925
926 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
927 assert(G->lookupRefSCC(TargetN) != this &&
928 "Target must not be in this RefSCC.");
929#ifdef EXPENSIVE_CHECKS
930 assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
931 "Target must be a descendant of the Source.");
932#endif
933
934 // Edges between RefSCCs are the same regardless of call or ref, so we can
935 // just flip the edge here.
936 SourceN->setEdgeKind(TargetN, Edge::Call);
937
938#ifdef EXPENSIVE_CHECKS
939 verify();
940#endif
941}
942
943void LazyCallGraph::RefSCC::switchOutgoingEdgeToRef(Node &SourceN,
944 Node &TargetN) {
945 assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
946
947 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
948 assert(G->lookupRefSCC(TargetN) != this &&
949 "Target must not be in this RefSCC.");
950#ifdef EXPENSIVE_CHECKS
951 assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
952 "Target must be a descendant of the Source.");
953#endif
954
955 // Edges between RefSCCs are the same regardless of call or ref, so we can
956 // just flip the edge here.
957 SourceN->setEdgeKind(TargetN, Edge::Ref);
958
959#ifdef EXPENSIVE_CHECKS
960 verify();
961#endif
962}
963
964void LazyCallGraph::RefSCC::insertInternalRefEdge(Node &SourceN,
965 Node &TargetN) {
966 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
967 assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
968
969 SourceN->insertEdgeInternal(TargetN, Edge::Ref);
970
971#ifdef EXPENSIVE_CHECKS
972 verify();
973#endif
974}
975
976void LazyCallGraph::RefSCC::insertOutgoingEdge(Node &SourceN, Node &TargetN,
977 Edge::Kind EK) {
978 // First insert it into the caller.
979 SourceN->insertEdgeInternal(TargetN, EK);
980
981 assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
982
983 assert(G->lookupRefSCC(TargetN) != this &&
984 "Target must not be in this RefSCC.");
985#ifdef EXPENSIVE_CHECKS
986 assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
987 "Target must be a descendant of the Source.");
988#endif
989
990#ifdef EXPENSIVE_CHECKS
991 verify();
992#endif
993}
994
995SmallVector<LazyCallGraph::RefSCC *, 1>
996LazyCallGraph::RefSCC::insertIncomingRefEdge(Node &SourceN, Node &TargetN) {
997 assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
998 RefSCC &SourceC = *G->lookupRefSCC(SourceN);
999 assert(&SourceC != this && "Source must not be in this RefSCC.");
1000#ifdef EXPENSIVE_CHECKS
1001 assert(SourceC.isDescendantOf(*this) &&
1002 "Source must be a descendant of the Target.");
1003#endif
1004
1005 SmallVector<RefSCC *, 1> DeletedRefSCCs;
1006
1007#ifdef EXPENSIVE_CHECKS
1008 verify();
1009 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
1010#endif
1011
1012 int SourceIdx = G->RefSCCIndices[&SourceC];
1013 int TargetIdx = G->RefSCCIndices[this];
1014 assert(SourceIdx < TargetIdx &&
1015 "Postorder list doesn't see edge as incoming!");
1016
1017 // Compute the RefSCCs which (transitively) reach the source. We do this by
1018 // working backwards from the source using the parent set in each RefSCC,
1019 // skipping any RefSCCs that don't fall in the postorder range. This has the
1020 // advantage of walking the sparser parent edge (in high fan-out graphs) but
1021 // more importantly this removes examining all forward edges in all RefSCCs
1022 // within the postorder range which aren't in fact connected. Only connected
1023 // RefSCCs (and their edges) are visited here.
1024 auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
1025 Set.insert(&SourceC);
1026 auto IsConnected = [&](RefSCC &RC) {
1027 for (SCC &C : RC)
1028 for (Node &N : C)
1029 for (Edge &E : *N)
1030 if (Set.count(G->lookupRefSCC(E.getNode())))
1031 return true;
1032
1033 return false;
1034 };
1035
1036 for (RefSCC *C : make_range(G->PostOrderRefSCCs.begin() + SourceIdx + 1,
1037 G->PostOrderRefSCCs.begin() + TargetIdx + 1))
1038 if (IsConnected(*C))
1039 Set.insert(C);
1040 };
1041
1042 // Use a normal worklist to find which SCCs the target connects to. We still
1043 // bound the search based on the range in the postorder list we care about,
1044 // but because this is forward connectivity we just "recurse" through the
1045 // edges.
1046 auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
1047 Set.insert(this);
1048 SmallVector<RefSCC *, 4> Worklist;
1049 Worklist.push_back(this);
1050 do {
1051 RefSCC &RC = *Worklist.pop_back_val();
1052 for (SCC &C : RC)
1053 for (Node &N : C)
1054 for (Edge &E : *N) {
1055 RefSCC &EdgeRC = *G->lookupRefSCC(E.getNode());
1056 if (G->getRefSCCIndex(EdgeRC) <= SourceIdx)
1057 // Not in the postorder sequence between source and target.
1058 continue;
1059
1060 if (Set.insert(&EdgeRC).second)
1061 Worklist.push_back(&EdgeRC);
1062 }
1063 } while (!Worklist.empty());
1064 };
1065
1066 // Use a generic helper to update the postorder sequence of RefSCCs and return
1067 // a range of any RefSCCs connected into a cycle by inserting this edge. This
1068 // routine will also take care of updating the indices into the postorder
1069 // sequence.
1070 iterator_range<SmallVectorImpl<RefSCC *>::iterator> MergeRange =
1071 updatePostorderSequenceForEdgeInsertion(
1072 SourceC, *this, G->PostOrderRefSCCs, G->RefSCCIndices,
1073 ComputeSourceConnectedSet, ComputeTargetConnectedSet);
1074
1075 // Build a set, so we can do fast tests for whether a RefSCC will end up as
1076 // part of the merged RefSCC.
1077 SmallPtrSet<RefSCC *, 16> MergeSet(MergeRange.begin(), MergeRange.end());
1078
1079 // This RefSCC will always be part of that set, so just insert it here.
1080 MergeSet.insert(this);
1081
1082 // Now that we have identified all the SCCs which need to be merged into
1083 // a connected set with the inserted edge, merge all of them into this SCC.
1084 SmallVector<SCC *, 16> MergedSCCs;
1085 int SCCIndex = 0;
1086 for (RefSCC *RC : MergeRange) {
1087 assert(RC != this && "We're merging into the target RefSCC, so it "
1088 "shouldn't be in the range.");
1089
1090 // Walk the inner SCCs to update their up-pointer and walk all the edges to
1091 // update any parent sets.
1092 // FIXME: We should try to find a way to avoid this (rather expensive) edge
1093 // walk by updating the parent sets in some other manner.
1094 for (SCC &InnerC : *RC) {
1095 InnerC.OuterRefSCC = this;
1096 SCCIndices[&InnerC] = SCCIndex++;
1097 for (Node &N : InnerC)
1098 G->SCCMap[&N] = &InnerC;
1099 }
1100
1101 // Now merge in the SCCs. We can actually move here so try to reuse storage
1102 // the first time through.
1103 if (MergedSCCs.empty())
1104 MergedSCCs = std::move(RC->SCCs);
1105 else
1106 MergedSCCs.append(RC->SCCs.begin(), RC->SCCs.end());
1107 RC->SCCs.clear();
1108 DeletedRefSCCs.push_back(RC);
1109 }
1110
1111 // Append our original SCCs to the merged list and move it into place.
1112 for (SCC &InnerC : *this)
1113 SCCIndices[&InnerC] = SCCIndex++;
1114 MergedSCCs.append(SCCs.begin(), SCCs.end());
1115 SCCs = std::move(MergedSCCs);
1116
1117 // Remove the merged away RefSCCs from the post order sequence.
1118 for (RefSCC *RC : MergeRange)
1119 G->RefSCCIndices.erase(RC);
1120 int IndexOffset = MergeRange.end() - MergeRange.begin();
1121 auto EraseEnd =
1122 G->PostOrderRefSCCs.erase(MergeRange.begin(), MergeRange.end());
1123 for (RefSCC *RC : make_range(EraseEnd, G->PostOrderRefSCCs.end()))
1124 G->RefSCCIndices[RC] -= IndexOffset;
1125
1126 // At this point we have a merged RefSCC with a post-order SCCs list, just
1127 // connect the nodes to form the new edge.
1128 SourceN->insertEdgeInternal(TargetN, Edge::Ref);
1129
1130 // We return the list of SCCs which were merged so that callers can
1131 // invalidate any data they have associated with those SCCs. Note that these
1132 // SCCs are no longer in an interesting state (they are totally empty) but
1133 // the pointers will remain stable for the life of the graph itself.
1134 return DeletedRefSCCs;
1135}
1136
1137void LazyCallGraph::RefSCC::removeOutgoingEdge(Node &SourceN, Node &TargetN) {
1138 assert(G->lookupRefSCC(SourceN) == this &&
1139 "The source must be a member of this RefSCC.");
1140 assert(G->lookupRefSCC(TargetN) != this &&
1141 "The target must not be a member of this RefSCC");
1142
1143#ifdef EXPENSIVE_CHECKS
1144 verify();
1145 auto VerifyOnExit = make_scope_exit([&]() { verify(); });
1146#endif
1147
1148 // First remove it from the node.
1149 bool Removed = SourceN->removeEdgeInternal(TargetN);
1150 (void)Removed;
1151 assert(Removed && "Target not in the edge set for this caller?");
1152}
1153
1154SmallVector<LazyCallGraph::RefSCC *, 1>
1155LazyCallGraph::RefSCC::removeInternalRefEdge(Node &SourceN,
1156 ArrayRef<Node *> TargetNs) {
1157 // We return a list of the resulting *new* RefSCCs in post-order.
1158 SmallVector<RefSCC *, 1> Result;
1159
1160#ifdef EXPENSIVE_CHECKS
1161 // Verify the RefSCC is valid to start with and that either we return an empty
1162 // list of result RefSCCs and this RefSCC remains valid, or we return new
1163 // RefSCCs and this RefSCC is dead.
1164 verify();
1165 auto VerifyOnExit = make_scope_exit([&]() {
1166 // If we didn't replace our RefSCC with new ones, check that this one
1167 // remains valid.
1168 if (G)
1169 verify();
1170 });
1171#endif
1172
1173 // First remove the actual edges.
1174 for (Node *TargetN : TargetNs) {
1175 assert(!(*SourceN)[*TargetN].isCall() &&
1176 "Cannot remove a call edge, it must first be made a ref edge");
1177
1178 bool Removed = SourceN->removeEdgeInternal(*TargetN);
1179 (void)Removed;
1180 assert(Removed && "Target not in the edge set for this caller?");
1181 }
1182
1183 // Direct self references don't impact the ref graph at all.
1184 if (llvm::all_of(TargetNs,
1185 [&](Node *TargetN) { return &SourceN == TargetN; }))
1186 return Result;
1187
1188 // If all targets are in the same SCC as the source, because no call edges
1189 // were removed there is no RefSCC structure change.
1190 SCC &SourceC = *G->lookupSCC(SourceN);
1191 if (llvm::all_of(TargetNs, [&](Node *TargetN) {
1192 return G->lookupSCC(*TargetN) == &SourceC;
1193 }))
1194 return Result;
1195
1196 // We build somewhat synthetic new RefSCCs by providing a postorder mapping
1197 // for each inner SCC. We store these inside the low-link field of the nodes
1198 // rather than associated with SCCs because this saves a round-trip through
1199 // the node->SCC map and in the common case, SCCs are small. We will verify
1200 // that we always give the same number to every node in the SCC such that
1201 // these are equivalent.
1202 int PostOrderNumber = 0;
1203
1204 // Reset all the other nodes to prepare for a DFS over them, and add them to
1205 // our worklist.
1206 SmallVector<Node *, 8> Worklist;
1207 for (SCC *C : SCCs) {
1208 for (Node &N : *C)
1209 N.DFSNumber = N.LowLink = 0;
1210
1211 Worklist.append(C->Nodes.begin(), C->Nodes.end());
1212 }
1213
1214 // Track the number of nodes in this RefSCC so that we can quickly recognize
1215 // an important special case of the edge removal not breaking the cycle of
1216 // this RefSCC.
1217 const int NumRefSCCNodes = Worklist.size();
1218
1220 SmallVector<Node *, 4> PendingRefSCCStack;
1221 do {
1222 assert(DFSStack.empty() &&
1223 "Cannot begin a new root with a non-empty DFS stack!");
1224 assert(PendingRefSCCStack.empty() &&
1225 "Cannot begin a new root with pending nodes for an SCC!");
1226
1227 Node *RootN = Worklist.pop_back_val();
1228 // Skip any nodes we've already reached in the DFS.
1229 if (RootN->DFSNumber != 0) {
1230 assert(RootN->DFSNumber == -1 &&
1231 "Shouldn't have any mid-DFS root nodes!");
1232 continue;
1233 }
1234
1235 RootN->DFSNumber = RootN->LowLink = 1;
1236 int NextDFSNumber = 2;
1237
1238 DFSStack.emplace_back(RootN, (*RootN)->begin());
1239 do {
1240 auto [N, I] = DFSStack.pop_back_val();
1241 auto E = (*N)->end();
1242
1243 assert(N->DFSNumber != 0 && "We should always assign a DFS number "
1244 "before processing a node.");
1245
1246 while (I != E) {
1247 Node &ChildN = I->getNode();
1248 if (ChildN.DFSNumber == 0) {
1249 // Mark that we should start at this child when next this node is the
1250 // top of the stack. We don't start at the next child to ensure this
1251 // child's lowlink is reflected.
1252 DFSStack.emplace_back(N, I);
1253
1254 // Continue, resetting to the child node.
1255 ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
1256 N = &ChildN;
1257 I = ChildN->begin();
1258 E = ChildN->end();
1259 continue;
1260 }
1261 if (ChildN.DFSNumber == -1) {
1262 // If this child isn't currently in this RefSCC, no need to process
1263 // it.
1264 ++I;
1265 continue;
1266 }
1267
1268 // Track the lowest link of the children, if any are still in the stack.
1269 // Any child not on the stack will have a LowLink of -1.
1270 assert(ChildN.LowLink != 0 &&
1271 "Low-link must not be zero with a non-zero DFS number.");
1272 if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
1273 N->LowLink = ChildN.LowLink;
1274 ++I;
1275 }
1276
1277 // We've finished processing N and its descendants, put it on our pending
1278 // stack to eventually get merged into a RefSCC.
1279 PendingRefSCCStack.push_back(N);
1280
1281 // If this node is linked to some lower entry, continue walking up the
1282 // stack.
1283 if (N->LowLink != N->DFSNumber) {
1284 assert(!DFSStack.empty() &&
1285 "We never found a viable root for a RefSCC to pop off!");
1286 continue;
1287 }
1288
1289 // Otherwise, form a new RefSCC from the top of the pending node stack.
1290 int RefSCCNumber = PostOrderNumber++;
1291 int RootDFSNumber = N->DFSNumber;
1292
1293 // Find the range of the node stack by walking down until we pass the
1294 // root DFS number. Update the DFS numbers and low link numbers in the
1295 // process to avoid re-walking this list where possible.
1296 auto StackRI = find_if(reverse(PendingRefSCCStack), [&](Node *N) {
1297 if (N->DFSNumber < RootDFSNumber)
1298 // We've found the bottom.
1299 return true;
1300
1301 // Update this node and keep scanning.
1302 N->DFSNumber = -1;
1303 // Save the post-order number in the lowlink field so that we can use
1304 // it to map SCCs into new RefSCCs after we finish the DFS.
1305 N->LowLink = RefSCCNumber;
1306 return false;
1307 });
1308 auto RefSCCNodes = make_range(StackRI.base(), PendingRefSCCStack.end());
1309
1310 // If we find a cycle containing all nodes originally in this RefSCC then
1311 // the removal hasn't changed the structure at all. This is an important
1312 // special case, and we can directly exit the entire routine more
1313 // efficiently as soon as we discover it.
1314 if (llvm::size(RefSCCNodes) == NumRefSCCNodes) {
1315 // Clear out the low link field as we won't need it.
1316 for (Node *N : RefSCCNodes)
1317 N->LowLink = -1;
1318 // Return the empty result immediately.
1319 return Result;
1320 }
1321
1322 // We've already marked the nodes internally with the RefSCC number so
1323 // just clear them off the stack and continue.
1324 PendingRefSCCStack.erase(RefSCCNodes.begin(), PendingRefSCCStack.end());
1325 } while (!DFSStack.empty());
1326
1327 assert(DFSStack.empty() && "Didn't flush the entire DFS stack!");
1328 assert(PendingRefSCCStack.empty() && "Didn't flush all pending nodes!");
1329 } while (!Worklist.empty());
1330
1331 assert(PostOrderNumber > 1 &&
1332 "Should never finish the DFS when the existing RefSCC remains valid!");
1333
1334 // Otherwise we create a collection of new RefSCC nodes and build
1335 // a radix-sort style map from postorder number to these new RefSCCs. We then
1336 // append SCCs to each of these RefSCCs in the order they occurred in the
1337 // original SCCs container.
1338 for (int I = 0; I < PostOrderNumber; ++I)
1339 Result.push_back(G->createRefSCC(*G));
1340
1341 // Insert the resulting postorder sequence into the global graph postorder
1342 // sequence before the current RefSCC in that sequence, and then remove the
1343 // current one.
1344 //
1345 // FIXME: It'd be nice to change the APIs so that we returned an iterator
1346 // range over the global postorder sequence and generally use that sequence
1347 // rather than building a separate result vector here.
1348 int Idx = G->getRefSCCIndex(*this);
1349 G->PostOrderRefSCCs.erase(G->PostOrderRefSCCs.begin() + Idx);
1350 G->PostOrderRefSCCs.insert(G->PostOrderRefSCCs.begin() + Idx, Result.begin(),
1351 Result.end());
1352 for (int I : seq<int>(Idx, G->PostOrderRefSCCs.size()))
1353 G->RefSCCIndices[G->PostOrderRefSCCs[I]] = I;
1354
1355 for (SCC *C : SCCs) {
1356 // We store the SCC number in the node's low-link field above.
1357 int SCCNumber = C->begin()->LowLink;
1358 // Clear out all the SCC's node's low-link fields now that we're done
1359 // using them as side-storage.
1360 for (Node &N : *C) {
1361 assert(N.LowLink == SCCNumber &&
1362 "Cannot have different numbers for nodes in the same SCC!");
1363 N.LowLink = -1;
1364 }
1365
1366 RefSCC &RC = *Result[SCCNumber];
1367 int SCCIndex = RC.SCCs.size();
1368 RC.SCCs.push_back(C);
1369 RC.SCCIndices[C] = SCCIndex;
1370 C->OuterRefSCC = &RC;
1371 }
1372
1373 // Now that we've moved things into the new RefSCCs, clear out our current
1374 // one.
1375 G = nullptr;
1376 SCCs.clear();
1377 SCCIndices.clear();
1378
1379#ifdef EXPENSIVE_CHECKS
1380 // Verify the new RefSCCs we've built.
1381 for (RefSCC *RC : Result)
1382 RC->verify();
1383#endif
1384
1385 // Return the new list of SCCs.
1386 return Result;
1387}
1388
1389void LazyCallGraph::insertTrivialCallEdge(Node &SourceN,
1390 Node &TargetN) {
1391#ifdef EXPENSIVE_CHECKS
1392 auto ExitVerifier = make_scope_exit([this] { verify(); });
1393
1394 // Check that we aren't breaking some invariants of the SCC graph. Note that
1395 // this is quadratic in the number of edges in the call graph!
1396 SCC &SourceC = *G->lookupSCC(SourceN);
1397 SCC &TargetC = *G->lookupSCC(TargetN);
1398 if (&SourceC != &TargetC)
1399 assert(SourceC.isAncestorOf(TargetC) &&
1400 "Call edge is not trivial in the SCC graph!");
1401#endif
1402
1403 // First insert it into the source or find the existing edge.
1404 auto [Iterator, Inserted] =
1405 SourceN->EdgeIndexMap.try_emplace(&TargetN, SourceN->Edges.size());
1406 if (!Inserted) {
1407 // Already an edge, just update it.
1408 Edge &E = SourceN->Edges[Iterator->second];
1409 if (E.isCall())
1410 return; // Nothing to do!
1411 E.setKind(Edge::Call);
1412 } else {
1413 // Create the new edge.
1414 SourceN->Edges.emplace_back(TargetN, Edge::Call);
1415 }
1416}
1417
1418void LazyCallGraph::insertTrivialRefEdge(Node &SourceN, Node &TargetN) {
1419#ifdef EXPENSIVE_CHECKS
1420 auto ExitVerifier = make_scope_exit([this] { verify(); });
1421
1422 // Check that we aren't breaking some invariants of the RefSCC graph.
1423 RefSCC &SourceRC = *G->lookupRefSCC(SourceN);
1424 RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
1425 if (&SourceRC != &TargetRC)
1426 assert(SourceRC.isAncestorOf(TargetRC) &&
1427 "Ref edge is not trivial in the RefSCC graph!");
1428#endif
1429
1430 // First insert it into the source or find the existing edge.
1431 auto [Iterator, Inserted] =
1432 SourceN->EdgeIndexMap.try_emplace(&TargetN, SourceN->Edges.size());
1433 (void)Iterator;
1434 if (!Inserted)
1435 // Already an edge, we're done.
1436 return;
1437
1438 // Create the new edge.
1439 SourceN->Edges.emplace_back(TargetN, Edge::Ref);
1440}
1441
1442void LazyCallGraph::replaceNodeFunction(Node &N, Function &NewF) {
1443 Function &OldF = N.getFunction();
1444
1445#ifdef EXPENSIVE_CHECKS
1446 auto ExitVerifier = make_scope_exit([this] { verify(); });
1447
1448 assert(G->lookupRefSCC(N) == this &&
1449 "Cannot replace the function of a node outside this RefSCC.");
1450
1451 assert(G->NodeMap.find(&NewF) == G->NodeMap.end() &&
1452 "Must not have already walked the new function!'");
1453
1454 // It is important that this replacement not introduce graph changes so we
1455 // insist that the caller has already removed every use of the original
1456 // function and that all uses of the new function correspond to existing
1457 // edges in the graph. The common and expected way to use this is when
1458 // replacing the function itself in the IR without changing the call graph
1459 // shape and just updating the analysis based on that.
1460 assert(&OldF != &NewF && "Cannot replace a function with itself!");
1461 assert(OldF.use_empty() &&
1462 "Must have moved all uses from the old function to the new!");
1463#endif
1464
1465 N.replaceFunction(NewF);
1466
1467 // Update various call graph maps.
1468 G->NodeMap.erase(&OldF);
1469 G->NodeMap[&NewF] = &N;
1470
1471 // Update lib functions.
1472 if (G->isLibFunction(OldF)) {
1473 G->LibFunctions.remove(&OldF);
1474 G->LibFunctions.insert(&NewF);
1475 }
1476}
1477
1478void LazyCallGraph::insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK) {
1479 assert(SCCMap.empty() &&
1480 "This method cannot be called after SCCs have been formed!");
1481
1482 return SourceN->insertEdgeInternal(TargetN, EK);
1483}
1484
1485void LazyCallGraph::removeEdge(Node &SourceN, Node &TargetN) {
1486 assert(SCCMap.empty() &&
1487 "This method cannot be called after SCCs have been formed!");
1488
1489 bool Removed = SourceN->removeEdgeInternal(TargetN);
1490 (void)Removed;
1491 assert(Removed && "Target not in the edge set for this caller?");
1492}
1493
1494void LazyCallGraph::removeDeadFunction(Function &F) {
1495 // FIXME: This is unnecessarily restrictive. We should be able to remove
1496 // functions which recursively call themselves.
1497 assert(F.hasZeroLiveUses() &&
1498 "This routine should only be called on trivially dead functions!");
1499
1500 // We shouldn't remove library functions as they are never really dead while
1501 // the call graph is in use -- every function definition refers to them.
1503 "Must not remove lib functions from the call graph!");
1504
1505 auto NI = NodeMap.find(&F);
1506 if (NI == NodeMap.end())
1507 // Not in the graph at all!
1508 return;
1509
1510 Node &N = *NI->second;
1511
1512 // Cannot remove a function which has yet to be visited in the DFS walk, so
1513 // if we have a node at all then we must have an SCC and RefSCC.
1514 auto CI = SCCMap.find(&N);
1515 assert(CI != SCCMap.end() &&
1516 "Tried to remove a node without an SCC after DFS walk started!");
1517 SCC &C = *CI->second;
1518 RefSCC *RC = &C.getOuterRefSCC();
1519
1520 // In extremely rare cases, we can delete a dead function which is still in a
1521 // non-trivial RefSCC. This can happen due to spurious ref edges sticking
1522 // around after an IR function reference is removed.
1523 if (RC->size() != 1) {
1524 SmallVector<Node *, 0> NodesInRC;
1525 for (SCC &OtherC : *RC) {
1526 for (Node &OtherN : OtherC)
1527 NodesInRC.push_back(&OtherN);
1528 }
1529 for (Node *OtherN : NodesInRC) {
1530 if ((*OtherN)->lookup(N)) {
1531 auto NewRefSCCs =
1532 RC->removeInternalRefEdge(*OtherN, ArrayRef<Node *>(&N));
1533 // If we've split into multiple RefSCCs, RC is now invalid and the
1534 // RefSCC containing C will be different.
1535 if (!NewRefSCCs.empty())
1536 RC = &C.getOuterRefSCC();
1537 }
1538 }
1539 }
1540
1541 NodeMap.erase(NI);
1542 EntryEdges.removeEdgeInternal(N);
1543 SCCMap.erase(CI);
1544
1545 // This node must be the only member of its SCC as it has no callers, and
1546 // that SCC must be the only member of a RefSCC as it has no references.
1547 // Validate these properties first.
1548 assert(C.size() == 1 && "Dead functions must be in a singular SCC");
1549 assert(RC->size() == 1 && "Dead functions must be in a singular RefSCC");
1550
1551 // Finally clear out all the data structures from the node down through the
1552 // components. postorder_ref_scc_iterator will skip empty RefSCCs, so no need
1553 // to adjust LazyCallGraph data structures.
1554 N.clear();
1555 N.G = nullptr;
1556 N.F = nullptr;
1557 C.clear();
1558 RC->clear();
1559 RC->G = nullptr;
1560
1561 // Nothing to delete as all the objects are allocated in stable bump pointer
1562 // allocators.
1563}
1564
1565// Gets the Edge::Kind from one function to another by looking at the function's
1566// instructions. Asserts if there is no edge.
1567// Useful for determining what type of edge should exist between functions when
1568// the edge hasn't been created yet.
1569static LazyCallGraph::Edge::Kind getEdgeKind(Function &OriginalFunction,
1570 Function &NewFunction) {
1571 // In release builds, assume that if there are no direct calls to the new
1572 // function, then there is a ref edge. In debug builds, keep track of
1573 // references to assert that there is actually a ref edge if there is no call
1574 // edge.
1575#ifndef NDEBUG
1576 SmallVector<Constant *, 16> Worklist;
1577 SmallPtrSet<Constant *, 16> Visited;
1578#endif
1579
1580 for (Instruction &I : instructions(OriginalFunction)) {
1581 if (auto *CB = dyn_cast<CallBase>(&I)) {
1582 if (Function *Callee = CB->getCalledFunction()) {
1583 if (Callee == &NewFunction)
1584 return LazyCallGraph::Edge::Kind::Call;
1585 }
1586 }
1587#ifndef NDEBUG
1588 for (Value *Op : I.operand_values()) {
1589 if (Constant *C = dyn_cast<Constant>(Op)) {
1590 if (Visited.insert(C).second)
1591 Worklist.push_back(C);
1592 }
1593 }
1594#endif
1595 }
1596
1597#ifndef NDEBUG
1598 bool FoundNewFunction = false;
1599 LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &F) {
1600 if (&F == &NewFunction)
1601 FoundNewFunction = true;
1602 });
1603 assert(FoundNewFunction && "No edge from original function to new function");
1604#endif
1605
1606 return LazyCallGraph::Edge::Kind::Ref;
1607}
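// For illustration: if OriginalFunction still contains a direct call to
// NewFunction after the split, the loop above returns Edge::Kind::Call. If
// OriginalFunction only captures NewFunction's address (say, by storing it
// into a function pointer), no call edge is found; the reference walk in the
// NDEBUG block is what justifies falling through to Edge::Kind::Ref.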
1608
1609void LazyCallGraph::addSplitFunction(Function &OriginalFunction,
1610 Function &NewFunction) {
1611 assert(lookup(OriginalFunction) &&
1612 "Original function's node should already exist");
1613 Node &OriginalN = get(OriginalFunction);
1614 SCC *OriginalC = lookupSCC(OriginalN);
1615 RefSCC *OriginalRC = lookupRefSCC(OriginalN);
1616
1617#ifdef EXPENSIVE_CHECKS
1618 OriginalRC->verify();
1619 auto VerifyOnExit = make_scope_exit([&]() { OriginalRC->verify(); });
1620#endif
1621
1622 assert(!lookup(NewFunction) &&
1623 "New function's node should not already exist");
1624 Node &NewN = initNode(NewFunction);
1625
1626 Edge::Kind EK = getEdgeKind(OriginalFunction, NewFunction);
1627
1628 SCC *NewC = nullptr;
1629 for (Edge &E : *NewN) {
1630 Node &EN = E.getNode();
1631 if (EK == Edge::Kind::Call && E.isCall() && lookupSCC(EN) == OriginalC) {
1632 // If the edge to the new function is a call edge and there is a call edge
1633 // from the new function to any function in the original function's SCC,
1634 // it is in the same SCC (and RefSCC) as the original function.
1635 NewC = OriginalC;
1636 NewC->Nodes.push_back(&NewN);
1637 break;
1638 }
1639 }
1640
1641 if (!NewC) {
1642 for (Edge &E : *NewN) {
1643 Node &EN = E.getNode();
1644 if (lookupRefSCC(EN) == OriginalRC) {
1645 // If there is any edge from the new function to any function in the
1646 // original function's RefSCC, it is in the same RefSCC as the original
1647 // function but a new SCC.
1648 RefSCC *NewRC = OriginalRC;
1649 NewC = createSCC(*NewRC, SmallVector<Node *, 1>({&NewN}));
1650
1651 // The new function's SCC is not the same as the original function's
1652 // SCC, since that case was handled earlier. If the edge from the
1653 // original function to the new function was a call edge, then we need
1654 // to insert the newly created function's SCC before the original
1655 // function's SCC. Otherwise, either the new SCC comes after the
1656 // original function's SCC, or it doesn't matter, and in both cases we
1657 // can add it to the very end.
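// Worked example (illustrative): if OriginalC sits at SCC index 2 and EK is
// a call edge, InsertIndex is 2, so NewC lands immediately before OriginalC
// and every SCC from index 2 onward has its SCCIndices entry bumped by the
// loop below; for a ref edge, InsertIndex is SCCIndices.size() and NewC is
// simply appended.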
1658 int InsertIndex = EK == Edge::Kind::Call ? NewRC->SCCIndices[OriginalC]
1659 : NewRC->SCCIndices.size();
1660 NewRC->SCCs.insert(NewRC->SCCs.begin() + InsertIndex, NewC);
1661 for (int I = InsertIndex, Size = NewRC->SCCs.size(); I < Size; ++I)
1662 NewRC->SCCIndices[NewRC->SCCs[I]] = I;
1663
1664 break;
1665 }
1666 }
1667 }
1668
1669 if (!NewC) {
1670 // We didn't find any edges back to the original function's RefSCC, so the
1671 // new function belongs in a new RefSCC. The new RefSCC goes before the
1672 // original function's RefSCC.
1673 RefSCC *NewRC = createRefSCC(*this);
1674 NewC = createSCC(*NewRC, SmallVector<Node *, 1>({&NewN}));
1675 NewRC->SCCIndices[NewC] = 0;
1676 NewRC->SCCs.push_back(NewC);
1677 auto OriginalRCIndex = RefSCCIndices.find(OriginalRC)->second;
1678 PostOrderRefSCCs.insert(PostOrderRefSCCs.begin() + OriginalRCIndex, NewRC);
1679 for (int I = OriginalRCIndex, Size = PostOrderRefSCCs.size(); I < Size; ++I)
1680 RefSCCIndices[PostOrderRefSCCs[I]] = I;
1681 }
1682
1683 SCCMap[&NewN] = NewC;
1684
1685 OriginalN->insertEdgeInternal(NewN, EK);
1686}
1687
1688void LazyCallGraph::addSplitRefRecursiveFunctions(
1689 Function &OriginalFunction, ArrayRef<Function *> NewFunctions) {
1690 assert(!NewFunctions.empty() && "Can't add zero functions");
1691 assert(lookup(OriginalFunction) &&
1692 "Original function's node should already exist");
1693 Node &OriginalN = get(OriginalFunction);
1694 RefSCC *OriginalRC = lookupRefSCC(OriginalN);
1695
1696#ifdef EXPENSIVE_CHECKS
1697 OriginalRC->verify();
1698 auto VerifyOnExit = make_scope_exit([&]() {
1699 OriginalRC->verify();
1700 for (Function *NewFunction : NewFunctions)
1701 lookupRefSCC(get(*NewFunction))->verify();
1702 });
1703#endif
1704
1705 bool ExistsRefToOriginalRefSCC = false;
1706
1707 for (Function *NewFunction : NewFunctions) {
1708 Node &NewN = initNode(*NewFunction);
1709
1710 OriginalN->insertEdgeInternal(NewN, Edge::Kind::Ref);
1711
1712 // Check if there is any edge from any new function back to any function in
1713 // the original function's RefSCC.
1714 for (Edge &E : *NewN) {
1715 if (lookupRefSCC(E.getNode()) == OriginalRC) {
1716 ExistsRefToOriginalRefSCC = true;
1717 break;
1718 }
1719 }
1720 }
1721
1722 RefSCC *NewRC;
1723 if (ExistsRefToOriginalRefSCC) {
1724 // If there is any edge from any new function to any function in the
1725 // original function's RefSCC, all new functions will be in the same RefSCC
1726 // as the original function.
1727 NewRC = OriginalRC;
1728 } else {
1729 // Otherwise the new functions are in their own RefSCC.
1730 NewRC = createRefSCC(*this);
1731 // The new RefSCC goes before the original function's RefSCC in postorder
1732 // since there are only edges from the original function's RefSCC to the new
1733 // RefSCC.
1734 auto OriginalRCIndex = RefSCCIndices.find(OriginalRC)->second;
1735 PostOrderRefSCCs.insert(PostOrderRefSCCs.begin() + OriginalRCIndex, NewRC);
1736 for (int I = OriginalRCIndex, Size = PostOrderRefSCCs.size(); I < Size; ++I)
1737 RefSCCIndices[PostOrderRefSCCs[I]] = I;
1738 }
1739
1740 for (Function *NewFunction : NewFunctions) {
1741 Node &NewN = get(*NewFunction);
1742 // Each new function is in its own new SCC. The original function can only
1743 // have a ref edge to new functions, and no other existing functions can
1744 // have references to new functions. Each new function only has a ref edge
1745 // to the other new functions.
1746 SCC *NewC = createSCC(*NewRC, SmallVector<Node *, 1>({&NewN}));
1747 // The new SCCs are either sibling SCCs or parent SCCs to all other existing
1748 // SCCs in the RefSCC. Either way, they can go at the back of the postorder
1749 // SCC list.
1750 auto Index = NewRC->SCCIndices.size();
1751 NewRC->SCCIndices[NewC] = Index;
1752 NewRC->SCCs.push_back(NewC);
1753 SCCMap[&NewN] = NewC;
1754 }
1755
1756#ifndef NDEBUG
1757 for (Function *F1 : NewFunctions) {
1758 assert(getEdgeKind(OriginalFunction, *F1) == Edge::Kind::Ref &&
1759 "Expected ref edges from original function to every new function");
1760 Node &N1 = get(*F1);
1761 for (Function *F2 : NewFunctions) {
1762 if (F1 == F2)
1763 continue;
1764 Node &N2 = get(*F2);
1765 assert(!N1->lookup(N2)->isCall() &&
1766 "Edges between new functions must be ref edges");
1767 }
1768 }
1769#endif
1770}
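// Usage sketch (illustrative; names are hypothetical): a pass that outlines
// two helpers from F which only reference each other and are only referenced
// by F would register them with:
//
//   CG.addSplitRefRecursiveFunctions(F, {Helper1, Helper2});
//
// after which F has ref edges to both helpers and each helper sits in its own
// SCC of the (possibly new) RefSCC, matching the checks in the NDEBUG block
// above.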
1771
1772LazyCallGraph::Node &LazyCallGraph::insertInto(Function &F, Node *&MappedN) {
1773 return *new (MappedN = BPA.Allocate()) Node(*this, F);
1774}
1775
1776void LazyCallGraph::updateGraphPtrs() {
1777 // Walk the node map to update the nodes' graph pointers. The iteration
1778 // order is unstable, but it has no effect on the result, so this is correct.
1779 for (auto &FunctionNodePair : NodeMap)
1780 FunctionNodePair.second->G = this;
1781
1782 for (auto *RC : PostOrderRefSCCs)
1783 RC->G = this;
1784}
1785
1786LazyCallGraph::Node &LazyCallGraph::initNode(Function &F) {
1787 Node &N = get(F);
1788 N.DFSNumber = N.LowLink = -1;
1789 N.populate();
1790 NodeMap[&F] = &N;
1791 return N;
1792}
1793
1794template <typename RootsT, typename GetBeginT, typename GetEndT,
1795 typename GetNodeT, typename FormSCCCallbackT>
1796void LazyCallGraph::buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
1797 GetEndT &&GetEnd, GetNodeT &&GetNode,
1798 FormSCCCallbackT &&FormSCC) {
1799 using EdgeItT = decltype(GetBegin(std::declval<Node &>()));
1800
1801 SmallVector<std::pair<Node *, EdgeItT>, 16> DFSStack;
1802 SmallVector<Node *, 16> PendingSCCStack;
1803
1804 // Scan down the stack and DFS across the call edges.
1805 for (Node *RootN : Roots) {
1806 assert(DFSStack.empty() &&
1807 "Cannot begin a new root with a non-empty DFS stack!");
1808 assert(PendingSCCStack.empty() &&
1809 "Cannot begin a new root with pending nodes for an SCC!");
1810
1811 // Skip any nodes we've already reached in the DFS.
1812 if (RootN->DFSNumber != 0) {
1813 assert(RootN->DFSNumber == -1 &&
1814 "Shouldn't have any mid-DFS root nodes!");
1815 continue;
1816 }
1817
1818 RootN->DFSNumber = RootN->LowLink = 1;
1819 int NextDFSNumber = 2;
1820
1821 DFSStack.emplace_back(RootN, GetBegin(*RootN));
1822 do {
1823 auto [N, I] = DFSStack.pop_back_val();
1824 auto E = GetEnd(*N);
1825 while (I != E) {
1826 Node &ChildN = GetNode(I);
1827 if (ChildN.DFSNumber == 0) {
1828 // We haven't yet visited this child, so descend, pushing the current
1829 // node onto the stack.
1830 DFSStack.emplace_back(N, I);
1831
1832 ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
1833 N = &ChildN;
1834 I = GetBegin(*N);
1835 E = GetEnd(*N);
1836 continue;
1837 }
1838
1839 // If the child has already been added to some child component, it
1840 // couldn't impact the low-link of this parent because it isn't
1841 // connected, and thus its low-link isn't relevant so skip it.
1842 if (ChildN.DFSNumber == -1) {
1843 ++I;
1844 continue;
1845 }
1846
1847 // Track the lowest linked child as the lowest link for this node.
1848 assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
1849 if (ChildN.LowLink < N->LowLink)
1850 N->LowLink = ChildN.LowLink;
1851
1852 // Move to the next edge.
1853 ++I;
1854 }
1855
1856 // We've finished processing N and its descendants, put it on our pending
1857 // SCC stack to eventually get merged into an SCC of nodes.
1858 PendingSCCStack.push_back(N);
1859
1860 // If this node is linked to some lower entry, continue walking up the
1861 // stack.
1862 if (N->LowLink != N->DFSNumber)
1863 continue;
1864
1865 // Otherwise, we've completed an SCC. Append it to our post order list of
1866 // SCCs.
1867 int RootDFSNumber = N->DFSNumber;
1868 // Find the range of the node stack by walking down until we pass the
1869 // root DFS number.
1870 auto SCCNodes = make_range(
1871 PendingSCCStack.rbegin(),
1872 find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
1873 return N->DFSNumber < RootDFSNumber;
1874 }));
1875 // Form a new SCC out of these nodes and then clear them off our pending
1876 // stack.
1877 FormSCC(SCCNodes);
1878 PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
1879 } while (!DFSStack.empty());
1880 }
1881}
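// Worked example (illustrative): for edges a -> b, b -> c and c -> b with a
// as the only root, the DFS assigns DFS numbers a=1, b=2, c=3. The back edge
// c -> b lowers c's low-link to 2; c is pushed onto PendingSCCStack but not
// yet formed into an SCC (low-link 2 != DFS number 3). When b finishes, its
// low-link (2) equals its DFS number, so {b, c} are popped as one SCC, and a
// later forms a singleton SCC since its low-link (1) still equals its DFS
// number.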
1882
1883/// Build the internal SCCs for a RefSCC from a sequence of nodes.
1884///
1885/// Appends the SCCs to the provided vector and updates the map with their
1886/// indices. Both the vector and map must be empty when passed into this
1887/// routine.
1888void LazyCallGraph::buildSCCs(RefSCC &RC, node_stack_range Nodes) {
1889 assert(RC.SCCs.empty() && "Already built SCCs!");
1890 assert(RC.SCCIndices.empty() && "Already mapped SCC indices!");
1891
1892 for (Node *N : Nodes) {
1893 assert(N->LowLink >= (*Nodes.begin())->LowLink &&
1894 "We cannot have a low link in an SCC lower than its root on the "
1895 "stack!");
1896
1897 // This node will go into the next RefSCC, clear out its DFS and low link
1898 // as we scan.
1899 N->DFSNumber = N->LowLink = 0;
1900 }
1901
1902 // Each RefSCC contains a DAG of the call SCCs. To build these, we do
1903 // a direct walk of the call edges using Tarjan's algorithm. We reuse the
1904 // internal storage as we won't need it for the outer graph's DFS any longer.
1905 buildGenericSCCs(
1906 Nodes, [](Node &N) { return N->call_begin(); },
1907 [](Node &N) { return N->call_end(); },
1908 [](EdgeSequence::call_iterator I) -> Node & { return I->getNode(); },
1909 [this, &RC](node_stack_range Nodes) {
1910 RC.SCCs.push_back(createSCC(RC, Nodes));
1911 for (Node &N : *RC.SCCs.back()) {
1912 N.DFSNumber = N.LowLink = -1;
1913 SCCMap[&N] = RC.SCCs.back();
1914 }
1915 });
1916
1917 // Wire up the SCC indices.
1918 for (int I = 0, Size = RC.SCCs.size(); I < Size; ++I)
1919 RC.SCCIndices[RC.SCCs[I]] = I;
1920}
1921
1922void LazyCallGraph::buildRefSCCs() {
1923 if (EntryEdges.empty() || !PostOrderRefSCCs.empty())
1924 // RefSCCs are either non-existent or already built!
1925 return;
1926
1927 assert(RefSCCIndices.empty() && "Already mapped RefSCC indices!");
1928
1929 SmallVector<Node *, 16> Roots;
1930 for (Edge &E : *this)
1931 Roots.push_back(&E.getNode());
1932
1933 // The roots will be iterated in order.
1934 buildGenericSCCs(
1935 Roots,
1936 [](Node &N) {
1937 // We need to populate each node as we begin to walk its edges.
1938 N.populate();
1939 return N->begin();
1940 },
1941 [](Node &N) { return N->end(); },
1942 [](EdgeSequence::iterator I) -> Node & { return I->getNode(); },
1943 [this](node_stack_range Nodes) {
1944 RefSCC *NewRC = createRefSCC(*this);
1945 buildSCCs(*NewRC, Nodes);
1946
1947 // Push the new node into the postorder list and remember its position
1948 // in the index map.
1949 bool Inserted =
1950 RefSCCIndices.try_emplace(NewRC, PostOrderRefSCCs.size()).second;
1951 (void)Inserted;
1952 assert(Inserted && "Cannot already have this RefSCC in the index map!");
1953 PostOrderRefSCCs.push_back(NewRC);
1954#ifdef EXPENSIVE_CHECKS
1955 NewRC->verify();
1956#endif
1957 });
1958}
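// A typical client builds the RefSCC DAG once and then walks it bottom-up,
// e.g. (as the printer pass below does):
//
//   G.buildRefSCCs();
//   for (LazyCallGraph::RefSCC &RC : G.postorder_ref_sccs())
//     ... // visit RC only after every RefSCC it references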
1959
1960void LazyCallGraph::visitReferences(SmallVectorImpl<Constant *> &Worklist,
1961 SmallPtrSetImpl<Constant *> &Visited,
1962 function_ref<void(Function &)> Callback) {
1963 while (!Worklist.empty()) {
1964 Constant *C = Worklist.pop_back_val();
1965
1966 if (Function *F = dyn_cast<Function>(C)) {
1967 if (!F->isDeclaration())
1968 Callback(*F);
1969 continue;
1970 }
1971
1972 // Block addresses are special and don't participate in the call graph
1973 // anyway, so skip them.
1974 if (isa<BlockAddress>(C))
1975 continue;
1976
1977 for (Value *Op : C->operand_values())
1978 if (Visited.insert(cast<Constant>(Op)).second)
1979 Worklist.push_back(cast<Constant>(Op));
1980 }
1981}
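// Usage sketch (illustrative): callers seed Worklist/Visited with the constant
// operands they have already scanned and then let visitReferences chase the
// constant graph to every defined function it can reach, mirroring the pattern
// in getEdgeKind above:
//
//   SmallVector<Constant *, 16> Worklist;
//   SmallPtrSet<Constant *, 16> Visited;
//   for (Instruction &I : instructions(F))
//     for (Value *Op : I.operand_values())
//       if (auto *C = dyn_cast<Constant>(Op))
//         if (Visited.insert(C).second)
//           Worklist.push_back(C);
//   LazyCallGraph::visitReferences(Worklist, Visited,
//                                  [&](Function &Target) { /* record ref */ });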
1982
1983AnalysisKey LazyCallGraphAnalysis::Key;
1984
1985LazyCallGraphPrinterPass::LazyCallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
1986
1987static void printNode(raw_ostream &OS, LazyCallGraph::Node &N) {
1988 OS << " Edges in function: " << N.getFunction().getName() << "\n";
1989 for (LazyCallGraph::Edge &E : N.populate())
1990 OS << " " << (E.isCall() ? "call" : "ref ") << " -> "
1991 << E.getFunction().getName() << "\n";
1992
1993 OS << "\n";
1994}
1995
1997 OS << " SCC with " << C.size() << " functions:\n";
1998
1999 for (LazyCallGraph::Node &N : C)
2000 OS << " " << N.getFunction().getName() << "\n";
2001}
2002
2004 OS << " RefSCC with " << C.size() << " call SCCs:\n";
2005
2006 for (LazyCallGraph::SCC &InnerC : C)
2007 printSCC(OS, InnerC);
2008
2009 OS << "\n";
2010}
2011
2012PreservedAnalyses LazyCallGraphPrinterPass::run(Module &M,
2013 ModuleAnalysisManager &AM) {
2014 LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M);
2015
2016 OS << "Printing the call graph for module: " << M.getModuleIdentifier()
2017 << "\n\n";
2018
2019 for (Function &F : M)
2020 printNode(OS, G.get(F));
2021
2022 G.buildRefSCCs();
2023 for (LazyCallGraph::RefSCC &C : G.postorder_ref_sccs())
2024 printRefSCC(OS, C);
2025
2026 return PreservedAnalyses::all();
2027}
2028
2029LazyCallGraphDOTPrinterPass::LazyCallGraphDOTPrinterPass(raw_ostream &OS)
2030 : OS(OS) {}
2031
2032static void printNodeDOT(raw_ostream &OS, LazyCallGraph::Node &N) {
2033 std::string Name =
2034 "\"" + DOT::EscapeString(std::string(N.getFunction().getName())) + "\"";
2035
2036 for (LazyCallGraph::Edge &E : N.populate()) {
2037 OS << " " << Name << " -> \""
2038 << DOT::EscapeString(std::string(E.getFunction().getName())) << "\"";
2039 if (!E.isCall()) // It is a ref edge.
2040 OS << " [style=dashed,label=\"ref\"]";
2041 OS << ";\n";
2042 }
2043
2044 OS << "\n";
2045}
2046
2047PreservedAnalyses LazyCallGraphDOTPrinterPass::run(Module &M,
2048 ModuleAnalysisManager &AM) {
2049 LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M);
2050
2051 OS << "digraph \"" << DOT::EscapeString(M.getModuleIdentifier()) << "\" {\n";
2052
2053 for (Function &F : M)
2054 printNodeDOT(OS, G.get(F));
2055
2056 OS << "}\n";
2057
2058 return PreservedAnalyses::all();
2059}