1 //===- ScheduleDAG.cpp - Implement the ScheduleDAG class ------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file Implements the ScheduleDAG class, which is a base class used by
10 /// scheduling implementation classes.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/ScheduleDAG.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/ADT/iterator_range.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
21 #include "llvm/CodeGen/SelectionDAGNodes.h"
22 #include "llvm/CodeGen/TargetInstrInfo.h"
23 #include "llvm/CodeGen/TargetRegisterInfo.h"
24 #include "llvm/CodeGen/TargetSubtargetInfo.h"
25 #include "llvm/Config/llvm-config.h"
26 #include "llvm/Support/CommandLine.h"
27 #include "llvm/Support/Compiler.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include <algorithm>
31 #include <cassert>
32 #include <iterator>
33 #include <limits>
34 #include <utility>
35 #include <vector>
36 
37 using namespace llvm;
38 
39 #define DEBUG_TYPE "pre-RA-sched"
40 
STATISTIC(NumNewPredsAdded, "Number of times a single predecessor was added");
42 STATISTIC(NumTopoInits,
43           "Number of times the topological order has been recomputed");
44 
45 #ifndef NDEBUG
46 static cl::opt<bool> StressSchedOpt(
47   "stress-sched", cl::Hidden, cl::init(false),
48   cl::desc("Stress test instruction scheduling"));
49 #endif
50 
51 void SchedulingPriorityQueue::anchor() {}
52 
53 ScheduleDAG::ScheduleDAG(MachineFunction &mf)
54     : TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
55       TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
56       MRI(mf.getRegInfo()) {
57 #ifndef NDEBUG
58   StressSched = StressSchedOpt;
59 #endif
60 }
61 
62 ScheduleDAG::~ScheduleDAG() = default;
63 
64 void ScheduleDAG::clearDAG() {
65   SUnits.clear();
66   EntrySU = SUnit();
67   ExitSU = SUnit();
68 }
69 
70 const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
71   if (!Node || !Node->isMachineOpcode()) return nullptr;
72   return &TII->get(Node->getMachineOpcode());
73 }
74 
75 LLVM_DUMP_METHOD void SDep::dump(const TargetRegisterInfo *TRI) const {
76   switch (getKind()) {
77   case Data:   dbgs() << "Data"; break;
78   case Anti:   dbgs() << "Anti"; break;
79   case Output: dbgs() << "Out "; break;
80   case Order:  dbgs() << "Ord "; break;
81   }
82 
83   switch (getKind()) {
84   case Data:
85     dbgs() << " Latency=" << getLatency();
86     if (TRI && isAssignedRegDep())
87       dbgs() << " Reg=" << printReg(getReg(), TRI);
88     break;
89   case Anti:
90   case Output:
91     dbgs() << " Latency=" << getLatency();
92     break;
93   case Order:
94     dbgs() << " Latency=" << getLatency();
95     switch(Contents.OrdKind) {
96     case Barrier:      dbgs() << " Barrier"; break;
97     case MayAliasMem:
98     case MustAliasMem: dbgs() << " Memory"; break;
99     case Artificial:   dbgs() << " Artificial"; break;
100     case Weak:         dbgs() << " Weak"; break;
101     case Cluster:      dbgs() << " Cluster"; break;
102     }
103     break;
104   }
105 }
106 
107 bool SUnit::addPred(const SDep &D, bool Required) {
108   // If this node already has this dependence, don't add a redundant one.
109   for (SDep &PredDep : Preds) {
110     // Zero-latency weak edges may be added purely for heuristic ordering. Don't
111     // add them if another kind of edge already exists.
112     if (!Required && PredDep.getSUnit() == D.getSUnit())
113       return false;
114     if (PredDep.overlaps(D)) {
115       // Extend the latency if needed. Equivalent to
116       // removePred(PredDep) + addPred(D).
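      // For example, if an equivalent edge from the same predecessor already
      // exists with latency 1 and D carries latency 3, both the existing edge
      // and its mirrored entry in the predecessor's Succs list are raised to
      // latency 3 rather than a duplicate edge being added.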
117       if (PredDep.getLatency() < D.getLatency()) {
118         SUnit *PredSU = PredDep.getSUnit();
119         // Find the corresponding successor in N.
120         SDep ForwardD = PredDep;
121         ForwardD.setSUnit(this);
122         for (SDep &SuccDep : PredSU->Succs) {
123           if (SuccDep == ForwardD) {
124             SuccDep.setLatency(D.getLatency());
125             break;
126           }
127         }
128         PredDep.setLatency(D.getLatency());
129       }
130       return false;
131     }
132   }
133   // Now add a corresponding succ to N.
134   SDep P = D;
135   P.setSUnit(this);
136   SUnit *N = D.getSUnit();
137   // Update the bookkeeping.
138   if (D.getKind() == SDep::Data) {
139     assert(NumPreds < std::numeric_limits<unsigned>::max() &&
140            "NumPreds will overflow!");
141     assert(N->NumSuccs < std::numeric_limits<unsigned>::max() &&
142            "NumSuccs will overflow!");
143     ++NumPreds;
144     ++N->NumSuccs;
145   }
146   if (!N->isScheduled) {
147     if (D.isWeak()) {
148       ++WeakPredsLeft;
149     }
150     else {
151       assert(NumPredsLeft < std::numeric_limits<unsigned>::max() &&
152              "NumPredsLeft will overflow!");
153       ++NumPredsLeft;
154     }
155   }
156   if (!isScheduled) {
157     if (D.isWeak()) {
158       ++N->WeakSuccsLeft;
159     }
160     else {
161       assert(N->NumSuccsLeft < std::numeric_limits<unsigned>::max() &&
162              "NumSuccsLeft will overflow!");
163       ++N->NumSuccsLeft;
164     }
165   }
166   Preds.push_back(D);
167   N->Succs.push_back(P);
168   if (P.getLatency() != 0) {
169     this->setDepthDirty();
170     N->setHeightDirty();
171   }
172   return true;
173 }
174 
175 void SUnit::removePred(const SDep &D) {
176   // Find the matching predecessor.
177   SmallVectorImpl<SDep>::iterator I = llvm::find(Preds, D);
178   if (I == Preds.end())
179     return;
180   // Find the corresponding successor in N.
181   SDep P = D;
182   P.setSUnit(this);
183   SUnit *N = D.getSUnit();
184   SmallVectorImpl<SDep>::iterator Succ = llvm::find(N->Succs, P);
185   assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
186   // Update the bookkeeping.
187   if (P.getKind() == SDep::Data) {
188     assert(NumPreds > 0 && "NumPreds will underflow!");
189     assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
190     --NumPreds;
191     --N->NumSuccs;
192   }
193   if (!N->isScheduled) {
194     if (D.isWeak()) {
195       assert(WeakPredsLeft > 0 && "WeakPredsLeft will underflow!");
196       --WeakPredsLeft;
197     } else {
198       assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
199       --NumPredsLeft;
200     }
201   }
202   if (!isScheduled) {
203     if (D.isWeak()) {
      assert(N->WeakSuccsLeft > 0 && "WeakSuccsLeft will underflow!");
205       --N->WeakSuccsLeft;
206     } else {
207       assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
208       --N->NumSuccsLeft;
209     }
210   }
211   N->Succs.erase(Succ);
212   Preds.erase(I);
213   if (P.getLatency() != 0) {
214     this->setDepthDirty();
215     N->setHeightDirty();
216   }
217 }
218 
219 void SUnit::setDepthDirty() {
220   if (!isDepthCurrent) return;
221   SmallVector<SUnit*, 8> WorkList;
222   WorkList.push_back(this);
223   do {
224     SUnit *SU = WorkList.pop_back_val();
225     SU->isDepthCurrent = false;
226     for (SDep &SuccDep : SU->Succs) {
227       SUnit *SuccSU = SuccDep.getSUnit();
228       if (SuccSU->isDepthCurrent)
229         WorkList.push_back(SuccSU);
230     }
231   } while (!WorkList.empty());
232 }
233 
234 void SUnit::setHeightDirty() {
235   if (!isHeightCurrent) return;
236   SmallVector<SUnit*, 8> WorkList;
237   WorkList.push_back(this);
238   do {
239     SUnit *SU = WorkList.pop_back_val();
240     SU->isHeightCurrent = false;
241     for (SDep &PredDep : SU->Preds) {
242       SUnit *PredSU = PredDep.getSUnit();
243       if (PredSU->isHeightCurrent)
244         WorkList.push_back(PredSU);
245     }
246   } while (!WorkList.empty());
247 }
248 
249 void SUnit::setDepthToAtLeast(unsigned NewDepth) {
250   if (NewDepth <= getDepth())
251     return;
252   setDepthDirty();
253   Depth = NewDepth;
254   isDepthCurrent = true;
255 }
256 
257 void SUnit::setHeightToAtLeast(unsigned NewHeight) {
258   if (NewHeight <= getHeight())
259     return;
260   setHeightDirty();
261   Height = NewHeight;
262   isHeightCurrent = true;
263 }
264 
/// Calculates the depth: the length of the maximal path from any node with no
/// predecessors (the entry) down to this node.
266 void SUnit::ComputeDepth() {
267   SmallVector<SUnit*, 8> WorkList;
268   WorkList.push_back(this);
269   do {
270     SUnit *Cur = WorkList.back();
271 
272     bool Done = true;
273     unsigned MaxPredDepth = 0;
274     for (const SDep &PredDep : Cur->Preds) {
275       SUnit *PredSU = PredDep.getSUnit();
276       if (PredSU->isDepthCurrent)
277         MaxPredDepth = std::max(MaxPredDepth,
278                                 PredSU->Depth + PredDep.getLatency());
279       else {
280         Done = false;
281         WorkList.push_back(PredSU);
282       }
283     }
284 
285     if (Done) {
286       WorkList.pop_back();
287       if (MaxPredDepth != Cur->Depth) {
288         Cur->setDepthDirty();
289         Cur->Depth = MaxPredDepth;
290       }
291       Cur->isDepthCurrent = true;
292     }
293   } while (!WorkList.empty());
294 }
295 
/// Calculates the height: the length of the maximal path from this node down
/// to any node with no successors (the exit).
297 void SUnit::ComputeHeight() {
298   SmallVector<SUnit*, 8> WorkList;
299   WorkList.push_back(this);
300   do {
301     SUnit *Cur = WorkList.back();
302 
303     bool Done = true;
304     unsigned MaxSuccHeight = 0;
305     for (const SDep &SuccDep : Cur->Succs) {
306       SUnit *SuccSU = SuccDep.getSUnit();
307       if (SuccSU->isHeightCurrent)
308         MaxSuccHeight = std::max(MaxSuccHeight,
309                                  SuccSU->Height + SuccDep.getLatency());
310       else {
311         Done = false;
312         WorkList.push_back(SuccSU);
313       }
314     }
315 
316     if (Done) {
317       WorkList.pop_back();
318       if (MaxSuccHeight != Cur->Height) {
319         Cur->setHeightDirty();
320         Cur->Height = MaxSuccHeight;
321       }
322       Cur->isHeightCurrent = true;
323     }
324   } while (!WorkList.empty());
325 }
326 
327 void SUnit::biasCriticalPath() {
328   if (NumPreds < 2)
329     return;
330 
331   SUnit::pred_iterator BestI = Preds.begin();
332   unsigned MaxDepth = BestI->getSUnit()->getDepth();
333   for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
334        ++I) {
    if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth) {
      MaxDepth = I->getSUnit()->getDepth();
      BestI = I;
    }
  }
338   if (BestI != Preds.begin())
339     std::swap(*Preds.begin(), *BestI);
340 }
341 
342 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
343 LLVM_DUMP_METHOD void SUnit::dumpAttributes() const {
344   dbgs() << "  # preds left       : " << NumPredsLeft << "\n";
345   dbgs() << "  # succs left       : " << NumSuccsLeft << "\n";
346   if (WeakPredsLeft)
347     dbgs() << "  # weak preds left  : " << WeakPredsLeft << "\n";
348   if (WeakSuccsLeft)
349     dbgs() << "  # weak succs left  : " << WeakSuccsLeft << "\n";
350   dbgs() << "  # rdefs left       : " << NumRegDefsLeft << "\n";
351   dbgs() << "  Latency            : " << Latency << "\n";
352   dbgs() << "  Depth              : " << getDepth() << "\n";
353   dbgs() << "  Height             : " << getHeight() << "\n";
354 }
355 
356 LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeName(const SUnit &SU) const {
357   if (&SU == &EntrySU)
358     dbgs() << "EntrySU";
359   else if (&SU == &ExitSU)
360     dbgs() << "ExitSU";
361   else
362     dbgs() << "SU(" << SU.NodeNum << ")";
363 }
364 
365 LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeAll(const SUnit &SU) const {
366   dumpNode(SU);
367   SU.dumpAttributes();
368   if (SU.Preds.size() > 0) {
369     dbgs() << "  Predecessors:\n";
370     for (const SDep &Dep : SU.Preds) {
371       dbgs() << "    ";
372       dumpNodeName(*Dep.getSUnit());
373       dbgs() << ": ";
374       Dep.dump(TRI);
375       dbgs() << '\n';
376     }
377   }
378   if (SU.Succs.size() > 0) {
379     dbgs() << "  Successors:\n";
380     for (const SDep &Dep : SU.Succs) {
381       dbgs() << "    ";
382       dumpNodeName(*Dep.getSUnit());
383       dbgs() << ": ";
384       Dep.dump(TRI);
385       dbgs() << '\n';
386     }
387   }
388 }
389 #endif
390 
391 #ifndef NDEBUG
392 unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
393   bool AnyNotSched = false;
394   unsigned DeadNodes = 0;
  for (const SUnit &SU : SUnits) {
    if (!SU.isScheduled) {
      if (SU.NumPreds == 0 && SU.NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SU);
      dbgs() << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SU.isScheduled &&
        (isBottomUp ? SU.getHeight() : SU.getDepth()) >
          unsigned(std::numeric_limits<int>::max())) {
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SU);
      dbgs() << "has an unexpected "
             << (isBottomUp ? "Height" : "Depth") << " value!\n";
      AnyNotSched = true;
    }
    if (isBottomUp) {
      if (SU.NumSuccsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SU);
        dbgs() << "has successors left!\n";
        AnyNotSched = true;
      }
    } else {
      if (SU.NumPredsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SU);
        dbgs() << "has predecessors left!\n";
        AnyNotSched = true;
      }
    }
434   }
435   assert(!AnyNotSched);
436   return SUnits.size() - DeadNodes;
437 }
438 #endif
439 
440 void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
441   // The idea of the algorithm is taken from
442   // "Online algorithms for managing the topological order of
443   // a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
444   // This is the MNR algorithm, which was first introduced by
445   // A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
446   // "Maintaining a topological order under edge insertions".
447   //
448   // Short description of the algorithm:
449   //
450   // Topological ordering, ord, of a DAG maps each node to a topological
451   // index so that for all edges X->Y it is the case that ord(X) < ord(Y).
452   //
453   // This means that if there is a path from the node X to the node Z,
454   // then ord(X) < ord(Z).
455   //
456   // This property can be used to check for reachability of nodes:
457   // if Z is reachable from X, then an insertion of the edge Z->X would
458   // create a cycle.
459   //
460   // The algorithm first computes a topological ordering for the DAG by
461   // initializing the Index2Node and Node2Index arrays and then tries to keep
462   // the ordering up-to-date after edge insertions by reordering the DAG.
463   //
464   // On insertion of the edge X->Y, the algorithm first marks by calling DFS
465   // the nodes reachable from Y, and then shifts them using Shift to lie
466   // immediately after X in Index2Node.
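  //
  // For example, with Index2Node = [Y, Z, X] and an existing edge Y->Z,
  // inserting the edge X->Y makes DFS mark {Y, Z}; Shift then moves X to
  // index 0 and places the marked nodes after it: Index2Node = [X, Y, Z].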
467 
468   // Cancel pending updates, mark as valid.
469   Dirty = false;
470   Updates.clear();
471 
472   unsigned DAGSize = SUnits.size();
473   std::vector<SUnit*> WorkList;
474   WorkList.reserve(DAGSize);
475 
476   Index2Node.resize(DAGSize);
477   Node2Index.resize(DAGSize);
478 
479   // Initialize the data structures.
480   if (ExitSU)
481     WorkList.push_back(ExitSU);
482   for (SUnit &SU : SUnits) {
483     int NodeNum = SU.NodeNum;
484     unsigned Degree = SU.Succs.size();
485     // Temporarily use the Node2Index array as scratch space for degree counts.
486     Node2Index[NodeNum] = Degree;
487 
488     // Is it a node without dependencies?
489     if (Degree == 0) {
490       assert(SU.Succs.empty() && "SUnit should have no successors");
491       // Collect leaf nodes.
492       WorkList.push_back(&SU);
493     }
494   }
495 
496   int Id = DAGSize;
497   while (!WorkList.empty()) {
498     SUnit *SU = WorkList.back();
499     WorkList.pop_back();
500     if (SU->NodeNum < DAGSize)
501       Allocate(SU->NodeNum, --Id);
    for (const SDep &PredDep : SU->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->NodeNum < DAGSize && !--Node2Index[PredSU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(PredSU);
508     }
509   }
510 
511   Visited.resize(DAGSize);
512   NumTopoInits++;
513 
514 #ifndef NDEBUG
515   // Check correctness of the ordering
516   for (SUnit &SU : SUnits)  {
517     for (const SDep &PD : SU.Preds) {
      assert(Node2Index[SU.NodeNum] > Node2Index[PD.getSUnit()->NodeNum] &&
             "Wrong topological sorting");
520     }
521   }
522 #endif
523 }
524 
525 void ScheduleDAGTopologicalSort::FixOrder() {
526   // Recompute from scratch after new nodes have been added.
527   if (Dirty) {
528     InitDAGTopologicalSorting();
529     return;
530   }
531 
532   // Otherwise apply updates one-by-one.
533   for (auto &U : Updates)
534     AddPred(U.first, U.second);
535   Updates.clear();
536 }
537 
538 void ScheduleDAGTopologicalSort::AddPredQueued(SUnit *Y, SUnit *X) {
539   // Recomputing the order from scratch is likely more efficient than applying
540   // updates one-by-one for too many updates. The current cut-off is arbitrarily
541   // chosen.
542   Dirty = Dirty || Updates.size() > 10;
543 
544   if (Dirty)
545     return;
546 
547   Updates.emplace_back(Y, X);
548 }
549 
550 void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
551   int UpperBound, LowerBound;
552   LowerBound = Node2Index[Y->NodeNum];
553   UpperBound = Node2Index[X->NodeNum];
554   bool HasLoop = false;
  // Is Ord(Y) < Ord(X) ?
556   if (LowerBound < UpperBound) {
557     // Update the topological order.
558     Visited.reset();
559     DFS(Y, UpperBound, HasLoop);
560     assert(!HasLoop && "Inserted edge creates a loop!");
561     // Recompute topological indexes.
562     Shift(Visited, LowerBound, UpperBound);
563   }
564 
565   NumNewPredsAdded++;
566 }
567 
void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
  // Removing an edge can never invalidate an existing topological order, so
  // no update of the order is required here.
  // InitDAGTopologicalSorting();
}
571 
572 void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
573                                      bool &HasLoop) {
574   std::vector<const SUnit*> WorkList;
575   WorkList.reserve(SUnits.size());
576 
577   WorkList.push_back(SU);
578   do {
579     SU = WorkList.back();
580     WorkList.pop_back();
581     Visited.set(SU->NodeNum);
582     for (const SDep &SuccDep : llvm::reverse(SU->Succs)) {
583       unsigned s = SuccDep.getSUnit()->NodeNum;
584       // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
585       if (s >= Node2Index.size())
586         continue;
587       if (Node2Index[s] == UpperBound) {
588         HasLoop = true;
589         return;
590       }
      // Visit unvisited successors that lie in the affected region.
592       if (!Visited.test(s) && Node2Index[s] < UpperBound) {
593         WorkList.push_back(SuccDep.getSUnit());
594       }
595     }
596   } while (!WorkList.empty());
597 }
598 
599 std::vector<int> ScheduleDAGTopologicalSort::GetSubGraph(const SUnit &StartSU,
600                                                          const SUnit &TargetSU,
601                                                          bool &Success) {
602   std::vector<const SUnit*> WorkList;
603   int LowerBound = Node2Index[StartSU.NodeNum];
604   int UpperBound = Node2Index[TargetSU.NodeNum];
605   bool Found = false;
606   BitVector VisitedBack;
607   std::vector<int> Nodes;
608 
609   if (LowerBound > UpperBound) {
610     Success = false;
611     return Nodes;
612   }
613 
614   WorkList.reserve(SUnits.size());
615   Visited.reset();
616 
617   // Starting from StartSU, visit all successors up
618   // to UpperBound.
619   WorkList.push_back(&StartSU);
620   do {
621     const SUnit *SU = WorkList.back();
622     WorkList.pop_back();
623     for (const SDep &SD : llvm::reverse(SU->Succs)) {
624       const SUnit *Succ = SD.getSUnit();
625       unsigned s = Succ->NodeNum;
626       // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
627       if (Succ->isBoundaryNode())
628         continue;
629       if (Node2Index[s] == UpperBound) {
630         Found = true;
631         continue;
632       }
      // Visit unvisited successors that lie in the affected region.
634       if (!Visited.test(s) && Node2Index[s] < UpperBound) {
635         Visited.set(s);
636         WorkList.push_back(Succ);
637       }
638     }
639   } while (!WorkList.empty());
640 
641   if (!Found) {
642     Success = false;
643     return Nodes;
644   }
645 
646   WorkList.clear();
647   VisitedBack.resize(SUnits.size());
648   Found = false;
649 
650   // Starting from TargetSU, visit all predecessors up
651   // to LowerBound. SUs that are visited by the two
652   // passes are added to Nodes.
653   WorkList.push_back(&TargetSU);
654   do {
655     const SUnit *SU = WorkList.back();
656     WorkList.pop_back();
657     for (const SDep &SD : llvm::reverse(SU->Preds)) {
658       const SUnit *Pred = SD.getSUnit();
659       unsigned s = Pred->NodeNum;
660       // Edges to non-SUnits are allowed but ignored (e.g. EntrySU).
661       if (Pred->isBoundaryNode())
662         continue;
663       if (Node2Index[s] == LowerBound) {
664         Found = true;
665         continue;
666       }
667       if (!VisitedBack.test(s) && Visited.test(s)) {
668         VisitedBack.set(s);
669         WorkList.push_back(Pred);
670         Nodes.push_back(s);
671       }
672     }
673   } while (!WorkList.empty());
674 
675   assert(Found && "Error in SUnit Graph!");
676   Success = true;
677   return Nodes;
678 }
679 
680 void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
681                                        int UpperBound) {
682   std::vector<int> L;
683   int shift = 0;
684   int i;
685 
686   for (i = LowerBound; i <= UpperBound; ++i) {
    // w is the node at topological index i.
688     int w = Index2Node[i];
689     if (Visited.test(w)) {
690       // Unmark.
691       Visited.reset(w);
692       L.push_back(w);
693       shift = shift + 1;
694     } else {
695       Allocate(w, i - shift);
696     }
697   }
698 
699   for (unsigned LI : L) {
700     Allocate(LI, i - shift);
701     i = i + 1;
702   }
703 }
704 
705 bool ScheduleDAGTopologicalSort::WillCreateCycle(SUnit *TargetSU, SUnit *SU) {
706   FixOrder();
707   // Is SU reachable from TargetSU via successor edges?
708   if (IsReachable(SU, TargetSU))
709     return true;
710   for (const SDep &PredDep : TargetSU->Preds)
711     if (PredDep.isAssignedRegDep() &&
712         IsReachable(SU, PredDep.getSUnit()))
713       return true;
714   return false;
715 }
716 
717 void ScheduleDAGTopologicalSort::AddSUnitWithoutPredecessors(const SUnit *SU) {
  assert(SU->NodeNum == Index2Node.size() && "Node can only be added at the end");
719   assert(SU->NumPreds == 0 && "Can only add SU's with no predecessors");
720   Node2Index.push_back(Index2Node.size());
721   Index2Node.push_back(SU->NodeNum);
722   Visited.resize(Node2Index.size());
723 }
724 
725 bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
726                                              const SUnit *TargetSU) {
727   assert(TargetSU != nullptr && "Invalid target SUnit");
728   assert(SU != nullptr && "Invalid SUnit");
729   FixOrder();
730   // If insertion of the edge SU->TargetSU would create a cycle
731   // then there is a path from TargetSU to SU.
732   int UpperBound, LowerBound;
733   LowerBound = Node2Index[TargetSU->NodeNum];
734   UpperBound = Node2Index[SU->NodeNum];
735   bool HasLoop = false;
736   // Is Ord(TargetSU) < Ord(SU) ?
737   if (LowerBound < UpperBound) {
738     Visited.reset();
739     // There may be a path from TargetSU to SU. Check for it.
740     DFS(TargetSU, UpperBound, HasLoop);
741   }
742   return HasLoop;
743 }
744 
745 void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
746   Node2Index[n] = index;
747   Index2Node[index] = n;
748 }
749 
750 ScheduleDAGTopologicalSort::
751 ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
752   : SUnits(sunits), ExitSU(exitsu) {}
753 
754 ScheduleHazardRecognizer::~ScheduleHazardRecognizer() = default;
755