//===---- LatencyPriorityQueue.cpp - A latency-oriented priority queue ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LatencyPriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using latency information to
// reduce the length of the critical path through the basic block.
//
//===----------------------------------------------------------------------===//

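// A rough sketch of how a list scheduler might drive this queue (an
// illustrative outline only, not lifted from an in-tree scheduler; initNodes()
// is declared in LatencyPriorityQueue.h, and SUnits stands for the DAG's node
// vector):
//
//   LatencyPriorityQueue AvailableQueue;
//   AvailableQueue.initNodes(SUnits);     // point the queue at the DAG
//   // ... push each SUnit that has no unscheduled predecessors ...
//   while (!AvailableQueue.empty()) {
//     SUnit *SU = AvailableQueue.pop();   // best candidate per latency_sort
//     // ... emit SU, mark it scheduled, push newly available successors ...
//     AvailableQueue.scheduledNode(SU);   // re-prioritize lone predecessors
//   }
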
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "scheduler"

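/// Compare two nodes for latency-oriented priority.  Returning true means LHS
/// ranks below RHS, so pop() will prefer RHS.  For example, if two
/// otherwise-equal nodes report latencies of 5 and 3 from getLatency(), the
/// latency-5 node wins, keeping the longer remaining path (the critical path)
/// moving first.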
bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  // The isScheduleHigh flag allows nodes with wraparound dependencies that
  // cannot easily be modeled as edges with latencies to be scheduled as
  // soon as possible in a top-down schedule.
  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
    return false;
  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
    return true;

  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one will
  // unblock more nodes than the other.
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return RHSNum < LHSNum;
}

/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = nullptr;
  for (const SDep &P : SU->Preds) {
    SUnit &Pred = *P.getSUnit();
    if (!Pred.isScheduled) {
      // We found an unscheduled predecessor.  If it's the only one we have
      // found so far, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return nullptr;
      OnlyAvailablePred = &Pred;
    }
  }

  return OnlyAvailablePred;
}

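/// push - Make SU available for scheduling.  Before queueing it, count the
/// successors for which SU is the sole unscheduled predecessor; latency_sort
/// uses that count (NumNodesSolelyBlocking) as its tie-breaker, so a node that
/// would unblock, say, two successors outranks an equal-latency node that
/// unblocks none.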
void LatencyPriorityQueue::push(SUnit *SU) {
  // Look at all of the successors of this node.  Count the number of nodes
  // that this node is the sole unscheduled predecessor for.
  unsigned NumNodesBlocking = 0;
  for (const SDep &Succ : SU->Succs)
    if (getSingleUnscheduledPred(Succ.getSUnit()) == SU)
      ++NumNodesBlocking;
  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;

  Queue.push_back(SU);
}


// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor.  If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void LatencyPriorityQueue::scheduledNode(SUnit *SU) {
  for (const SDep &Succ : SU->Succs)
    AdjustPriorityOfUnscheduledPreds(Succ.getSUnit());
}

/// AdjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled.  If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet.  If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isAvailable) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable) return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue.  First remove it.
  remove(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}

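/// pop - Remove and return the best node according to latency_sort.  The
/// queue is kept as an unordered vector, so this is a linear scan in which
/// Picker(*Best, *I) returning true means *I outranks the current best; the
/// winner is swapped to the back of the vector and popped off.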
SUnit *LatencyPriorityQueue::pop() {
  if (empty()) return nullptr;
  std::vector<SUnit *>::iterator Best = Queue.begin();
  for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
       E = Queue.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != std::prev(Queue.end()))
    std::swap(*Best, Queue.back());
  Queue.pop_back();
  return V;
}

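/// remove - Erase SU from the queue.  Like pop(), this swaps the element with
/// the back of the vector and pops it, so the erase itself is constant time
/// after the linear find.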
void LatencyPriorityQueue::remove(SUnit *SU) {
  assert(!Queue.empty() && "Queue is empty!");
  std::vector<SUnit *>::iterator I = find(Queue, SU);
  assert(I != Queue.end() && "Queue doesn't contain the SU being removed!");
  if (I != std::prev(Queue.end()))
    std::swap(*I, Queue.back());
  Queue.pop_back();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LatencyPriorityQueue::dump(ScheduleDAG *DAG) const {
  dbgs() << "Latency Priority Queue\n";
  dbgs() << "  Number of Queue Entries: " << Queue.size() << "\n";
  for (const SUnit *SU : Queue) {
    dbgs() << "    ";
    DAG->dumpNode(*SU);
  }
}
#endif