//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
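//
// In outline, the main loop in ListScheduleTopDown below proceeds roughly as
// follows (a simplified sketch; the real loop also manages a pending queue,
// preferred-unit selection, and noop insertion):
//
//   while (available or pending nodes remain) {
//     move pending nodes whose depth <= CurCycle to the available queue;
//     pop the highest-priority available node with no hazard;
//     if one exists, schedule it and release its successors;
//     otherwise advance the cycle (stall) or emit a noop;
//   }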
//
//===----------------------------------------------------------------------===//

#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "post-RA-sched"

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");
// Post-RA scheduling is enabled with
// TargetSubtargetInfo::enablePostRAScheduler(). This command-line flag can be
// used to override the target's choice.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
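// Both flags above are hidden debugging options; an illustrative llc
// invocation using them might be:
//   llc -post-RA-scheduler -break-anti-dependencies=critical foo.ll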

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
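// For example, -postra-sched-debugdiv=4 -postra-sched-debugmod=1 restricts
// scheduling to blocks whose running visit count satisfies count % 4 == 1
// (see the static bbcnt counter in runOnMachineFunction below).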

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII = nullptr;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    bool runOnMachineFunction(MachineFunction &Fn) override;

  private:
    bool enablePostRAScheduler(
        const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
        TargetSubtargetInfo::AntiDepBreakMode &Mode,
        TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

    /// Ordered list of DAG postprocessing steps.
    std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

    /// The index in BB of RegionEnd.
    ///
    /// This is the instruction number from the top of the current block, not
    /// the SlotIndex. It is only used by the AntiDepBreaker.
    unsigned EndIndex;

  public:
    SchedulePostRATDList(
        MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
        const RegisterClassInfo &,
        TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
        SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs);

    ~SchedulePostRATDList() override;

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB) override;

    // Set the index of RegionEnd within the current BB.
    void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }

    /// Initialize the scheduler state for the next scheduling region.
    void enterRegion(MachineBasicBlock *bb,
                     MachineBasicBlock::iterator begin,
                     MachineBasicBlock::iterator end,
                     unsigned regioninstrs) override;

    /// Notify that the scheduler has finished scheduling the current region.
    void exitRegion() override;

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule() override;

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr &MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock() override;

  private:
    /// Apply each ScheduleDAGMutation step in order.
    void postprocessDAG();

    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();

    void dumpSchedule() const;
    void emitNoop(unsigned CurCycle);
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, DEBUG_TYPE,
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
    const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs)
    : ScheduleDAGInstrs(MF, &MLI), AA(AA), EndIndex(0) {

  const InstrItineraryData *InstrItins =
      MF.getSubtarget().getInstrItineraryData();
  HazardRec =
      MF.getSubtarget().getInstrInfo()->CreateTargetPostRAHazardRecognizer(
          InstrItins, this);
  MF.getSubtarget().getPostRAMutations(Mutations);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
      ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
       (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
       ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
        (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : nullptr));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned regioninstrs) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  LLVM_DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
LLVM_DUMP_METHOD void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      dumpNode(*SU);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::enablePostRAScheduler(
    const TargetSubtargetInfo &ST,
    CodeGenOpt::Level OptLevel,
    TargetSubtargetInfo::AntiDepBreakMode &Mode,
    TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
  Mode = ST.getAntiDepBreakMode();
  ST.getCriticalPathRCs(CriticalPathRCs);

  // Check for explicit enable/disable of post-ra scheduling.
  if (EnablePostRAScheduler.getPosition() > 0)
    return EnablePostRAScheduler;

  return ST.enablePostRAScheduler() &&
         OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}
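
// Targets normally opt in through the TargetSubtargetInfo hooks queried
// above rather than via the command-line flag. As a sketch, a hypothetical
// subtarget (MySubtarget and GPRRegClass are illustrative names, not part of
// this file) might override:
//
//   bool MySubtarget::enablePostRAScheduler() const { return true; }
//   void MySubtarget::getCriticalPathRCs(RegClassVector &CriticalPathRCs)
//       const {
//     CriticalPathRCs.clear();
//     CriticalPathRCs.push_back(&MyTarget::GPRRegClass);
//   }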

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  TII = Fn.getSubtarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
      TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;

  // Check that post-RA scheduling is enabled for this target.
  // This may upgrade the AntiDepMode.
  if (!enablePostRAScheduler(Fn.getSubtarget(), PassConfig->getOptLevel(),
                             AntiDepMode, CriticalPathRCs))
    return false;

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
                      ? TargetSubtargetInfo::ANTIDEP_ALL
                      : ((EnableAntiDepBreaking == "critical")
                             ? TargetSubtargetInfo::ANTIDEP_CRITICAL
                             : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  LLVM_DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (auto &MBB : Fn) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName() << ":"
             << printMBBReference(MBB) << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(&MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB.end();
    unsigned Count = MBB.size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB.begin();) {
      MachineInstr &MI = *std::prev(I);
      --Count;
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI.isCall() || TII->isSchedulingBoundary(MI, &MBB, Fn)) {
        Scheduler.enterRegion(&MBB, I, Current, CurrentCount - Count);
        Scheduler.setEndIndex(CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = &MI;
        CurrentCount = Count;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      if (MI.isBundle())
        Count -= MI.getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB.begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(&MBB, MBB.begin(), Current, CurrentCount);
    Scheduler.setEndIndex(CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills
    Scheduler.fixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak) {
    unsigned Broken =
        AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                            EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  postprocessDAG();

  LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");
  LLVM_DEBUG(dump());

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr &MI, unsigned Count) {
  if (AntiDepBreak)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

/// Apply each ScheduleDAGMutation step in order.
void SchedulePostRATDList::postprocessDAG() {
  for (auto &M : Mutations)
    M->apply(this);
}
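
// Mutations are supplied by the target via
// TargetSubtargetInfo::getPostRAMutations(), queried in the constructor
// above. A hypothetical override (MyMacroFusionMutation is an illustrative
// name) might look like:
//
//   void MySubtarget::getPostRAMutations(
//       std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
//     Mutations.push_back(std::make_unique<MyMacroFusionMutation>());
//   }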

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node which causes
  // all descendants to be marked dirty. Setting the successor depth explicitly
  // here would cause depth to be recomputed for all its ancestors. If the
  // successor is not yet ready (because of a transitively redundant edge) then
  // this causes depth computation to be quadratic in the size of the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  LLVM_DEBUG(dumpNode(*SU));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
  HazardRec->EmitNoop();
  Sequence.push_back(nullptr);   // NULL here means noop
  ++NumNoops;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    LLVM_DEBUG(dbgs() << "\n*** Examining Available\n";
               AvailableQueue.dump(this));

    SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
          HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        if (HazardRec->ShouldPreferAnother(CurSUnit)) {
          if (!NotPreferredSUnit) {
            // If this is the first non-preferred node for this cycle, then
            // record it and continue searching for a preferred node. If this
            // is not the first non-preferred node, then treat it as though
            // there had been a hazard.
            NotPreferredSUnit = CurSUnit;
            continue;
          }
        } else {
          FoundSUnit = CurSUnit;
          break;
        }
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // If we have a non-preferred node, push it back onto the available list.
    // If we did not find a preferred node, then schedule this first
    // non-preferred node.
    if (NotPreferredSUnit) {
      if (!FoundSUnit) {
        LLVM_DEBUG(
            dbgs() << "*** Will schedule a non-preferred instruction...\n");
        FoundSUnit = NotPreferredSUnit;
      } else {
        AvailableQueue.push(NotPreferredSUnit);
      }

      NotPreferredSUnit = nullptr;
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we need to emit noops prior to this instruction, then do so.
      unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
      for (unsigned i = 0; i != NumPreNoops; ++i)
        emitNoop(CurCycle);

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        LLVM_DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle
                          << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        LLVM_DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        LLVM_DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        emitNoop(CurCycle);
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = std::prev(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}