//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "post-RA-sched"

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
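// A sketch of how these flags might be used from a cl::opt-aware driver such
// as llc (exact invocation may vary by tool and build):
//   llc -post-RA-scheduler -break-anti-dependencies=critical input.ll
// In asserts builds, the DebugDiv/DebugMod pair can bisect a miscompile by
// restricting scheduling to a subset of blocks, e.g. only even-numbered ones:
//   llc -post-RA-scheduler -postra-sched-debugdiv=2 -postra-sched-debugmod=0 input.ll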

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

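    // Post-RA scheduling runs on fully register-allocated code; requiring
    // the NoVRegs property below lets the pass manager verify that no
    // virtual registers remain when this pass executes.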
    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    bool runOnMachineFunction(MachineFunction &Fn) override;

  private:
    bool enablePostRAScheduler(
        const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
        TargetSubtargetInfo::AntiDepBreakMode &Mode,
        TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but whose results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

    /// Ordered list of DAG postprocessing steps.
    std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

    /// The index in BB of RegionEnd.
    ///
    /// This is the instruction number from the top of the current block, not
    /// the SlotIndex. It is only used by the AntiDepBreaker.
    unsigned EndIndex;

  public:
    SchedulePostRATDList(
        MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
        const RegisterClassInfo &,
        TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
        SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs);

    ~SchedulePostRATDList() override;

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB) override;

    // Set the index of RegionEnd within the current BB.
    void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }

    /// Initialize the scheduler state for the next scheduling region.
    void enterRegion(MachineBasicBlock *bb,
                     MachineBasicBlock::iterator begin,
                     MachineBasicBlock::iterator end,
                     unsigned regioninstrs) override;

    /// Notify that the scheduler has finished scheduling the current region.
    void exitRegion() override;

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule() override;

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr &MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock() override;

  private:
    /// Apply each ScheduleDAGMutation step in order.
    void postprocessDAG();

    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();

    void dumpSchedule() const;
    void emitNoop(unsigned CurCycle);
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, DEBUG_TYPE,
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
    const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs)
    : ScheduleDAGInstrs(MF, &MLI), AA(AA), EndIndex(0) {

  const InstrItineraryData *InstrItins =
      MF.getSubtarget().getInstrItineraryData();
  HazardRec =
      MF.getSubtarget().getInstrInfo()->CreateTargetPostRAHazardRecognizer(
          InstrItins, this);
  MF.getSubtarget().getPostRAMutations(Mutations);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
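  // Pick the breaker implementation for the requested mode: the aggressive
  // breaker renames registers to break all anti-dependencies (guided by
  // CriticalPathRCs), the critical breaker only renames along the critical
  // path, and ANTIDEP_NONE leaves AntiDepBreak null.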
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : nullptr));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned regioninstrs) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  LLVM_DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
LLVM_DUMP_METHOD void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::enablePostRAScheduler(
    const TargetSubtargetInfo &ST,
    CodeGenOpt::Level OptLevel,
    TargetSubtargetInfo::AntiDepBreakMode &Mode,
    TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
  Mode = ST.getAntiDepBreakMode();
  ST.getCriticalPathRCs(CriticalPathRCs);

  // Check for explicit enable/disable of post-ra scheduling.
  if (EnablePostRAScheduler.getPosition() > 0)
    return EnablePostRAScheduler;

  return ST.enablePostRAScheduler() &&
         OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  TII = Fn.getSubtarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
      TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;

  // Check that post-RA scheduling is enabled for this target.
  // This may upgrade the AntiDepMode.
  if (!enablePostRAScheduler(Fn.getSubtarget(), PassConfig->getOptLevel(),
                             AntiDepMode, CriticalPathRCs))
    return false;

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
                      ? TargetSubtargetInfo::ANTIDEP_ALL
                      : ((EnableAntiDepBreaking == "critical")
                             ? TargetSubtargetInfo::ANTIDEP_CRITICAL
                             : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  LLVM_DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (auto &MBB : Fn) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName() << ":"
             << printMBBReference(MBB) << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(&MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB.end();
    unsigned Count = MBB.size(), CurrentCount = Count;
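    // The walk below proceeds bottom-up. Each scheduling boundary (a call or
    // a target-specific boundary) closes the current region: the instructions
    // after it up to Current are scheduled as one region, while the boundary
    // instruction itself is only Observed, never reordered.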
    for (MachineBasicBlock::iterator I = Current; I != MBB.begin();) {
      MachineInstr &MI = *std::prev(I);
      --Count;
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI.isCall() || TII->isSchedulingBoundary(MI, &MBB, Fn)) {
        Scheduler.enterRegion(&MBB, I, Current, CurrentCount - Count);
        Scheduler.setEndIndex(CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = &MI;
        CurrentCount = Count;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      if (MI.isBundle())
        Count -= MI.getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB.begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(&MBB, MBB.begin(), Current, CurrentCount);
    Scheduler.setEndIndex(CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills
    Scheduler.fixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak) {
    unsigned Broken =
        AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                            EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  postprocessDAG();

  LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");
  LLVM_DEBUG(for (const SUnit &SU : SUnits) {
    SU.dumpAll(this);
    dbgs() << '\n';
  });

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr &MI, unsigned Count) {
  if (AntiDepBreak)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

/// Apply each ScheduleDAGMutation step in order.
void SchedulePostRATDList::postprocessDAG() {
  for (auto &M : Mutations)
    M->apply(this);
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as follows:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  LLVM_DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
  HazardRec->EmitNoop();
  Sequence.push_back(nullptr);   // NULL here means noop
  ++NumNoops;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
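  // Each pass through the loop below does three things: (1) move pending
  // nodes whose depth has been reached onto the Available queue, (2) search
  // Available, in priority order, for a node the hazard recognizer will
  // accept this cycle, and (3) either schedule that node or advance the
  // cycle with a stall or an explicit noop when nothing can issue.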
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }
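    // Ready entries are removed with the swap-with-back idiom above, so
    // PendingQueue order is not preserved. MinDepth is computed here but not
    // consulted again in this function; it may be left over from a scheme
    // that advanced CurCycle directly to the next ready depth.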

    LLVM_DEBUG(dbgs() << "\n*** Examining Available\n";
               AvailableQueue.dump(this));

    SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        if (HazardRec->ShouldPreferAnother(CurSUnit)) {
          if (!NotPreferredSUnit) {
            // If this is the first non-preferred node for this cycle, then
            // record it and continue searching for a preferred node. If this
            // is not the first non-preferred node, then treat it as though
            // there had been a hazard.
            NotPreferredSUnit = CurSUnit;
            continue;
          }
        } else {
          FoundSUnit = CurSUnit;
          break;
        }
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // If we have a non-preferred node, push it back onto the available list.
    // If we did not find a preferred node, then schedule this first
    // non-preferred node.
    if (NotPreferredSUnit) {
      if (!FoundSUnit) {
        LLVM_DEBUG(
            dbgs() << "*** Will schedule a non-preferred instruction...\n");
        FoundSUnit = NotPreferredSUnit;
      } else {
        AvailableQueue.push(NotPreferredSUnit);
      }

      NotPreferredSUnit = nullptr;
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we need to emit noops prior to this instruction, then do so.
      unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
      for (unsigned i = 0; i != NumPreNoops; ++i)
        emitNoop(CurCycle);

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        LLVM_DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle
                          << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        LLVM_DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        LLVM_DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        emitNoop(CurCycle);
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = std::prev(RegionEnd);
  }

  // Reinsert any remaining debug_values.
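  // Iterating DbgValues from the end keeps the original relative order when
  // several DBG_VALUEs were attached to the same preceding instruction: each
  // one is spliced back immediately after that instruction.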
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}