1 //===-- WebAssemblyCFGSort.cpp - CFG Sorting ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements a CFG sorting pass.
11 ///
12 /// This pass reorders the blocks in a function to put them into topological
13 /// order, ignoring loop backedges, and without any loop or exception being
/// interrupted by a block not dominated by its header, with special care
15 /// to keep the order as similar as possible to the original order.
16 ///
//===----------------------------------------------------------------------===//
18 
19 #include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
20 #include "Utils/WebAssemblyUtilities.h"
21 #include "WebAssembly.h"
22 #include "WebAssemblyExceptionInfo.h"
23 #include "WebAssemblySortRegion.h"
24 #include "WebAssemblySubtarget.h"
25 #include "llvm/ADT/PriorityQueue.h"
26 #include "llvm/ADT/SetVector.h"
27 #include "llvm/CodeGen/MachineDominators.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineLoopInfo.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/Passes.h"
32 #include "llvm/CodeGen/WasmEHFuncInfo.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/raw_ostream.h"
35 using namespace llvm;
36 using WebAssembly::SortRegion;
37 using WebAssembly::SortRegionInfo;
38 
39 #define DEBUG_TYPE "wasm-cfg-sort"
40 
41 // Option to disable EH pad first sorting. Only for testing unwind destination
42 // mismatches in CFGStackify.
43 static cl::opt<bool> WasmDisableEHPadSort(
44     "wasm-disable-ehpad-sort", cl::ReallyHidden,
45     cl::desc(
46         "WebAssembly: Disable EH pad-first sort order. Testing purpose only."),
47     cl::init(false));
48 
49 namespace {
50 
51 class WebAssemblyCFGSort final : public MachineFunctionPass {
52   StringRef getPassName() const override { return "WebAssembly CFG Sort"; }
53 
54   void getAnalysisUsage(AnalysisUsage &AU) const override {
55     AU.setPreservesCFG();
56     AU.addRequired<MachineDominatorTree>();
57     AU.addPreserved<MachineDominatorTree>();
58     AU.addRequired<MachineLoopInfo>();
59     AU.addPreserved<MachineLoopInfo>();
60     AU.addRequired<WebAssemblyExceptionInfo>();
61     AU.addPreserved<WebAssemblyExceptionInfo>();
62     MachineFunctionPass::getAnalysisUsage(AU);
63   }
64 
65   bool runOnMachineFunction(MachineFunction &MF) override;
66 
67 public:
68   static char ID; // Pass identification, replacement for typeid
69   WebAssemblyCFGSort() : MachineFunctionPass(ID) {}
70 };
71 } // end anonymous namespace
72 
73 char WebAssemblyCFGSort::ID = 0;
74 INITIALIZE_PASS(WebAssemblyCFGSort, DEBUG_TYPE,
75                 "Reorders blocks in topological order", false, false)
76 
77 FunctionPass *llvm::createWebAssemblyCFGSort() {
78   return new WebAssemblyCFGSort();
79 }
80 
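/// Update the terminators of \p MBB after its layout successor may have
/// changed, passing the original layout successor (recovered from the
/// pre-sort block numbering) to updateTerminator(). Blocks whose terminators
/// cannot all be analyzed are left unchanged.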
81 static void maybeUpdateTerminator(MachineBasicBlock *MBB) {
82 #ifndef NDEBUG
83   bool AnyBarrier = false;
84 #endif
85   bool AllAnalyzable = true;
86   for (const MachineInstr &Term : MBB->terminators()) {
87 #ifndef NDEBUG
88     AnyBarrier |= Term.isBarrier();
89 #endif
90     AllAnalyzable &= Term.isBranch() && !Term.isIndirectBranch();
91   }
92   assert((AnyBarrier || AllAnalyzable) &&
93          "analyzeBranch needs to analyze any block with a fallthrough");
94 
95   // Find the layout successor from the original block order.
96   MachineFunction *MF = MBB->getParent();
97   MachineBasicBlock *OriginalSuccessor =
98       unsigned(MBB->getNumber() + 1) < MF->getNumBlockIDs()
99           ? MF->getBlockNumbered(MBB->getNumber() + 1)
100           : nullptr;
101 
102   if (AllAnalyzable)
103     MBB->updateTerminator(OriginalSuccessor);
104 }
105 
106 namespace {
107 // EH pads are selected first regardless of the block comparison order.
// When only one of the BBs is an EH pad, we give it a higher priority, to
// prevent common mismatches between possibly throwing calls and the EH pads
// they unwind to, as in the example below:
111 //
112 // bb0:
113 //   call @foo      // If this throws, unwind to bb2
114 // bb1:
115 //   call @bar      // If this throws, unwind to bb3
116 // bb2 (ehpad):
117 //   handler_bb2
118 // bb3 (ehpad):
119 //   handler_bb3
120 // continuing code
121 //
// Because this pass tries to preserve the original BB order, this order will
// not change. But CFGStackify will then produce this try-catch structure,
// resulting in a mismatch:
125 // try
126 //   try
127 //     call @foo
128 //     call @bar    // This should unwind to bb3, not bb2!
129 //   catch
130 //     handler_bb2
131 //   end
132 // catch
133 //   handler_bb3
134 // end
135 // continuing code
136 //
137 // If we give a higher priority to an EH pad whenever it is ready in this
138 // example, when both bb1 and bb2 are ready, we would pick up bb2 first.
139 
140 /// Sort blocks by their number.
141 struct CompareBlockNumbers {
142   bool operator()(const MachineBasicBlock *A,
143                   const MachineBasicBlock *B) const {
144     if (!WasmDisableEHPadSort) {
145       if (A->isEHPad() && !B->isEHPad())
146         return false;
147       if (!A->isEHPad() && B->isEHPad())
148         return true;
149     }
150 
151     return A->getNumber() > B->getNumber();
152   }
153 };
/// Sort blocks by their number in the opposite order.
155 struct CompareBlockNumbersBackwards {
156   bool operator()(const MachineBasicBlock *A,
157                   const MachineBasicBlock *B) const {
158     if (!WasmDisableEHPadSort) {
159       if (A->isEHPad() && !B->isEHPad())
160         return false;
161       if (!A->isEHPad() && B->isEHPad())
162         return true;
163     }
164 
165     return A->getNumber() < B->getNumber();
166   }
167 };
/// Bookkeeping for a region to help ensure that we don't mix blocks not
/// dominated by its header among its blocks.
170 struct Entry {
171   const SortRegion *TheRegion;
172   unsigned NumBlocksLeft;
173 
  /// List of blocks not dominated by the region's header that are deferred
  /// until after all of the region's blocks have been seen.
176   std::vector<MachineBasicBlock *> Deferred;
177 
178   explicit Entry(const SortRegion *R)
179       : TheRegion(R), NumBlocksLeft(R->getNumBlocks()) {}
180 };
181 } // end anonymous namespace
182 
183 /// Sort the blocks, taking special care to make sure that regions are not
184 /// interrupted by blocks not dominated by their header.
185 /// TODO: There are many opportunities for improving the heuristics here.
186 /// Explore them.
187 static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
188                        const WebAssemblyExceptionInfo &WEI,
189                        const MachineDominatorTree &MDT) {
190   // Remember original layout ordering, so we can update terminators after
191   // reordering to point to the original layout successor.
192   MF.RenumberBlocks();
193 
194   // Prepare for a topological sort: Record the number of predecessors each
195   // block has, ignoring loop backedges.
196   SmallVector<unsigned, 16> NumPredsLeft(MF.getNumBlockIDs(), 0);
197   for (MachineBasicBlock &MBB : MF) {
198     unsigned N = MBB.pred_size();
199     if (MachineLoop *L = MLI.getLoopFor(&MBB))
200       if (L->getHeader() == &MBB)
201         for (const MachineBasicBlock *Pred : MBB.predecessors())
202           if (L->contains(Pred))
203             --N;
204     NumPredsLeft[MBB.getNumber()] = N;
205   }
206 
207   // Topological sort the CFG, with additional constraints:
208   //  - Between a region header and the last block in the region, there can be
209   //    no blocks not dominated by its header.
210   //  - It's desirable to preserve the original block order when possible.
211   // We use two ready lists; Preferred and Ready. Preferred has recently
212   // processed successors, to help preserve block sequences from the original
213   // order. Ready has the remaining ready blocks. EH blocks are picked first
214   // from both queues.
215   PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
216                 CompareBlockNumbers>
217       Preferred;
218   PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
219                 CompareBlockNumbersBackwards>
220       Ready;
221 
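  // Wasm EH unwind-source info for this function, if it uses Wasm exception
  // handling; used below to defer unwind destinations until their unwind
  // sources' exceptions have been sorted.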
222   const auto *EHInfo = MF.getWasmEHFuncInfo();
223   SortRegionInfo SRI(MLI, WEI);
224   SmallVector<Entry, 4> Entries;
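  // Lay the blocks out one at a time: starting from the entry block, pick the
  // next block from the ready lists and move it into place immediately after
  // the current block.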
225   for (MachineBasicBlock *MBB = &MF.front();;) {
226     const SortRegion *R = SRI.getRegionFor(MBB);
227     if (R) {
228       // If MBB is a region header, add it to the active region list. We can't
229       // put any blocks that it doesn't dominate until we see the end of the
230       // region.
231       if (R->getHeader() == MBB)
232         Entries.push_back(Entry(R));
233       // For each active region the block is in, decrement the count. If MBB is
234       // the last block in an active region, take it off the list and pick up
235       // any blocks deferred because the header didn't dominate them.
236       for (Entry &E : Entries)
237         if (E.TheRegion->contains(MBB) && --E.NumBlocksLeft == 0)
238           for (auto *DeferredBlock : E.Deferred)
239             Ready.push(DeferredBlock);
240       while (!Entries.empty() && Entries.back().NumBlocksLeft == 0)
241         Entries.pop_back();
242     }
243     // The main topological sort logic.
244     for (MachineBasicBlock *Succ : MBB->successors()) {
245       // Ignore backedges.
246       if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
247         if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
248           continue;
249       // Decrement the predecessor count. If it's now zero, it's ready.
250       if (--NumPredsLeft[Succ->getNumber()] == 0) {
        // When we are in a SortRegion, we allow sorting not only of BBs that
        // belong to the current (innermost) region but also of BBs that are
        // dominated by the current region header. But we should not do this
        // for exceptions, because of cases like the following: EHPad A's
        // unwind destination (where the exception lands when it is not caught
        // by EHPad A) is EHPad B, so EHPad B does not belong to the exception
        // dominated by EHPad A. But EHPad B is dominated by EHPad A, so EHPad
        // B could be sorted within EHPad A's exception. This is incorrect
        // because we may end up delegating/rethrowing to an inner scope in
        // CFGStackify. So here we make sure those unwind destinations are
        // deferred until their unwind source's exception is sorted.
262         if (EHInfo && EHInfo->hasUnwindSrcs(Succ)) {
263           SmallPtrSet<MachineBasicBlock *, 4> UnwindSrcs =
264               EHInfo->getUnwindSrcs(Succ);
265           bool IsDeferred = false;
266           for (Entry &E : Entries) {
267             if (UnwindSrcs.count(E.TheRegion->getHeader())) {
268               E.Deferred.push_back(Succ);
269               IsDeferred = true;
270               break;
271             }
272           }
273           if (IsDeferred)
274             continue;
275         }
276         Preferred.push(Succ);
277       }
278     }
279     // Determine the block to follow MBB. First try to find a preferred block,
280     // to preserve the original block order when possible.
281     MachineBasicBlock *Next = nullptr;
282     while (!Preferred.empty()) {
283       Next = Preferred.top();
284       Preferred.pop();
      // If Next isn't dominated by the top active region header, defer it
      // until that region is done.
287       if (!Entries.empty() &&
288           !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) {
289         Entries.back().Deferred.push_back(Next);
290         Next = nullptr;
291         continue;
292       }
      // If Next was originally ordered before MBB, and not just because it
      // was loop-rotated above the region header, it's not preferred.
295       if (Next->getNumber() < MBB->getNumber() &&
296           (WasmDisableEHPadSort || !Next->isEHPad()) &&
297           (!R || !R->contains(Next) ||
298            R->getHeader()->getNumber() < Next->getNumber())) {
299         Ready.push(Next);
300         Next = nullptr;
301         continue;
302       }
303       break;
304     }
305     // If we didn't find a suitable block in the Preferred list, check the
306     // general Ready list.
307     if (!Next) {
308       // If there are no more blocks to process, we're done.
309       if (Ready.empty()) {
310         maybeUpdateTerminator(MBB);
311         break;
312       }
313       for (;;) {
314         Next = Ready.top();
315         Ready.pop();
316         // If Next isn't dominated by the top active region header, defer it
317         // until that region is done.
318         if (!Entries.empty() &&
319             !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) {
320           Entries.back().Deferred.push_back(Next);
321           continue;
322         }
323         break;
324       }
325     }
326     // Move the next block into place and iterate.
327     Next->moveAfter(MBB);
328     maybeUpdateTerminator(MBB);
329     MBB = Next;
330   }
331   assert(Entries.empty() && "Active sort region list not finished");
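  // Renumber the blocks to reflect the new layout order.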
332   MF.RenumberBlocks();
333 
334 #ifndef NDEBUG
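  // Verify that the resulting block order is a valid topological sort and
  // that each sort region's blocks are contiguous and properly nested.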
335   SmallSetVector<const SortRegion *, 8> OnStack;
336 
337   // Insert a sentinel representing the degenerate loop that starts at the
338   // function entry block and includes the entire function as a "loop" that
339   // executes once.
340   OnStack.insert(nullptr);
341 
342   for (auto &MBB : MF) {
343     assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");
344     const SortRegion *Region = SRI.getRegionFor(&MBB);
345 
346     if (Region && &MBB == Region->getHeader()) {
347       // Region header.
348       if (Region->isLoop()) {
349         // Loop header. The loop predecessor should be sorted above, and the
350         // other predecessors should be backedges below.
351         for (auto *Pred : MBB.predecessors())
352           assert(
353               (Pred->getNumber() < MBB.getNumber() || Region->contains(Pred)) &&
354               "Loop header predecessors must be loop predecessors or "
355               "backedges");
356       } else {
357         // Exception header. All predecessors should be sorted above.
358         for (auto *Pred : MBB.predecessors())
359           assert(Pred->getNumber() < MBB.getNumber() &&
360                  "Non-loop-header predecessors should be topologically sorted");
361       }
362       assert(OnStack.insert(Region) &&
363              "Regions should be declared at most once.");
364 
365     } else {
366       // Not a region header. All predecessors should be sorted above.
367       for (auto *Pred : MBB.predecessors())
368         assert(Pred->getNumber() < MBB.getNumber() &&
369                "Non-loop-header predecessors should be topologically sorted");
370       assert(OnStack.count(SRI.getRegionFor(&MBB)) &&
371              "Blocks must be nested in their regions");
372     }
373     while (OnStack.size() > 1 && &MBB == SRI.getBottom(OnStack.back()))
374       OnStack.pop_back();
375   }
376   assert(OnStack.pop_back_val() == nullptr &&
377          "The function entry block shouldn't actually be a region header");
378   assert(OnStack.empty() &&
379          "Control flow stack pushes and pops should be balanced.");
380 #endif
381 }
382 
383 bool WebAssemblyCFGSort::runOnMachineFunction(MachineFunction &MF) {
384   LLVM_DEBUG(dbgs() << "********** CFG Sorting **********\n"
385                        "********** Function: "
386                     << MF.getName() << '\n');
387 
388   const auto &MLI = getAnalysis<MachineLoopInfo>();
389   const auto &WEI = getAnalysis<WebAssemblyExceptionInfo>();
390   auto &MDT = getAnalysis<MachineDominatorTree>();
391   // Liveness is not tracked for VALUE_STACK physreg.
392   MF.getRegInfo().invalidateLiveness();
393 
394   // Sort the blocks, with contiguous sort regions.
395   sortBlocks(MF, MLI, WEI, MDT);
396 
397   return true;
398 }
399