//! Computation of basic block order in emitted code.
//!
//! This module handles the translation from CLIF BBs to VCode BBs.
//!
//! The basic idea is that we compute a sequence of "lowered blocks" that
//! correspond to one or more blocks in the graph: (CLIF CFG) `union` (implicit
//! block on *every* edge). Conceptually, the lowering pipeline wants to insert
//! moves for phi-nodes on every block-to-block transfer; these blocks always
//! conceptually exist, but may be merged with an "original" CLIF block (and
//! hence not actually exist; this is equivalent to inserting the blocks only on
//! critical edges).
//!
//! In other words, starting from a CFG like this (where each "CLIF block" and
//! "(edge N->M)" is a separate basic block):
//!
//! ```plain
//!
//!              CLIF block 0
//!               /           \
//!       (edge 0->1)         (edge 0->2)
//!              |                |
//!       CLIF block 1         CLIF block 2
//!              \                /
//!           (edge 1->3)   (edge 2->3)
//!                   \      /
//!                 CLIF block 3
//! ```
//!
//! We can produce a CFG of lowered blocks like so:
//!
//! ```plain
//!            +--------------+
//!            | CLIF block 0 |
//!            +--------------+
//!               /           \
//!     +--------------+     +--------------+
//!     | (edge 0->1)  |     |(edge 0->2)   |
//!     | CLIF block 1 |     | CLIF block 2 |
//!     +--------------+     +--------------+
//!              \                /
//!          +-----------+ +-----------+
//!          |(edge 1->3)| |(edge 2->3)|
//!          +-----------+ +-----------+
//!                   \      /
//!                +------------+
//!                |CLIF block 3|
//!                +------------+
//! ```
//!
//! (note that the edges into CLIF blocks 1 and 2 could be merged with those
//! blocks' original bodies, but the out-edges could not because for simplicity
//! in the successor-function definition, we only ever merge an edge onto one
//! side of an original CLIF block.)
//!
//! Each `LoweredBlock` names just an original CLIF block, an original CLIF
//! block prepended or appended with an edge block (never both, though), or just
//! an edge block.
//!
//! To compute this lowering, we do a DFS over the CLIF-plus-edge-block graph
//! (never actually materialized, just defined by a "successors" function), and
//! compute the reverse postorder.
//!
//! This algorithm isn't perfect w.r.t. generated code quality: we don't, for
//! example, consider any information about whether edge blocks will actually
//! have content, because this computation happens as part of lowering *before*
//! regalloc, and regalloc may or may not insert moves/spills/reloads on any
//! particular edge. But it works relatively well and is conceptually simple.
//! Furthermore, the [MachBuffer] machine-code sink performs final peephole-like
//! branch editing that in practice elides empty blocks and simplifies some of
//! the other redundancies that this scheme produces.
71 
72 use crate::entity::SecondaryMap;
73 use crate::fx::{FxHashMap, FxHashSet};
74 use crate::ir::{Block, Function, Inst, Opcode};
75 use crate::machinst::lower::visit_block_succs;
76 use crate::machinst::*;
77 
78 use log::debug;
79 use smallvec::SmallVec;
80 
/// Mapping from CLIF BBs to VCode BBs.
///
/// Built once per function by [BlockLoweringOrder::new]; all fields are
/// immutable after construction and exposed through accessor methods.
#[derive(Debug)]
pub struct BlockLoweringOrder {
    /// Lowered blocks, in BlockIndex order. Each block is some combination of
    /// (i) a CLIF block, and (ii) inserted crit-edge blocks before or after;
    /// see [LoweredBlock] for details.
    lowered_order: Vec<LoweredBlock>,
    /// Successors for all lowered blocks, in one serialized vector. Indexed by
    /// the ranges in `lowered_succ_ranges`. Each entry pairs the branch
    /// instruction responsible for the edge with the successor lowered block.
    lowered_succs: Vec<(Inst, LoweredBlock)>,
    /// BlockIndex values for successors for all lowered blocks, in the same
    /// order as `lowered_succs` (i.e., sharing `lowered_succ_ranges`).
    lowered_succ_indices: Vec<(Inst, BlockIndex)>,
    /// Ranges in `lowered_succs` giving the successor lists for each lowered
    /// block. Indexed by lowering-order index (`BlockIndex`).
    lowered_succ_ranges: Vec<(usize, usize)>,
    /// Mapping from CLIF BB to BlockIndex (index in lowered order). Note that
    /// some CLIF BBs may not be lowered; in particular, we skip unreachable
    /// blocks.
    orig_map: SecondaryMap<Block, Option<BlockIndex>>,
}
102 
/// The origin of a block in the lowered block-order: either an original CLIF
/// block, or an inserted edge-block, or a combination of the two if an edge is
/// non-critical.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum LoweredBlock {
    /// Block in original CLIF, with no merged edge-blocks.
    Orig {
        /// Original CLIF block.
        block: Block,
    },
    /// Block in the original CLIF, plus edge-block to one succ (which is the
    /// one successor of the original block).
    OrigAndEdge {
        /// The original CLIF block contained in this lowered block.
        block: Block,
        /// The edge (jump) instruction transitioning from this block
        /// to the next, i.e., corresponding to the included edge-block. This
        /// will be an instruction in `block`.
        edge_inst: Inst,
        /// The successor CLIF block.
        succ: Block,
    },
    /// Block in the original CLIF, preceded by edge-block from one pred (which
    /// is the one pred of the original block).
    EdgeAndOrig {
        /// The previous CLIF block, i.e., the edge block's predecessor.
        pred: Block,
        /// The edge (jump) instruction corresponding to the included
        /// edge-block. This will be an instruction in `pred`.
        edge_inst: Inst,
        /// The original CLIF block included in this lowered block.
        block: Block,
    },
    /// Split critical edge between two CLIF blocks. This lowered block does not
    /// correspond to any original CLIF blocks; it only serves as an insertion
    /// point for work to happen on the transition from `pred` to `succ`.
    Edge {
        /// The predecessor CLIF block.
        pred: Block,
        /// The edge (jump) instruction corresponding to this edge's transition.
        /// This will be an instruction in `pred`.
        edge_inst: Inst,
        /// The successor CLIF block.
        succ: Block,
    },
}
149 
150 impl LoweredBlock {
151     /// The associated original (CLIF) block included in this lowered block, if
152     /// any.
orig_block(self) -> Option<Block>153     pub fn orig_block(self) -> Option<Block> {
154         match self {
155             LoweredBlock::Orig { block, .. }
156             | LoweredBlock::OrigAndEdge { block, .. }
157             | LoweredBlock::EdgeAndOrig { block, .. } => Some(block),
158             LoweredBlock::Edge { .. } => None,
159         }
160     }
161 
162     /// The associated in-edge, if any.
in_edge(self) -> Option<(Block, Inst, Block)>163     pub fn in_edge(self) -> Option<(Block, Inst, Block)> {
164         match self {
165             LoweredBlock::EdgeAndOrig {
166                 pred,
167                 edge_inst,
168                 block,
169             } => Some((pred, edge_inst, block)),
170             _ => None,
171         }
172     }
173 
174     /// the associated out-edge, if any. Also includes edge-only blocks.
out_edge(self) -> Option<(Block, Inst, Block)>175     pub fn out_edge(self) -> Option<(Block, Inst, Block)> {
176         match self {
177             LoweredBlock::OrigAndEdge {
178                 block,
179                 edge_inst,
180                 succ,
181             } => Some((block, edge_inst, succ)),
182             LoweredBlock::Edge {
183                 pred,
184                 edge_inst,
185                 succ,
186             } => Some((pred, edge_inst, succ)),
187             _ => None,
188         }
189     }
190 }
191 
impl BlockLoweringOrder {
    /// Compute and return a lowered block order for `f`.
    ///
    /// The order is a reverse postorder over the implicit CLIF-plus-edge-block
    /// graph, with edge blocks merged into single-pred/single-succ CLIF blocks
    /// where possible, and with any `FallthroughReturn` block forced to come
    /// last in the order.
    pub fn new(f: &Function) -> BlockLoweringOrder {
        debug!("BlockLoweringOrder: function body {:?}", f);

        // Step 1: compute the in-edge and out-edge count of every block.
        // These counts drive the edge-merging decisions below: an edge block
        // can merge into its successor only if that successor has exactly one
        // in-edge, and into its predecessor only if that predecessor has
        // exactly one out-edge.
        let mut block_in_count = SecondaryMap::with_default(0);
        let mut block_out_count = SecondaryMap::with_default(0);

        // Cache the block successors to avoid re-examining branches below.
        let mut block_succs: SmallVec<[(Inst, Block); 128]> = SmallVec::new();
        let mut block_succ_range = SecondaryMap::with_default((0, 0));
        let mut fallthrough_return_block = None;
        for block in f.layout.blocks() {
            let block_succ_start = block_succs.len();
            visit_block_succs(f, block, |inst, succ| {
                block_out_count[block] += 1;
                block_in_count[succ] += 1;
                block_succs.push((inst, succ));
            });
            let block_succ_end = block_succs.len();
            block_succ_range[block] = (block_succ_start, block_succ_end);

            for inst in f.layout.block_likely_branches(block) {
                if f.dfg[inst].opcode() == Opcode::Return {
                    // Implicit output edge for any return. This prevents an
                    // in-edge from merging into a returning block (its
                    // out-count is no longer 1).
                    block_out_count[block] += 1;
                }
                if f.dfg[inst].opcode() == Opcode::FallthroughReturn {
                    // Fallthrough return block must come last. At most one
                    // such block may exist per function.
                    debug_assert!(fallthrough_return_block == None);
                    fallthrough_return_block = Some(block);
                }
            }
        }
        // Implicit input edge for entry block.
        if let Some(entry) = f.layout.entry_block() {
            block_in_count[entry] += 1;
        }

        // Here we define the implicit CLIF-plus-edges graph. There are
        // conceptually two such graphs: the original, with every edge explicit,
        // and the merged one, with blocks (represented by `LoweredBlock`
        // values) that contain original CLIF blocks, edges, or both. This
        // function returns a lowered block's successors as per the latter, with
        // consideration to edge-block merging.
        //
        // Note that there is a property of the block-merging rules below
        // that is very important to ensure we don't miss any lowered blocks:
        // any block in the implicit CLIF-plus-edges graph will *only* be
        // included in one block in the merged graph.
        //
        // This, combined with the property that every edge block is reachable
        // only from one predecessor (and hence cannot be reached by a DFS
        // backedge), means that it is sufficient in our DFS below to track
        // visited-bits per original CLIF block only, not per edge. This greatly
        // simplifies the data structures (no need to keep a sparse hash-set of
        // (block, block) tuples).
        //
        // Appends the successors of `block` to `ret` and returns the
        // (start, end) index range covering them.
        let compute_lowered_succs = |ret: &mut Vec<(Inst, LoweredBlock)>, block: LoweredBlock| {
            let start_idx = ret.len();
            match block {
                LoweredBlock::Orig { block } | LoweredBlock::EdgeAndOrig { block, .. } => {
                    // At an orig block; successors are always edge blocks,
                    // possibly with orig blocks following.
                    let range = block_succ_range[block];
                    for &(edge_inst, succ) in &block_succs[range.0..range.1] {
                        if block_in_count[succ] == 1 {
                            // `succ` has a single pred (us), so the edge block
                            // can be merged in front of it.
                            ret.push((
                                edge_inst,
                                LoweredBlock::EdgeAndOrig {
                                    pred: block,
                                    edge_inst,
                                    block: succ,
                                },
                            ));
                        } else {
                            // Critical edge: keep a standalone edge block.
                            ret.push((
                                edge_inst,
                                LoweredBlock::Edge {
                                    pred: block,
                                    edge_inst,
                                    succ,
                                },
                            ));
                        }
                    }
                }
                LoweredBlock::Edge {
                    succ, edge_inst, ..
                }
                | LoweredBlock::OrigAndEdge {
                    succ, edge_inst, ..
                } => {
                    // At an edge block; successors are always orig blocks,
                    // possibly with edge blocks following.
                    if block_out_count[succ] == 1 {
                        let range = block_succ_range[succ];
                        // check if the one succ is a real CFG edge (vs.
                        // implicit return succ).
                        if range.1 - range.0 > 0 {
                            debug_assert!(range.1 - range.0 == 1);
                            // `succ` has a single out-edge, so that edge can
                            // be merged onto the end of it.
                            let (succ_edge_inst, succ_succ) = block_succs[range.0];
                            ret.push((
                                edge_inst,
                                LoweredBlock::OrigAndEdge {
                                    block: succ,
                                    edge_inst: succ_edge_inst,
                                    succ: succ_succ,
                                },
                            ));
                        } else {
                            ret.push((edge_inst, LoweredBlock::Orig { block: succ }));
                        }
                    } else {
                        ret.push((edge_inst, LoweredBlock::Orig { block: succ }));
                    }
                }
            }
            let end_idx = ret.len();
            (start_idx, end_idx)
        };

        // Build the explicit LoweredBlock-to-LoweredBlock successors list.
        let mut lowered_succs = vec![];
        let mut lowered_succ_indices = vec![];

        // Step 2: Compute RPO traversal of the implicit CLIF-plus-edge-block graph. Use an
        // explicit stack so we don't overflow the real stack with a deep DFS.
        #[derive(Debug)]
        struct StackEntry {
            this: LoweredBlock,
            succs: (usize, usize), // range in lowered_succs
            cur_succ: usize,       // index in lowered_succs
        }

        let mut stack: SmallVec<[StackEntry; 16]> = SmallVec::new();
        let mut visited = FxHashSet::default();
        let mut postorder = vec![];
        if let Some(entry) = f.layout.entry_block() {
            // FIXME(cfallin): we might be able to use OrigAndEdge. Find a way
            // to not special-case the entry block here.
            let block = LoweredBlock::Orig { block: entry };
            visited.insert(block);
            let range = compute_lowered_succs(&mut lowered_succs, block);
            // Keep placeholder slots in lockstep with `lowered_succs`; the
            // real index values are computed in Step 3 below.
            lowered_succ_indices.resize(lowered_succs.len(), 0);
            stack.push(StackEntry {
                this: block,
                succs: range,
                cur_succ: range.1,
            });
        }

        // The lowered block (and its succ range) containing the fallthrough
        // return, if any; held back from the postorder so that it lands last
        // in the final order.
        let mut deferred_last = None;
        while !stack.is_empty() {
            let stack_entry = stack.last_mut().unwrap();
            let range = stack_entry.succs;
            if stack_entry.cur_succ == range.0 {
                // All successors of this entry have been visited: emit it in
                // postorder and pop it.
                let orig_block = stack_entry.this.orig_block();
                if orig_block.is_some() && orig_block == fallthrough_return_block {
                    deferred_last = Some((stack_entry.this, range));
                } else {
                    postorder.push((stack_entry.this, range));
                }
                stack.pop();
            } else {
                // Heuristic: chase the children in reverse. This puts the first
                // successor block first in RPO, all other things being equal,
                // which tends to prioritize loop backedges over out-edges,
                // putting the edge-block closer to the loop body and minimizing
                // live-ranges in linear instruction space.
                let next = lowered_succs[stack_entry.cur_succ - 1].1;
                stack_entry.cur_succ -= 1;
                if visited.contains(&next) {
                    continue;
                }
                visited.insert(next);
                let range = compute_lowered_succs(&mut lowered_succs, next);
                lowered_succ_indices.resize(lowered_succs.len(), 0);
                stack.push(StackEntry {
                    this: next,
                    succs: range,
                    cur_succ: range.1,
                });
            }
        }

        // Reversed postorder is the RPO; append the deferred fallthrough
        // return block (if any) at the very end.
        postorder.reverse();
        let mut rpo = postorder;
        if let Some(d) = deferred_last {
            rpo.push(d);
        }

        // Step 3: now that we have RPO, build the BlockIndex/BB fwd/rev maps.
        let mut lowered_order = vec![];
        let mut lowered_succ_ranges = vec![];
        let mut lb_to_bindex = FxHashMap::default();
        for (block, succ_range) in rpo.into_iter() {
            lb_to_bindex.insert(block, lowered_order.len() as BlockIndex);
            lowered_order.push(block);
            lowered_succ_ranges.push(succ_range);
        }

        // Shadow the placeholder vec with the real successor indices, in the
        // same order as `lowered_succs`. Every successor must have been
        // visited (and hence assigned an index) by the DFS above.
        let lowered_succ_indices = lowered_succs
            .iter()
            .map(|&(inst, succ)| (inst, lb_to_bindex.get(&succ).cloned().unwrap()))
            .collect();

        let mut orig_map = SecondaryMap::with_default(None);
        for (i, lb) in lowered_order.iter().enumerate() {
            let i = i as BlockIndex;
            if let Some(b) = lb.orig_block() {
                orig_map[b] = Some(i);
            }
        }

        let result = BlockLoweringOrder {
            lowered_order,
            lowered_succs,
            lowered_succ_indices,
            lowered_succ_ranges,
            orig_map,
        };
        debug!("BlockLoweringOrder: {:?}", result);
        result
    }

    /// Get the lowered order of blocks.
    pub fn lowered_order(&self) -> &[LoweredBlock] {
        &self.lowered_order[..]
    }

    /// Get the successors for a lowered block, by index in `lowered_order()`'s
    /// returned slice. Each successor is paired with the edge-instruction
    /// (branch) corresponding to this edge.
    pub fn succs(&self, block: BlockIndex) -> &[(Inst, LoweredBlock)] {
        let range = self.lowered_succ_ranges[block as usize];
        &self.lowered_succs[range.0..range.1]
    }

    /// Get the successor indices for a lowered block, in the same order as
    /// `succs()` for the same block.
    pub fn succ_indices(&self, block: BlockIndex) -> &[(Inst, BlockIndex)] {
        let range = self.lowered_succ_ranges[block as usize];
        &self.lowered_succ_indices[range.0..range.1]
    }

    /// Get the lowered block index containing a CLIF block, if any. (May not be
    /// present if the original CLIF block was unreachable.)
    pub fn lowered_block_for_bb(&self, bb: Block) -> Option<BlockIndex> {
        self.orig_map[bb]
    }
}
443 
#[cfg(test)]
mod test {
    use super::*;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::types::*;
    use crate::ir::{AbiParam, ExternalName, Function, InstBuilder, Signature};
    use crate::isa::CallConv;

    /// Build a test function with `n_blocks` blocks and the CFG edges given in
    /// `edges`, which must be sorted by source block index. Each source block
    /// may have at most two successors: zero successors lowers to a `return`,
    /// one to an unconditional `jump`, and two to a `brnz` + `jump` pair.
    fn build_test_func(n_blocks: usize, edges: &[(usize, usize)]) -> Function {
        assert!(n_blocks > 0);

        let name = ExternalName::testcase("test0");
        let mut sig = Signature::new(CallConv::SystemV);
        sig.params.push(AbiParam::new(I32));
        let mut func = Function::with_name_signature(name, sig);
        let blocks = (0..n_blocks)
            .map(|i| {
                let bb = func.dfg.make_block();
                // Tests below rely on block N having index N.
                assert_eq!(bb.as_u32(), i as u32);
                bb
            })
            .collect::<Vec<_>>();

        let arg0 = func.dfg.append_block_param(blocks[0], I32);

        let mut pos = FuncCursor::new(&mut func);

        // `edges` is sorted by source block, so a single cursor suffices to
        // collect each block's successor list.
        let mut edge = 0;
        for i in 0..n_blocks {
            pos.insert_block(blocks[i]);
            let mut succs = vec![];
            while edge < edges.len() && edges[edge].0 == i {
                succs.push(edges[edge].1);
                edge += 1;
            }
            match succs.len() {
                0 => {
                    pos.ins().return_(&[arg0]);
                }
                1 => {
                    pos.ins().jump(blocks[succs[0]], &[]);
                }
                2 => {
                    // Conditional branch to the first successor; fallthrough
                    // jump to the second.
                    pos.ins().brnz(arg0, blocks[succs[0]], &[]);
                    pos.ins().jump(blocks[succs[1]], &[]);
                }
                _ => panic!("Too many successors"),
            }
        }

        func
    }

    #[test]
    fn test_blockorder_diamond() {
        let func = build_test_func(4, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
        let order = BlockLoweringOrder::new(&func);

        assert_eq!(order.lowered_order.len(), 6);

        // block 0
        assert_eq!(order.lowered_order[0].orig_block().unwrap().as_u32(), 0);
        assert!(order.lowered_order[0].in_edge().is_none());
        assert!(order.lowered_order[0].out_edge().is_none());

        // edge 0->1 + block 1
        assert_eq!(order.lowered_order[1].orig_block().unwrap().as_u32(), 1);
        assert_eq!(order.lowered_order[1].in_edge().unwrap().0.as_u32(), 0);
        assert_eq!(order.lowered_order[1].in_edge().unwrap().2.as_u32(), 1);

        // edge 1->3
        assert!(order.lowered_order[2].orig_block().is_none());
        assert!(order.lowered_order[2].in_edge().is_none());
        assert_eq!(order.lowered_order[2].out_edge().unwrap().0.as_u32(), 1);
        assert_eq!(order.lowered_order[2].out_edge().unwrap().2.as_u32(), 3);

        // edge 0->2 + block 2
        assert_eq!(order.lowered_order[3].orig_block().unwrap().as_u32(), 2);
        assert_eq!(order.lowered_order[3].in_edge().unwrap().0.as_u32(), 0);
        assert_eq!(order.lowered_order[3].in_edge().unwrap().2.as_u32(), 2);
        assert!(order.lowered_order[3].out_edge().is_none());

        // edge 2->3
        assert!(order.lowered_order[4].orig_block().is_none());
        assert!(order.lowered_order[4].in_edge().is_none());
        assert_eq!(order.lowered_order[4].out_edge().unwrap().0.as_u32(), 2);
        assert_eq!(order.lowered_order[4].out_edge().unwrap().2.as_u32(), 3);

        // block 3
        assert_eq!(order.lowered_order[5].orig_block().unwrap().as_u32(), 3);
        assert!(order.lowered_order[5].in_edge().is_none());
        assert!(order.lowered_order[5].out_edge().is_none());
    }

    #[test]
    fn test_blockorder_critedge() {
        //            0
        //          /   \
        //         1     2
        //        /  \     \
        //       3    4    |
        //       |\  _|____|
        //       | \/ |
        //       | /\ |
        //       5    6
        //
        // (3 -> 5, 3 -> 6, 4 -> 6 are critical edges and must be split)
        //
        let func = build_test_func(
            7,
            &[
                (0, 1),
                (0, 2),
                (1, 3),
                (1, 4),
                (2, 5),
                (3, 5),
                (3, 6),
                (4, 6),
            ],
        );
        let order = BlockLoweringOrder::new(&func);

        assert_eq!(order.lowered_order.len(), 11);
        println!("ordered = {:?}", order.lowered_order);

        // block 0
        assert_eq!(order.lowered_order[0].orig_block().unwrap().as_u32(), 0);
        assert!(order.lowered_order[0].in_edge().is_none());
        assert!(order.lowered_order[0].out_edge().is_none());

        // edge 0->1 + block 1
        assert_eq!(order.lowered_order[1].orig_block().unwrap().as_u32(), 1);
        assert_eq!(order.lowered_order[1].in_edge().unwrap().0.as_u32(), 0);
        assert_eq!(order.lowered_order[1].in_edge().unwrap().2.as_u32(), 1);
        assert!(order.lowered_order[1].out_edge().is_none());

        // edge 1->3 + block 3
        assert_eq!(order.lowered_order[2].orig_block().unwrap().as_u32(), 3);
        assert_eq!(order.lowered_order[2].in_edge().unwrap().0.as_u32(), 1);
        assert_eq!(order.lowered_order[2].in_edge().unwrap().2.as_u32(), 3);
        assert!(order.lowered_order[2].out_edge().is_none());

        // edge 3->5
        assert!(order.lowered_order[3].orig_block().is_none());
        assert!(order.lowered_order[3].in_edge().is_none());
        assert_eq!(order.lowered_order[3].out_edge().unwrap().0.as_u32(), 3);
        assert_eq!(order.lowered_order[3].out_edge().unwrap().2.as_u32(), 5);

        // edge 3->6
        assert!(order.lowered_order[4].orig_block().is_none());
        assert!(order.lowered_order[4].in_edge().is_none());
        assert_eq!(order.lowered_order[4].out_edge().unwrap().0.as_u32(), 3);
        assert_eq!(order.lowered_order[4].out_edge().unwrap().2.as_u32(), 6);

        // edge 1->4 + block 4
        assert_eq!(order.lowered_order[5].orig_block().unwrap().as_u32(), 4);
        assert_eq!(order.lowered_order[5].in_edge().unwrap().0.as_u32(), 1);
        assert_eq!(order.lowered_order[5].in_edge().unwrap().2.as_u32(), 4);
        assert!(order.lowered_order[5].out_edge().is_none());

        // edge 4->6
        assert!(order.lowered_order[6].orig_block().is_none());
        assert!(order.lowered_order[6].in_edge().is_none());
        assert_eq!(order.lowered_order[6].out_edge().unwrap().0.as_u32(), 4);
        assert_eq!(order.lowered_order[6].out_edge().unwrap().2.as_u32(), 6);

        // block 6
        assert_eq!(order.lowered_order[7].orig_block().unwrap().as_u32(), 6);
        assert!(order.lowered_order[7].in_edge().is_none());
        assert!(order.lowered_order[7].out_edge().is_none());

        // edge 0->2 + block 2
        assert_eq!(order.lowered_order[8].orig_block().unwrap().as_u32(), 2);
        assert_eq!(order.lowered_order[8].in_edge().unwrap().0.as_u32(), 0);
        assert_eq!(order.lowered_order[8].in_edge().unwrap().2.as_u32(), 2);
        assert!(order.lowered_order[8].out_edge().is_none());

        // edge 2->5
        assert!(order.lowered_order[9].orig_block().is_none());
        assert!(order.lowered_order[9].in_edge().is_none());
        assert_eq!(order.lowered_order[9].out_edge().unwrap().0.as_u32(), 2);
        assert_eq!(order.lowered_order[9].out_edge().unwrap().2.as_u32(), 5);

        // block 5
        assert_eq!(order.lowered_order[10].orig_block().unwrap().as_u32(), 5);
        assert!(order.lowered_order[10].in_edge().is_none());
        assert!(order.lowered_order[10].out_edge().is_none());
    }
}
625