#![allow(non_snake_case)]
#![allow(non_camel_case_types)]

//! Backtracking allocator: per-real-register commitment maps

use std::cmp::Ordering;
use std::fmt;

use crate::avl_tree::{AVLTree, AVL_NULL};
use crate::data_structures::{
    cmp_range_frags, InstPoint, RangeFrag, RangeFragIx, RangeId, SortedRangeFragIxs,
    SortedRangeFrags, TypedIxVec,
};

//=============================================================================
// Per-real-register commitment maps
//

// Something that pairs a fragment with the identity of the virtual or real range to which this
// fragment conceptually "belongs", at least for the purposes of this commitment map.  If the
// `id` field denotes a real range, the associated fragment belongs to a real-reg live range
// and is therefore non-evictable.  The identity of the range is necessary because:
//
// * for VirtualRanges, (1) we may need to evict the mapping, so we will need to get hold of the
//   VirtualRange, so that we have all fragments of the VirtualRange to hand, and (2) if the
//   client requires stackmaps, we need to look at the VirtualRange to see if it is reftyped.
//
// * for RealRanges, only (2) applies; (1) is irrelevant since RealRange assignments are
//   non-evictable.
//
// (A fragment merely denotes a sequence of instruction points, but within the context of a
// commitment map for a real register, obviously any particular fragment can't be part of two
// different virtual live ranges.)
//
// Note that we don't intend to actually use the PartialOrd methods for RangeFragAndRangeId.
// However, they need to exist, since we want to construct an AVLTree<RangeFragAndRangeId>, and
// that requires PartialOrd for its element type.  When working with such trees we will supply
// our own comparison function; hence PartialOrd here serves only to placate the typechecker.
// It should never actually be used.  (A standalone illustration of this pattern follows the
// impls below.)
#[derive(Clone)]
pub struct RangeFragAndRangeId {
    pub frag: RangeFrag,
    pub id: RangeId,
}
impl RangeFragAndRangeId {
    fn new(frag: RangeFrag, id: RangeId) -> Self {
        Self { frag, id }
    }
}
impl PartialEq for RangeFragAndRangeId {
    fn eq(&self, _other: &Self) -> bool {
        // See comments above.
        panic!("impl PartialEq for RangeFragAndRangeId: should never be used");
    }
}
impl PartialOrd for RangeFragAndRangeId {
    fn partial_cmp(&self, _other: &Self) -> Option<Ordering> {
        // See comments above.
        panic!("impl PartialOrd for RangeFragAndRangeId: should never be used");
    }
}
impl fmt::Debug for RangeFragAndRangeId {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "(FnV {:?} {:?})", self.frag, self.id)
    }
}

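// A small standalone illustration (hypothetical `Item` and `insert_sorted`,
// std-only, not the allocator's API) of the pattern described above: ordering
// impls exist only to satisfy a bound and panic if called, while every real
// comparison goes through an externally supplied closure, analogous to the
// closures handed to the AVL tree operations below.
#[cfg(test)]
mod external_comparator_sketch {
    use std::cmp::Ordering;

    struct Item(u32);
    impl PartialEq for Item {
        fn eq(&self, _other: &Self) -> bool {
            panic!("PartialEq for Item: should never be used");
        }
    }
    impl PartialOrd for Item {
        fn partial_cmp(&self, _other: &Self) -> Option<Ordering> {
            panic!("PartialOrd for Item: should never be used");
        }
    }

    // Keeps `v` sorted per `cmp` alone; `Item`'s own (panicking) impls are
    // never consulted.
    fn insert_sorted(v: &mut Vec<Item>, x: Item, cmp: &dyn Fn(&Item, &Item) -> Ordering) {
        let pos = v
            .binary_search_by(|probe| cmp(probe, &x))
            .unwrap_or_else(|p| p);
        v.insert(pos, x);
    }

    #[test]
    fn comparisons_go_through_the_closure() {
        let mut v = Vec::new();
        for n in [3u32, 1, 2] {
            insert_sorted(&mut v, Item(n), &|a, b| a.0.cmp(&b.0));
        }
        // No panic: the closure, not Item's impls, did all the ordering work.
        assert_eq!(v.iter().map(|i| i.0).collect::<Vec<_>>(), vec![1, 2, 3]);
    }
}
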
//=============================================================================
// Per-real-register commitment maps
//

// This indicates the current set of fragments to which some real register is
// currently "committed".  The fragments *must* be non-overlapping.  Hence
// they form a total order, and so we may validly build an AVL tree of them.
// (A std-only sketch of this ordering property follows the `Debug` impl below.)

pub struct CommitmentMap {
    pub tree: AVLTree<RangeFragAndRangeId>,
}
impl fmt::Debug for CommitmentMap {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let as_vec = self.tree.to_vec();
        as_vec.fmt(fmt)
    }
}

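// A standalone, std-only sketch (hypothetical `Frag`; `u32` endpoints stand
// in for `InstPoint`) of the claim above: disjoint fragments compare wholly
// by their endpoints, giving a total order, while an overlapping pair admits
// no order at all, in the spirit of `cmp_range_frags` as used below.
#[cfg(test)]
mod non_overlap_total_order_sketch {
    use std::cmp::Ordering;

    // Inclusive-interval stand-in for `RangeFrag`.
    struct Frag {
        first: u32,
        last: u32,
    }

    // A definite order for disjoint fragments, `None` when they overlap.
    fn cmp_frags(a: &Frag, b: &Frag) -> Option<Ordering> {
        if a.last < b.first {
            Some(Ordering::Less)
        } else if b.last < a.first {
            Some(Ordering::Greater)
        } else {
            None
        }
    }

    #[test]
    fn disjoint_frags_totally_order() {
        let a = Frag { first: 0, last: 4 };
        let b = Frag { first: 10, last: 12 };
        let c = Frag { first: 3, last: 11 }; // overlaps both `a` and `b`
        assert_eq!(cmp_frags(&a, &b), Some(Ordering::Less));
        assert_eq!(cmp_frags(&b, &a), Some(Ordering::Greater));
        assert_eq!(cmp_frags(&a, &c), None); // no valid tree position
        assert_eq!(cmp_frags(&c, &b), None);
    }
}
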
impl CommitmentMap {
    pub fn new() -> Self {
        // The AVL tree constructor needs a default value for the elements.
        // That value will never be used; the invalid RangeId will show up as
        // obviously bogus if we ever try to "dereference" any part of it.
        let dflt = RangeFragAndRangeId::new(RangeFrag::invalid_value(), RangeId::invalid_value());
        Self {
            tree: AVLTree::<RangeFragAndRangeId>::new(dflt),
        }
    }

    pub fn add(&mut self, to_add_frags: &SortedRangeFrags, to_add_lr_id: RangeId) {
        for frag in &to_add_frags.frags {
            let to_add = RangeFragAndRangeId::new(frag.clone(), to_add_lr_id);
            let added = self.tree.insert(
                to_add,
                Some(&|pair1: RangeFragAndRangeId, pair2: RangeFragAndRangeId| {
                    cmp_range_frags(&pair1.frag, &pair2.frag)
                }),
            );
            // If this fails, it means the fragment overlaps one that has already
            // been committed to.  That's a serious error.
            assert!(added);
        }
    }

    pub fn add_indirect(
        &mut self,
        to_add_frags: &SortedRangeFragIxs,
        to_add_lr_id: RangeId,
        frag_env: &TypedIxVec<RangeFragIx, RangeFrag>,
    ) {
        for fix in &to_add_frags.frag_ixs {
            let to_add = RangeFragAndRangeId::new(frag_env[*fix].clone(), to_add_lr_id);
            let added = self.tree.insert(
                to_add,
                Some(&|pair1: RangeFragAndRangeId, pair2: RangeFragAndRangeId| {
                    cmp_range_frags(&pair1.frag, &pair2.frag)
                }),
            );
            // If this fails, it means the fragment overlaps one that has already
            // been committed to.  That's a serious error.
            assert!(added);
        }
    }

    pub fn del(&mut self, to_del_frags: &SortedRangeFrags) {
        for frag in &to_del_frags.frags {
            // re RangeId::invalid_value(): we don't care what the RangeId is, since we're
            // deleting by RangeFrags alone.
            let to_del = RangeFragAndRangeId::new(frag.clone(), RangeId::invalid_value());
            let deleted = self.tree.delete(
                to_del,
                Some(&|pair1: RangeFragAndRangeId, pair2: RangeFragAndRangeId| {
                    cmp_range_frags(&pair1.frag, &pair2.frag)
                }),
            );
            // If this fails, it means the fragment wasn't already committed to.
            // That's also a serious error.
            assert!(deleted);
        }
    }

    // Find the RangeId for the RangeFrag that overlaps `pt`, if one exists.
    // This is conceptually equivalent to LogicalSpillSlot::get_refness_at_inst_point.
    // (A standalone sketch of the whole add/del/lookup protocol follows this impl.)
    pub fn lookup_inst_point(&self, pt: InstPoint) -> Option<RangeId> {
        let mut root = self.tree.root;
        while root != AVL_NULL {
            let root_node = &self.tree.pool[root as usize];
            let root_item = &root_node.item;
            if pt < root_item.frag.first {
                // `pt` is to the left of `root`, so there's no overlap with
                // `root`.  Continue by inspecting the left subtree.
                root = root_node.left;
            } else if root_item.frag.last < pt {
                // Ditto for the right subtree.
                root = root_node.right;
            } else {
                // `pt` overlaps `root`, so we have what we want.
                return Some(root_item.id);
            }
        }
        None
    }
}
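
// A self-contained, std-only sketch (hypothetical `MiniCommitmentMap`, not
// the allocator's API; `BTreeMap` stands in for the AVL tree and `u32` for
// `InstPoint`) of the protocol above: committing a fragment that overlaps an
// existing one, or uncommitting one that was never committed, is a hard
// error, and point lookup mirrors the containment test in `lookup_inst_point`.
#[cfg(test)]
mod commitment_map_protocol_sketch {
    use std::collections::BTreeMap;

    struct MiniCommitmentMap {
        // Key: (first, last) inclusive fragment; value: a stand-in range id.
        tree: BTreeMap<(u32, u32), u32>,
    }

    impl MiniCommitmentMap {
        fn new() -> Self {
            Self {
                tree: BTreeMap::new(),
            }
        }

        fn add(&mut self, first: u32, last: u32, id: u32) {
            // Since committed fragments are disjoint, only the fragment with
            // the greatest start at or before `last` can overlap [first, last].
            let overlaps = self
                .tree
                .range(..=(last, u32::MAX))
                .next_back()
                .map_or(false, |(&(_, l), _)| first <= l);
            assert!(!overlaps, "fragment overlaps an existing commitment");
            self.tree.insert((first, last), id);
        }

        fn del(&mut self, first: u32, last: u32) {
            // As in `del` above: delete by fragment alone; ids are irrelevant.
            let deleted = self.tree.remove(&(first, last)).is_some();
            assert!(deleted, "fragment was not committed");
        }

        fn lookup(&self, pt: u32) -> Option<u32> {
            // The nearest fragment starting at or before `pt` is the only one
            // that can contain it.
            self.tree
                .range(..=(pt, u32::MAX))
                .next_back()
                .and_then(|(&(f, l), &id)| if f <= pt && pt <= l { Some(id) } else { None })
        }
    }

    #[test]
    fn commit_uncommit_lookup() {
        let mut m = MiniCommitmentMap::new();
        m.add(0, 4, 7);
        m.add(10, 12, 8);
        assert_eq!(m.lookup(3), Some(7));
        assert_eq!(m.lookup(7), None); // the gap between the two fragments
        assert_eq!(m.lookup(11), Some(8));
        m.del(0, 4);
        assert_eq!(m.lookup(3), None);
    }
}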