use crate::rt::{execution, object, thread, MAX_ATOMIC_HISTORY, MAX_THREADS};

#[cfg(feature = "checkpoint")]
use serde::{Deserialize, Serialize};

/// An execution path
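///
/// A path tracks every branch point reached while executing a model: thread
/// scheduling decisions, atomic load choices, and spurious wakeups. Across
/// executions it drives a depth-first traversal of the branch tree.
///
/// A rough sketch of how a driver might use it (hypothetical; the real
/// driver lives elsewhere in `rt`, and `run_model` is a stand-in):
///
/// ```ignore
/// let mut path = Path::new(max_branches, preemption_bound);
/// loop {
///     // Execute the model once; each branch point consults or extends
///     // the path via `branch_thread`, `branch_load`, and `branch_spurious`.
///     run_model(&mut path);
///
///     // Move to the next unexplored path; `false` means the entire
///     // branch tree has been visited.
///     if !path.step() {
///         break;
///     }
/// }
/// ```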
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Path {
    preemption_bound: Option<u8>,

    /// Current execution's position in the branches vec.
    ///
    /// When the execution starts, this is zero, but `branches` might not be
    /// empty.
    ///
    /// In order to perform an exhaustive search, the execution is seeded with a
    /// set of branches.
    pos: usize,

    /// List of all branches in the execution.
    ///
    /// A branch is of type `Schedule`, `Load`, or `Spurious`.
    branches: object::Store<Entry>,
}

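/// A thread-scheduling branch point.
///
/// Records the exploration state of every thread at this point in the
/// execution, plus the bookkeeping needed for preemption-bounded search.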
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Schedule {
    /// Number of times the thread leading to this branch point has been
    /// preempted.
    preemptions: u8,

    /// The thread that was active first
    initial_active: Option<u8>,

    /// State of each thread
    threads: [Thread; MAX_THREADS],

    /// The previous schedule branch
    prev: Option<object::Ref<Schedule>>,
}

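/// An atomic-load branch point.
///
/// `values` holds the stores that the load is permitted to observe; `pos`
/// selects the one read by the current execution.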
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Load {
    /// All possible values
    values: [u8; MAX_ATOMIC_HISTORY],

    /// Current value
    pos: u8,

    /// Number of values in list
    len: u8,
}

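/// A spurious-wakeup branch point: `false` on the first traversal, flipped
/// to `true` by `step` on the next.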
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Spurious(bool);

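// Generate the `Entry` enum, with one variant per branch type, so that all
// three kinds of branch can live in a single `object::Store`.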
objects! {
    #[derive(Debug)]
    #[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
    Entry,
    Schedule(Schedule),
    Load(Load),
    Spurious(Spurious),
}

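/// The exploration state of a single thread at a `Schedule` branch point.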
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) enum Thread {
    /// The thread is currently disabled
    Disabled,

    /// The thread should not be explored
    Skip,

    /// The thread is in a yield state
    Yield,

    /// The thread is waiting to be explored
    Pending,

    /// The thread is currently being explored
    Active,

    /// The thread has been explored
    Visited,
}

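/// Asserts that the branch store has room for one more entry. Models that
/// never converge (e.g. spin loops that require another thread to make
/// progress) would otherwise grow the path without bound.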
macro_rules! assert_path_len {
    ($branches:expr) => {{
        assert!(
            $branches.len() < $branches.capacity(),
            "Model exceeded maximum number of branches. This is often caused \
             by an algorithm requiring the processor to make progress, e.g. \
             spin locks.",
        );
    }};
}

impl Path {
    /// Create a new, blank `Path`, configured to branch at most
    /// `max_branches` times and to allow at most `preemption_bound` thread
    /// preemptions.
    pub(crate) fn new(max_branches: usize, preemption_bound: Option<u8>) -> Path {
        Path {
            preemption_bound,
            pos: 0,
            branches: object::Store::with_capacity(max_branches),
        }
    }

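    /// Ensure the branch store has capacity for `max_branches` total
    /// branches.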
    pub(crate) fn set_max_branches(&mut self, max_branches: usize) {
        self.branches
            .reserve_exact(max_branches - self.branches.len());
    }

    /// Returns `true` if the execution has reached a point where the known
    /// path has been traversed and has reached a new branching point.
    pub(super) fn is_traversed(&self) -> bool {
        self.pos == self.branches.len()
    }

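    /// Returns the current position in the branch list.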
    pub(super) fn pos(&self) -> usize {
        self.pos
    }

    /// Push a new atomic-load branch
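    ///
    /// `seed` lists the stores (as indices into the atomic's history) that
    /// the load could observe.
    ///
    /// For example (hypothetical values):
    ///
    /// ```ignore
    /// // The load may observe stores 0, 1, or 2.
    /// path.push_load(&[0, 1, 2]);
    /// // The first execution reads the first seeded store.
    /// assert_eq!(path.branch_load(), 0);
    /// ```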
    pub(super) fn push_load(&mut self, seed: &[u8]) {
        assert_path_len!(self.branches);

        let load_ref = self.branches.insert(Load {
            values: [0; MAX_ATOMIC_HISTORY],
            pos: 0,
            len: 0,
        });

        let load = load_ref.get_mut(&mut self.branches);

        for (i, &store) in seed.iter().enumerate() {
            assert!(
                store < MAX_ATOMIC_HISTORY as u8,
                "[loom internal bug] store = {}; max = {}",
                store,
                MAX_ATOMIC_HISTORY
            );
            assert!(
                i < MAX_ATOMIC_HISTORY,
                "[loom internal bug] i = {}; max = {}",
                i,
                MAX_ATOMIC_HISTORY
            );

            load.values[i] = store;
            load.len += 1;
        }
    }

    /// Returns the atomic write to read
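    ///
    /// The choice of store is advanced by `step`, not here; this only reads
    /// the value selected for the current execution.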
    pub(super) fn branch_load(&mut self) -> usize {
        assert!(!self.is_traversed(), "[loom internal bug]");

        let load = object::Ref::from_usize(self.pos)
            .downcast::<Load>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches);

        self.pos += 1;

        load.values[load.pos as usize] as usize
    }

    /// Branch on spurious notifications
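    ///
    /// The first traversal of this point returns `false` (no spurious
    /// wakeup); `step` later flips the branch so that `true` is explored.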
    pub(super) fn branch_spurious(&mut self) -> bool {
        if self.is_traversed() {
            assert_path_len!(self.branches);

            self.branches.insert(Spurious(false));
        }

        let spurious = object::Ref::from_usize(self.pos)
            .downcast::<Spurious>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches)
            .0;

        self.pos += 1;
        spurious
    }

    /// Returns the thread identifier to schedule
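    ///
    /// The first time this point is reached, a new `Schedule` branch is
    /// created from `seed`; on subsequent executions the recorded schedule
    /// decides which thread runs.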
    pub(super) fn branch_thread(
        &mut self,
        execution_id: execution::Id,
        seed: impl ExactSizeIterator<Item = Thread>,
    ) -> Option<thread::Id> {
        if self.is_traversed() {
            assert_path_len!(self.branches);

            // Find the last thread scheduling branch in the path
            let prev = self.last_schedule();

            // Entering a new exploration space.
            //
            // Initialize a new branch. The initial field values don't matter
            // as they will be updated below.
            let schedule_ref = self.branches.insert(Schedule {
                preemptions: 0,
                initial_active: None,
                threads: [Thread::Disabled; MAX_THREADS],
                prev,
            });

            // Get a reference to the branch in the object store.
            let schedule = schedule_ref.get_mut(&mut self.branches);

            assert!(seed.len() <= MAX_THREADS, "[loom internal bug]");

            // Currently active thread
            let mut active = None;

            for (i, v) in seed.enumerate() {
                // Initialize thread states
                schedule.threads[i] = v;

                if v.is_active() {
                    assert!(
                        active.is_none(),
                        "[loom internal bug] only one thread should start as active"
                    );
                    active = Some(i as u8);
                }
            }

            // Ensure at least one thread is active, otherwise toggle a yielded
            // thread.
            if active.is_none() {
                for (i, th) in schedule.threads.iter_mut().enumerate() {
                    if *th == Thread::Yield {
                        *th = Thread::Active;
                        active = Some(i as u8);
                        break;
                    }
                }
            }

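            // `initial_active` remembers the thread that was active when
            // this branch point was first created, but only if it is the
            // same thread that was active at the previous schedule. If
            // exploration later activates a different thread here,
            // `Schedule::preemptions` counts that as a preemption.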
            let mut initial_active = active;

            if let Some(prev) = prev {
                if initial_active != prev.get(&self.branches).active_thread_index() {
                    initial_active = None;
                }
            }

            let preemptions = prev
                .map(|prev| prev.get(&self.branches).preemptions())
                .unwrap_or(0);

            debug_assert!(
                self.preemption_bound.is_none() || Some(preemptions) <= self.preemption_bound,
                "[loom internal bug] max = {:?}; curr = {}",
                self.preemption_bound,
                preemptions,
            );

            let schedule = schedule_ref.get_mut(&mut self.branches);
            schedule.initial_active = initial_active;
            schedule.preemptions = preemptions;
        }

        let schedule = object::Ref::from_usize(self.pos)
            .downcast::<Schedule>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches);

        self.pos += 1;

        schedule
            .threads
            .iter()
            .enumerate()
            .find(|&(_, ref th)| th.is_active())
            .map(|(i, _)| thread::Id::new(execution_id, i))
    }

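    /// Add a backtrack point for `thread_id` at the schedule branch located
    /// at `point`.
    ///
    /// When a preemption bound is set, a conservative backtrack point is
    /// also added at the most recent context switch (or at the very first
    /// schedule), covering interleavings that the bound would otherwise
    /// prune.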
    pub(super) fn backtrack(&mut self, point: usize, thread_id: thread::Id) {
        let schedule = object::Ref::from_usize(point)
            .downcast::<Schedule>(&self.branches)
            .unwrap()
            .get_mut(&mut self.branches);

        // Exhaustive DPOR only requires adding this backtrack point
        schedule.backtrack(thread_id, self.preemption_bound);

        let mut curr = if let Some(curr) = schedule.prev {
            curr
        } else {
            return;
        };

        if self.preemption_bound.is_some() {
            loop {
                // Preemption-bounded DPOR requires conservatively adding
                // another backtrack point to cover cases missed by the bound.
                if let Some(prev) = curr.get(&self.branches).prev {
                    let active_a = curr.get(&self.branches).active_thread_index();
                    let active_b = prev.get(&self.branches).active_thread_index();

                    if active_a != active_b {
                        curr.get_mut(&mut self.branches)
                            .backtrack(thread_id, self.preemption_bound);
                        return;
                    }

                    curr = prev;
                } else {
                    // This is the very first schedule
                    curr.get_mut(&mut self.branches)
                        .backtrack(thread_id, self.preemption_bound);
                    return;
                }
            }
        }
    }

    /// Reset the path to prepare for the next exploration of the model.
    ///
    /// This function will also trim the object store, dropping any objects
    /// that are created in pruned sections of the path.
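    ///
    /// Returns `true` if a new path remains to be explored, or `false` once
    /// every branch has been exhausted.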
    pub(super) fn step(&mut self) -> bool {
        // Reset the position to zero; the path will start traversing from
        // the beginning.
        self.pos = 0;

        // Set the final branch to try the next option. If all options have
        // been traversed, pop the final branch and try again with the one
        // under it.
        //
        // This is a depth-first tree traversal.
        for last in (0..self.branches.len()).rev() {
            let last = object::Ref::from_usize(last);

            // Remove all objects that were created **after** this branch
            self.branches.truncate(last);

            if let Some(schedule_ref) = last.downcast::<Schedule>(&self.branches) {
                let schedule = schedule_ref.get_mut(&mut self.branches);

                // Transition the active thread to visited.
                schedule
                    .threads
                    .iter_mut()
                    .find(|th| th.is_active())
                    .map(|th| *th = Thread::Visited);

                // Find a pending thread and transition it to active
                let rem = schedule
                    .threads
                    .iter_mut()
                    .find(|th| th.is_pending())
                    .map(|th| {
                        *th = Thread::Active;
                    })
                    .is_some();

                if rem {
                    return true;
                }
            } else if let Some(load_ref) = last.downcast::<Load>(&self.branches) {
                let load = load_ref.get_mut(&mut self.branches);

                load.pos += 1;

                if load.pos < load.len {
                    return true;
                }
            } else if let Some(spurious_ref) = last.downcast::<Spurious>(&self.branches) {
                let spurious = spurious_ref.get_mut(&mut self.branches);

                if !spurious.0 {
                    spurious.0 = true;
                    return true;
                }
            } else {
                unreachable!();
            }
        }

        false
    }

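    /// Returns a reference to the most recent `Schedule` branch, if any.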
    fn last_schedule(&self) -> Option<object::Ref<Schedule>> {
        self.branches.iter_ref::<Schedule>().rev().next()
    }
}

impl Schedule {
    /// Returns the index of the currently active thread
    fn active_thread_index(&self) -> Option<u8> {
        self.threads
            .iter()
            .enumerate()
            .find(|(_, th)| th.is_active())
            .map(|(index, _)| index as u8)
    }

    /// Compute the number of preemptions for the current state of the branch
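    ///
    /// If the currently active thread differs from the thread that was
    /// initially active at this branch point, that switch counts as one
    /// additional preemption.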
    fn preemptions(&self) -> u8 {
        if self.initial_active.is_some() {
            if self.initial_active != self.active_thread_index() {
                return self.preemptions + 1;
            }
        }

        self.preemptions
    }

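    /// Mark `thread_id` for exploration at this branch point (a `Skip`
    /// thread becomes `Pending`), unless the preemption bound has already
    /// been reached. If the thread is disabled here, conservatively mark
    /// every thread instead.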
    fn backtrack(&mut self, thread_id: thread::Id, preemption_bound: Option<u8>) {
        if let Some(bound) = preemption_bound {
            assert!(
                self.preemptions <= bound,
                "[loom internal bug] actual = {}, bound = {}",
                self.preemptions,
                bound
            );

            if self.preemptions == bound {
                return;
            }
        }

        let thread_id = thread_id.as_usize();

        if thread_id >= self.threads.len() {
            return;
        }

        if self.threads[thread_id].is_enabled() {
            self.threads[thread_id].explore();
        } else {
            for th in &mut self.threads {
                th.explore();
            }
        }
    }
}

impl Thread {
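    /// Transition the thread from `Skip` to `Pending` so it will be
    /// explored; all other states are left unchanged.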
    fn explore(&mut self) {
        match *self {
            Thread::Skip => {
                *self = Thread::Pending;
            }
            _ => {}
        }
    }

    fn is_pending(&self) -> bool {
        match *self {
            Thread::Pending => true,
            _ => false,
        }
    }

    fn is_active(&self) -> bool {
        match *self {
            Thread::Active => true,
            _ => false,
        }
    }

    fn is_enabled(&self) -> bool {
        !self.is_disabled()
    }

    fn is_disabled(&self) -> bool {
        *self == Thread::Disabled
    }
}