1 use core::mem;
2 use core::sync::atomic::{self, AtomicUsize, Ordering};
3 
4 use crate::Backoff;
5 
/// A simple stamped lock.
///
/// The state is represented as two `AtomicUsize`: `state_hi` for high bits and `state_lo` for low
/// bits. Splitting the stamp across two words widens it beyond a single `usize`, which makes
/// false validation due to stamp wraparound less likely (see `validate_read`).
pub(crate) struct SeqLock {
    /// The high bits of the current state of the lock.
    ///
    /// Incremented (wrapping) each time `state_lo` wraps around; see `SeqLockWriteGuard::drop`.
    state_hi: AtomicUsize,

    /// The low bits of the current state of the lock.
    ///
    /// All bits except the least significant one hold the current stamp. When locked, the state_lo
    /// equals 1 and doesn't contain a valid stamp.
    state_lo: AtomicUsize,
}
20 
impl SeqLock {
    /// Creates a new lock in the unlocked state, with the initial stamp `(0, 0)`.
    pub(crate) const fn new() -> Self {
        Self {
            state_hi: AtomicUsize::new(0),
            state_lo: AtomicUsize::new(0),
        }
    }

    /// If not locked, returns the current stamp.
    ///
    /// This method should be called before optimistic reads; pair it with a later call to
    /// `validate_read` with the returned stamp.
    #[inline]
    pub(crate) fn optimistic_read(&self) -> Option<(usize, usize)> {
        // The acquire loads from `state_hi` and `state_lo` synchronize with the release stores in
        // `SeqLockWriteGuard::drop`.
        //
        // As a consequence, we can make sure that (1) all writes within the era of `state_hi - 1`
        // happens before now; and therefore, (2) if `state_lo` is even, all writes within the
        // critical section of (`state_hi`, `state_lo`) happens before now.
        let state_hi = self.state_hi.load(Ordering::Acquire);
        let state_lo = self.state_lo.load(Ordering::Acquire);
        if state_lo == 1 {
            // A writer currently holds the lock, so `state_lo` is not a valid stamp.
            None
        } else {
            Some((state_hi, state_lo))
        }
    }

    /// Returns `true` if the current stamp is equal to `stamp`.
    ///
    /// This method should be called after optimistic reads to check whether they are valid. The
    /// argument `stamp` should correspond to the one returned by method `optimistic_read`.
    #[inline]
    pub(crate) fn validate_read(&self, stamp: (usize, usize)) -> bool {
        // Thanks to the fence, if we're noticing any modification to the data at the critical
        // section of `(a, b)`, then the critical section's write of 1 to state_lo should be
        // visible.
        atomic::fence(Ordering::Acquire);

        // So if `state_lo` coincides with `stamp.1`, then either (1) we're noticing no modification
        // to the data after the critical section of `(stamp.0, stamp.1)`, or (2) `state_lo` wrapped
        // around.
        //
        // If (2) is the case, the acquire ordering ensures we see the new value of `state_hi`.
        let state_lo = self.state_lo.load(Ordering::Acquire);

        // If (2) is the case and `state_hi` coincides with `stamp.0`, then `state_hi` also wrapped
        // around, which we give up to correctly validate the read.
        //
        // (Relaxed suffices here: the acquire load of `state_lo` above already provides the
        // ordering described in the previous comment.)
        let state_hi = self.state_hi.load(Ordering::Relaxed);

        // Except for the case that both `state_hi` and `state_lo` wrapped around, the following
        // condition implies that we're noticing no modification to the data after the critical
        // section of `(stamp.0, stamp.1)`.
        (state_hi, state_lo) == stamp
    }

    /// Grabs the lock for writing.
    ///
    /// Spins (with backoff) until the lock is acquired. The `&'static self` receiver lets the
    /// returned guard store a plain `&'static SeqLock` without a lifetime parameter.
    #[inline]
    pub(crate) fn write(&'static self) -> SeqLockWriteGuard {
        let backoff = Backoff::new();
        loop {
            // Attempt to acquire the lock by installing the "locked" marker (1) in `state_lo`.
            // `previous` is the pre-lock stamp; the guard restores it (`abort`) or advances it
            // (`drop`) on release.
            let previous = self.state_lo.swap(1, Ordering::Acquire);

            if previous != 1 {
                // To synchronize with the acquire fence in `validate_read` via any modification to
                // the data at the critical section of `(state_hi, previous)`.
                atomic::fence(Ordering::Release);

                return SeqLockWriteGuard {
                    lock: self,
                    state_lo: previous,
                };
            }

            // Another writer holds the lock; back off before retrying.
            backoff.snooze();
        }
    }
}
99 
/// An RAII guard that releases the lock and increments the stamp when dropped.
///
/// Obtained from `SeqLock::write`. Dropping it publishes the write (stamp advances); calling
/// `abort` instead releases the lock with the stamp unchanged.
pub(crate) struct SeqLockWriteGuard {
    /// The parent lock.
    lock: &'static SeqLock,

    /// The stamp before locking.
    state_lo: usize,
}
108 
impl SeqLockWriteGuard {
    /// Releases the lock without incrementing the stamp.
    ///
    /// NOTE(review): presumably used when the protected data was left unmodified, so overlapping
    /// optimistic reads stay valid — confirm against call sites.
    #[inline]
    pub(crate) fn abort(self) {
        // Restore the pre-lock stamp. The release store pairs with the acquire loads in
        // `optimistic_read` / `validate_read`.
        self.lock.state_lo.store(self.state_lo, Ordering::Release);
        // Skip `Drop`, which would otherwise advance the stamp.
        mem::forget(self);
    }
}
117 
impl Drop for SeqLockWriteGuard {
    #[inline]
    fn drop(&mut self) {
        // Advance the stamp by 2 so it stays even: the least significant bit of `state_lo` is
        // reserved as the "locked" flag (see `SeqLock::state_lo`).
        let state_lo = self.state_lo.wrapping_add(2);

        // Increase the high bits if the low bits wrap around.
        //
        // Release ordering for synchronizing with `optimistic_read`.
        // (The Relaxed load is fine: this thread holds the lock, so it wrote or observed the
        // current `state_hi` value itself.)
        if state_lo == 0 {
            let state_hi = self.lock.state_hi.load(Ordering::Relaxed);
            self.lock
                .state_hi
                .store(state_hi.wrapping_add(1), Ordering::Release);
        }

        // Release the lock and increment the stamp. This store is what makes the critical
        // section's writes visible to readers validating against the old stamp.
        //
        // Release ordering for synchronizing with `optimistic_read`.
        self.lock.state_lo.store(state_lo, Ordering::Release);
    }
}
139 
#[cfg(test)]
mod tests {
    use super::SeqLock;

    /// An aborted write must leave the stamp exactly as it was before locking.
    #[test]
    fn test_abort() {
        static LK: SeqLock = SeqLock::new();

        let stamp_before = LK.optimistic_read().unwrap();

        // Acquire the lock, then release it via `abort` instead of `Drop`.
        LK.write().abort();

        let stamp_after = LK.optimistic_read().unwrap();
        assert_eq!(
            stamp_before, stamp_after,
            "aborted write does not update the stamp"
        );
    }
}
156