// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::raw_mutex::RawMutex;
use lock_api;

/// A mutual exclusion primitive useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can also be statically initialized or created via a `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// # Fairness
///
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same mutex in succession, starving other
/// threads waiting to acquire it. While this improves throughput because it
/// doesn't force a context switch when a thread tries to re-acquire a mutex it
/// has just released, the resulting starvation can be a problem.
///
/// This mutex uses [eventual fairness](https://trac.webkit.org/changeset/203350)
/// to ensure that the lock will be fair on average without sacrificing
/// throughput. This is done by forcing a fair unlock on average every 0.5ms,
/// which will force the lock to go to the next thread waiting for the mutex.
///
/// Additionally, any critical section longer than 1ms will always use a fair
/// unlock, which has a negligible impact on throughput considering the length
/// of the critical section.
///
/// You can also force a fair unlock by calling `MutexGuard::unlock_fair` when
/// unlocking a mutex instead of simply dropping the `MutexGuard`.
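///
/// As a minimal sketch of an explicit fair unlock, using the
/// `MutexGuard::unlock_fair` method mentioned above:
///
/// ```
/// use parking_lot::{Mutex, MutexGuard};
///
/// let mutex = Mutex::new(0u32);
/// let mut guard = mutex.lock();
/// *guard += 1;
/// // Hand the lock directly to the next waiting thread (if any) instead of
/// // using the normal, unfair unlock that happens when the guard is dropped.
/// MutexGuard::unlock_fair(guard);
/// ```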
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 byte of space, whereas the standard library boxes the
///   `Mutex` due to platform limitations.
/// - Can be statically constructed (requires the `const_fn` nightly feature;
///   see also the `const_mutex` function for stable Rust).
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard (see the sketch after this
///   list).
/// - Supports eventual fairness so that the mutex is fair on average.
/// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`.
///
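/// As referenced in the list above, the lock can be held without keeping a
/// guard around. A minimal sketch of one way to do this (leaking the guard and
/// later releasing the lock with the unsafe `force_unlock` method):
///
/// ```
/// use parking_lot::Mutex;
/// use std::mem;
///
/// let mutex = Mutex::new(1);
/// // Acquire the lock and discard the guard, leaving the mutex locked.
/// mem::forget(mutex.lock());
/// assert!(mutex.try_lock().is_none());
/// // Later, release the lock without having a guard in hand. This is unsafe
/// // because the caller must guarantee that the mutex is actually locked.
/// unsafe { mutex.force_unlock(); }
/// assert!(mutex.try_lock().is_some());
/// ```
///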
/// # Examples
///
/// ```
/// use parking_lot::Mutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(Mutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..N {
///     let (data, tx) = (Arc::clone(&data), tx.clone());
///     thread::spawn(move || {
///         // The shared state can only be accessed once the lock is held.
///         // Our non-atomic increment is safe because we're the only thread
///         // which can access the shared state when the lock is held.
///         let mut data = data.lock();
///         *data += 1;
///         if *data == N {
///             tx.send(()).unwrap();
///         }
///         // the lock is unlocked here when `data` goes out of scope.
///     });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;

/// Creates a new mutex in an unlocked state ready for use.
///
/// This allows creating a mutex in a constant context on stable Rust.
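///
/// A small sketch of static initialization (the static name here is just
/// illustrative):
///
/// ```
/// use parking_lot::{const_mutex, Mutex};
///
/// // A mutex-protected counter that can be initialized in a `static`.
/// static COUNTER: Mutex<u32> = const_mutex(0);
///
/// *COUNTER.lock() += 1;
/// assert_eq!(*COUNTER.lock(), 1);
/// ```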
pub const fn const_mutex<T>(val: T) -> Mutex<T> {
    Mutex::const_new(<RawMutex as lock_api::RawMutex>::INIT, val)
}

/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;

/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
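///
/// A minimal sketch of obtaining such a guard via `MutexGuard::map` (the
/// `Config` type and its field are illustrative):
///
/// ```
/// use parking_lot::{Mutex, MutexGuard};
///
/// struct Config {
///     retries: u32,
/// }
///
/// let mutex = Mutex::new(Config { retries: 3 });
/// // Narrow the guard down to a single field; the result is a `MappedMutexGuard`.
/// let mut retries = MutexGuard::map(mutex.lock(), |c| &mut c.retries);
/// *retries += 1;
/// drop(retries);
/// assert_eq!(mutex.lock().retries, 4);
/// ```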
pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;

#[cfg(test)]
mod tests {
    use crate::{Condvar, Mutex};
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    struct Packet<T>(Arc<(Mutex<T>, Condvar)>);

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    unsafe impl<T: Send> Send for Packet<T> {}
    unsafe impl<T> Sync for Packet<T> {}

    #[test]
    fn smoke() {
        let m = Mutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        const J: u32 = 1000;
        const K: u32 = 3;

        let m = Arc::new(Mutex::new(0));

        fn inc(m: &Mutex<u32>) {
            for _ in 0..J {
                *m.lock() += 1;
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(*m.lock(), J * K * 2);
    }

    #[test]
    fn try_lock() {
        let m = Mutex::new(());
        *m.try_lock().unwrap() = ();
    }

    #[test]
    fn test_into_inner() {
        let m = Mutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = Mutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = Mutex::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_mutex_arc_condvar() {
        let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
        let packet2 = Packet(packet.0.clone());
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            // wait until parent gets in
            rx.recv().unwrap();
            let &(ref lock, ref cvar) = &*packet2.0;
            let mut lock = lock.lock();
            *lock = true;
            cvar.notify_one();
        });

        let &(ref lock, ref cvar) = &*packet.0;
        let mut lock = lock.lock();
        tx.send(()).unwrap();
        assert!(!*lock);
        while !*lock {
            cvar.wait(&mut lock);
        }
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(Mutex::new(1));
        let arc2 = Arc::new(Mutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<Mutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutexguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let mutex = Mutex::new(());
        sync(mutex.lock());
    }

    #[test]
    fn test_mutex_debug() {
        let mutex = Mutex::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
        let _lock = mutex.lock();
        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
    }

    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = Mutex::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: Mutex<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.lock()), *(deserialized.lock()));
        assert_eq!(contents, *(deserialized.lock()));
    }
}