// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::raw_mutex::RawMutex;
use lock_api;

/// A mutual exclusion primitive useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can also be statically initialized or created via a `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// # Fairness
///
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same mutex in succession, starving other
/// threads waiting to acquire it. While this improves performance by avoiding
/// a context switch when a thread re-acquires a mutex it has just released,
/// the starvation of waiting threads remains a problem.
///
/// This mutex uses [eventual fairness](https://trac.webkit.org/changeset/203350)
/// to ensure that the lock will be fair on average without sacrificing
/// performance. This is done by forcing a fair unlock on average every 0.5ms,
/// which hands the lock to the next thread waiting for the mutex.
///
/// Additionally, any critical section longer than 1ms will always use a fair
/// unlock, which has a negligible performance impact compared to the length of
/// the critical section.
///
/// You can also force a fair unlock by calling `MutexGuard::unlock_fair` when
/// unlocking a mutex instead of simply dropping the `MutexGuard`.
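///
/// For example (a minimal sketch of an explicit fair unlock; `unlock_fair` is
/// the guard's associated function mentioned above):
///
/// ```
/// use parking_lot::{Mutex, MutexGuard};
///
/// let mutex = Mutex::new(0u32);
/// let mut guard = mutex.lock();
/// *guard += 1;
/// // Hand the lock directly to the next waiting thread (if any) instead of
/// // letting this thread re-acquire it immediately.
/// MutexGuard::unlock_fair(guard);
/// ```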
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 byte of space, whereas the standard library boxes the
///   `Mutex` due to platform limitations.
/// - Can be statically constructed (requires the `const_fn` nightly feature).
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
/// - Supports eventual fairness so that the mutex is fair on average.
/// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use parking_lot::Mutex;
/// use std::thread;
/// use std::sync::mpsc::channel;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(Mutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..N {
///     let (data, tx) = (Arc::clone(&data), tx.clone());
///     thread::spawn(move || {
///         // The shared state can only be accessed once the lock is held.
///         // Our non-atomic increment is safe because we're the only thread
///         // which can access the shared state when the lock is held.
///         let mut data = data.lock();
///         *data += 1;
///         if *data == N {
///             tx.send(()).unwrap();
///         }
///         // the lock is unlocked here when `data` goes out of scope.
///     });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;

/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;

/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;

#[cfg(test)]
mod tests {
    use crate::{Condvar, Mutex};
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    struct Packet<T>(Arc<(Mutex<T>, Condvar)>);

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    unsafe impl<T: Send> Send for Packet<T> {}
    unsafe impl<T> Sync for Packet<T> {}

    #[test]
    fn smoke() {
        let m = Mutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        const J: u32 = 1000;
        const K: u32 = 3;

        let m = Arc::new(Mutex::new(0));

        fn inc(m: &Mutex<u32>) {
            for _ in 0..J {
                *m.lock() += 1;
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(*m.lock(), J * K * 2);
    }

    #[test]
    fn try_lock() {
        let m = Mutex::new(());
        *m.try_lock().unwrap() = ();
    }

    #[test]
    fn test_into_inner() {
        let m = Mutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = Mutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = Mutex::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }
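
    // Not part of the upstream test suite: a small sketch showing how
    // `MutexGuard::map` narrows a guard down to one field of the protected
    // data, yielding a `MappedMutexGuard`.
    #[test]
    fn test_mapped_guard() {
        use super::MutexGuard;

        let mutex = Mutex::new((1i32, 2i32));
        {
            // Borrow only the second field of the tuple through the guard.
            let mut field = MutexGuard::map(mutex.lock(), |pair| &mut pair.1);
            *field += 10;
            // The mapped guard is dropped here, unlocking the mutex.
        }
        assert_eq!(*mutex.lock(), (1, 12));
    }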

    #[test]
    fn test_mutex_arc_condvar() {
        let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
        let packet2 = Packet(packet.0.clone());
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            // wait until parent gets in
            rx.recv().unwrap();
            let &(ref lock, ref cvar) = &*packet2.0;
            let mut lock = lock.lock();
            *lock = true;
            cvar.notify_one();
        });

        let &(ref lock, ref cvar) = &*packet.0;
        let mut lock = lock.lock();
        tx.send(()).unwrap();
        assert!(!*lock);
        while !*lock {
            cvar.wait(&mut lock);
        }
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(Mutex::new(1));
        let arc2 = Arc::new(Mutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<Mutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutexguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let mutex = Mutex::new(());
        sync(mutex.lock());
    }

    #[test]
    fn test_mutex_debug() {
        let mutex = Mutex::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
        let _lock = mutex.lock();
        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
    }

    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = Mutex::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: Mutex<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.lock()), *(deserialized.lock()));
        assert_eq!(contents, *(deserialized.lock()));
    }
}