1 // Copyright 2016 Amanieu d'Antras 2 // 3 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 // copied, modified, or distributed except according to those terms. 7 8 use crate::raw_rwlock::RawRwLock; 9 use lock_api; 10 11 /// A reader-writer lock 12 /// 13 /// This type of lock allows a number of readers or at most one writer at any 14 /// point in time. The write portion of this lock typically allows modification 15 /// of the underlying data (exclusive access) and the read portion of this lock 16 /// typically allows for read-only access (shared access). 17 /// 18 /// This lock uses a task-fair locking policy which avoids both reader and 19 /// writer starvation. This means that readers trying to acquire the lock will 20 /// block even if the lock is unlocked when there are writers waiting to acquire 21 /// the lock. Because of this, attempts to recursively acquire a read lock 22 /// within a single thread may result in a deadlock. 23 /// 24 /// The type parameter `T` represents the data that this lock protects. It is 25 /// required that `T` satisfies `Send` to be shared across threads and `Sync` to 26 /// allow concurrent access through readers. The RAII guards returned from the 27 /// locking methods implement `Deref` (and `DerefMut` for the `write` methods) 28 /// to allow access to the contained of the lock. 29 /// 30 /// # Fairness 31 /// 32 /// A typical unfair lock can often end up in a situation where a single thread 33 /// quickly acquires and releases the same lock in succession, which can starve 34 /// other threads waiting to acquire the rwlock. While this improves performance 35 /// because it doesn't force a context switch when a thread tries to re-acquire 36 /// a rwlock it has just released, this can starve other threads. 
37 /// 38 /// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350) 39 /// to ensure that the lock will be fair on average without sacrificing 40 /// performance. This is done by forcing a fair unlock on average every 0.5ms, 41 /// which will force the lock to go to the next thread waiting for the rwlock. 42 /// 43 /// Additionally, any critical section longer than 1ms will always use a fair 44 /// unlock, which has a negligible performance impact compared to the length of 45 /// the critical section. 46 /// 47 /// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair` 48 /// or `RwLockWriteGuard::unlock_fair` when unlocking a mutex instead of simply 49 /// dropping the guard. 50 /// 51 /// # Differences from the standard library `RwLock` 52 /// 53 /// - Supports atomically downgrading a write lock into a read lock. 54 /// - Task-fair locking policy instead of an unspecified platform default. 55 /// - No poisoning, the lock is released normally on panic. 56 /// - Only requires 1 word of space, whereas the standard library boxes the 57 /// `RwLock` due to platform limitations. 58 /// - Can be statically constructed (requires the `const_fn` nightly feature). 59 /// - Does not require any drop glue when dropped. 60 /// - Inline fast path for the uncontended case. 61 /// - Efficient handling of micro-contention using adaptive spinning. 62 /// - Allows raw locking & unlocking without a guard. 63 /// - Supports eventual fairness so that the rwlock is fair on average. 64 /// - Optionally allows making the rwlock fair by calling 65 /// `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`. 
66 /// 67 /// # Examples 68 /// 69 /// ``` 70 /// use parking_lot::RwLock; 71 /// 72 /// let lock = RwLock::new(5); 73 /// 74 /// // many reader locks can be held at once 75 /// { 76 /// let r1 = lock.read(); 77 /// let r2 = lock.read(); 78 /// assert_eq!(*r1, 5); 79 /// assert_eq!(*r2, 5); 80 /// } // read locks are dropped at this point 81 /// 82 /// // only one write lock may be held, however 83 /// { 84 /// let mut w = lock.write(); 85 /// *w += 1; 86 /// assert_eq!(*w, 6); 87 /// } // write lock is dropped here 88 /// ``` 89 pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>; 90 91 /// RAII structure used to release the shared read access of a lock when 92 /// dropped. 93 pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>; 94 95 /// RAII structure used to release the exclusive write access of a lock when 96 /// dropped. 97 pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>; 98 99 /// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a 100 /// subfield of the protected data. 101 /// 102 /// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the 103 /// former doesn't support temporarily unlocking and re-locking, since that 104 /// could introduce soundness issues if the locked object is modified by another 105 /// thread. 106 pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>; 107 108 /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a 109 /// subfield of the protected data. 110 /// 111 /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the 112 /// former doesn't support temporarily unlocking and re-locking, since that 113 /// could introduce soundness issues if the locked object is modified by another 114 /// thread. 
pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;

/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;

#[cfg(test)]
mod tests {
    use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
    use rand::Rng;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    // Non-Copy payload used to verify move semantics of into_inner/get_mut.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    // Basic sanity check: every lock mode can be acquired and released, and
    // compatible modes (read+read, read+upgradable_read) can coexist.
    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop(l.upgradable_read());
        drop((l.read(), l.read()));
        drop((l.read(), l.upgradable_read()));
        drop(l.write());
    }

    // Stress test: N threads each perform M random read/write acquisitions
    // (~1/N probability of a write). The channel is only used to wait for all
    // senders to drop, i.e. for every thread to finish.
    #[test]
    fn frob() {
        const N: u32 = 10;
        const M: u32 = 1000;

        let r = Arc::new(RwLock::new(()));

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            let r = r.clone();
            thread::spawn(move || {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    if rng.gen_bool(1.0 / N as f64) {
                        drop(r.write());
                    } else {
                        drop(r.read());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        // recv() returns Err once all clones of tx have been dropped.
        let _ = rx.recv();
    }

    // A panic while holding a write lock must not poison the lock: a
    // subsequent read still succeeds and sees the original value.
    #[test]
    fn test_rw_arc_no_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    // Same as above, but the post-panic acquisition is a write lock.
    #[test]
    fn test_rw_arc_no_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    // A panic while holding a read lock must not poison the lock (read after).
    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    // A panic while holding a read lock must not poison the lock (write after).
    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!()
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    // Exercises upgradable reads: writers and upgrading readers each add 1 to
    // the counter (temporarily writing -1 to expose torn reads); plain readers
    // assert they never observe the transient negative value. Final value is
    // 10 writer increments + 5 upgrader increments = 15.
    #[test]
    fn test_ruw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            for _ in 0..10 {
                let mut lock = arc2.write();
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        let mut children = Vec::new();

        // Upgradable readers try to catch the writer in the act and also
        // try to touch the value
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.upgradable_read();
                let tmp = *lock;
                assert!(tmp >= 0);
                thread::yield_now();
                // The value must not change across the upgrade boundary.
                let mut lock = RwLockUpgradableReadGuard::upgrade(lock);
                assert_eq!(tmp, *lock);
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }));
        }

        // Readers try to catch the writers in the act
        for _ in 0..5 {
            let arc4 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc4.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 15);
    }

    // Like test_ruw_arc but with a single writer holding the lock across all
    // 10 increments; readers assert they never see the transient -1.
    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    // The lock must be acquirable from a Drop impl that runs during panic
    // unwinding (no poisoning, no deadlock).
    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    // RwLock works with unsized contents (here, a slice) via unsize coercion.
    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    // try_read succeeds alongside read and upgradable_read guards, and fails
    // only while a write guard is held.
    #[test]
    fn test_rwlock_try_read() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let read_result = lock.try_read();
            assert!(
                read_result.is_none(),
                "try_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    // try_write fails while any other guard (read, upgradable, write) is held.
    #[test]
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    // try_upgradable_read coexists with plain readers, but at most one
    // upgradable guard may exist at a time, and none while a writer is active.
    #[test]
    fn test_rwlock_try_upgrade() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_some(),
                "try_upgradable_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    // into_inner moves the protected value out of a consumed lock.
    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    // into_inner must transfer ownership: the value is dropped exactly once,
    // and only when the extracted inner value goes out of scope.
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    // get_mut gives lock-free mutable access when the RwLock is owned (&mut).
    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    // Compile-time check that both guard types implement Sync.
    #[test]
    fn test_rwlockguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let rwlock = RwLock::new(());
        sync(rwlock.read());
        sync(rwlock.write());
    }

    // Atomic downgrade: the value observed through the resulting read guard
    // must equal the value written just before downgrading, even under
    // contention from 7 other writer threads. 8 threads * 100 increments = 800.
    #[test]
    fn test_rwlock_downgrade() {
        let x = Arc::new(RwLock::new(0));
        let mut handles = Vec::new();
        for _ in 0..8 {
            let x = x.clone();
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    let mut writer = x.write();
                    *writer += 1;
                    let cur_val = *writer;
                    let reader = RwLockWriteGuard::downgrade(writer);
                    assert_eq!(cur_val, *reader);
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap()
        }
        assert_eq!(*x.read(), 800);
    }

    // read_recursive must succeed on a thread that already holds a read lock,
    // even though a writer is queued (a plain read would deadlock here due to
    // the task-fair policy).
    #[test]
    fn test_rwlock_recursive() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _lock1 = arc.read();
        thread::spawn(move || {
            let _lock = arc2.write();
        });

        // Give the spawned writer time to enqueue itself behind _lock1.
        if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
            thread::sleep(Duration::from_millis(100));
        } else {
            // FIXME: https://github.com/fortanix/rust-sgx/issues/31
            for _ in 0..100 {
                thread::yield_now();
            }
        }

        // A normal read would block here since there is a pending writer
        let _lock2 = arc.read_recursive();
    }

    // Debug output shows the data when unlocked and "<locked>" when a writer
    // holds the lock.
    #[test]
    fn test_rwlock_debug() {
        let x = RwLock::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
        let _lock = x.write();
        assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
    }

    // Read guards deref to the inner value, so cloning through a guard clones
    // the Arc (strong count goes from 1 to 2).
    #[test]
    fn test_clone() {
        let rwlock = RwLock::new(Arc::new(1));
        let a = rwlock.read_recursive();
        let b = a.clone();
        assert_eq!(Arc::strong_count(&b), 2);
    }

    // Serde round-trip preserves the protected contents.
    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = RwLock::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.read()), *(deserialized.read()));
        assert_eq!(contents, *(deserialized.read()));
    }
}