1 // Copyright 2016 Amanieu d'Antras 2 // 3 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 // copied, modified, or distributed except according to those terms. 7 8 use core::cell::UnsafeCell; 9 use core::fmt; 10 use core::marker::PhantomData; 11 use core::mem; 12 use core::ops::{Deref, DerefMut}; 13 14 #[cfg(feature = "arc_lock")] 15 use alloc::sync::Arc; 16 #[cfg(feature = "arc_lock")] 17 use core::mem::ManuallyDrop; 18 #[cfg(feature = "arc_lock")] 19 use core::ptr; 20 21 #[cfg(feature = "owning_ref")] 22 use owning_ref::StableAddress; 23 24 #[cfg(feature = "serde")] 25 use serde::{Deserialize, Deserializer, Serialize, Serializer}; 26 27 /// Basic operations for a reader-writer lock. 28 /// 29 /// Types implementing this trait can be used by `RwLock` to form a safe and 30 /// fully-functioning `RwLock` type. 31 /// 32 /// # Safety 33 /// 34 /// Implementations of this trait must ensure that the `RwLock` is actually 35 /// exclusive: an exclusive lock can't be acquired while an exclusive or shared 36 /// lock exists, and a shared lock can't be acquire while an exclusive lock 37 /// exists. 38 pub unsafe trait RawRwLock { 39 /// Initial value for an unlocked `RwLock`. 40 // A “non-constant” const item is a legacy way to supply an initialized value to downstream 41 // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. 42 #[allow(clippy::declare_interior_mutable_const)] 43 const INIT: Self; 44 45 /// Marker type which determines whether a lock guard should be `Send`. Use 46 /// one of the `GuardSend` or `GuardNoSend` helper types here. 47 type GuardMarker; 48 49 /// Acquires a shared lock, blocking the current thread until it is able to do so. lock_shared(&self)50 fn lock_shared(&self); 51 52 /// Attempts to acquire a shared lock without blocking. 
try_lock_shared(&self) -> bool53 fn try_lock_shared(&self) -> bool; 54 55 /// Releases a shared lock. 56 /// 57 /// # Safety 58 /// 59 /// This method may only be called if a shared lock is held in the current context. unlock_shared(&self)60 unsafe fn unlock_shared(&self); 61 62 /// Acquires an exclusive lock, blocking the current thread until it is able to do so. lock_exclusive(&self)63 fn lock_exclusive(&self); 64 65 /// Attempts to acquire an exclusive lock without blocking. try_lock_exclusive(&self) -> bool66 fn try_lock_exclusive(&self) -> bool; 67 68 /// Releases an exclusive lock. 69 /// 70 /// # Safety 71 /// 72 /// This method may only be called if an exclusive lock is held in the current context. unlock_exclusive(&self)73 unsafe fn unlock_exclusive(&self); 74 75 /// Checks if this `RwLock` is currently locked in any way. 76 #[inline] is_locked(&self) -> bool77 fn is_locked(&self) -> bool { 78 let acquired_lock = self.try_lock_exclusive(); 79 if acquired_lock { 80 // Safety: A lock was successfully acquired above. 81 unsafe { 82 self.unlock_exclusive(); 83 } 84 } 85 !acquired_lock 86 } 87 } 88 89 /// Additional methods for RwLocks which support fair unlocking. 90 /// 91 /// Fair unlocking means that a lock is handed directly over to the next waiting 92 /// thread if there is one, without giving other threads the opportunity to 93 /// "steal" the lock in the meantime. This is typically slower than unfair 94 /// unlocking, but may be necessary in certain circumstances. 95 pub unsafe trait RawRwLockFair: RawRwLock { 96 /// Releases a shared lock using a fair unlock protocol. 97 /// 98 /// # Safety 99 /// 100 /// This method may only be called if a shared lock is held in the current context. unlock_shared_fair(&self)101 unsafe fn unlock_shared_fair(&self); 102 103 /// Releases an exclusive lock using a fair unlock protocol. 104 /// 105 /// # Safety 106 /// 107 /// This method may only be called if an exclusive lock is held in the current context. 
unlock_exclusive_fair(&self)108 unsafe fn unlock_exclusive_fair(&self); 109 110 /// Temporarily yields a shared lock to a waiting thread if there is one. 111 /// 112 /// This method is functionally equivalent to calling `unlock_shared_fair` followed 113 /// by `lock_shared`, however it can be much more efficient in the case where there 114 /// are no waiting threads. 115 /// 116 /// # Safety 117 /// 118 /// This method may only be called if a shared lock is held in the current context. bump_shared(&self)119 unsafe fn bump_shared(&self) { 120 self.unlock_shared_fair(); 121 self.lock_shared(); 122 } 123 124 /// Temporarily yields an exclusive lock to a waiting thread if there is one. 125 /// 126 /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed 127 /// by `lock_exclusive`, however it can be much more efficient in the case where there 128 /// are no waiting threads. 129 /// 130 /// # Safety 131 /// 132 /// This method may only be called if an exclusive lock is held in the current context. bump_exclusive(&self)133 unsafe fn bump_exclusive(&self) { 134 self.unlock_exclusive_fair(); 135 self.lock_exclusive(); 136 } 137 } 138 139 /// Additional methods for RwLocks which support atomically downgrading an 140 /// exclusive lock to a shared lock. 141 pub unsafe trait RawRwLockDowngrade: RawRwLock { 142 /// Atomically downgrades an exclusive lock into a shared lock without 143 /// allowing any thread to take an exclusive lock in the meantime. 144 /// 145 /// # Safety 146 /// 147 /// This method may only be called if an exclusive lock is held in the current context. downgrade(&self)148 unsafe fn downgrade(&self); 149 } 150 151 /// Additional methods for RwLocks which support locking with timeouts. 152 /// 153 /// The `Duration` and `Instant` types are specified as associated types so that 154 /// this trait is usable even in `no_std` environments. 
155 pub unsafe trait RawRwLockTimed: RawRwLock { 156 /// Duration type used for `try_lock_for`. 157 type Duration; 158 159 /// Instant type used for `try_lock_until`. 160 type Instant; 161 162 /// Attempts to acquire a shared lock until a timeout is reached. try_lock_shared_for(&self, timeout: Self::Duration) -> bool163 fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool; 164 165 /// Attempts to acquire a shared lock until a timeout is reached. try_lock_shared_until(&self, timeout: Self::Instant) -> bool166 fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool; 167 168 /// Attempts to acquire an exclusive lock until a timeout is reached. try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool169 fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool; 170 171 /// Attempts to acquire an exclusive lock until a timeout is reached. try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool172 fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool; 173 } 174 175 /// Additional methods for RwLocks which support recursive read locks. 176 /// 177 /// These are guaranteed to succeed without blocking if 178 /// another read lock is held at the time of the call. This allows a thread 179 /// to recursively lock a `RwLock`. However using this method can cause 180 /// writers to starve since readers no longer block if a writer is waiting 181 /// for the lock. 182 pub unsafe trait RawRwLockRecursive: RawRwLock { 183 /// Acquires a shared lock without deadlocking in case of a recursive lock. lock_shared_recursive(&self)184 fn lock_shared_recursive(&self); 185 186 /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock. try_lock_shared_recursive(&self) -> bool187 fn try_lock_shared_recursive(&self) -> bool; 188 } 189 190 /// Additional methods for RwLocks which support recursive read locks and timeouts. 
191 pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed { 192 /// Attempts to acquire a shared lock until a timeout is reached, without 193 /// deadlocking in case of a recursive lock. try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool194 fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool; 195 196 /// Attempts to acquire a shared lock until a timeout is reached, without 197 /// deadlocking in case of a recursive lock. try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool198 fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool; 199 } 200 201 /// Additional methods for RwLocks which support atomically upgrading a shared 202 /// lock to an exclusive lock. 203 /// 204 /// This requires acquiring a special "upgradable read lock" instead of a 205 /// normal shared lock. There may only be one upgradable lock at any time, 206 /// otherwise deadlocks could occur when upgrading. 207 pub unsafe trait RawRwLockUpgrade: RawRwLock { 208 /// Acquires an upgradable lock, blocking the current thread until it is able to do so. lock_upgradable(&self)209 fn lock_upgradable(&self); 210 211 /// Attempts to acquire an upgradable lock without blocking. try_lock_upgradable(&self) -> bool212 fn try_lock_upgradable(&self) -> bool; 213 214 /// Releases an upgradable lock. 215 /// 216 /// # Safety 217 /// 218 /// This method may only be called if an upgradable lock is held in the current context. unlock_upgradable(&self)219 unsafe fn unlock_upgradable(&self); 220 221 /// Upgrades an upgradable lock to an exclusive lock. 222 /// 223 /// # Safety 224 /// 225 /// This method may only be called if an upgradable lock is held in the current context. upgrade(&self)226 unsafe fn upgrade(&self); 227 228 /// Attempts to upgrade an upgradable lock to an exclusive lock without 229 /// blocking. 
230 /// 231 /// # Safety 232 /// 233 /// This method may only be called if an upgradable lock is held in the current context. try_upgrade(&self) -> bool234 unsafe fn try_upgrade(&self) -> bool; 235 } 236 237 /// Additional methods for RwLocks which support upgradable locks and fair 238 /// unlocking. 239 pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair { 240 /// Releases an upgradable lock using a fair unlock protocol. 241 /// 242 /// # Safety 243 /// 244 /// This method may only be called if an upgradable lock is held in the current context. unlock_upgradable_fair(&self)245 unsafe fn unlock_upgradable_fair(&self); 246 247 /// Temporarily yields an upgradable lock to a waiting thread if there is one. 248 /// 249 /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed 250 /// by `lock_upgradable`, however it can be much more efficient in the case where there 251 /// are no waiting threads. 252 /// 253 /// # Safety 254 /// 255 /// This method may only be called if an upgradable lock is held in the current context. bump_upgradable(&self)256 unsafe fn bump_upgradable(&self) { 257 self.unlock_upgradable_fair(); 258 self.lock_upgradable(); 259 } 260 } 261 262 /// Additional methods for RwLocks which support upgradable locks and lock 263 /// downgrading. 264 pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade { 265 /// Downgrades an upgradable lock to a shared lock. 266 /// 267 /// # Safety 268 /// 269 /// This method may only be called if an upgradable lock is held in the current context. downgrade_upgradable(&self)270 unsafe fn downgrade_upgradable(&self); 271 272 /// Downgrades an exclusive lock to an upgradable lock. 273 /// 274 /// # Safety 275 /// 276 /// This method may only be called if an exclusive lock is held in the current context. 
downgrade_to_upgradable(&self)277 unsafe fn downgrade_to_upgradable(&self); 278 } 279 280 /// Additional methods for RwLocks which support upgradable locks and locking 281 /// with timeouts. 282 pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed { 283 /// Attempts to acquire an upgradable lock until a timeout is reached. try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool284 fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool; 285 286 /// Attempts to acquire an upgradable lock until a timeout is reached. try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool287 fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool; 288 289 /// Attempts to upgrade an upgradable lock to an exclusive lock until a 290 /// timeout is reached. 291 /// 292 /// # Safety 293 /// 294 /// This method may only be called if an upgradable lock is held in the current context. try_upgrade_for(&self, timeout: Self::Duration) -> bool295 unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool; 296 297 /// Attempts to upgrade an upgradable lock to an exclusive lock until a 298 /// timeout is reached. 299 /// 300 /// # Safety 301 /// 302 /// This method may only be called if an upgradable lock is held in the current context. try_upgrade_until(&self, timeout: Self::Instant) -> bool303 unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool; 304 } 305 306 /// A reader-writer lock 307 /// 308 /// This type of lock allows a number of readers or at most one writer at any 309 /// point in time. The write portion of this lock typically allows modification 310 /// of the underlying data (exclusive access) and the read portion of this lock 311 /// typically allows for read-only access (shared access). 312 /// 313 /// The type parameter `T` represents the data that this lock protects. 
/// It is required that `T` satisfies `Send` to be shared across threads and
/// `Sync` to allow concurrent access through readers. The RAII guards returned
/// from the locking methods implement `Deref` (and `DerefMut` for the `write`
/// methods) to allow access to the contents of the lock.
pub struct RwLock<R, T: ?Sized> {
    raw: R,
    data: UnsafeCell<T>,
}

// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, T> Serialize for RwLock<R, T>
where
    R: RawRwLock,
    T: Serialize + ?Sized,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Serialize the protected data while holding a read lock.
        self.read().serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
where
    R: RawRwLock,
    T: Deserialize<'de> + ?Sized,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(RwLock::new)
    }
}

unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}

impl<R: RawRwLock, T> RwLock<R, T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(feature = "nightly")]
    #[inline]
    pub const fn new(val: T) -> RwLock<R, T> {
        RwLock {
            raw: R::INIT,
            data: UnsafeCell::new(val),
        }
    }

    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(not(feature = "nightly"))]
    #[inline]
    pub fn new(val: T) -> RwLock<R, T> {
        RwLock {
            raw: R::INIT,
            data: UnsafeCell::new(val),
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    #[inline]
    #[allow(unused_unsafe)]
    pub fn into_inner(self) -> T {
        unsafe { self.data.into_inner() }
    }
}

impl<R, T> RwLock<R, T> {
    /// Creates a new instance of an `RwLock<T>` based on a pre-existing
    /// `RawRwLock<T>`.
    ///
    /// This allows creating a `RwLock<T>` in a constant context on stable
    /// Rust.
    #[inline]
    pub const fn const_new(raw_rwlock: R, val: T) -> RwLock<R, T> {
        RwLock {
            raw: raw_rwlock,
            data: UnsafeCell::new(val),
        }
    }
}

impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn read_guard(&self) -> RwLockReadGuard<'_, R, T> {
        RwLockReadGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn write_guard(&self) -> RwLockWriteGuard<'_, R, T> {
        RwLockWriteGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
    /// the current thread already holds one may result in a deadlock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<'_, R, T> {
        self.raw.lock_shared();
        // SAFETY: the shared lock was acquired on the line above.
        unsafe { self.read_guard() }
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> {
        match self.raw.try_lock_shared() {
            // SAFETY: the shared lock was just acquired.
            true => Some(unsafe { self.read_guard() }),
            false => None,
        }
    }

    /// Locks this `RwLock` with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock`
    /// when dropped.
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<'_, R, T> {
        self.raw.lock_exclusive();
        // SAFETY: the exclusive lock was acquired on the line above.
        unsafe { self.write_guard() }
    }

    /// Attempts to lock this `RwLock` with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> {
        match self.raw.try_lock_exclusive() {
            // SAFETY: the exclusive lock was just acquired.
            true => Some(unsafe { self.write_guard() }),
            false => None,
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        // SAFETY: the exclusive borrow of `self` rules out any concurrent access.
        unsafe { &mut *self.data.get() }
    }

    /// Checks whether this `RwLock` is currently locked in any way.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.raw.is_locked()
    }

    /// Forcibly unlocks a read lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
    #[inline]
    pub unsafe fn force_unlock_read(&self) {
        self.raw.unlock_shared();
    }

    /// Forcibly unlocks a write lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
    #[inline]
    pub unsafe fn force_unlock_write(&self) {
        self.raw.unlock_exclusive();
    }

    /// Returns the underlying raw reader-writer lock object.
    ///
    /// Note that you will most likely need to import the `RawRwLock` trait from
    /// `lock_api` to be able to call functions on the raw
    /// reader-writer lock.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking the lock while
    /// still holding a reference to a lock guard.
    pub unsafe fn raw(&self) -> &R {
        &self.raw
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object
    /// alive, for example when dealing with FFI.
    ///
    /// # Safety
    ///
    /// You must ensure that there are no data races when dereferencing the
    /// returned pointer, for example if the current thread logically owns a
    /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded
    /// using `mem::forget`.
    #[inline]
    pub fn data_ptr(&self) -> *mut T {
        self.data.get()
    }

    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[cfg(feature = "arc_lock")]
    #[inline]
    unsafe fn read_guard_arc(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
        ArcRwLockReadGuard {
            rwlock: self.clone(),
            marker: PhantomData,
        }
    }

    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[cfg(feature = "arc_lock")]
    #[inline]
    unsafe fn write_guard_arc(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> {
        ArcRwLockWriteGuard {
            rwlock: self.clone(),
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with read access, through an `Arc`.
    ///
    /// This method is similar to the `read` method; however, it requires the `RwLock` to be inside of an `Arc`
    /// and the resulting read guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn read_arc(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
        self.raw.lock_shared();
        // SAFETY: the shared lock was acquired on the line above.
        unsafe { self.read_guard_arc() }
    }

    /// Attempts to lock this `RwLock` with read access, through an `Arc`.
    ///
    /// This method is similar to the `try_read` method; however, it requires the `RwLock` to be inside of an
    /// `Arc` and the resulting read guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_read_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> {
        match self.raw.try_lock_shared() {
            // SAFETY: the shared lock was just acquired.
            true => Some(unsafe { self.read_guard_arc() }),
            false => None,
        }
    }

    /// Locks this `RwLock` with write access, through an `Arc`.
    ///
    /// This method is similar to the `write` method; however, it requires the `RwLock` to be inside of an `Arc`
    /// and the resulting write guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn write_arc(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> {
        self.raw.lock_exclusive();
        // SAFETY: the exclusive lock was acquired on the line above.
        unsafe { self.write_guard_arc() }
    }

    /// Attempts to lock this `RwLock` with write access, through an `Arc`.
    ///
    /// This method is similar to the `try_write` method; however, it requires the `RwLock` to be inside of an
    /// `Arc` and the resulting write guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_write_arc(self: &Arc<Self>) -> Option<ArcRwLockWriteGuard<R, T>> {
        match self.raw.try_lock_exclusive() {
            // SAFETY: the exclusive lock was just acquired.
            true => Some(unsafe { self.write_guard_arc() }),
            false => None,
        }
    }
}

impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
    /// Forcibly unlocks a read lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
    #[inline]
    pub unsafe fn force_unlock_read_fair(&self) {
        self.raw.unlock_shared_fair();
    }

    /// Forcibly unlocks a write lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
669 /// 670 /// # Safety 671 /// 672 /// This method must only be called if the current thread logically owns a 673 /// `RwLockWriteGuard` but that guard has be discarded using `mem::forget`. 674 /// Behavior is undefined if a rwlock is write-unlocked when not write-locked. 675 #[inline] force_unlock_write_fair(&self)676 pub unsafe fn force_unlock_write_fair(&self) { 677 self.raw.unlock_exclusive_fair(); 678 } 679 } 680 681 impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> { 682 /// Attempts to acquire this `RwLock` with shared read access until a timeout 683 /// is reached. 684 /// 685 /// If the access could not be granted before the timeout expires, then 686 /// `None` is returned. Otherwise, an RAII guard is returned which will 687 /// release the shared access when it is dropped. 688 #[inline] try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>>689 pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> { 690 if self.raw.try_lock_shared_for(timeout) { 691 // SAFETY: The lock is held, as required. 692 Some(unsafe { self.read_guard() }) 693 } else { 694 None 695 } 696 } 697 698 /// Attempts to acquire this `RwLock` with shared read access until a timeout 699 /// is reached. 700 /// 701 /// If the access could not be granted before the timeout expires, then 702 /// `None` is returned. Otherwise, an RAII guard is returned which will 703 /// release the shared access when it is dropped. 704 #[inline] try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>>705 pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> { 706 if self.raw.try_lock_shared_until(timeout) { 707 // SAFETY: The lock is held, as required. 708 Some(unsafe { self.read_guard() }) 709 } else { 710 None 711 } 712 } 713 714 /// Attempts to acquire this `RwLock` with exclusive write access until a 715 /// timeout is reached. 
716 /// 717 /// If the access could not be granted before the timeout expires, then 718 /// `None` is returned. Otherwise, an RAII guard is returned which will 719 /// release the exclusive access when it is dropped. 720 #[inline] try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>>721 pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> { 722 if self.raw.try_lock_exclusive_for(timeout) { 723 // SAFETY: The lock is held, as required. 724 Some(unsafe { self.write_guard() }) 725 } else { 726 None 727 } 728 } 729 730 /// Attempts to acquire this `RwLock` with exclusive write access until a 731 /// timeout is reached. 732 /// 733 /// If the access could not be granted before the timeout expires, then 734 /// `None` is returned. Otherwise, an RAII guard is returned which will 735 /// release the exclusive access when it is dropped. 736 #[inline] try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>>737 pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> { 738 if self.raw.try_lock_exclusive_until(timeout) { 739 // SAFETY: The lock is held, as required. 740 Some(unsafe { self.write_guard() }) 741 } else { 742 None 743 } 744 } 745 746 /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`. 747 /// 748 /// This method is similar to the `try_read_for` method; however, it requires the `RwLock` to be inside of an 749 /// `Arc` and the resulting read guard has no lifetime requirements. 
750 #[cfg(feature = "arc_lock")] 751 #[inline] try_read_arc_for(self: &Arc<Self>, timeout: R::Duration) -> Option<ArcRwLockReadGuard<R, T>>752 pub fn try_read_arc_for(self: &Arc<Self>, timeout: R::Duration) -> Option<ArcRwLockReadGuard<R, T>> { 753 if self.raw.try_lock_shared_for(timeout) { 754 // SAFETY: locking guarantee is upheld 755 Some(unsafe { self.read_guard_arc() }) 756 } else { 757 None 758 } 759 } 760 761 /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`. 762 /// 763 /// This method is similar to the `try_read_until` method; however, it requires the `RwLock` to be inside of 764 /// an `Arc` and the resulting read guard has no lifetime requirements. 765 #[cfg(feature = "arc_lock")] 766 #[inline] try_read_arc_until(self: &Arc<Self>, timeout: R::Instant) -> Option<ArcRwLockReadGuard<R, T>>767 pub fn try_read_arc_until(self: &Arc<Self>, timeout: R::Instant) -> Option<ArcRwLockReadGuard<R, T>> { 768 if self.raw.try_lock_shared_until(timeout) { 769 // SAFETY: locking guarantee is upheld 770 Some(unsafe { self.read_guard_arc() }) 771 } else { 772 None 773 } 774 } 775 776 /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`. 777 /// 778 /// This method is similar to the `try_write_for` method; however, it requires the `RwLock` to be inside of 779 /// an `Arc` and the resulting write guard has no lifetime requirements. 780 #[cfg(feature = "arc_lock")] 781 #[inline] try_write_arc_for(self: &Arc<Self>, timeout: R::Duration) -> Option<ArcRwLockWriteGuard<R, T>>782 pub fn try_write_arc_for(self: &Arc<Self>, timeout: R::Duration) -> Option<ArcRwLockWriteGuard<R, T>> { 783 if self.raw.try_lock_exclusive_for(timeout) { 784 // SAFETY: locking guarantee is upheld 785 Some(unsafe { self.write_guard_arc() }) 786 } else { 787 None 788 } 789 } 790 791 /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`. 
792 /// 793 /// This method is similar to the `try_write_until` method; however, it requires the `RwLock` to be inside of 794 /// an `Arc` and the resulting read guard has no lifetime requirements. 795 #[cfg(feature = "arc_lock")] 796 #[inline] try_write_arc_until(self: &Arc<Self>, timeout: R::Instant) -> Option<ArcRwLockWriteGuard<R, T>>797 pub fn try_write_arc_until(self: &Arc<Self>, timeout: R::Instant) -> Option<ArcRwLockWriteGuard<R, T>> { 798 if self.raw.try_lock_exclusive_until(timeout) { 799 // SAFETY: locking guarantee is upheld 800 Some(unsafe { self.write_guard_arc() }) 801 } else { 802 None 803 } 804 } 805 } 806 807 impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> { 808 /// Locks this `RwLock` with shared read access, blocking the current thread 809 /// until it can be acquired. 810 /// 811 /// The calling thread will be blocked until there are no more writers which 812 /// hold the lock. There may be other readers currently inside the lock when 813 /// this method returns. 814 /// 815 /// Unlike `read`, this method is guaranteed to succeed without blocking if 816 /// another read lock is held at the time of the call. This allows a thread 817 /// to recursively lock a `RwLock`. However using this method can cause 818 /// writers to starve since readers no longer block if a writer is waiting 819 /// for the lock. 820 /// 821 /// Returns an RAII guard which will release this thread's shared access 822 /// once it is dropped. 823 #[inline] read_recursive(&self) -> RwLockReadGuard<'_, R, T>824 pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> { 825 self.raw.lock_shared_recursive(); 826 // SAFETY: The lock is held, as required. 827 unsafe { self.read_guard() } 828 } 829 830 /// Attempts to acquire this `RwLock` with shared read access. 831 /// 832 /// If the access could not be granted at this time, then `None` is returned. 833 /// Otherwise, an RAII guard is returned which will release the shared access 834 /// when it is dropped. 
835 /// 836 /// This method is guaranteed to succeed if another read lock is held at the 837 /// time of the call. See the documentation for `read_recursive` for details. 838 /// 839 /// This function does not block. 840 #[inline] try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>>841 pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> { 842 if self.raw.try_lock_shared_recursive() { 843 // SAFETY: The lock is held, as required. 844 Some(unsafe { self.read_guard() }) 845 } else { 846 None 847 } 848 } 849 850 /// Locks this `RwLock` with shared read access, through an `Arc`. 851 /// 852 /// This method is similar to the `read_recursive` method; however, it requires the `RwLock` to be inside of 853 /// an `Arc` and the resulting read guard has no lifetime requirements. 854 #[cfg(feature = "arc_lock")] 855 #[inline] read_arc_recursive(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T>856 pub fn read_arc_recursive(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> { 857 self.raw.lock_shared_recursive(); 858 // SAFETY: locking guarantee is upheld 859 unsafe { self.read_guard_arc() } 860 } 861 862 /// Attempts to lock this `RwLock` with shared read access, through an `Arc`. 863 /// 864 /// This method is similar to the `try_read_recursive` method; however, it requires the `RwLock` to be inside 865 /// of an `Arc` and the resulting read guard has no lifetime requirements. 866 #[cfg(feature = "arc_lock")] 867 #[inline] try_read_recursive_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>>868 pub fn try_read_recursive_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> { 869 if self.raw.try_lock_shared_recursive() { 870 // SAFETY: locking guarantee is upheld 871 Some(unsafe { self.read_guard_arc() }) 872 } else { 873 None 874 } 875 } 876 } 877 878 impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> { 879 /// Attempts to acquire this `RwLock` with shared read access until a timeout 880 /// is reached. 
881 /// 882 /// If the access could not be granted before the timeout expires, then 883 /// `None` is returned. Otherwise, an RAII guard is returned which will 884 /// release the shared access when it is dropped. 885 /// 886 /// This method is guaranteed to succeed without blocking if another read 887 /// lock is held at the time of the call. See the documentation for 888 /// `read_recursive` for details. 889 #[inline] try_read_recursive_for( &self, timeout: R::Duration, ) -> Option<RwLockReadGuard<'_, R, T>>890 pub fn try_read_recursive_for( 891 &self, 892 timeout: R::Duration, 893 ) -> Option<RwLockReadGuard<'_, R, T>> { 894 if self.raw.try_lock_shared_recursive_for(timeout) { 895 // SAFETY: The lock is held, as required. 896 Some(unsafe { self.read_guard() }) 897 } else { 898 None 899 } 900 } 901 902 /// Attempts to acquire this `RwLock` with shared read access until a timeout 903 /// is reached. 904 /// 905 /// If the access could not be granted before the timeout expires, then 906 /// `None` is returned. Otherwise, an RAII guard is returned which will 907 /// release the shared access when it is dropped. 908 #[inline] try_read_recursive_until( &self, timeout: R::Instant, ) -> Option<RwLockReadGuard<'_, R, T>>909 pub fn try_read_recursive_until( 910 &self, 911 timeout: R::Instant, 912 ) -> Option<RwLockReadGuard<'_, R, T>> { 913 if self.raw.try_lock_shared_recursive_until(timeout) { 914 // SAFETY: The lock is held, as required. 915 Some(unsafe { self.read_guard() }) 916 } else { 917 None 918 } 919 } 920 921 /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`. 922 /// 923 /// This method is similar to the `try_read_recursive_for` method; however, it requires the `RwLock` to be 924 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 
925 #[cfg(feature = "arc_lock")] 926 #[inline] try_read_arc_recursive_for(self: &Arc<Self>, timeout: R::Duration) -> Option<ArcRwLockReadGuard<R, T>>927 pub fn try_read_arc_recursive_for(self: &Arc<Self>, timeout: R::Duration) -> Option<ArcRwLockReadGuard<R, T>> { 928 if self.raw.try_lock_shared_recursive_for(timeout) { 929 // SAFETY: locking guarantee is upheld 930 Some(unsafe { self.read_guard_arc() }) 931 } else { 932 None 933 } 934 } 935 936 /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`. 937 /// 938 /// This method is similar to the `try_read_recursive_until` method; however, it requires the `RwLock` to be 939 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 940 #[cfg(feature = "arc_lock")] 941 #[inline] try_read_arc_recursive_until(self: &Arc<Self>, timeout: R::Instant) -> Option<ArcRwLockReadGuard<R, T>>942 pub fn try_read_arc_recursive_until(self: &Arc<Self>, timeout: R::Instant) -> Option<ArcRwLockReadGuard<R, T>> { 943 if self.raw.try_lock_shared_recursive_until(timeout) { 944 // SAFETY: locking guarantee is upheld 945 Some(unsafe { self.read_guard_arc() }) 946 } else { 947 None 948 } 949 } 950 } 951 952 impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> { 953 /// # Safety 954 /// 955 /// The lock must be held when calling this method. 956 #[inline] upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T>957 unsafe fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T> { 958 RwLockUpgradableReadGuard { 959 rwlock: self, 960 marker: PhantomData, 961 } 962 } 963 964 /// Locks this `RwLock` with upgradable read access, blocking the current thread 965 /// until it can be acquired. 966 /// 967 /// The calling thread will be blocked until there are no more writers or other 968 /// upgradable reads which hold the lock. There may be other readers currently 969 /// inside the lock when this method returns. 
970 /// 971 /// Returns an RAII guard which will release this thread's shared access 972 /// once it is dropped. 973 #[inline] upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T>974 pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> { 975 self.raw.lock_upgradable(); 976 // SAFETY: The lock is held, as required. 977 unsafe { self.upgradable_guard() } 978 } 979 980 /// Attempts to acquire this `RwLock` with upgradable read access. 981 /// 982 /// If the access could not be granted at this time, then `None` is returned. 983 /// Otherwise, an RAII guard is returned which will release the shared access 984 /// when it is dropped. 985 /// 986 /// This function does not block. 987 #[inline] try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>>988 pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> { 989 if self.raw.try_lock_upgradable() { 990 // SAFETY: The lock is held, as required. 991 Some(unsafe { self.upgradable_guard() }) 992 } else { 993 None 994 } 995 } 996 997 /// # Safety 998 /// 999 /// The lock must be held when calling this method. 1000 #[cfg(feature = "arc_lock")] 1001 #[inline] upgradable_guard_arc(self: &Arc<Self>) -> ArcRwLockUpgradableReadGuard<R, T>1002 unsafe fn upgradable_guard_arc(self: &Arc<Self>) -> ArcRwLockUpgradableReadGuard<R, T> { 1003 ArcRwLockUpgradableReadGuard { 1004 rwlock: self.clone(), 1005 marker: PhantomData 1006 } 1007 } 1008 1009 /// Locks this `RwLock` with upgradable read access, through an `Arc`. 1010 /// 1011 /// This method is similar to the `upgradable_read` method; however, it requires the `RwLock` to be 1012 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 
1013 #[cfg(feature = "arc_lock")] 1014 #[inline] upgradable_read_arc(self: &Arc<Self>) -> ArcRwLockUpgradableReadGuard<R, T>1015 pub fn upgradable_read_arc(self: &Arc<Self>) -> ArcRwLockUpgradableReadGuard<R, T> { 1016 self.raw.lock_upgradable(); 1017 // SAFETY: locking guarantee is upheld 1018 unsafe { self.upgradable_guard_arc() } 1019 } 1020 1021 /// Attempts to lock this `RwLock` with upgradable read access, through an `Arc`. 1022 /// 1023 /// This method is similar to the `try_upgradable_read` method; however, it requires the `RwLock` to be 1024 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 1025 #[cfg(feature = "arc_lock")] 1026 #[inline] try_upgradable_read_arc(self: &Arc<Self>) -> Option<ArcRwLockUpgradableReadGuard<R, T>>1027 pub fn try_upgradable_read_arc(self: &Arc<Self>) -> Option<ArcRwLockUpgradableReadGuard<R, T>> { 1028 if self.raw.try_lock_upgradable() { 1029 // SAFETY: locking guarantee is upheld 1030 Some(unsafe { self.upgradable_guard_arc() }) 1031 } else { 1032 None 1033 } 1034 } 1035 } 1036 1037 impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> { 1038 /// Attempts to acquire this `RwLock` with upgradable read access until a timeout 1039 /// is reached. 1040 /// 1041 /// If the access could not be granted before the timeout expires, then 1042 /// `None` is returned. Otherwise, an RAII guard is returned which will 1043 /// release the shared access when it is dropped. 1044 #[inline] try_upgradable_read_for( &self, timeout: R::Duration, ) -> Option<RwLockUpgradableReadGuard<'_, R, T>>1045 pub fn try_upgradable_read_for( 1046 &self, 1047 timeout: R::Duration, 1048 ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> { 1049 if self.raw.try_lock_upgradable_for(timeout) { 1050 // SAFETY: The lock is held, as required. 1051 Some(unsafe { self.upgradable_guard() }) 1052 } else { 1053 None 1054 } 1055 } 1056 1057 /// Attempts to acquire this `RwLock` with upgradable read access until a timeout 1058 /// is reached. 
1059 /// 1060 /// If the access could not be granted before the timeout expires, then 1061 /// `None` is returned. Otherwise, an RAII guard is returned which will 1062 /// release the shared access when it is dropped. 1063 #[inline] try_upgradable_read_until( &self, timeout: R::Instant, ) -> Option<RwLockUpgradableReadGuard<'_, R, T>>1064 pub fn try_upgradable_read_until( 1065 &self, 1066 timeout: R::Instant, 1067 ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> { 1068 if self.raw.try_lock_upgradable_until(timeout) { 1069 // SAFETY: The lock is held, as required. 1070 Some(unsafe { self.upgradable_guard() }) 1071 } else { 1072 None 1073 } 1074 } 1075 1076 /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`. 1077 /// 1078 /// This method is similar to the `try_upgradable_read_for` method; however, it requires the `RwLock` to be 1079 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 1080 #[cfg(feature = "arc_lock")] 1081 #[inline] try_upgradable_read_arc_for( self: &Arc<Self>, timeout: R::Duration, ) -> Option<ArcRwLockUpgradableReadGuard<R, T>>1082 pub fn try_upgradable_read_arc_for( 1083 self: &Arc<Self>, 1084 timeout: R::Duration, 1085 ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> { 1086 if self.raw.try_lock_upgradable_for(timeout) { 1087 // SAFETY: locking guarantee is upheld 1088 Some(unsafe { self.upgradable_guard_arc() }) 1089 } else { 1090 None 1091 } 1092 } 1093 1094 /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`. 1095 /// 1096 /// This method is similar to the `try_upgradable_read_until` method; however, it requires the `RwLock` to be 1097 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 
1098 #[cfg(feature = "arc_lock")] 1099 #[inline] try_upgradable_read_arc_until( self: &Arc<Self>, timeout: R::Instant, ) -> Option<ArcRwLockUpgradableReadGuard<R, T>>1100 pub fn try_upgradable_read_arc_until( 1101 self: &Arc<Self>, 1102 timeout: R::Instant, 1103 ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> { 1104 if self.raw.try_lock_upgradable_until(timeout) { 1105 // SAFETY: locking guarantee is upheld 1106 Some(unsafe { self.upgradable_guard_arc() }) 1107 } else { 1108 None 1109 } 1110 } 1111 } 1112 1113 impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> { 1114 #[inline] default() -> RwLock<R, T>1115 fn default() -> RwLock<R, T> { 1116 RwLock::new(Default::default()) 1117 } 1118 } 1119 1120 impl<R: RawRwLock, T> From<T> for RwLock<R, T> { 1121 #[inline] from(t: T) -> RwLock<R, T>1122 fn from(t: T) -> RwLock<R, T> { 1123 RwLock::new(t) 1124 } 1125 } 1126 1127 impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1128 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1129 match self.try_read() { 1130 Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(), 1131 None => { 1132 struct LockedPlaceholder; 1133 impl fmt::Debug for LockedPlaceholder { 1134 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1135 f.write_str("<locked>") 1136 } 1137 } 1138 1139 f.debug_struct("RwLock") 1140 .field("data", &LockedPlaceholder) 1141 .finish() 1142 } 1143 } 1144 } 1145 } 1146 1147 /// RAII structure used to release the shared read access of a lock when 1148 /// dropped. 1149 #[must_use = "if unused the RwLock will immediately unlock"] 1150 pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> { 1151 rwlock: &'a RwLock<R, T>, 1152 marker: PhantomData<(&'a T, R::GuardMarker)>, 1153 } 1154 1155 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> { 1156 /// Returns a reference to the original reader-writer lock object. 
rwlock(s: &Self) -> &'a RwLock<R, T>1157 pub fn rwlock(s: &Self) -> &'a RwLock<R, T> { 1158 s.rwlock 1159 } 1160 1161 /// Make a new `MappedRwLockReadGuard` for a component of the locked data. 1162 /// 1163 /// This operation cannot fail as the `RwLockReadGuard` passed 1164 /// in already locked the data. 1165 /// 1166 /// This is an associated function that needs to be 1167 /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of 1168 /// the same name on the contents of the locked data. 1169 #[inline] map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> where F: FnOnce(&T) -> &U,1170 pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> 1171 where 1172 F: FnOnce(&T) -> &U, 1173 { 1174 let raw = &s.rwlock.raw; 1175 let data = f(unsafe { &*s.rwlock.data.get() }); 1176 mem::forget(s); 1177 MappedRwLockReadGuard { 1178 raw, 1179 data, 1180 marker: PhantomData, 1181 } 1182 } 1183 1184 /// Attempts to make a new `MappedRwLockReadGuard` for a component of the 1185 /// locked data. The original guard is return if the closure returns `None`. 1186 /// 1187 /// This operation cannot fail as the `RwLockReadGuard` passed 1188 /// in already locked the data. 1189 /// 1190 /// This is an associated function that needs to be 1191 /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of 1192 /// the same name on the contents of the locked data. 
1193 #[inline] try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> where F: FnOnce(&T) -> Option<&U>,1194 pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> 1195 where 1196 F: FnOnce(&T) -> Option<&U>, 1197 { 1198 let raw = &s.rwlock.raw; 1199 let data = match f(unsafe { &*s.rwlock.data.get() }) { 1200 Some(data) => data, 1201 None => return Err(s), 1202 }; 1203 mem::forget(s); 1204 Ok(MappedRwLockReadGuard { 1205 raw, 1206 data, 1207 marker: PhantomData, 1208 }) 1209 } 1210 1211 /// Temporarily unlocks the `RwLock` to execute the given function. 1212 /// 1213 /// The `RwLock` is unlocked a fair unlock protocol. 1214 /// 1215 /// This is safe because `&mut` guarantees that there exist no other 1216 /// references to the data protected by the `RwLock`. 1217 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1218 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 1219 where 1220 F: FnOnce() -> U, 1221 { 1222 // Safety: An RwLockReadGuard always holds a shared lock. 1223 unsafe { 1224 s.rwlock.raw.unlock_shared(); 1225 } 1226 defer!(s.rwlock.raw.lock_shared()); 1227 f() 1228 } 1229 } 1230 1231 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> { 1232 /// Unlocks the `RwLock` using a fair unlock protocol. 1233 /// 1234 /// By default, `RwLock` is unfair and allow the current thread to re-lock 1235 /// the `RwLock` before another has the chance to acquire the lock, even if 1236 /// that thread has been blocked on the `RwLock` for a long time. This is 1237 /// the default because it allows much higher throughput as it avoids 1238 /// forcing a context switch on every `RwLock` unlock. This can result in one 1239 /// thread acquiring a `RwLock` many more times than other threads. 1240 /// 1241 /// However in some cases it can be beneficial to ensure fairness by forcing 1242 /// the lock to pass on to a waiting thread if there is one. 
This is done by 1243 /// using this method instead of dropping the `RwLockReadGuard` normally. 1244 #[inline] unlock_fair(s: Self)1245 pub fn unlock_fair(s: Self) { 1246 // Safety: An RwLockReadGuard always holds a shared lock. 1247 unsafe { 1248 s.rwlock.raw.unlock_shared_fair(); 1249 } 1250 mem::forget(s); 1251 } 1252 1253 /// Temporarily unlocks the `RwLock` to execute the given function. 1254 /// 1255 /// The `RwLock` is unlocked a fair unlock protocol. 1256 /// 1257 /// This is safe because `&mut` guarantees that there exist no other 1258 /// references to the data protected by the `RwLock`. 1259 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1260 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 1261 where 1262 F: FnOnce() -> U, 1263 { 1264 // Safety: An RwLockReadGuard always holds a shared lock. 1265 unsafe { 1266 s.rwlock.raw.unlock_shared_fair(); 1267 } 1268 defer!(s.rwlock.raw.lock_shared()); 1269 f() 1270 } 1271 1272 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 1273 /// 1274 /// This method is functionally equivalent to calling `unlock_fair` followed 1275 /// by `read`, however it can be much more efficient in the case where there 1276 /// are no waiting threads. 1277 #[inline] bump(s: &mut Self)1278 pub fn bump(s: &mut Self) { 1279 // Safety: An RwLockReadGuard always holds a shared lock. 1280 unsafe { 1281 s.rwlock.raw.bump_shared(); 1282 } 1283 } 1284 } 1285 1286 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> { 1287 type Target = T; 1288 #[inline] deref(&self) -> &T1289 fn deref(&self) -> &T { 1290 unsafe { &*self.rwlock.data.get() } 1291 } 1292 } 1293 1294 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> { 1295 #[inline] drop(&mut self)1296 fn drop(&mut self) { 1297 // Safety: An RwLockReadGuard always holds a shared lock. 
1298 unsafe { 1299 self.rwlock.raw.unlock_shared(); 1300 } 1301 } 1302 } 1303 1304 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1305 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1306 fmt::Debug::fmt(&**self, f) 1307 } 1308 } 1309 1310 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display 1311 for RwLockReadGuard<'a, R, T> 1312 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1313 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1314 (**self).fmt(f) 1315 } 1316 } 1317 1318 #[cfg(feature = "owning_ref")] 1319 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {} 1320 1321 /// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`. 1322 /// 1323 /// This is similar to the `RwLockReadGuard` struct, except instead of using a reference to unlock the `RwLock` 1324 /// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has an `'static` lifetime. 1325 #[cfg(feature = "arc_lock")] 1326 #[must_use = "if unused the RwLock will immediately unlock"] 1327 pub struct ArcRwLockReadGuard<R: RawRwLock, T: ?Sized> { 1328 rwlock: Arc<RwLock<R, T>>, 1329 marker: PhantomData<R::GuardMarker>, 1330 } 1331 1332 #[cfg(feature = "arc_lock")] 1333 impl<R: RawRwLock, T: ?Sized> ArcRwLockReadGuard<R, T> { 1334 /// Returns a reference to the rwlock, contained in its `Arc`. rwlock(s: &Self) -> &Arc<RwLock<R, T>>1335 pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> { 1336 &s.rwlock 1337 } 1338 1339 /// Temporarily unlocks the `RwLock` to execute the given function. 1340 /// 1341 /// This is functionally identical to the `unlocked` method on [`RwLockReadGuard`]. 
1342 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1343 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 1344 where 1345 F: FnOnce() -> U, 1346 { 1347 // Safety: An RwLockReadGuard always holds a shared lock. 1348 unsafe { 1349 s.rwlock.raw.unlock_shared(); 1350 } 1351 defer!(s.rwlock.raw.lock_shared()); 1352 f() 1353 } 1354 } 1355 1356 #[cfg(feature = "arc_lock")] 1357 impl<R: RawRwLockFair, T: ?Sized> ArcRwLockReadGuard<R, T> { 1358 /// Unlocks the `RwLock` using a fair unlock protocol. 1359 /// 1360 /// This is functionally identical to the `unlock_fair` method on [`RwLockReadGuard`]. 1361 #[inline] unlock_fair(s: Self)1362 pub fn unlock_fair(s: Self) { 1363 // Safety: An RwLockReadGuard always holds a shared lock. 1364 unsafe { 1365 s.rwlock.raw.unlock_shared_fair(); 1366 } 1367 1368 // SAFETY: ensure the Arc has its refcount decremented 1369 let mut s = ManuallyDrop::new(s); 1370 unsafe { ptr::drop_in_place(&mut s.rwlock) }; 1371 } 1372 1373 /// Temporarily unlocks the `RwLock` to execute the given function. 1374 /// 1375 /// This is functionally identical to the `unlocked_fair` method on [`RwLockReadGuard`]. 1376 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1377 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 1378 where 1379 F: FnOnce() -> U, 1380 { 1381 // Safety: An RwLockReadGuard always holds a shared lock. 1382 unsafe { 1383 s.rwlock.raw.unlock_shared_fair(); 1384 } 1385 defer!(s.rwlock.raw.lock_shared()); 1386 f() 1387 } 1388 1389 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 1390 /// 1391 /// This is functionally identical to the `bump` method on [`RwLockReadGuard`]. 1392 #[inline] bump(s: &mut Self)1393 pub fn bump(s: &mut Self) { 1394 // Safety: An RwLockReadGuard always holds a shared lock. 
1395 unsafe { 1396 s.rwlock.raw.bump_shared(); 1397 } 1398 } 1399 } 1400 1401 #[cfg(feature = "arc_lock")] 1402 impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockReadGuard<R, T> { 1403 type Target = T; 1404 #[inline] deref(&self) -> &T1405 fn deref(&self) -> &T { 1406 unsafe { &*self.rwlock.data.get() } 1407 } 1408 } 1409 1410 #[cfg(feature = "arc_lock")] 1411 impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockReadGuard<R, T> { 1412 #[inline] drop(&mut self)1413 fn drop(&mut self) { 1414 // Safety: An RwLockReadGuard always holds a shared lock. 1415 unsafe { 1416 self.rwlock.raw.unlock_shared(); 1417 } 1418 } 1419 } 1420 1421 #[cfg(feature = "arc_lock")] 1422 impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockReadGuard<R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1423 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1424 fmt::Debug::fmt(&**self, f) 1425 } 1426 } 1427 1428 #[cfg(feature = "arc_lock")] 1429 impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display 1430 for ArcRwLockReadGuard<R, T> 1431 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1432 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1433 (**self).fmt(f) 1434 } 1435 } 1436 1437 /// RAII structure used to release the exclusive write access of a lock when 1438 /// dropped. 1439 #[must_use = "if unused the RwLock will immediately unlock"] 1440 pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> { 1441 rwlock: &'a RwLock<R, T>, 1442 marker: PhantomData<(&'a mut T, R::GuardMarker)>, 1443 } 1444 1445 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { 1446 /// Returns a reference to the original reader-writer lock object. rwlock(s: &Self) -> &'a RwLock<R, T>1447 pub fn rwlock(s: &Self) -> &'a RwLock<R, T> { 1448 s.rwlock 1449 } 1450 1451 /// Make a new `MappedRwLockWriteGuard` for a component of the locked data. 1452 /// 1453 /// This operation cannot fail as the `RwLockWriteGuard` passed 1454 /// in already locked the data. 
1455 /// 1456 /// This is an associated function that needs to be 1457 /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of 1458 /// the same name on the contents of the locked data. 1459 #[inline] map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U> where F: FnOnce(&mut T) -> &mut U,1460 pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U> 1461 where 1462 F: FnOnce(&mut T) -> &mut U, 1463 { 1464 let raw = &s.rwlock.raw; 1465 let data = f(unsafe { &mut *s.rwlock.data.get() }); 1466 mem::forget(s); 1467 MappedRwLockWriteGuard { 1468 raw, 1469 data, 1470 marker: PhantomData, 1471 } 1472 } 1473 1474 /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the 1475 /// locked data. The original guard is return if the closure returns `None`. 1476 /// 1477 /// This operation cannot fail as the `RwLockWriteGuard` passed 1478 /// in already locked the data. 1479 /// 1480 /// This is an associated function that needs to be 1481 /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of 1482 /// the same name on the contents of the locked data. 1483 #[inline] try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self> where F: FnOnce(&mut T) -> Option<&mut U>,1484 pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self> 1485 where 1486 F: FnOnce(&mut T) -> Option<&mut U>, 1487 { 1488 let raw = &s.rwlock.raw; 1489 let data = match f(unsafe { &mut *s.rwlock.data.get() }) { 1490 Some(data) => data, 1491 None => return Err(s), 1492 }; 1493 mem::forget(s); 1494 Ok(MappedRwLockWriteGuard { 1495 raw, 1496 data, 1497 marker: PhantomData, 1498 }) 1499 } 1500 1501 /// Temporarily unlocks the `RwLock` to execute the given function. 1502 /// 1503 /// This is safe because `&mut` guarantees that there exist no other 1504 /// references to the data protected by the `RwLock`. 
1505 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1506 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 1507 where 1508 F: FnOnce() -> U, 1509 { 1510 // Safety: An RwLockReadGuard always holds a shared lock. 1511 unsafe { 1512 s.rwlock.raw.unlock_exclusive(); 1513 } 1514 defer!(s.rwlock.raw.lock_exclusive()); 1515 f() 1516 } 1517 } 1518 1519 impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { 1520 /// Atomically downgrades a write lock into a read lock without allowing any 1521 /// writers to take exclusive access of the lock in the meantime. 1522 /// 1523 /// Note that if there are any writers currently waiting to take the lock 1524 /// then other readers may not be able to acquire the lock even if it was 1525 /// downgraded. downgrade(s: Self) -> RwLockReadGuard<'a, R, T>1526 pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> { 1527 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1528 unsafe { 1529 s.rwlock.raw.downgrade(); 1530 } 1531 let rwlock = s.rwlock; 1532 mem::forget(s); 1533 RwLockReadGuard { 1534 rwlock, 1535 marker: PhantomData, 1536 } 1537 } 1538 } 1539 1540 impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { 1541 /// Atomically downgrades a write lock into an upgradable read lock without allowing any 1542 /// writers to take exclusive access of the lock in the meantime. 1543 /// 1544 /// Note that if there are any writers currently waiting to take the lock 1545 /// then other readers may not be able to acquire the lock even if it was 1546 /// downgraded. downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T>1547 pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> { 1548 // Safety: An RwLockWriteGuard always holds an exclusive lock. 
1549 unsafe { 1550 s.rwlock.raw.downgrade_to_upgradable(); 1551 } 1552 let rwlock = s.rwlock; 1553 mem::forget(s); 1554 RwLockUpgradableReadGuard { 1555 rwlock, 1556 marker: PhantomData, 1557 } 1558 } 1559 } 1560 1561 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { 1562 /// Unlocks the `RwLock` using a fair unlock protocol. 1563 /// 1564 /// By default, `RwLock` is unfair and allow the current thread to re-lock 1565 /// the `RwLock` before another has the chance to acquire the lock, even if 1566 /// that thread has been blocked on the `RwLock` for a long time. This is 1567 /// the default because it allows much higher throughput as it avoids 1568 /// forcing a context switch on every `RwLock` unlock. This can result in one 1569 /// thread acquiring a `RwLock` many more times than other threads. 1570 /// 1571 /// However in some cases it can be beneficial to ensure fairness by forcing 1572 /// the lock to pass on to a waiting thread if there is one. This is done by 1573 /// using this method instead of dropping the `RwLockWriteGuard` normally. 1574 #[inline] unlock_fair(s: Self)1575 pub fn unlock_fair(s: Self) { 1576 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1577 unsafe { 1578 s.rwlock.raw.unlock_exclusive_fair(); 1579 } 1580 mem::forget(s); 1581 } 1582 1583 /// Temporarily unlocks the `RwLock` to execute the given function. 1584 /// 1585 /// The `RwLock` is unlocked a fair unlock protocol. 1586 /// 1587 /// This is safe because `&mut` guarantees that there exist no other 1588 /// references to the data protected by the `RwLock`. 1589 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1590 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 1591 where 1592 F: FnOnce() -> U, 1593 { 1594 // Safety: An RwLockWriteGuard always holds an exclusive lock. 
1595 unsafe { 1596 s.rwlock.raw.unlock_exclusive_fair(); 1597 } 1598 defer!(s.rwlock.raw.lock_exclusive()); 1599 f() 1600 } 1601 1602 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 1603 /// 1604 /// This method is functionally equivalent to calling `unlock_fair` followed 1605 /// by `write`, however it can be much more efficient in the case where there 1606 /// are no waiting threads. 1607 #[inline] bump(s: &mut Self)1608 pub fn bump(s: &mut Self) { 1609 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1610 unsafe { 1611 s.rwlock.raw.bump_exclusive(); 1612 } 1613 } 1614 } 1615 1616 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> { 1617 type Target = T; 1618 #[inline] deref(&self) -> &T1619 fn deref(&self) -> &T { 1620 unsafe { &*self.rwlock.data.get() } 1621 } 1622 } 1623 1624 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> { 1625 #[inline] deref_mut(&mut self) -> &mut T1626 fn deref_mut(&mut self) -> &mut T { 1627 unsafe { &mut *self.rwlock.data.get() } 1628 } 1629 } 1630 1631 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> { 1632 #[inline] drop(&mut self)1633 fn drop(&mut self) { 1634 // Safety: An RwLockWriteGuard always holds an exclusive lock. 
1635 unsafe { 1636 self.rwlock.raw.unlock_exclusive(); 1637 } 1638 } 1639 } 1640 1641 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1642 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1643 fmt::Debug::fmt(&**self, f) 1644 } 1645 } 1646 1647 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display 1648 for RwLockWriteGuard<'a, R, T> 1649 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1650 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1651 (**self).fmt(f) 1652 } 1653 } 1654 1655 #[cfg(feature = "owning_ref")] 1656 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {} 1657 1658 /// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`. 1659 /// This is similar to the `RwLockWriteGuard` struct, except instead of using a reference to unlock the `RwLock` 1660 /// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has an `'static` lifetime. 1661 #[cfg(feature = "arc_lock")] 1662 #[must_use = "if unused the RwLock will immediately unlock"] 1663 pub struct ArcRwLockWriteGuard<R: RawRwLock, T: ?Sized> { 1664 rwlock: Arc<RwLock<R, T>>, 1665 marker: PhantomData<R::GuardMarker>, 1666 } 1667 1668 #[cfg(feature = "arc_lock")] 1669 impl<R: RawRwLock, T: ?Sized> ArcRwLockWriteGuard<R, T> { 1670 /// Returns a reference to the rwlock, contained in its `Arc`. rwlock(s: &Self) -> &Arc<RwLock<R, T>>1671 pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> { 1672 &s.rwlock 1673 } 1674 1675 /// Temporarily unlocks the `RwLock` to execute the given function. 1676 /// 1677 /// This is functionally equivalent to the `unlocked` method on [`RwLockWriteGuard`]. 
1678 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1679 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 1680 where 1681 F: FnOnce() -> U, 1682 { 1683 // Safety: An RwLockWriteGuard always holds a shared lock. 1684 unsafe { 1685 s.rwlock.raw.unlock_exclusive(); 1686 } 1687 defer!(s.rwlock.raw.lock_exclusive()); 1688 f() 1689 } 1690 } 1691 1692 #[cfg(feature = "arc_lock")] 1693 impl<R: RawRwLockDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> { 1694 /// Atomically downgrades a write lock into a read lock without allowing any 1695 /// writers to take exclusive access of the lock in the meantime. 1696 /// 1697 /// This is functionally equivalent to the `downgrade` method on [`RwLockWriteGuard`]. downgrade(s: Self) -> ArcRwLockReadGuard<R, T>1698 pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> { 1699 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1700 unsafe { 1701 s.rwlock.raw.downgrade(); 1702 } 1703 1704 // SAFETY: prevent the arc's refcount from changing using ManuallyDrop and ptr::read 1705 let s = ManuallyDrop::new(s); 1706 let rwlock = unsafe { ptr::read(&s.rwlock) }; 1707 1708 ArcRwLockReadGuard { 1709 rwlock, 1710 marker: PhantomData, 1711 } 1712 } 1713 } 1714 1715 #[cfg(feature = "arc_lock")] 1716 impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> { 1717 /// Atomically downgrades a write lock into an upgradable read lock without allowing any 1718 /// writers to take exclusive access of the lock in the meantime. 1719 /// 1720 /// This is functionally identical to the `downgrade_to_upgradable` method on [`RwLockWriteGuard`]. downgrade_to_upgradable(s: Self) -> ArcRwLockUpgradableReadGuard<R, T>1721 pub fn downgrade_to_upgradable(s: Self) -> ArcRwLockUpgradableReadGuard<R, T> { 1722 // Safety: An RwLockWriteGuard always holds an exclusive lock. 
1723 unsafe { 1724 s.rwlock.raw.downgrade_to_upgradable(); 1725 } 1726 1727 // SAFETY: same as above 1728 let s = ManuallyDrop::new(s); 1729 let rwlock = unsafe { ptr::read(&s.rwlock) }; 1730 1731 ArcRwLockUpgradableReadGuard { 1732 rwlock, 1733 marker: PhantomData, 1734 } 1735 } 1736 } 1737 1738 #[cfg(feature = "arc_lock")] 1739 impl<R: RawRwLockFair, T: ?Sized> ArcRwLockWriteGuard<R, T> { 1740 /// Unlocks the `RwLock` using a fair unlock protocol. 1741 /// 1742 /// This is functionally equivalent to the `unlock_fair` method on [`RwLockWriteGuard`]. 1743 #[inline] unlock_fair(s: Self)1744 pub fn unlock_fair(s: Self) { 1745 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1746 unsafe { 1747 s.rwlock.raw.unlock_exclusive_fair(); 1748 } 1749 1750 // SAFETY: prevent the Arc from leaking memory 1751 let mut s = ManuallyDrop::new(s); 1752 unsafe { ptr::drop_in_place(&mut s.rwlock) }; 1753 } 1754 1755 /// Temporarily unlocks the `RwLock` to execute the given function. 1756 /// 1757 /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockWriteGuard`]. 1758 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1759 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 1760 where 1761 F: FnOnce() -> U, 1762 { 1763 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1764 unsafe { 1765 s.rwlock.raw.unlock_exclusive_fair(); 1766 } 1767 defer!(s.rwlock.raw.lock_exclusive()); 1768 f() 1769 } 1770 1771 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 1772 /// 1773 /// This method is functionally equivalent to the `bump` method on [`RwLockWriteGuard`]. 1774 #[inline] bump(s: &mut Self)1775 pub fn bump(s: &mut Self) { 1776 // Safety: An RwLockWriteGuard always holds an exclusive lock. 
1777 unsafe { 1778 s.rwlock.raw.bump_exclusive(); 1779 } 1780 } 1781 } 1782 1783 #[cfg(feature = "arc_lock")] 1784 impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockWriteGuard<R, T> { 1785 type Target = T; 1786 #[inline] deref(&self) -> &T1787 fn deref(&self) -> &T { 1788 unsafe { &*self.rwlock.data.get() } 1789 } 1790 } 1791 1792 #[cfg(feature = "arc_lock")] 1793 impl<R: RawRwLock, T: ?Sized> DerefMut for ArcRwLockWriteGuard<R, T> { 1794 #[inline] deref_mut(&mut self) -> &mut T1795 fn deref_mut(&mut self) -> &mut T { 1796 unsafe { &mut *self.rwlock.data.get() } 1797 } 1798 } 1799 1800 #[cfg(feature = "arc_lock")] 1801 impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockWriteGuard<R, T> { 1802 #[inline] drop(&mut self)1803 fn drop(&mut self) { 1804 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1805 unsafe { 1806 self.rwlock.raw.unlock_exclusive(); 1807 } 1808 } 1809 } 1810 1811 #[cfg(feature = "arc_lock")] 1812 impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockWriteGuard<R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1813 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1814 fmt::Debug::fmt(&**self, f) 1815 } 1816 } 1817 1818 #[cfg(feature = "arc_lock")] 1819 impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display 1820 for ArcRwLockWriteGuard<R, T> 1821 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1822 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1823 (**self).fmt(f) 1824 } 1825 } 1826 1827 /// RAII structure used to release the upgradable read access of a lock when 1828 /// dropped. 
1829 #[must_use = "if unused the RwLock will immediately unlock"] 1830 pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> { 1831 rwlock: &'a RwLock<R, T>, 1832 marker: PhantomData<(&'a T, R::GuardMarker)>, 1833 } 1834 1835 unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync 1836 for RwLockUpgradableReadGuard<'a, R, T> 1837 { 1838 } 1839 1840 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { 1841 /// Returns a reference to the original reader-writer lock object. rwlock(s: &Self) -> &'a RwLock<R, T>1842 pub fn rwlock(s: &Self) -> &'a RwLock<R, T> { 1843 s.rwlock 1844 } 1845 1846 /// Temporarily unlocks the `RwLock` to execute the given function. 1847 /// 1848 /// This is safe because `&mut` guarantees that there exist no other 1849 /// references to the data protected by the `RwLock`. 1850 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1851 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 1852 where 1853 F: FnOnce() -> U, 1854 { 1855 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1856 unsafe { 1857 s.rwlock.raw.unlock_upgradable(); 1858 } 1859 defer!(s.rwlock.raw.lock_upgradable()); 1860 f() 1861 } 1862 1863 /// Atomically upgrades an upgradable read lock lock into a exclusive write lock, 1864 /// blocking the current thread until it can be acquired. upgrade(s: Self) -> RwLockWriteGuard<'a, R, T>1865 pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> { 1866 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1867 unsafe { 1868 s.rwlock.raw.upgrade(); 1869 } 1870 let rwlock = s.rwlock; 1871 mem::forget(s); 1872 RwLockWriteGuard { 1873 rwlock, 1874 marker: PhantomData, 1875 } 1876 } 1877 1878 /// Tries to atomically upgrade an upgradable read lock into a exclusive write lock. 1879 /// 1880 /// If the access could not be granted at this time, then the current guard is returned. 
try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self>1881 pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> { 1882 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1883 if unsafe { s.rwlock.raw.try_upgrade() } { 1884 let rwlock = s.rwlock; 1885 mem::forget(s); 1886 Ok(RwLockWriteGuard { 1887 rwlock, 1888 marker: PhantomData, 1889 }) 1890 } else { 1891 Err(s) 1892 } 1893 } 1894 } 1895 1896 impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { 1897 /// Unlocks the `RwLock` using a fair unlock protocol. 1898 /// 1899 /// By default, `RwLock` is unfair and allow the current thread to re-lock 1900 /// the `RwLock` before another has the chance to acquire the lock, even if 1901 /// that thread has been blocked on the `RwLock` for a long time. This is 1902 /// the default because it allows much higher throughput as it avoids 1903 /// forcing a context switch on every `RwLock` unlock. This can result in one 1904 /// thread acquiring a `RwLock` many more times than other threads. 1905 /// 1906 /// However in some cases it can be beneficial to ensure fairness by forcing 1907 /// the lock to pass on to a waiting thread if there is one. This is done by 1908 /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally. 1909 #[inline] unlock_fair(s: Self)1910 pub fn unlock_fair(s: Self) { 1911 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1912 unsafe { 1913 s.rwlock.raw.unlock_upgradable_fair(); 1914 } 1915 mem::forget(s); 1916 } 1917 1918 /// Temporarily unlocks the `RwLock` to execute the given function. 1919 /// 1920 /// The `RwLock` is unlocked a fair unlock protocol. 1921 /// 1922 /// This is safe because `&mut` guarantees that there exist no other 1923 /// references to the data protected by the `RwLock`. 
1924 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1925 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 1926 where 1927 F: FnOnce() -> U, 1928 { 1929 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1930 unsafe { 1931 s.rwlock.raw.unlock_upgradable_fair(); 1932 } 1933 defer!(s.rwlock.raw.lock_upgradable()); 1934 f() 1935 } 1936 1937 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 1938 /// 1939 /// This method is functionally equivalent to calling `unlock_fair` followed 1940 /// by `upgradable_read`, however it can be much more efficient in the case where there 1941 /// are no waiting threads. 1942 #[inline] bump(s: &mut Self)1943 pub fn bump(s: &mut Self) { 1944 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1945 unsafe { 1946 s.rwlock.raw.bump_upgradable(); 1947 } 1948 } 1949 } 1950 1951 impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { 1952 /// Atomically downgrades an upgradable read lock lock into a shared read lock 1953 /// without allowing any writers to take exclusive access of the lock in the 1954 /// meantime. 1955 /// 1956 /// Note that if there are any writers currently waiting to take the lock 1957 /// then other readers may not be able to acquire the lock even if it was 1958 /// downgraded. downgrade(s: Self) -> RwLockReadGuard<'a, R, T>1959 pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> { 1960 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1961 unsafe { 1962 s.rwlock.raw.downgrade_upgradable(); 1963 } 1964 let rwlock = s.rwlock; 1965 mem::forget(s); 1966 RwLockReadGuard { 1967 rwlock, 1968 marker: PhantomData, 1969 } 1970 } 1971 } 1972 1973 impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { 1974 /// Tries to atomically upgrade an upgradable read lock into a exclusive 1975 /// write lock, until a timeout is reached. 
1976 /// 1977 /// If the access could not be granted before the timeout expires, then 1978 /// the current guard is returned. try_upgrade_for( s: Self, timeout: R::Duration, ) -> Result<RwLockWriteGuard<'a, R, T>, Self>1979 pub fn try_upgrade_for( 1980 s: Self, 1981 timeout: R::Duration, 1982 ) -> Result<RwLockWriteGuard<'a, R, T>, Self> { 1983 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1984 if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } { 1985 let rwlock = s.rwlock; 1986 mem::forget(s); 1987 Ok(RwLockWriteGuard { 1988 rwlock, 1989 marker: PhantomData, 1990 }) 1991 } else { 1992 Err(s) 1993 } 1994 } 1995 1996 /// Tries to atomically upgrade an upgradable read lock into a exclusive 1997 /// write lock, until a timeout is reached. 1998 /// 1999 /// If the access could not be granted before the timeout expires, then 2000 /// the current guard is returned. 2001 #[inline] try_upgrade_until( s: Self, timeout: R::Instant, ) -> Result<RwLockWriteGuard<'a, R, T>, Self>2002 pub fn try_upgrade_until( 2003 s: Self, 2004 timeout: R::Instant, 2005 ) -> Result<RwLockWriteGuard<'a, R, T>, Self> { 2006 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2007 if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } { 2008 let rwlock = s.rwlock; 2009 mem::forget(s); 2010 Ok(RwLockWriteGuard { 2011 rwlock, 2012 marker: PhantomData, 2013 }) 2014 } else { 2015 Err(s) 2016 } 2017 } 2018 } 2019 2020 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> { 2021 type Target = T; 2022 #[inline] deref(&self) -> &T2023 fn deref(&self) -> &T { 2024 unsafe { &*self.rwlock.data.get() } 2025 } 2026 } 2027 2028 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> { 2029 #[inline] drop(&mut self)2030 fn drop(&mut self) { 2031 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 
2032 unsafe { 2033 self.rwlock.raw.unlock_upgradable(); 2034 } 2035 } 2036 } 2037 2038 impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug 2039 for RwLockUpgradableReadGuard<'a, R, T> 2040 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2041 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2042 fmt::Debug::fmt(&**self, f) 2043 } 2044 } 2045 2046 impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display 2047 for RwLockUpgradableReadGuard<'a, R, T> 2048 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2049 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2050 (**self).fmt(f) 2051 } 2052 } 2053 2054 #[cfg(feature = "owning_ref")] 2055 unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress 2056 for RwLockUpgradableReadGuard<'a, R, T> 2057 { 2058 } 2059 2060 /// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`. 2061 /// This is similar to the `RwLockUpgradableReadGuard` struct, except instead of using a reference to unlock the 2062 /// `RwLock` it uses an `Arc<RwLock>`. This has several advantages, most notably that it has an `'static` 2063 /// lifetime. 2064 #[cfg(feature = "arc_lock")] 2065 #[must_use = "if unused the RwLock will immediately unlock"] 2066 pub struct ArcRwLockUpgradableReadGuard<R: RawRwLockUpgrade, T: ?Sized> { 2067 rwlock: Arc<RwLock<R, T>>, 2068 marker: PhantomData<R::GuardMarker>, 2069 } 2070 2071 #[cfg(feature = "arc_lock")] 2072 impl<R: RawRwLockUpgrade , T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> { 2073 /// Returns a reference to the rwlock, contained in its original `Arc`. rwlock(s: &Self) -> &Arc<RwLock<R, T>>2074 pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> { 2075 &s.rwlock 2076 } 2077 2078 /// Temporarily unlocks the `RwLock` to execute the given function. 2079 /// 2080 /// This is functionally identical to the `unlocked` method on [`RwLockUpgradableReadGuard`]. 
2081 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,2082 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 2083 where 2084 F: FnOnce() -> U, 2085 { 2086 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2087 unsafe { 2088 s.rwlock.raw.unlock_upgradable(); 2089 } 2090 defer!(s.rwlock.raw.lock_upgradable()); 2091 f() 2092 } 2093 2094 /// Atomically upgrades an upgradable read lock lock into a exclusive write lock, 2095 /// blocking the current thread until it can be acquired. upgrade(s: Self) -> ArcRwLockWriteGuard<R, T>2096 pub fn upgrade(s: Self) -> ArcRwLockWriteGuard<R, T> { 2097 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2098 unsafe { 2099 s.rwlock.raw.upgrade(); 2100 } 2101 2102 // SAFETY: avoid incrementing or decrementing the refcount using ManuallyDrop and reading the Arc out 2103 // of the struct 2104 let s = ManuallyDrop::new(s); 2105 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2106 2107 ArcRwLockWriteGuard { 2108 rwlock, 2109 marker: PhantomData, 2110 } 2111 } 2112 2113 /// Tries to atomically upgrade an upgradable read lock into a exclusive write lock. 2114 /// 2115 /// If the access could not be granted at this time, then the current guard is returned. try_upgrade(s: Self) -> Result<ArcRwLockWriteGuard<R, T>, Self>2116 pub fn try_upgrade(s: Self) -> Result<ArcRwLockWriteGuard<R, T>, Self> { 2117 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2118 if unsafe { s.rwlock.raw.try_upgrade() } { 2119 // SAFETY: same as above 2120 let s = ManuallyDrop::new(s); 2121 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2122 2123 Ok(ArcRwLockWriteGuard { 2124 rwlock, 2125 marker: PhantomData, 2126 }) 2127 } else { 2128 Err(s) 2129 } 2130 } 2131 } 2132 2133 #[cfg(feature = "arc_lock")] 2134 impl<R: RawRwLockUpgradeFair, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> { 2135 /// Unlocks the `RwLock` using a fair unlock protocol. 
2136 /// 2137 /// This is functionally identical to the `unlock_fair` method on [`RwLockUpgradableReadGuard`]. 2138 #[inline] unlock_fair(s: Self)2139 pub fn unlock_fair(s: Self) { 2140 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2141 unsafe { 2142 s.rwlock.raw.unlock_upgradable_fair(); 2143 } 2144 2145 // SAFETY: make sure we decrement the refcount properly 2146 let mut s = ManuallyDrop::new(s); 2147 unsafe { ptr::drop_in_place(&mut s.rwlock) }; 2148 } 2149 2150 /// Temporarily unlocks the `RwLock` to execute the given function. 2151 /// 2152 /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockUpgradableReadGuard`]. 2153 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,2154 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 2155 where 2156 F: FnOnce() -> U, 2157 { 2158 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2159 unsafe { 2160 s.rwlock.raw.unlock_upgradable_fair(); 2161 } 2162 defer!(s.rwlock.raw.lock_upgradable()); 2163 f() 2164 } 2165 2166 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 2167 /// 2168 /// This method is functionally equivalent to calling `bump` on [`RwLockUpgradableReadGuard`]. 2169 #[inline] bump(s: &mut Self)2170 pub fn bump(s: &mut Self) { 2171 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2172 unsafe { 2173 s.rwlock.raw.bump_upgradable(); 2174 } 2175 } 2176 } 2177 2178 #[cfg(feature = "arc_lock")] 2179 impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> { 2180 /// Atomically downgrades an upgradable read lock lock into a shared read lock 2181 /// without allowing any writers to take exclusive access of the lock in the 2182 /// meantime. 2183 /// 2184 /// Note that if there are any writers currently waiting to take the lock 2185 /// then other readers may not be able to acquire the lock even if it was 2186 /// downgraded. 
downgrade(s: Self) -> ArcRwLockReadGuard<R, T>2187 pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> { 2188 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2189 unsafe { 2190 s.rwlock.raw.downgrade_upgradable(); 2191 } 2192 2193 // SAFETY: use ManuallyDrop and ptr::read to ensure the refcount is not changed 2194 let s = ManuallyDrop::new(s); 2195 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2196 2197 ArcRwLockReadGuard { 2198 rwlock, 2199 marker: PhantomData, 2200 } 2201 } 2202 } 2203 2204 #[cfg(feature = "arc_lock")] 2205 impl<R: RawRwLockUpgradeTimed, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> { 2206 /// Tries to atomically upgrade an upgradable read lock into a exclusive 2207 /// write lock, until a timeout is reached. 2208 /// 2209 /// If the access could not be granted before the timeout expires, then 2210 /// the current guard is returned. try_upgrade_for( s: Self, timeout: R::Duration, ) -> Result<ArcRwLockWriteGuard<R, T>, Self>2211 pub fn try_upgrade_for( 2212 s: Self, 2213 timeout: R::Duration, 2214 ) -> Result<ArcRwLockWriteGuard<R, T>, Self> { 2215 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2216 if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } { 2217 // SAFETY: same as above 2218 let s = ManuallyDrop::new(s); 2219 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2220 2221 Ok(ArcRwLockWriteGuard { 2222 rwlock, 2223 marker: PhantomData, 2224 }) 2225 } else { 2226 Err(s) 2227 } 2228 } 2229 2230 /// Tries to atomically upgrade an upgradable read lock into a exclusive 2231 /// write lock, until a timeout is reached. 2232 /// 2233 /// If the access could not be granted before the timeout expires, then 2234 /// the current guard is returned. 
2235 #[inline] try_upgrade_until( s: Self, timeout: R::Instant, ) -> Result<ArcRwLockWriteGuard<R, T>, Self>2236 pub fn try_upgrade_until( 2237 s: Self, 2238 timeout: R::Instant, 2239 ) -> Result<ArcRwLockWriteGuard<R, T>, Self> { 2240 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2241 if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } { 2242 // SAFETY: same as above 2243 let s = ManuallyDrop::new(s); 2244 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2245 2246 Ok(ArcRwLockWriteGuard { 2247 rwlock, 2248 marker: PhantomData, 2249 }) 2250 } else { 2251 Err(s) 2252 } 2253 } 2254 } 2255 2256 #[cfg(feature = "arc_lock")] 2257 impl<R: RawRwLockUpgrade, T: ?Sized> Deref for ArcRwLockUpgradableReadGuard<R, T> { 2258 type Target = T; 2259 #[inline] deref(&self) -> &T2260 fn deref(&self) -> &T { 2261 unsafe { &*self.rwlock.data.get() } 2262 } 2263 } 2264 2265 #[cfg(feature = "arc_lock")] 2266 impl<R: RawRwLockUpgrade, T: ?Sized> Drop for ArcRwLockUpgradableReadGuard<R, T> { 2267 #[inline] drop(&mut self)2268 fn drop(&mut self) { 2269 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2270 unsafe { 2271 self.rwlock.raw.unlock_upgradable(); 2272 } 2273 } 2274 } 2275 2276 #[cfg(feature = "arc_lock")] 2277 impl<R: RawRwLockUpgrade, T: fmt::Debug + ?Sized> fmt::Debug 2278 for ArcRwLockUpgradableReadGuard<R, T> 2279 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2280 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2281 fmt::Debug::fmt(&**self, f) 2282 } 2283 } 2284 2285 #[cfg(feature = "arc_lock")] 2286 impl<R: RawRwLockUpgrade, T: fmt::Display + ?Sized> fmt::Display 2287 for ArcRwLockUpgradableReadGuard<R, T> 2288 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2289 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2290 (**self).fmt(f) 2291 } 2292 } 2293 2294 2295 /// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a 2296 /// subfield of the protected data. 
2297 /// 2298 /// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the 2299 /// former doesn't support temporarily unlocking and re-locking, since that 2300 /// could introduce soundness issues if the locked object is modified by another 2301 /// thread. 2302 #[must_use = "if unused the RwLock will immediately unlock"] 2303 pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> { 2304 raw: &'a R, 2305 data: *const T, 2306 marker: PhantomData<&'a T>, 2307 } 2308 2309 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {} 2310 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Send for MappedRwLockReadGuard<'a, R, T> where 2311 R::GuardMarker: Send 2312 { 2313 } 2314 2315 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { 2316 /// Make a new `MappedRwLockReadGuard` for a component of the locked data. 2317 /// 2318 /// This operation cannot fail as the `MappedRwLockReadGuard` passed 2319 /// in already locked the data. 2320 /// 2321 /// This is an associated function that needs to be 2322 /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of 2323 /// the same name on the contents of the locked data. 2324 #[inline] map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> where F: FnOnce(&T) -> &U,2325 pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> 2326 where 2327 F: FnOnce(&T) -> &U, 2328 { 2329 let raw = s.raw; 2330 let data = f(unsafe { &*s.data }); 2331 mem::forget(s); 2332 MappedRwLockReadGuard { 2333 raw, 2334 data, 2335 marker: PhantomData, 2336 } 2337 } 2338 2339 /// Attempts to make a new `MappedRwLockReadGuard` for a component of the 2340 /// locked data. The original guard is return if the closure returns `None`. 2341 /// 2342 /// This operation cannot fail as the `MappedRwLockReadGuard` passed 2343 /// in already locked the data. 
2344 /// 2345 /// This is an associated function that needs to be 2346 /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of 2347 /// the same name on the contents of the locked data. 2348 #[inline] try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> where F: FnOnce(&T) -> Option<&U>,2349 pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> 2350 where 2351 F: FnOnce(&T) -> Option<&U>, 2352 { 2353 let raw = s.raw; 2354 let data = match f(unsafe { &*s.data }) { 2355 Some(data) => data, 2356 None => return Err(s), 2357 }; 2358 mem::forget(s); 2359 Ok(MappedRwLockReadGuard { 2360 raw, 2361 data, 2362 marker: PhantomData, 2363 }) 2364 } 2365 } 2366 2367 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { 2368 /// Unlocks the `RwLock` using a fair unlock protocol. 2369 /// 2370 /// By default, `RwLock` is unfair and allow the current thread to re-lock 2371 /// the `RwLock` before another has the chance to acquire the lock, even if 2372 /// that thread has been blocked on the `RwLock` for a long time. This is 2373 /// the default because it allows much higher throughput as it avoids 2374 /// forcing a context switch on every `RwLock` unlock. This can result in one 2375 /// thread acquiring a `RwLock` many more times than other threads. 2376 /// 2377 /// However in some cases it can be beneficial to ensure fairness by forcing 2378 /// the lock to pass on to a waiting thread if there is one. This is done by 2379 /// using this method instead of dropping the `MappedRwLockReadGuard` normally. 2380 #[inline] unlock_fair(s: Self)2381 pub fn unlock_fair(s: Self) { 2382 // Safety: A MappedRwLockReadGuard always holds a shared lock. 
2383 unsafe { 2384 s.raw.unlock_shared_fair(); 2385 } 2386 mem::forget(s); 2387 } 2388 } 2389 2390 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> { 2391 type Target = T; 2392 #[inline] deref(&self) -> &T2393 fn deref(&self) -> &T { 2394 unsafe { &*self.data } 2395 } 2396 } 2397 2398 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> { 2399 #[inline] drop(&mut self)2400 fn drop(&mut self) { 2401 // Safety: A MappedRwLockReadGuard always holds a shared lock. 2402 unsafe { 2403 self.raw.unlock_shared(); 2404 } 2405 } 2406 } 2407 2408 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug 2409 for MappedRwLockReadGuard<'a, R, T> 2410 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2411 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2412 fmt::Debug::fmt(&**self, f) 2413 } 2414 } 2415 2416 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display 2417 for MappedRwLockReadGuard<'a, R, T> 2418 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2419 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2420 (**self).fmt(f) 2421 } 2422 } 2423 2424 #[cfg(feature = "owning_ref")] 2425 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress 2426 for MappedRwLockReadGuard<'a, R, T> 2427 { 2428 } 2429 2430 /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a 2431 /// subfield of the protected data. 2432 /// 2433 /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the 2434 /// former doesn't support temporarily unlocking and re-locking, since that 2435 /// could introduce soundness issues if the locked object is modified by another 2436 /// thread. 
2437 #[must_use = "if unused the RwLock will immediately unlock"] 2438 pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> { 2439 raw: &'a R, 2440 data: *mut T, 2441 marker: PhantomData<&'a mut T>, 2442 } 2443 2444 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync 2445 for MappedRwLockWriteGuard<'a, R, T> 2446 { 2447 } 2448 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Send + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where 2449 R::GuardMarker: Send 2450 { 2451 } 2452 2453 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> { 2454 /// Make a new `MappedRwLockWriteGuard` for a component of the locked data. 2455 /// 2456 /// This operation cannot fail as the `MappedRwLockWriteGuard` passed 2457 /// in already locked the data. 2458 /// 2459 /// This is an associated function that needs to be 2460 /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of 2461 /// the same name on the contents of the locked data. 2462 #[inline] map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U> where F: FnOnce(&mut T) -> &mut U,2463 pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U> 2464 where 2465 F: FnOnce(&mut T) -> &mut U, 2466 { 2467 let raw = s.raw; 2468 let data = f(unsafe { &mut *s.data }); 2469 mem::forget(s); 2470 MappedRwLockWriteGuard { 2471 raw, 2472 data, 2473 marker: PhantomData, 2474 } 2475 } 2476 2477 /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the 2478 /// locked data. The original guard is return if the closure returns `None`. 2479 /// 2480 /// This operation cannot fail as the `MappedRwLockWriteGuard` passed 2481 /// in already locked the data. 2482 /// 2483 /// This is an associated function that needs to be 2484 /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of 2485 /// the same name on the contents of the locked data. 
2486 #[inline] try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self> where F: FnOnce(&mut T) -> Option<&mut U>,2487 pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self> 2488 where 2489 F: FnOnce(&mut T) -> Option<&mut U>, 2490 { 2491 let raw = s.raw; 2492 let data = match f(unsafe { &mut *s.data }) { 2493 Some(data) => data, 2494 None => return Err(s), 2495 }; 2496 mem::forget(s); 2497 Ok(MappedRwLockWriteGuard { 2498 raw, 2499 data, 2500 marker: PhantomData, 2501 }) 2502 } 2503 } 2504 2505 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> { 2506 /// Unlocks the `RwLock` using a fair unlock protocol. 2507 /// 2508 /// By default, `RwLock` is unfair and allow the current thread to re-lock 2509 /// the `RwLock` before another has the chance to acquire the lock, even if 2510 /// that thread has been blocked on the `RwLock` for a long time. This is 2511 /// the default because it allows much higher throughput as it avoids 2512 /// forcing a context switch on every `RwLock` unlock. This can result in one 2513 /// thread acquiring a `RwLock` many more times than other threads. 2514 /// 2515 /// However in some cases it can be beneficial to ensure fairness by forcing 2516 /// the lock to pass on to a waiting thread if there is one. This is done by 2517 /// using this method instead of dropping the `MappedRwLockWriteGuard` normally. 2518 #[inline] unlock_fair(s: Self)2519 pub fn unlock_fair(s: Self) { 2520 // Safety: A MappedRwLockWriteGuard always holds an exclusive lock. 
2521 unsafe { 2522 s.raw.unlock_exclusive_fair(); 2523 } 2524 mem::forget(s); 2525 } 2526 } 2527 2528 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> { 2529 type Target = T; 2530 #[inline] deref(&self) -> &T2531 fn deref(&self) -> &T { 2532 unsafe { &*self.data } 2533 } 2534 } 2535 2536 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> { 2537 #[inline] deref_mut(&mut self) -> &mut T2538 fn deref_mut(&mut self) -> &mut T { 2539 unsafe { &mut *self.data } 2540 } 2541 } 2542 2543 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> { 2544 #[inline] drop(&mut self)2545 fn drop(&mut self) { 2546 // Safety: A MappedRwLockWriteGuard always holds an exclusive lock. 2547 unsafe { 2548 self.raw.unlock_exclusive(); 2549 } 2550 } 2551 } 2552 2553 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug 2554 for MappedRwLockWriteGuard<'a, R, T> 2555 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2556 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2557 fmt::Debug::fmt(&**self, f) 2558 } 2559 } 2560 2561 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display 2562 for MappedRwLockWriteGuard<'a, R, T> 2563 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2564 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2565 (**self).fmt(f) 2566 } 2567 } 2568 2569 #[cfg(feature = "owning_ref")] 2570 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress 2571 for MappedRwLockWriteGuard<'a, R, T> 2572 { 2573 } 2574