// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};

#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;

#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// Basic operations for a reader-writer lock.
///
/// Types implementing this trait can be used by `RwLock` to form a safe and
/// fully-functioning `RwLock` type.
///
/// # Safety
///
/// Implementations of this trait must ensure that the `RwLock` is actually
/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
/// lock exists, and a shared lock can't be acquired while an exclusive lock
/// exists.
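///
/// # Example
///
/// A minimal sketch of an implementation backed by a spinning atomic
/// counter. The `RawSpinRwLock` name and its state encoding are
/// illustrative, not part of this crate:
///
/// ```
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use lock_api::{GuardSend, RawRwLock};
///
/// // State encoding: 0 = unlocked, usize::MAX = write-locked,
/// // any other value n = n active readers.
/// struct RawSpinRwLock(AtomicUsize);
///
/// unsafe impl RawRwLock for RawSpinRwLock {
///     const INIT: Self = RawSpinRwLock(AtomicUsize::new(0));
///     type GuardMarker = GuardSend;
///
///     fn lock_shared(&self) {
///         while !self.try_lock_shared() {}
///     }
///
///     fn try_lock_shared(&self) -> bool {
///         let state = self.0.load(Ordering::Relaxed);
///         // Fail if write-locked, otherwise try to register one more reader.
///         state != usize::MAX
///             && self
///                 .0
///                 .compare_exchange(state, state + 1, Ordering::Acquire, Ordering::Relaxed)
///                 .is_ok()
///     }
///
///     fn unlock_shared(&self) {
///         self.0.fetch_sub(1, Ordering::Release);
///     }
///
///     fn lock_exclusive(&self) {
///         while !self.try_lock_exclusive() {}
///     }
///
///     fn try_lock_exclusive(&self) -> bool {
///         // Only lock if there are no readers and no writer.
///         self.0
///             .compare_exchange(0, usize::MAX, Ordering::Acquire, Ordering::Relaxed)
///             .is_ok()
///     }
///
///     fn unlock_exclusive(&self) {
///         self.0.store(0, Ordering::Release);
///     }
/// }
/// ```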
pub unsafe trait RawRwLock {
    /// Initial value for an unlocked `RwLock`.
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Marker type which determines whether a lock guard should be `Send`. Use
    /// one of the `GuardSend` or `GuardNoSend` helper types here.
    type GuardMarker;

    /// Acquires a shared lock, blocking the current thread until it is able to do so.
    fn lock_shared(&self);

    /// Attempts to acquire a shared lock without blocking.
    fn try_lock_shared(&self) -> bool;

    /// Releases a shared lock.
    fn unlock_shared(&self);

    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
    fn lock_exclusive(&self);

    /// Attempts to acquire an exclusive lock without blocking.
    fn try_lock_exclusive(&self) -> bool;

    /// Releases an exclusive lock.
    fn unlock_exclusive(&self);
}

/// Additional methods for RwLocks which support fair unlocking.
///
/// Fair unlocking means that a lock is handed directly over to the next waiting
/// thread if there is one, without giving other threads the opportunity to
/// "steal" the lock in the meantime. This is typically slower than unfair
/// unlocking, but may be necessary in certain circumstances.
pub unsafe trait RawRwLockFair: RawRwLock {
    /// Releases a shared lock using a fair unlock protocol.
    fn unlock_shared_fair(&self);

    /// Releases an exclusive lock using a fair unlock protocol.
    fn unlock_exclusive_fair(&self);

    /// Temporarily yields a shared lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_shared_fair` followed
    /// by `lock_shared`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    fn bump_shared(&self) {
        self.unlock_shared_fair();
        self.lock_shared();
    }

    /// Temporarily yields an exclusive lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
    /// by `lock_exclusive`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    fn bump_exclusive(&self) {
        self.unlock_exclusive_fair();
        self.lock_exclusive();
    }
}

/// Additional methods for RwLocks which support atomically downgrading an
/// exclusive lock to a shared lock.
pub unsafe trait RawRwLockDowngrade: RawRwLock {
    /// Atomically downgrades an exclusive lock into a shared lock without
    /// allowing any thread to take an exclusive lock in the meantime.
    fn downgrade(&self);
}

/// Additional methods for RwLocks which support locking with timeouts.
///
/// The `Duration` and `Instant` types are specified as associated types so that
/// this trait is usable even in `no_std` environments.
pub unsafe trait RawRwLockTimed: RawRwLock {
    /// Duration type used for `try_lock_for`.
    type Duration;

    /// Instant type used for `try_lock_until`.
    type Instant;

    /// Attempts to acquire a shared lock until a timeout is reached.
    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire a shared lock until a timeout is reached.
    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;

    /// Attempts to acquire an exclusive lock until a timeout is reached.
    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire an exclusive lock until a timeout is reached.
    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
}

/// Additional methods for RwLocks which support recursive read locks.
///
/// These are guaranteed to succeed without blocking if
/// another read lock is held at the time of the call. This allows a thread
/// to recursively lock a `RwLock`. However, using these methods can cause
/// writers to starve since readers no longer block if a writer is waiting
/// for the lock.
pub unsafe trait RawRwLockRecursive: RawRwLock {
    /// Acquires a shared lock without deadlocking in case of a recursive lock.
    fn lock_shared_recursive(&self);

    /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock.
    fn try_lock_shared_recursive(&self) -> bool;
}

/// Additional methods for RwLocks which support recursive read locks and timeouts.
pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
    /// Attempts to acquire a shared lock until a timeout is reached, without
    /// deadlocking in case of a recursive lock.
    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire a shared lock until a timeout is reached, without
    /// deadlocking in case of a recursive lock.
    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
}

/// Additional methods for RwLocks which support atomically upgrading a shared
/// lock to an exclusive lock.
///
/// This requires acquiring a special "upgradable read lock" instead of a
/// normal shared lock. There may only be one upgradable lock at any time,
/// otherwise deadlocks could occur when upgrading.
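///
/// # Example
///
/// A sketch of the intended usage pattern at the `RwLock` level; the
/// `lock`, `needs_update` and `recompute` names are hypothetical
/// placeholders, not part of this crate:
///
/// ```ignore
/// // `lock` is a lock_api::RwLock whose raw lock implements RawRwLockUpgrade.
/// let upgradable = lock.upgradable_read();
/// if needs_update(&*upgradable) {
///     // Upgrade atomically: no writer can sneak in between the read
///     // and the write.
///     let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
///     *writer = recompute();
/// }
/// ```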
pub unsafe trait RawRwLockUpgrade: RawRwLock {
    /// Acquires an upgradable lock, blocking the current thread until it is able to do so.
    fn lock_upgradable(&self);

    /// Attempts to acquire an upgradable lock without blocking.
    fn try_lock_upgradable(&self) -> bool;

    /// Releases an upgradable lock.
    fn unlock_upgradable(&self);

    /// Upgrades an upgradable lock to an exclusive lock.
    fn upgrade(&self);

    /// Attempts to upgrade an upgradable lock to an exclusive lock without
    /// blocking.
    fn try_upgrade(&self) -> bool;
}

/// Additional methods for RwLocks which support upgradable locks and fair
/// unlocking.
pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
    /// Releases an upgradable lock using a fair unlock protocol.
    fn unlock_upgradable_fair(&self);

    /// Temporarily yields an upgradable lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
    /// by `lock_upgradable`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    fn bump_upgradable(&self) {
        self.unlock_upgradable_fair();
        self.lock_upgradable();
    }
}

/// Additional methods for RwLocks which support upgradable locks and lock
/// downgrading.
pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
    /// Downgrades an upgradable lock to a shared lock.
    fn downgrade_upgradable(&self);

    /// Downgrades an exclusive lock to an upgradable lock.
    fn downgrade_to_upgradable(&self);
}

/// Additional methods for RwLocks which support upgradable locks and locking
/// with timeouts.
pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
    /// Attempts to acquire an upgradable lock until a timeout is reached.
    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire an upgradable lock until a timeout is reached.
    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;

    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
    /// timeout is reached.
    fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
    /// timeout is reached.
    fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
}

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
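///
/// # Example
///
/// A sketch of basic usage, assuming a `RawRwLock` implementation like the
/// spinning one sketched on `RawRwLock` (its boilerplate is hidden here
/// for brevity):
///
/// ```
/// # use std::sync::atomic::{AtomicUsize, Ordering};
/// # use lock_api::{GuardSend, RawRwLock};
/// # struct RawSpinRwLock(AtomicUsize);
/// # unsafe impl RawRwLock for RawSpinRwLock {
/// #     const INIT: Self = RawSpinRwLock(AtomicUsize::new(0));
/// #     type GuardMarker = GuardSend;
/// #     fn lock_shared(&self) { while !self.try_lock_shared() {} }
/// #     fn try_lock_shared(&self) -> bool {
/// #         let state = self.0.load(Ordering::Relaxed);
/// #         state != usize::MAX
/// #             && self.0.compare_exchange(state, state + 1, Ordering::Acquire, Ordering::Relaxed).is_ok()
/// #     }
/// #     fn unlock_shared(&self) { self.0.fetch_sub(1, Ordering::Release); }
/// #     fn lock_exclusive(&self) { while !self.try_lock_exclusive() {} }
/// #     fn try_lock_exclusive(&self) -> bool {
/// #         self.0.compare_exchange(0, usize::MAX, Ordering::Acquire, Ordering::Relaxed).is_ok()
/// #     }
/// #     fn unlock_exclusive(&self) { self.0.store(0, Ordering::Release); }
/// # }
/// type RwLock<T> = lock_api::RwLock<RawSpinRwLock, T>;
///
/// let lock = RwLock::new(5);
///
/// // Any number of read guards can be held at once.
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1 + *r2, 10);
/// } // both read guards are dropped (and the lock released) here
///
/// // A write guard gives exclusive, mutable access.
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// }
/// ```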
pub struct RwLock<R, T: ?Sized> {
    raw: R,
    data: UnsafeCell<T>,
}

// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, T> Serialize for RwLock<R, T>
where
    R: RawRwLock,
    T: Serialize + ?Sized,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.read().serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
where
    R: RawRwLock,
    T: Deserialize<'de> + ?Sized,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(RwLock::new)
    }
}

unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}

impl<R: RawRwLock, T> RwLock<R, T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(feature = "nightly")]
    #[inline]
    pub const fn new(val: T) -> RwLock<R, T> {
        RwLock {
            data: UnsafeCell::new(val),
            raw: R::INIT,
        }
    }

    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(not(feature = "nightly"))]
    #[inline]
    pub fn new(val: T) -> RwLock<R, T> {
        RwLock {
            data: UnsafeCell::new(val),
            raw: R::INIT,
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    #[inline]
    #[allow(unused_unsafe)]
    pub fn into_inner(self) -> T {
        unsafe { self.data.into_inner() }
    }
}

impl<R, T> RwLock<R, T> {
    /// Creates a new instance of an `RwLock<T>` based on a pre-existing raw
    /// reader-writer lock.
    ///
    /// This allows creating a `RwLock<T>` in a constant context on stable
    /// Rust.
    #[inline]
    pub const fn const_new(raw_rwlock: R, val: T) -> RwLock<R, T> {
        RwLock {
            data: UnsafeCell::new(val),
            raw: raw_rwlock,
        }
    }
}

impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn read_guard(&self) -> RwLockReadGuard<'_, R, T> {
        RwLockReadGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn write_guard(&self) -> RwLockWriteGuard<'_, R, T> {
        RwLockWriteGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
    /// the current thread already holds one may result in a deadlock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<'_, R, T> {
        self.raw.lock_shared();
        // SAFETY: The lock is held, as required.
        unsafe { self.read_guard() }
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Locks this `RwLock` with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock`
    /// when dropped.
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<'_, R, T> {
        self.raw.lock_exclusive();
        // SAFETY: The lock is held, as required.
        unsafe { self.write_guard() }
    }

    /// Attempts to lock this `RwLock` with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> {
        if self.raw.try_lock_exclusive() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.write_guard() })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data.get() }
    }

    /// Forcibly unlocks a read lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
    #[inline]
    pub unsafe fn force_unlock_read(&self) {
        self.raw.unlock_shared();
    }

    /// Forcibly unlocks a write lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
    #[inline]
    pub unsafe fn force_unlock_write(&self) {
        self.raw.unlock_exclusive();
    }

    /// Returns the underlying raw reader-writer lock object.
    ///
    /// Note that you will most likely need to import the `RawRwLock` trait from
    /// `lock_api` to be able to call functions on the raw
    /// reader-writer lock.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking the lock while
    /// still holding a reference to a lock guard.
    pub unsafe fn raw(&self) -> &R {
        &self.raw
    }
}

impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
    /// Forcibly unlocks a read lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
    #[inline]
    pub unsafe fn force_unlock_read_fair(&self) {
        self.raw.unlock_shared_fair();
    }

    /// Forcibly unlocks a write lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
    #[inline]
    pub unsafe fn force_unlock_write_fair(&self) {
        self.raw.unlock_exclusive_fair();
    }
}

impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with exclusive write access until a
    /// timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the exclusive access when it is dropped.
    #[inline]
    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> {
        if self.raw.try_lock_exclusive_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.write_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with exclusive write access until a
    /// timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the exclusive access when it is dropped.
    #[inline]
    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> {
        if self.raw.try_lock_exclusive_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.write_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Unlike `read`, this method is guaranteed to succeed without blocking if
    /// another read lock is held at the time of the call. This allows a thread
    /// to recursively lock a `RwLock`. However, using this method can cause
    /// writers to starve since readers no longer block if a writer is waiting
    /// for the lock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    #[inline]
    pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> {
        self.raw.lock_shared_recursive();
        // SAFETY: The lock is held, as required.
        unsafe { self.read_guard() }
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This method is guaranteed to succeed if another read lock is held at the
    /// time of the call. See the documentation for `read_recursive` for details.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_recursive() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    ///
    /// This method is guaranteed to succeed without blocking if another read
    /// lock is held at the time of the call. See the documentation for
    /// `read_recursive` for details.
    #[inline]
    pub fn try_read_recursive_for(
        &self,
        timeout: R::Duration,
    ) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_recursive_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_read_recursive_until(
        &self,
        timeout: R::Instant,
    ) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_recursive_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
        RwLockUpgradableReadGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with upgradable read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers or other
    /// upgradable reads which hold the lock. There may be other readers currently
    /// inside the lock when this method returns.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    #[inline]
    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
        self.raw.lock_upgradable();
        // SAFETY: The lock is held, as required.
        unsafe { self.upgradable_guard() }
    }

    /// Attempts to acquire this `RwLock` with upgradable read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
        if self.raw.try_lock_upgradable() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.upgradable_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_upgradable_read_for(
        &self,
        timeout: R::Duration,
    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
        if self.raw.try_lock_upgradable_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.upgradable_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_upgradable_read_until(
        &self,
        timeout: R::Instant,
    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
        if self.raw.try_lock_upgradable_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.upgradable_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> {
    #[inline]
    fn default() -> RwLock<R, T> {
        RwLock::new(Default::default())
    }
}

impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
    #[inline]
    fn from(t: T) -> RwLock<R, T> {
        RwLock::new(t)
    }
}

impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.try_read() {
            Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
            None => {
                struct LockedPlaceholder;
                impl fmt::Debug for LockedPlaceholder {
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                        f.write_str("<locked>")
                    }
                }

                f.debug_struct("RwLock")
                    .field("data", &LockedPlaceholder)
                    .finish()
            }
        }
    }
}

/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
    rwlock: &'a RwLock<R, T>,
    marker: PhantomData<(&'a T, R::GuardMarker)>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockReadGuard<'a, R, T> {}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
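    ///
    /// # Example
    ///
    /// A sketch that narrows a guard to one field of the protected value;
    /// the `Config` type and `lock` binding are hypothetical:
    ///
    /// ```ignore
    /// struct Config { name: String, retries: u32 }
    ///
    /// let guard = lock.read(); // RwLockReadGuard<'_, R, Config>
    /// let name = RwLockReadGuard::map(guard, |cfg| &cfg.name);
    /// // `name` is a MappedRwLockReadGuard<'_, R, String>; the lock is
    /// // still read-locked and is released when `name` is dropped.
    /// ```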
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &*s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &*s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
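    ///
    /// # Example
    ///
    /// A sketch; `lock` and `expensive_computation` are hypothetical:
    ///
    /// ```ignore
    /// let mut guard = lock.read();
    /// let result = RwLockReadGuard::unlocked(&mut guard, || {
    ///     // The lock is released while this closure runs and
    ///     // re-acquired before `unlocked` returns.
    ///     expensive_computation()
    /// });
    /// ```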
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_shared();
        defer!(s.rwlock.raw.lock_shared());
        f()
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockReadGuard` normally.
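    ///
    /// # Example
    ///
    /// A sketch of handing the lock directly to a waiting thread; `lock` is
    /// a hypothetical `RwLock` whose raw lock implements `RawRwLockFair`:
    ///
    /// ```ignore
    /// let guard = lock.read();
    /// // ... use the shared data ...
    /// // Instead of `drop(guard)`, pass the lock on fairly:
    /// RwLockReadGuard::unlock_fair(guard);
    /// ```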
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.rwlock.raw.unlock_shared_fair();
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_shared_fair();
        defer!(s.rwlock.raw.lock_shared());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `read`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        s.rwlock.raw.bump_shared();
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_shared();
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
    rwlock: &'a RwLock<R, T>,
    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockWriteGuard<'a, R, T> {}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &mut *s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_exclusive();
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }
}

impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
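    ///
    /// # Example
    ///
    /// A sketch, where `lock` is a hypothetical `RwLock` whose raw lock
    /// implements `RawRwLockDowngrade`:
    ///
    /// ```ignore
    /// let mut writer = lock.write();
    /// *writer += 1;
    /// // Keep reading without ever letting another writer in between:
    /// let reader = RwLockWriteGuard::downgrade(writer);
    /// assert!(*reader > 0);
    /// ```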
downgrade(s: Self) -> RwLockReadGuard<'a, R, T>1019     pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
1020         s.rwlock.raw.downgrade();
1021         let rwlock = s.rwlock;
1022         mem::forget(s);
1023         RwLockReadGuard {
1024             rwlock,
1025             marker: PhantomData,
1026         }
1027     }
1028 }
1029 
1030 impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1031     /// Atomically downgrades a write lock into an upgradable read lock without allowing any
1032     /// writers to take exclusive access of the lock in the meantime.
1033     ///
1034     /// Note that if there are any writers currently waiting to take the lock
1035     /// then other readers may not be able to acquire the lock even if it was
1036     /// downgraded.
downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T>1037     pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
1038         s.rwlock.raw.downgrade_to_upgradable();
1039         let rwlock = s.rwlock;
1040         mem::forget(s);
1041         RwLockUpgradableReadGuard {
1042             rwlock,
1043             marker: PhantomData,
1044         }
1045     }
1046 }
1047 
1048 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1049     /// Unlocks the `RwLock` using a fair unlock protocol.
1050     ///
1051     /// By default, `RwLock` is unfair and allow the current thread to re-lock
1052     /// the `RwLock` before another has the chance to acquire the lock, even if
1053     /// that thread has been blocked on the `RwLock` for a long time. This is
1054     /// the default because it allows much higher throughput as it avoids
1055     /// forcing a context switch on every `RwLock` unlock. This can result in one
1056     /// thread acquiring a `RwLock` many more times than other threads.
1057     ///
1058     /// However in some cases it can be beneficial to ensure fairness by forcing
1059     /// the lock to pass on to a waiting thread if there is one. This is done by
1060     /// using this method instead of dropping the `RwLockWriteGuard` normally.
1061     #[inline]
unlock_fair(s: Self)1062     pub fn unlock_fair(s: Self) {
1063         s.rwlock.raw.unlock_exclusive_fair();
1064         mem::forget(s);
1065     }
1066 
1067     /// Temporarily unlocks the `RwLock` to execute the given function.
1068     ///
1069     /// The `RwLock` is unlocked a fair unlock protocol.
1070     ///
1071     /// This is safe because `&mut` guarantees that there exist no other
1072     /// references to the data protected by the `RwLock`.
1073     #[inline]
unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1074     pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1075     where
1076         F: FnOnce() -> U,
1077     {
1078         s.rwlock.raw.unlock_exclusive_fair();
1079         defer!(s.rwlock.raw.lock_exclusive());
1080         f()
1081     }
1082 
1083     /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1084     ///
1085     /// This method is functionally equivalent to calling `unlock_fair` followed
1086     /// by `write`, however it can be much more efficient in the case where there
1087     /// are no waiting threads.
1088     #[inline]
bump(s: &mut Self)1089     pub fn bump(s: &mut Self) {
1090         s.rwlock.raw.bump_exclusive();
1091     }
1092 }
1093 
1094 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
1095     type Target = T;
1096     #[inline]
deref(&self) -> &T1097     fn deref(&self) -> &T {
1098         unsafe { &*self.rwlock.data.get() }
1099     }
1100 }
1101 
1102 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
1103     #[inline]
deref_mut(&mut self) -> &mut T1104     fn deref_mut(&mut self) -> &mut T {
1105         unsafe { &mut *self.rwlock.data.get() }
1106     }
1107 }
1108 
1109 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
1110     #[inline]
drop(&mut self)1111     fn drop(&mut self) {
1112         self.rwlock.raw.unlock_exclusive();
1113     }
1114 }
1115 
1116 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1117     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1118         fmt::Debug::fmt(&**self, f)
1119     }
1120 }
1121 
1122 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1123     for RwLockWriteGuard<'a, R, T>
1124 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1125     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1126         (**self).fmt(f)
1127     }
1128 }
1129 
1130 #[cfg(feature = "owning_ref")]
1131 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
1132 
1133 /// RAII structure used to release the upgradable read access of a lock when
1134 /// dropped.
1135 #[must_use = "if unused the RwLock will immediately unlock"]
1136 pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> {
1137     rwlock: &'a RwLock<R, T>,
1138     marker: PhantomData<(&'a T, R::GuardMarker)>,
1139 }
1140 
1141 unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
1142     for RwLockUpgradableReadGuard<'a, R, T>
1143 {
1144 }
1145 
1146 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1147     /// Returns a reference to the original reader-writer lock object.
rwlock(s: &Self) -> &'a RwLock<R, T>1148     pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
1149         s.rwlock
1150     }
1151 
1152     /// Temporarily unlocks the `RwLock` to execute the given function.
1153     ///
1154     /// This is safe because `&mut` guarantees that there exist no other
1155     /// references to the data protected by the `RwLock`.
1156     #[inline]
unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1157     pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1158     where
1159         F: FnOnce() -> U,
1160     {
1161         s.rwlock.raw.unlock_upgradable();
1162         defer!(s.rwlock.raw.lock_upgradable());
1163         f()
1164     }
1165 
1166     /// Atomically upgrades an upgradable read lock lock into a exclusive write lock,
1167     /// blocking the current thread until it can be acquired.
upgrade(s: Self) -> RwLockWriteGuard<'a, R, T>1168     pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
1169         s.rwlock.raw.upgrade();
1170         let rwlock = s.rwlock;
1171         mem::forget(s);
1172         RwLockWriteGuard {
1173             rwlock,
1174             marker: PhantomData,
1175         }
1176     }
1177 
1178     /// Tries to atomically upgrade an upgradable read lock into a exclusive write lock.
1179     ///
1180     /// If the access could not be granted at this time, then the current guard is returned.
try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self>1181     pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
1182         if s.rwlock.raw.try_upgrade() {
1183             let rwlock = s.rwlock;
1184             mem::forget(s);
1185             Ok(RwLockWriteGuard {
1186                 rwlock,
1187                 marker: PhantomData,
1188             })
1189         } else {
1190             Err(s)
1191         }
1192     }
1193 }
1194 
1195 impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1196     /// Unlocks the `RwLock` using a fair unlock protocol.
1197     ///
1198     /// By default, `RwLock` is unfair and allow the current thread to re-lock
1199     /// the `RwLock` before another has the chance to acquire the lock, even if
1200     /// that thread has been blocked on the `RwLock` for a long time. This is
1201     /// the default because it allows much higher throughput as it avoids
1202     /// forcing a context switch on every `RwLock` unlock. This can result in one
1203     /// thread acquiring a `RwLock` many more times than other threads.
1204     ///
1205     /// However in some cases it can be beneficial to ensure fairness by forcing
1206     /// the lock to pass on to a waiting thread if there is one. This is done by
1207     /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
1208     #[inline]
unlock_fair(s: Self)1209     pub fn unlock_fair(s: Self) {
1210         s.rwlock.raw.unlock_upgradable_fair();
1211         mem::forget(s);
1212     }
1213 
1214     /// Temporarily unlocks the `RwLock` to execute the given function.
1215     ///
1216     /// The `RwLock` is unlocked a fair unlock protocol.
1217     ///
1218     /// This is safe because `&mut` guarantees that there exist no other
1219     /// references to the data protected by the `RwLock`.
1220     #[inline]
unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1221     pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1222     where
1223         F: FnOnce() -> U,
1224     {
1225         s.rwlock.raw.unlock_upgradable_fair();
1226         defer!(s.rwlock.raw.lock_upgradable());
1227         f()
1228     }
1229 
1230     /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1231     ///
1232     /// This method is functionally equivalent to calling `unlock_fair` followed
1233     /// by `upgradable_read`, however it can be much more efficient in the case where there
1234     /// are no waiting threads.
1235     #[inline]
bump(s: &mut Self)1236     pub fn bump(s: &mut Self) {
1237         s.rwlock.raw.bump_upgradable();
1238     }
1239 }
1240 
1241 impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1242     /// Atomically downgrades an upgradable read lock lock into a shared read lock
1243     /// without allowing any writers to take exclusive access of the lock in the
1244     /// meantime.
1245     ///
1246     /// Note that if there are any writers currently waiting to take the lock
1247     /// then other readers may not be able to acquire the lock even if it was
1248     /// downgraded.
downgrade(s: Self) -> RwLockReadGuard<'a, R, T>1249     pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
1250         s.rwlock.raw.downgrade_upgradable();
1251         let rwlock = s.rwlock;
1252         mem::forget(s);
1253         RwLockReadGuard {
1254             rwlock,
1255             marker: PhantomData,
1256         }
1257     }
1258 }
1259 
1260 impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1261     /// Tries to atomically upgrade an upgradable read lock into a exclusive
1262     /// write lock, until a timeout is reached.
1263     ///
1264     /// If the access could not be granted before the timeout expires, then
1265     /// the current guard is returned.
try_upgrade_for( s: Self, timeout: R::Duration, ) -> Result<RwLockWriteGuard<'a, R, T>, Self>1266     pub fn try_upgrade_for(
1267         s: Self,
1268         timeout: R::Duration,
1269     ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
1270         if s.rwlock.raw.try_upgrade_for(timeout) {
1271             let rwlock = s.rwlock;
1272             mem::forget(s);
1273             Ok(RwLockWriteGuard {
1274                 rwlock,
1275                 marker: PhantomData,
1276             })
1277         } else {
1278             Err(s)
1279         }
1280     }
1281 
1282     /// Tries to atomically upgrade an upgradable read lock into a exclusive
1283     /// write lock, until a timeout is reached.
1284     ///
1285     /// If the access could not be granted before the timeout expires, then
1286     /// the current guard is returned.
    #[inline]
    pub fn try_upgrade_until(
        s: Self,
        timeout: R::Instant,
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        if s.rwlock.raw.try_upgrade_until(timeout) {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_upgradable();
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for RwLockUpgradableReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockUpgradableReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
    for RwLockUpgradableReadGuard<'a, R, T>
{
}

/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
    raw: &'a R,
    data: *const T,
    marker: PhantomData<&'a T>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
    R::GuardMarker: Send
{
}
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
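    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a concrete `RwLock` built on these raw
    /// traits (such as the one provided by the `parking_lot` crate):
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockReadGuard, MappedRwLockReadGuard};
    ///
    /// struct Inner { value: i32 }
    /// struct Outer { inner: Inner }
    ///
    /// let lock = RwLock::new(Outer { inner: Inner { value: 7 } });
    /// let inner = RwLockReadGuard::map(lock.read(), |o| &o.inner);
    /// // Narrow the already-mapped guard down to a single field.
    /// let value = MappedRwLockReadGuard::map(inner, |i| &i.value);
    /// assert_eq!(*value, 7);
    /// ```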
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        let data = f(unsafe { &*s.data });
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation requires no additional locking, as the `MappedRwLockReadGuard`
    /// passed in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
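    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a concrete `RwLock` built on these raw
    /// traits (such as the one provided by the `parking_lot` crate):
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockReadGuard, MappedRwLockReadGuard};
    ///
    /// let lock = RwLock::new(vec![1, 2, 3]);
    /// let guard = RwLockReadGuard::map(lock.read(), |v| &v[..]);
    /// // Succeeds only if the slice has a first element.
    /// match MappedRwLockReadGuard::try_map(guard, |s| s.first()) {
    ///     Ok(first) => assert_eq!(*first, 1),
    ///     Err(guard) => assert!(guard.is_empty()),
    /// }
    /// ```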
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
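    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a concrete `RwLock` built on these raw
    /// traits (such as the one provided by the `parking_lot` crate):
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockReadGuard, MappedRwLockReadGuard};
    ///
    /// let lock = RwLock::new((1, 2));
    /// let guard = RwLockReadGuard::map(lock.read(), |t| &t.0);
    /// // Release the read lock, handing it to a waiting thread if there is one.
    /// MappedRwLockReadGuard::unlock_fair(guard);
    /// ```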
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.raw.unlock_shared_fair();
        mem::forget(s);
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.raw.unlock_shared();
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedRwLockReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedRwLockReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockReadGuard<'a, R, T>
{
}

/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
    raw: &'a R,
    data: *mut T,
    marker: PhantomData<&'a mut T>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedRwLockWriteGuard<'a, R, T>
{
}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
    R::GuardMarker: Send
{
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
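    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a concrete `RwLock` built on these raw
    /// traits (such as the one provided by the `parking_lot` crate):
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockWriteGuard, MappedRwLockWriteGuard};
    ///
    /// struct Inner { count: u32 }
    /// struct Outer { inner: Inner }
    ///
    /// let lock = RwLock::new(Outer { inner: Inner { count: 0 } });
    /// let inner = RwLockWriteGuard::map(lock.write(), |o| &mut o.inner);
    /// // Narrow the already-mapped guard down to a single field.
    /// let mut count = MappedRwLockWriteGuard::map(inner, |i| &mut i.count);
    /// *count += 1;
    /// ```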
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = s.raw;
        let data = f(unsafe { &mut *s.data });
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation requires no additional locking, as the `MappedRwLockWriteGuard`
    /// passed in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
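    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a concrete `RwLock` built on these raw
    /// traits (such as the one provided by the `parking_lot` crate):
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockWriteGuard, MappedRwLockWriteGuard};
    ///
    /// let lock = RwLock::new(vec![10, 20]);
    /// let guard = RwLockWriteGuard::map(lock.write(), |v| &mut v[..]);
    /// // Succeeds only if the slice has a first element.
    /// if let Ok(mut first) = MappedRwLockWriteGuard::try_map(guard, |s| s.first_mut()) {
    ///     *first = 11;
    /// }
    /// ```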
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &mut *s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}

impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    #[deprecated(
        since = "0.3.3",
        note = "This function is unsound and will be removed in the future, see issue #198"
    )]
    pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, R, T> {
        s.raw.downgrade();
        let raw = s.raw;
        let data = s.data;
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
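    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a concrete `RwLock` built on these raw
    /// traits (such as the one provided by the `parking_lot` crate):
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockWriteGuard, MappedRwLockWriteGuard};
    ///
    /// let lock = RwLock::new((0, 0));
    /// let mut guard = RwLockWriteGuard::map(lock.write(), |t| &mut t.0);
    /// *guard = 42;
    /// // Release the write lock, handing it to a waiting thread if there is one.
    /// MappedRwLockWriteGuard::unlock_fair(guard);
    /// ```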
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.raw.unlock_exclusive_fair();
        mem::forget(s);
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.raw.unlock_exclusive();
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedRwLockWriteGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedRwLockWriteGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockWriteGuard<'a, R, T>
{
}