// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};

#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;

#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// Basic operations for a reader-writer lock.
///
/// Types implementing this trait can be used by `RwLock` to form a safe and
/// fully-functioning `RwLock` type.
///
/// # Safety
///
/// Implementations of this trait must ensure that the `RwLock` is actually
/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
/// lock exists, and a shared lock can't be acquired while an exclusive lock
/// exists.
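///
/// # Example
///
/// A minimal sketch of an implementation backed by a single atomic counter.
/// This is illustrative only: it spins instead of parking blocked threads and
/// ignores reader-count overflow, both of which a production implementation
/// would want to handle.
///
/// ```ignore
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use lock_api::{GuardSend, RawRwLock};
///
/// // usize::max_value() means write-locked; any other value is the reader count.
/// pub struct RawSpinRwLock(AtomicUsize);
///
/// unsafe impl RawRwLock for RawSpinRwLock {
///     const INIT: Self = RawSpinRwLock(AtomicUsize::new(0));
///     type GuardMarker = GuardSend;
///
///     fn lock_shared(&self) {
///         while !self.try_lock_shared() {}
///     }
///     fn try_lock_shared(&self) -> bool {
///         let readers = self.0.load(Ordering::Relaxed);
///         readers != usize::max_value()
///             && self
///                 .0
///                 .compare_exchange(readers, readers + 1, Ordering::Acquire, Ordering::Relaxed)
///                 .is_ok()
///     }
///     fn unlock_shared(&self) {
///         self.0.fetch_sub(1, Ordering::Release);
///     }
///     fn lock_exclusive(&self) {
///         while !self.try_lock_exclusive() {}
///     }
///     fn try_lock_exclusive(&self) -> bool {
///         self.0
///             .compare_exchange(0, usize::max_value(), Ordering::Acquire, Ordering::Relaxed)
///             .is_ok()
///     }
///     fn unlock_exclusive(&self) {
///         self.0.store(0, Ordering::Release);
///     }
/// }
/// ```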
pub unsafe trait RawRwLock {
    /// Initial value for an unlocked `RwLock`.
    const INIT: Self;

    /// Marker type which determines whether a lock guard should be `Send`. Use
    /// one of the `GuardSend` or `GuardNoSend` helper types here.
    type GuardMarker;

    /// Acquires a shared lock, blocking the current thread until it is able to do so.
    fn lock_shared(&self);

    /// Attempts to acquire a shared lock without blocking.
    fn try_lock_shared(&self) -> bool;

    /// Releases a shared lock.
    fn unlock_shared(&self);

    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
    fn lock_exclusive(&self);

    /// Attempts to acquire an exclusive lock without blocking.
    fn try_lock_exclusive(&self) -> bool;

    /// Releases an exclusive lock.
    fn unlock_exclusive(&self);
}

/// Additional methods for RwLocks which support fair unlocking.
///
/// Fair unlocking means that a lock is handed directly over to the next waiting
/// thread if there is one, without giving other threads the opportunity to
/// "steal" the lock in the meantime. This is typically slower than unfair
/// unlocking, but may be necessary in certain circumstances.
pub unsafe trait RawRwLockFair: RawRwLock {
    /// Releases a shared lock using a fair unlock protocol.
    fn unlock_shared_fair(&self);

    /// Releases an exclusive lock using a fair unlock protocol.
    fn unlock_exclusive_fair(&self);

    /// Temporarily yields a shared lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_shared_fair` followed
    /// by `lock_shared`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    fn bump_shared(&self) {
        self.unlock_shared_fair();
        self.lock_shared();
    }

    /// Temporarily yields an exclusive lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
    /// by `lock_exclusive`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    fn bump_exclusive(&self) {
        self.unlock_exclusive_fair();
        self.lock_exclusive();
    }
}

/// Additional methods for RwLocks which support atomically downgrading an
/// exclusive lock to a shared lock.
pub unsafe trait RawRwLockDowngrade: RawRwLock {
    /// Atomically downgrades an exclusive lock into a shared lock without
    /// allowing any thread to take an exclusive lock in the meantime.
    fn downgrade(&self);
}

/// Additional methods for RwLocks which support locking with timeouts.
///
/// The `Duration` and `Instant` types are specified as associated types so that
/// this trait is usable even in `no_std` environments.
pub unsafe trait RawRwLockTimed: RawRwLock {
    /// Duration type used for `try_lock_for`.
    type Duration;

    /// Instant type used for `try_lock_until`.
    type Instant;

    /// Attempts to acquire a shared lock until a timeout is reached.
    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire a shared lock until a timeout is reached.
    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;

    /// Attempts to acquire an exclusive lock until a timeout is reached.
    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire an exclusive lock until a timeout is reached.
    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
}

/// Additional methods for RwLocks which support recursive read locks.
///
/// These are guaranteed to succeed without blocking if
/// another read lock is held at the time of the call. This allows a thread
/// to recursively lock a `RwLock`. However, using these methods can cause
/// writers to starve since readers no longer block if a writer is waiting
/// for the lock.
pub unsafe trait RawRwLockRecursive: RawRwLock {
    /// Acquires a shared lock without deadlocking in case of a recursive lock.
    fn lock_shared_recursive(&self);

    /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock.
    fn try_lock_shared_recursive(&self) -> bool;
}

/// Additional methods for RwLocks which support recursive read locks and timeouts.
pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
    /// Attempts to acquire a shared lock until a timeout is reached, without
    /// deadlocking in case of a recursive lock.
    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire a shared lock until a timeout is reached, without
    /// deadlocking in case of a recursive lock.
    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
}

/// Additional methods for RwLocks which support atomically upgrading a shared
/// lock to an exclusive lock.
///
/// This requires acquiring a special "upgradable read lock" instead of a
/// normal shared lock. There may only be one upgradable lock at any time,
/// otherwise deadlocks could occur when upgrading.
pub unsafe trait RawRwLockUpgrade: RawRwLock {
    /// Acquires an upgradable lock, blocking the current thread until it is able to do so.
    fn lock_upgradable(&self);

    /// Attempts to acquire an upgradable lock without blocking.
    fn try_lock_upgradable(&self) -> bool;

    /// Releases an upgradable lock.
    fn unlock_upgradable(&self);

    /// Upgrades an upgradable lock to an exclusive lock.
    fn upgrade(&self);

    /// Attempts to upgrade an upgradable lock to an exclusive lock without
    /// blocking.
    fn try_upgrade(&self) -> bool;
}

/// Additional methods for RwLocks which support upgradable locks and fair
/// unlocking.
pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
    /// Releases an upgradable lock using a fair unlock protocol.
    fn unlock_upgradable_fair(&self);

    /// Temporarily yields an upgradable lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
    /// by `lock_upgradable`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    fn bump_upgradable(&self) {
        self.unlock_upgradable_fair();
        self.lock_upgradable();
    }
}

/// Additional methods for RwLocks which support upgradable locks and lock
/// downgrading.
pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
    /// Downgrades an upgradable lock to a shared lock.
    fn downgrade_upgradable(&self);

    /// Downgrades an exclusive lock to an upgradable lock.
    fn downgrade_to_upgradable(&self);
}

/// Additional methods for RwLocks which support upgradable locks and locking
/// with timeouts.
pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
    /// Attempts to acquire an upgradable lock until a timeout is reached.
    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire an upgradable lock until a timeout is reached.
    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;

    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
    /// timeout is reached.
    fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
    /// timeout is reached.
    fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
}

/// A reader-writer lock.
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
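///
/// # Example
///
/// A sketch of typical usage. This assumes a concrete `RawRwLock`
/// implementation is in scope; the `parking_lot` crate provides one through
/// its `parking_lot::RwLock` type alias:
///
/// ```ignore
/// let lock = parking_lot::RwLock::new(5);
///
/// // Any number of readers can hold the lock at the same time.
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1 + *r2, 10);
/// } // both read guards are dropped (and the lock released) here
///
/// // Only one writer can hold the lock, and only with no readers present.
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// }
/// ```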
pub struct RwLock<R: RawRwLock, T: ?Sized> {
    raw: R,
    data: UnsafeCell<T>,
}

// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, T> Serialize for RwLock<R, T>
where
    R: RawRwLock,
    T: Serialize + ?Sized,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.read().serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
where
    R: RawRwLock,
    T: Deserialize<'de> + ?Sized,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(RwLock::new)
    }
}

unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}

impl<R: RawRwLock, T> RwLock<R, T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(feature = "nightly")]
    #[inline]
    pub const fn new(val: T) -> RwLock<R, T> {
        RwLock { data: UnsafeCell::new(val), raw: R::INIT }
    }

    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(not(feature = "nightly"))]
    #[inline]
    pub fn new(val: T) -> RwLock<R, T> {
        RwLock { data: UnsafeCell::new(val), raw: R::INIT }
    }

    /// Consumes this `RwLock`, returning the underlying data.
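    ///
    /// # Example
    ///
    /// A brief sketch, assuming a concrete lock type such as
    /// `parking_lot::RwLock`:
    ///
    /// ```ignore
    /// let lock = parking_lot::RwLock::new(String::from("hello"));
    /// assert_eq!(lock.into_inner(), "hello");
    /// ```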
    #[inline]
    #[allow(unused_unsafe)]
    pub fn into_inner(self) -> T {
        unsafe { self.data.into_inner() }
    }
}

impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn read_guard(&self) -> RwLockReadGuard<'_, R, T> {
        RwLockReadGuard { rwlock: self, marker: PhantomData }
    }

    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn write_guard(&self) -> RwLockWriteGuard<'_, R, T> {
        RwLockWriteGuard { rwlock: self, marker: PhantomData }
    }

    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
    /// the current thread already holds one may result in a deadlock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<'_, R, T> {
        self.raw.lock_shared();
        // SAFETY: The lock is held, as required.
        unsafe { self.read_guard() }
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
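    ///
    /// # Example
    ///
    /// A brief sketch, assuming a concrete lock type such as
    /// `parking_lot::RwLock`:
    ///
    /// ```ignore
    /// let lock = parking_lot::RwLock::new(1);
    ///
    /// let reader = lock.read();
    /// // Additional shared locks can be taken while one is held...
    /// assert!(lock.try_read().is_some());
    /// // ...but exclusive access is refused without blocking.
    /// assert!(lock.try_write().is_none());
    /// ```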
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Locks this `RwLock` with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock`
    /// when dropped.
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<'_, R, T> {
        self.raw.lock_exclusive();
        // SAFETY: The lock is held, as required.
        unsafe { self.write_guard() }
    }

    /// Attempts to lock this `RwLock` with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> {
        if self.raw.try_lock_exclusive() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.write_guard() })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
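    ///
    /// # Example
    ///
    /// A brief sketch, assuming a concrete lock type such as
    /// `parking_lot::RwLock`:
    ///
    /// ```ignore
    /// let mut lock = parking_lot::RwLock::new(0);
    /// // No guard is needed: the `&mut self` borrow proves exclusive access.
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.read(), 10);
    /// ```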
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data.get() }
    }

    /// Forcibly unlocks a read lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
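    ///
    /// # Example
    ///
    /// A sketch of holding the lock with no guard object alive, assuming a
    /// concrete lock type such as `parking_lot::RwLock`:
    ///
    /// ```ignore
    /// use core::mem;
    ///
    /// let lock = parking_lot::RwLock::new(0);
    ///
    /// // Acquire a read lock and discard the guard, leaving the lock held.
    /// mem::forget(lock.read());
    ///
    /// // ... hand the lock across an FFI boundary, for example ...
    ///
    /// // SAFETY: this thread logically owns a read lock whose guard was
    /// // forgotten above.
    /// unsafe { lock.force_unlock_read() };
    /// ```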
    #[inline]
    pub unsafe fn force_unlock_read(&self) {
        self.raw.unlock_shared();
    }

    /// Forcibly unlocks a write lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
    #[inline]
    pub unsafe fn force_unlock_write(&self) {
        self.raw.unlock_exclusive();
    }

    /// Returns the underlying raw reader-writer lock object.
    ///
    /// Note that you will most likely need to import the `RawRwLock` trait from
    /// `lock_api` to be able to call functions on the raw
    /// reader-writer lock.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking the lock while
    /// still holding a reference to a lock guard.
    pub unsafe fn raw(&self) -> &R {
        &self.raw
    }
}

impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
    /// Forcibly unlocks a read lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
    #[inline]
    pub unsafe fn force_unlock_read_fair(&self) {
        self.raw.unlock_shared_fair();
    }

    /// Forcibly unlocks a write lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
    #[inline]
    pub unsafe fn force_unlock_write_fair(&self) {
        self.raw.unlock_exclusive_fair();
    }
}

impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
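    ///
    /// # Example
    ///
    /// A brief sketch, assuming a concrete lock type such as
    /// `parking_lot::RwLock`, whose raw lock uses `std::time::Duration`:
    ///
    /// ```ignore
    /// use std::time::Duration;
    ///
    /// let lock = parking_lot::RwLock::new(0);
    /// match lock.try_read_for(Duration::from_millis(10)) {
    ///     Some(guard) => assert_eq!(*guard, 0),
    ///     None => println!("timed out waiting for a read lock"),
    /// }
    /// ```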
    #[inline]
    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with exclusive write access until a
    /// timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the exclusive access when it is dropped.
    #[inline]
    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> {
        if self.raw.try_lock_exclusive_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.write_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with exclusive write access until a
    /// timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the exclusive access when it is dropped.
    #[inline]
    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> {
        if self.raw.try_lock_exclusive_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.write_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Unlike `read`, this method is guaranteed to succeed without blocking if
    /// another read lock is held at the time of the call. This allows a thread
    /// to recursively lock a `RwLock`. However using this method can cause
    /// writers to starve since readers no longer block if a writer is waiting
    /// for the lock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    #[inline]
    pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> {
        self.raw.lock_shared_recursive();
        // SAFETY: The lock is held, as required.
        unsafe { self.read_guard() }
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This method is guaranteed to succeed if another read lock is held at the
    /// time of the call. See the documentation for `read_recursive` for details.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_recursive() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    ///
    /// This method is guaranteed to succeed without blocking if another read
    /// lock is held at the time of the call. See the documentation for
    /// `read_recursive` for details.
    #[inline]
    pub fn try_read_recursive_for(
        &self,
        timeout: R::Duration,
    ) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_recursive_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_read_recursive_until(
        &self,
        timeout: R::Instant,
    ) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_recursive_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
        RwLockUpgradableReadGuard { rwlock: self, marker: PhantomData }
    }

    /// Locks this `RwLock` with upgradable read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers or other
    /// upgradable reads which hold the lock. There may be other readers currently
    /// inside the lock when this method returns.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
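    ///
    /// # Example
    ///
    /// A sketch of the check-then-upgrade pattern, assuming a concrete lock
    /// type such as `parking_lot::RwLock`:
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
    ///
    /// let lock = RwLock::new(Vec::new());
    ///
    /// let upgradable = lock.upgradable_read();
    /// if upgradable.is_empty() {
    ///     // Upgrade to exclusive access only once a write is actually needed.
    ///     let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
    ///     writer.push(1);
    /// }
    /// ```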
    #[inline]
    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
        self.raw.lock_upgradable();
        // SAFETY: The lock is held, as required.
        unsafe { self.upgradable_guard() }
    }

    /// Attempts to acquire this `RwLock` with upgradable read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
        if self.raw.try_lock_upgradable() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.upgradable_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_upgradable_read_for(
        &self,
        timeout: R::Duration,
    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
        if self.raw.try_lock_upgradable_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.upgradable_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_upgradable_read_until(
        &self,
        timeout: R::Instant,
    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
        if self.raw.try_lock_upgradable_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.upgradable_guard() })
        } else {
            None
        }
    }
}

impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> {
    #[inline]
    fn default() -> RwLock<R, T> {
        RwLock::new(Default::default())
    }
}

impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
    #[inline]
    fn from(t: T) -> RwLock<R, T> {
        RwLock::new(t)
    }
}

impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.try_read() {
            Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
            None => {
                struct LockedPlaceholder;
                impl fmt::Debug for LockedPlaceholder {
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                        f.write_str("<locked>")
                    }
                }

                f.debug_struct("RwLock").field("data", &LockedPlaceholder).finish()
            }
        }
    }
}

/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
    rwlock: &'a RwLock<R, T>,
    marker: PhantomData<(&'a T, R::GuardMarker)>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockReadGuard<'a, R, T> {}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
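    ///
    /// # Example
    ///
    /// A brief sketch, assuming a concrete lock type such as
    /// `parking_lot::RwLock`:
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockReadGuard};
    ///
    /// struct Point { x: u32, y: u32 }
    ///
    /// let lock = RwLock::new(Point { x: 1, y: 2 });
    ///
    /// // Narrow the guard down to a single field of the protected data.
    /// let x = RwLockReadGuard::map(lock.read(), |point| &point.x);
    /// assert_eq!(*x, 1);
    /// ```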
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &*s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockReadGuard { raw, data, marker: PhantomData }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &*s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockReadGuard { raw, data, marker: PhantomData })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_shared();
        defer!(s.rwlock.raw.lock_shared());
        f()
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockReadGuard` normally.
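    ///
    /// # Example
    ///
    /// A brief sketch, assuming a concrete lock type such as
    /// `parking_lot::RwLock`:
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockReadGuard};
    ///
    /// let lock = RwLock::new(0);
    ///
    /// let guard = lock.read();
    /// // Hand the lock directly to a waiting thread instead of racing for it.
    /// RwLockReadGuard::unlock_fair(guard);
    /// ```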
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.rwlock.raw.unlock_shared_fair();
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_shared_fair();
        defer!(s.rwlock.raw.lock_shared());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `read`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        s.rwlock.raw.bump_shared();
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_shared();
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
    rwlock: &'a RwLock<R, T>,
    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockWriteGuard<'a, R, T> {}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &mut *s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockWriteGuard { raw, data, marker: PhantomData }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard { raw, data, marker: PhantomData })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_exclusive();
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }
}

impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
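    ///
    /// # Example
    ///
    /// A brief sketch, assuming a concrete lock type such as
    /// `parking_lot::RwLock`:
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockWriteGuard};
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let mut writer = lock.write();
    /// *writer += 1;
    /// // Keep read access without letting another writer in between.
    /// let reader = RwLockWriteGuard::downgrade(writer);
    /// assert_eq!(*reader, 2);
    /// ```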
downgrade(s: Self) -> RwLockReadGuard<'a, R, T>968     pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
969         s.rwlock.raw.downgrade();
970         let rwlock = s.rwlock;
971         mem::forget(s);
972         RwLockReadGuard { rwlock, marker: PhantomData }
973     }
974 }
975 
976 impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
977     /// Atomically downgrades a write lock into an upgradable read lock without allowing any
978     /// writers to take exclusive access of the lock in the meantime.
979     ///
980     /// Note that if there are any writers currently waiting to take the lock
981     /// then other readers may not be able to acquire the lock even if it was
982     /// downgraded.
downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T>983     pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
984         s.rwlock.raw.downgrade_to_upgradable();
985         let rwlock = s.rwlock;
986         mem::forget(s);
987         RwLockUpgradableReadGuard { rwlock, marker: PhantomData }
988     }
989 }
990 
991 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
992     /// Unlocks the `RwLock` using a fair unlock protocol.
993     ///
994     /// By default, `RwLock` is unfair and allow the current thread to re-lock
995     /// the `RwLock` before another has the chance to acquire the lock, even if
996     /// that thread has been blocked on the `RwLock` for a long time. This is
997     /// the default because it allows much higher throughput as it avoids
998     /// forcing a context switch on every `RwLock` unlock. This can result in one
999     /// thread acquiring a `RwLock` many more times than other threads.
1000     ///
1001     /// However in some cases it can be beneficial to ensure fairness by forcing
1002     /// the lock to pass on to a waiting thread if there is one. This is done by
1003     /// using this method instead of dropping the `RwLockWriteGuard` normally.
1004     #[inline]
unlock_fair(s: Self)1005     pub fn unlock_fair(s: Self) {
1006         s.rwlock.raw.unlock_exclusive_fair();
1007         mem::forget(s);
1008     }
1009 
1010     /// Temporarily unlocks the `RwLock` to execute the given function.
1011     ///
1012     /// The `RwLock` is unlocked a fair unlock protocol.
1013     ///
1014     /// This is safe because `&mut` guarantees that there exist no other
1015     /// references to the data protected by the `RwLock`.
1016     #[inline]
unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1017     pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1018     where
1019         F: FnOnce() -> U,
1020     {
1021         s.rwlock.raw.unlock_exclusive_fair();
1022         defer!(s.rwlock.raw.lock_exclusive());
1023         f()
1024     }
1025 
1026     /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1027     ///
1028     /// This method is functionally equivalent to calling `unlock_fair` followed
1029     /// by `write`, however it can be much more efficient in the case where there
1030     /// are no waiting threads.
1031     #[inline]
bump(s: &mut Self)1032     pub fn bump(s: &mut Self) {
1033         s.rwlock.raw.bump_exclusive();
1034     }
1035 }
1036 
1037 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
1038     type Target = T;
1039     #[inline]
deref(&self) -> &T1040     fn deref(&self) -> &T {
1041         unsafe { &*self.rwlock.data.get() }
1042     }
1043 }
1044 
1045 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
1046     #[inline]
deref_mut(&mut self) -> &mut T1047     fn deref_mut(&mut self) -> &mut T {
1048         unsafe { &mut *self.rwlock.data.get() }
1049     }
1050 }
1051 
1052 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
1053     #[inline]
drop(&mut self)1054     fn drop(&mut self) {
1055         self.rwlock.raw.unlock_exclusive();
1056     }
1057 }
1058 
1059 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1060     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1061         fmt::Debug::fmt(&**self, f)
1062     }
1063 }
1064 
1065 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1066     for RwLockWriteGuard<'a, R, T>
1067 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1068     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1069         (**self).fmt(f)
1070     }
1071 }
1072 
1073 #[cfg(feature = "owning_ref")]
1074 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
1075 
1076 /// RAII structure used to release the upgradable read access of a lock when
1077 /// dropped.
1078 #[must_use = "if unused the RwLock will immediately unlock"]
1079 pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> {
1080     rwlock: &'a RwLock<R, T>,
1081     marker: PhantomData<(&'a T, R::GuardMarker)>,
1082 }
1083 
1084 unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
1085     for RwLockUpgradableReadGuard<'a, R, T>
1086 {
1087 }
1088 
1089 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1090     /// Returns a reference to the original reader-writer lock object.
rwlock(s: &Self) -> &'a RwLock<R, T>1091     pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
1092         s.rwlock
1093     }
1094 
1095     /// Temporarily unlocks the `RwLock` to execute the given function.
1096     ///
1097     /// This is safe because `&mut` guarantees that there exist no other
1098     /// references to the data protected by the `RwLock`.
1099     #[inline]
unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1100     pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1101     where
1102         F: FnOnce() -> U,
1103     {
1104         s.rwlock.raw.unlock_upgradable();
1105         defer!(s.rwlock.raw.lock_upgradable());
1106         f()
1107     }
1108 
1109     /// Atomically upgrades an upgradable read lock lock into a exclusive write lock,
1110     /// blocking the current thread until it can be acquired.
upgrade(s: Self) -> RwLockWriteGuard<'a, R, T>1111     pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
1112         s.rwlock.raw.upgrade();
1113         let rwlock = s.rwlock;
1114         mem::forget(s);
1115         RwLockWriteGuard { rwlock, marker: PhantomData }
1116     }
1117 
1118     /// Tries to atomically upgrade an upgradable read lock into a exclusive write lock.
1119     ///
1120     /// If the access could not be granted at this time, then the current guard is returned.
try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self>1121     pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
1122         if s.rwlock.raw.try_upgrade() {
1123             let rwlock = s.rwlock;
1124             mem::forget(s);
1125             Ok(RwLockWriteGuard { rwlock, marker: PhantomData })
1126         } else {
1127             Err(s)
1128         }
1129     }
1130 }
1131 
1132 impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1133     /// Unlocks the `RwLock` using a fair unlock protocol.
1134     ///
1135     /// By default, `RwLock` is unfair and allow the current thread to re-lock
1136     /// the `RwLock` before another has the chance to acquire the lock, even if
1137     /// that thread has been blocked on the `RwLock` for a long time. This is
1138     /// the default because it allows much higher throughput as it avoids
1139     /// forcing a context switch on every `RwLock` unlock. This can result in one
1140     /// thread acquiring a `RwLock` many more times than other threads.
1141     ///
1142     /// However in some cases it can be beneficial to ensure fairness by forcing
1143     /// the lock to pass on to a waiting thread if there is one. This is done by
1144     /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
1145     #[inline]
unlock_fair(s: Self)1146     pub fn unlock_fair(s: Self) {
1147         s.rwlock.raw.unlock_upgradable_fair();
1148         mem::forget(s);
1149     }
1150 
1151     /// Temporarily unlocks the `RwLock` to execute the given function.
1152     ///
1153     /// The `RwLock` is unlocked a fair unlock protocol.
1154     ///
1155     /// This is safe because `&mut` guarantees that there exist no other
1156     /// references to the data protected by the `RwLock`.
1157     #[inline]
unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1158     pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1159     where
1160         F: FnOnce() -> U,
1161     {
1162         s.rwlock.raw.unlock_upgradable_fair();
1163         defer!(s.rwlock.raw.lock_upgradable());
1164         f()
1165     }
1166 
1167     /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1168     ///
1169     /// This method is functionally equivalent to calling `unlock_fair` followed
1170     /// by `upgradable_read`, however it can be much more efficient in the case where there
1171     /// are no waiting threads.
1172     #[inline]
bump(s: &mut Self)1173     pub fn bump(s: &mut Self) {
1174         s.rwlock.raw.bump_upgradable();
1175     }
1176 }
1177 
1178 impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1179     /// Atomically downgrades an upgradable read lock lock into a shared read lock
1180     /// without allowing any writers to take exclusive access of the lock in the
1181     /// meantime.
1182     ///
1183     /// Note that if there are any writers currently waiting to take the lock
1184     /// then other readers may not be able to acquire the lock even if it was
1185     /// downgraded.
downgrade(s: Self) -> RwLockReadGuard<'a, R, T>1186     pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
1187         s.rwlock.raw.downgrade_upgradable();
1188         let rwlock = s.rwlock;
1189         mem::forget(s);
1190         RwLockReadGuard { rwlock, marker: PhantomData }
1191     }
1192 }
1193 
1194 impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1195     /// Tries to atomically upgrade an upgradable read lock into a exclusive
1196     /// write lock, until a timeout is reached.
1197     ///
1198     /// If the access could not be granted before the timeout expires, then
1199     /// the current guard is returned.
try_upgrade_for( s: Self, timeout: R::Duration, ) -> Result<RwLockWriteGuard<'a, R, T>, Self>1200     pub fn try_upgrade_for(
1201         s: Self,
1202         timeout: R::Duration,
1203     ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
1204         if s.rwlock.raw.try_upgrade_for(timeout) {
1205             let rwlock = s.rwlock;
1206             mem::forget(s);
1207             Ok(RwLockWriteGuard { rwlock, marker: PhantomData })
1208         } else {
1209             Err(s)
1210         }
1211     }
1212 
1213     /// Tries to atomically upgrade an upgradable read lock into a exclusive
1214     /// write lock, until a timeout is reached.
1215     ///
1216     /// If the access could not be granted before the timeout expires, then
1217     /// the current guard is returned.
    #[inline]
    pub fn try_upgrade_until(
        s: Self,
        timeout: R::Instant,
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        if s.rwlock.raw.try_upgrade_until(timeout) {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard { rwlock, marker: PhantomData })
        } else {
            Err(s)
        }
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_upgradable();
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for RwLockUpgradableReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockUpgradableReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
    for RwLockUpgradableReadGuard<'a, R, T>
{
}

/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
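///
/// # Example
///
/// A sketch of how such a guard is obtained, assuming the `parking_lot`
/// implementation; not compiled as a doctest here.
///
/// ```ignore
/// use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
///
/// let lock = RwLock::new((1, String::from("two")));
/// // Narrow the read guard down to one field of the tuple.
/// let name: MappedRwLockReadGuard<String> = RwLockReadGuard::map(lock.read(), |t| &t.1);
/// assert_eq!(&*name, "two");
/// ```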
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
    raw: &'a R,
    data: *const T,
    marker: PhantomData<&'a T>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
    R::GuardMarker: Send
{
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
    /// Makes a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
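    ///
    /// # Example
    ///
    /// A sketch, assuming the `parking_lot` implementation; not compiled as a
    /// doctest here.
    ///
    /// ```ignore
    /// use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
    ///
    /// let lock = RwLock::new(vec![1, 2, 3]);
    /// let slice = RwLockReadGuard::map(lock.read(), |v| &v[..]);
    /// // A mapped guard can be narrowed further with `MappedRwLockReadGuard::map`.
    /// let first = MappedRwLockReadGuard::map(slice, |s| &s[0]);
    /// assert_eq!(*first, 1);
    /// ```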
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        let data = f(unsafe { &*s.data });
        mem::forget(s);
        MappedRwLockReadGuard { raw, data, marker: PhantomData }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
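    ///
    /// # Example
    ///
    /// A sketch, assuming the `parking_lot` implementation; not compiled as a
    /// doctest here.
    ///
    /// ```ignore
    /// use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
    ///
    /// let lock = RwLock::new(vec![1, 2, 3]);
    /// let slice = RwLockReadGuard::map(lock.read(), |v| &v[..]);
    /// // `get` fails for an out-of-range index, so the original guard comes back.
    /// match MappedRwLockReadGuard::try_map(slice, |s| s.get(10)) {
    ///     Ok(elem) => println!("element: {}", *elem),
    ///     Err(slice) => assert_eq!(slice.len(), 3),
    /// }
    /// ```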
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockReadGuard { raw, data, marker: PhantomData })
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another thread has the chance to acquire the lock, even
    /// if that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput, as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring the `RwLock` many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
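    ///
    /// # Example
    ///
    /// A sketch, assuming the `parking_lot` implementation (whose raw lock
    /// supports fair unlocking); not compiled as a doctest here.
    ///
    /// ```ignore
    /// use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
    ///
    /// let lock = RwLock::new((0u32, 1u32));
    /// let field = RwLockReadGuard::map(lock.read(), |t| &t.0);
    /// // Hand the lock to a waiting thread, if any, instead of dropping the
    /// // guard and letting this thread potentially re-acquire it first.
    /// MappedRwLockReadGuard::unlock_fair(field);
    /// ```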
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.raw.unlock_shared_fair();
        mem::forget(s);
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.raw.unlock_shared();
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedRwLockReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedRwLockReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockReadGuard<'a, R, T>
{
}

/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
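///
/// # Example
///
/// A sketch of how such a guard is obtained, assuming the `parking_lot`
/// implementation; not compiled as a doctest here.
///
/// ```ignore
/// use parking_lot::{MappedRwLockWriteGuard, RwLock, RwLockWriteGuard};
///
/// let lock = RwLock::new((1, String::from("two")));
/// // Narrow the write guard down to one field of the tuple.
/// let mut name = RwLockWriteGuard::map(lock.write(), |t| &mut t.1);
/// name.push_str("!");
/// ```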
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
    raw: &'a R,
    data: *mut T,
    marker: PhantomData<&'a mut T>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedRwLockWriteGuard<'a, R, T>
{
}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
    R::GuardMarker: Send
{
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Makes a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
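    ///
    /// # Example
    ///
    /// A sketch, assuming the `parking_lot` implementation; not compiled as a
    /// doctest here.
    ///
    /// ```ignore
    /// use parking_lot::{MappedRwLockWriteGuard, RwLock, RwLockWriteGuard};
    ///
    /// let lock = RwLock::new(vec![1, 2, 3]);
    /// let slice = RwLockWriteGuard::map(lock.write(), |v| &mut v[..]);
    /// // A mapped guard can be narrowed further with `MappedRwLockWriteGuard::map`.
    /// let mut first = MappedRwLockWriteGuard::map(slice, |s| &mut s[0]);
    /// *first = 10;
    /// ```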
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = s.raw;
        let data = f(unsafe { &mut *s.data });
        mem::forget(s);
        MappedRwLockWriteGuard { raw, data, marker: PhantomData }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
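    ///
    /// # Example
    ///
    /// A sketch, assuming the `parking_lot` implementation; not compiled as a
    /// doctest here.
    ///
    /// ```ignore
    /// use parking_lot::{MappedRwLockWriteGuard, RwLock, RwLockWriteGuard};
    ///
    /// let lock = RwLock::new(vec![1, 2, 3]);
    /// let slice = RwLockWriteGuard::map(lock.write(), |v| &mut v[..]);
    /// match MappedRwLockWriteGuard::try_map(slice, |s| s.get_mut(10)) {
    ///     Ok(mut elem) => *elem = 10,
    ///     // Index out of range: the original mapped guard is handed back.
    ///     Err(slice) => assert_eq!(slice.len(), 3),
    /// }
    /// ```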
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &mut *s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard { raw, data, marker: PhantomData })
    }
}

impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
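    ///
    /// # Example
    ///
    /// A sketch, assuming a `parking_lot`-style implementation whose raw lock
    /// supports downgrading; not compiled as a doctest here.
    ///
    /// ```ignore
    /// use parking_lot::{MappedRwLockWriteGuard, RwLock, RwLockWriteGuard};
    ///
    /// let lock = RwLock::new(vec![1, 2, 3]);
    /// let mut first = RwLockWriteGuard::map(lock.write(), |v| &mut v[0]);
    /// *first = 10;
    /// // Keep read access to the element without fully releasing the lock.
    /// let first = MappedRwLockWriteGuard::downgrade(first);
    /// assert_eq!(*first, 10);
    /// ```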
    #[inline]
    pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, R, T> {
        s.raw.downgrade();
        let raw = s.raw;
        let data = s.data;
        mem::forget(s);
        MappedRwLockReadGuard { raw, data, marker: PhantomData }
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another thread has the chance to acquire the lock, even
    /// if that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput, as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring the `RwLock` many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
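    ///
    /// # Example
    ///
    /// A sketch, assuming the `parking_lot` implementation; not compiled as a
    /// doctest here.
    ///
    /// ```ignore
    /// use parking_lot::{MappedRwLockWriteGuard, RwLock, RwLockWriteGuard};
    ///
    /// let lock = RwLock::new((0u32, 1u32));
    /// let mut field = RwLockWriteGuard::map(lock.write(), |t| &mut t.0);
    /// *field += 1;
    /// // Pass the lock directly to a waiting thread, if any.
    /// MappedRwLockWriteGuard::unlock_fair(field);
    /// ```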
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.raw.unlock_exclusive_fair();
        mem::forget(s);
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.raw.unlock_exclusive();
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedRwLockWriteGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedRwLockWriteGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockWriteGuard<'a, R, T>
{
}