// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};

#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;

/// Basic operations for a reader-writer lock.
///
/// Types implementing this trait can be used by `RwLock` to form a safe and
/// fully-functioning `RwLock` type.
///
/// # Safety
///
/// Implementations of this trait must ensure that the `RwLock` is actually
/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
/// lock exists, and a shared lock can't be acquired while an exclusive lock
/// exists.
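///
/// # Example
///
/// A minimal spin-based implementation sketch. The `RawSpinRwLock` name and
/// its counter layout are illustrative only, not part of this crate:
///
/// ```ignore
/// use core::sync::atomic::{AtomicUsize, Ordering};
/// use lock_api::{GuardSend, RawRwLock};
///
/// // Hypothetical raw lock: 0 = unlocked, usize::MAX = write-locked,
/// // any other value = number of active readers.
/// pub struct RawSpinRwLock(AtomicUsize);
///
/// unsafe impl RawRwLock for RawSpinRwLock {
///     const INIT: RawSpinRwLock = RawSpinRwLock(AtomicUsize::new(0));
///     type GuardMarker = GuardSend;
///
///     fn lock_shared(&self) {
///         // Spin until a shared lock is acquired.
///         while !self.try_lock_shared() {}
///     }
///     fn try_lock_shared(&self) -> bool {
///         let n = self.0.load(Ordering::Relaxed);
///         n != usize::max_value()
///             && self
///                 .0
///                 .compare_exchange(n, n + 1, Ordering::Acquire, Ordering::Relaxed)
///                 .is_ok()
///     }
///     fn unlock_shared(&self) {
///         self.0.fetch_sub(1, Ordering::Release);
///     }
///     fn lock_exclusive(&self) {
///         while !self.try_lock_exclusive() {}
///     }
///     fn try_lock_exclusive(&self) -> bool {
///         self.0
///             .compare_exchange(0, usize::max_value(), Ordering::Acquire, Ordering::Relaxed)
///             .is_ok()
///     }
///     fn unlock_exclusive(&self) {
///         self.0.store(0, Ordering::Release);
///     }
/// }
/// ```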
pub unsafe trait RawRwLock {
    /// Initial value for an unlocked `RwLock`.
    const INIT: Self;

    /// Marker type which determines whether a lock guard should be `Send`. Use
    /// one of the `GuardSend` or `GuardNoSend` helper types here.
    type GuardMarker;

    /// Acquires a shared lock, blocking the current thread until it is able to do so.
    fn lock_shared(&self);

    /// Attempts to acquire a shared lock without blocking.
    fn try_lock_shared(&self) -> bool;

    /// Releases a shared lock.
    fn unlock_shared(&self);

    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
    fn lock_exclusive(&self);

    /// Attempts to acquire an exclusive lock without blocking.
    fn try_lock_exclusive(&self) -> bool;

    /// Releases an exclusive lock.
    fn unlock_exclusive(&self);
}

/// Additional methods for RwLocks which support fair unlocking.
///
/// Fair unlocking means that a lock is handed directly over to the next waiting
/// thread if there is one, without giving other threads the opportunity to
/// "steal" the lock in the meantime. This is typically slower than unfair
/// unlocking, but may be necessary in certain circumstances.
pub unsafe trait RawRwLockFair: RawRwLock {
    /// Releases a shared lock using a fair unlock protocol.
    fn unlock_shared_fair(&self);

    /// Releases an exclusive lock using a fair unlock protocol.
    fn unlock_exclusive_fair(&self);

    /// Temporarily yields a shared lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_shared_fair` followed
    /// by `lock_shared`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    fn bump_shared(&self) {
        self.unlock_shared_fair();
        self.lock_shared();
    }

    /// Temporarily yields an exclusive lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
    /// by `lock_exclusive`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    fn bump_exclusive(&self) {
        self.unlock_exclusive_fair();
        self.lock_exclusive();
    }
}

/// Additional methods for RwLocks which support atomically downgrading an
/// exclusive lock to a shared lock.
pub unsafe trait RawRwLockDowngrade: RawRwLock {
    /// Atomically downgrades an exclusive lock into a shared lock without
    /// allowing any thread to take an exclusive lock in the meantime.
    fn downgrade(&self);
}

/// Additional methods for RwLocks which support locking with timeouts.
///
/// The `Duration` and `Instant` types are specified as associated types so that
/// this trait is usable even in `no_std` environments.
pub unsafe trait RawRwLockTimed: RawRwLock {
    /// Duration type used for `try_lock_for`.
    type Duration;

    /// Instant type used for `try_lock_until`.
    type Instant;

    /// Attempts to acquire a shared lock until a timeout is reached.
    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire a shared lock until a timeout is reached.
    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;

    /// Attempts to acquire an exclusive lock until a timeout is reached.
    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire an exclusive lock until a timeout is reached.
    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
}

/// Additional methods for RwLocks which support recursive read locks.
///
/// These are guaranteed to succeed without blocking if
/// another read lock is held at the time of the call. This allows a thread
/// to recursively lock a `RwLock`. However, using this method can cause
/// writers to starve since readers no longer block if a writer is waiting
/// for the lock.
pub unsafe trait RawRwLockRecursive: RawRwLock {
    /// Acquires a shared lock without deadlocking in case of a recursive lock.
    fn lock_shared_recursive(&self);

    /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock.
    fn try_lock_shared_recursive(&self) -> bool;
}

/// Additional methods for RwLocks which support recursive read locks and timeouts.
pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
    /// Attempts to acquire a shared lock until a timeout is reached, without
    /// deadlocking in case of a recursive lock.
    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire a shared lock until a timeout is reached, without
    /// deadlocking in case of a recursive lock.
    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
}

/// Additional methods for RwLocks which support atomically upgrading a shared
/// lock to an exclusive lock.
///
/// This requires acquiring a special "upgradable read lock" instead of a
/// normal shared lock. There may only be one upgradable lock at any time,
/// otherwise deadlocks could occur when upgrading.
pub unsafe trait RawRwLockUpgrade: RawRwLock {
    /// Acquires an upgradable lock, blocking the current thread until it is able to do so.
    fn lock_upgradable(&self);

    /// Attempts to acquire an upgradable lock without blocking.
    fn try_lock_upgradable(&self) -> bool;

    /// Releases an upgradable lock.
    fn unlock_upgradable(&self);

    /// Upgrades an upgradable lock to an exclusive lock.
    fn upgrade(&self);

    /// Attempts to upgrade an upgradable lock to an exclusive lock without
    /// blocking.
    fn try_upgrade(&self) -> bool;
}

/// Additional methods for RwLocks which support upgradable locks and fair
/// unlocking.
pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
    /// Releases an upgradable lock using a fair unlock protocol.
    fn unlock_upgradable_fair(&self);

    /// Temporarily yields an upgradable lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
    /// by `lock_upgradable`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    fn bump_upgradable(&self) {
        self.unlock_upgradable_fair();
        self.lock_upgradable();
    }
}

/// Additional methods for RwLocks which support upgradable locks and lock
/// downgrading.
pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
    /// Downgrades an upgradable lock to a shared lock.
    fn downgrade_upgradable(&self);

    /// Downgrades an exclusive lock to an upgradable lock.
    fn downgrade_to_upgradable(&self);
}

/// Additional methods for RwLocks which support upgradable locks and locking
/// with timeouts.
pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
    /// Attempts to acquire an upgradable lock until a timeout is reached.
    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire an upgradable lock until a timeout is reached.
    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;

    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
    /// timeout is reached.
    fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
    /// timeout is reached.
    fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
}

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
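///
/// # Example
///
/// A usage sketch, assuming a concrete `RawRwLock` implementation named
/// `MyRawRwLock` (hypothetical; the `parking_lot` crate provides real ones):
///
/// ```ignore
/// let lock: RwLock<MyRawRwLock, i32> = RwLock::new(5);
///
/// // Any number of read guards can coexist.
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1 + *r2, 10);
/// } // read guards are dropped (and the lock released) here
///
/// // A write guard gives exclusive, mutable access.
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// }
/// ```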
pub struct RwLock<R: RawRwLock, T: ?Sized> {
    raw: R,
    data: UnsafeCell<T>,
}

unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}

impl<R: RawRwLock, T> RwLock<R, T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(feature = "nightly")]
    #[inline]
    pub const fn new(val: T) -> RwLock<R, T> {
        RwLock {
            data: UnsafeCell::new(val),
            raw: R::INIT,
        }
    }

    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(not(feature = "nightly"))]
    #[inline]
    pub fn new(val: T) -> RwLock<R, T> {
        RwLock {
            data: UnsafeCell::new(val),
            raw: R::INIT,
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    #[inline]
    #[allow(unused_unsafe)]
    pub fn into_inner(self) -> T {
        unsafe { self.data.into_inner() }
    }
}

impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
    #[inline]
    fn read_guard(&self) -> RwLockReadGuard<R, T> {
        RwLockReadGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    #[inline]
    fn write_guard(&self) -> RwLockWriteGuard<R, T> {
        RwLockWriteGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
    /// the current thread already holds one may result in a deadlock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<R, T> {
        self.raw.lock_shared();
        self.read_guard()
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared() {
            Some(self.read_guard())
        } else {
            None
        }
    }

    /// Locks this `RwLock` with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock`
    /// when dropped.
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<R, T> {
        self.raw.lock_exclusive();
        self.write_guard()
    }

    /// Attempts to lock this `RwLock` with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<R, T>> {
        if self.raw.try_lock_exclusive() {
            Some(self.write_guard())
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
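    ///
    /// # Example
    ///
    /// A sketch, again assuming the hypothetical `MyRawRwLock`:
    ///
    /// ```ignore
    /// let mut lock: RwLock<MyRawRwLock, i32> = RwLock::new(0);
    /// *lock.get_mut() = 10; // no locking: we hold `&mut RwLock`
    /// assert_eq!(*lock.read(), 10);
    /// ```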
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data.get() }
    }

    /// Forcibly unlocks a read lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
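    ///
    /// # Example
    ///
    /// A sketch of the `mem::forget` pattern, assuming the hypothetical
    /// `MyRawRwLock`:
    ///
    /// ```ignore
    /// let lock: RwLock<MyRawRwLock, ()> = RwLock::new(());
    /// core::mem::forget(lock.read()); // stays read-locked, with no guard alive
    /// // ... e.g. hand a raw pointer to FFI code while the lock is held ...
    /// unsafe { lock.force_unlock_read() } // pairs with the forgotten guard
    /// ```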
    #[inline]
    pub unsafe fn force_unlock_read(&self) {
        self.raw.unlock_shared();
    }

    /// Forcibly unlocks a write lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
    #[inline]
    pub unsafe fn force_unlock_write(&self) {
        self.raw.unlock_exclusive();
    }

    /// Returns the underlying raw reader-writer lock object.
    ///
    /// Note that you will most likely need to import the `RawRwLock` trait from
    /// `lock_api` to be able to call functions on the raw
    /// reader-writer lock.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking the lock while
    /// still holding a reference to a lock guard.
    pub unsafe fn raw(&self) -> &R {
        &self.raw
    }
}

impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
    /// Forcibly unlocks a read lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
    #[inline]
    pub unsafe fn force_unlock_read_fair(&self) {
        self.raw.unlock_shared_fair();
    }

    /// Forcibly unlocks a write lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
    #[inline]
    pub unsafe fn force_unlock_write_fair(&self) {
        self.raw.unlock_exclusive_fair();
    }
}

impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
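    ///
    /// # Example
    ///
    /// A sketch assuming the hypothetical `MyRawRwLock` implements
    /// `RawRwLockTimed` with `Duration = std::time::Duration`:
    ///
    /// ```ignore
    /// use std::time::Duration;
    ///
    /// let lock: RwLock<MyRawRwLock, i32> = RwLock::new(1);
    /// match lock.try_read_for(Duration::from_millis(10)) {
    ///     Some(guard) => assert_eq!(*guard, 1),
    ///     None => println!("timed out waiting for the read lock"),
    /// }
    /// ```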
    #[inline]
    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared_for(timeout) {
            Some(self.read_guard())
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared_until(timeout) {
            Some(self.read_guard())
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with exclusive write access until a
    /// timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the exclusive access when it is dropped.
    #[inline]
    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<R, T>> {
        if self.raw.try_lock_exclusive_for(timeout) {
            Some(self.write_guard())
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with exclusive write access until a
    /// timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the exclusive access when it is dropped.
    #[inline]
    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<R, T>> {
        if self.raw.try_lock_exclusive_until(timeout) {
            Some(self.write_guard())
        } else {
            None
        }
    }
}

impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Unlike `read`, this method is guaranteed to succeed without blocking if
    /// another read lock is held at the time of the call. This allows a thread
    /// to recursively lock a `RwLock`. However, using this method can cause
    /// writers to starve since readers no longer block if a writer is waiting
    /// for the lock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
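    ///
    /// # Example
    ///
    /// A sketch assuming the hypothetical `MyRawRwLock` implements
    /// `RawRwLockRecursive`:
    ///
    /// ```ignore
    /// let lock: RwLock<MyRawRwLock, i32> = RwLock::new(1);
    /// let first = lock.read();
    /// // Guaranteed not to block, even if a writer is queued, because this
    /// // thread already holds a read lock.
    /// let second = lock.read_recursive();
    /// assert_eq!(*first, *second);
    /// ```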
    #[inline]
    pub fn read_recursive(&self) -> RwLockReadGuard<R, T> {
        self.raw.lock_shared_recursive();
        self.read_guard()
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This method is guaranteed to succeed if another read lock is held at the
    /// time of the call. See the documentation for `read_recursive` for details.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared_recursive() {
            Some(self.read_guard())
        } else {
            None
        }
    }
}

impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    ///
    /// This method is guaranteed to succeed without blocking if another read
    /// lock is held at the time of the call. See the documentation for
    /// `read_recursive` for details.
    #[inline]
    pub fn try_read_recursive_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared_recursive_for(timeout) {
            Some(self.read_guard())
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_read_recursive_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared_recursive_until(timeout) {
            Some(self.read_guard())
        } else {
            None
        }
    }
}

impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
    #[inline]
    fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<R, T> {
        RwLockUpgradableReadGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with upgradable read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers or other
    /// upgradable reads which hold the lock. There may be other readers currently
    /// inside the lock when this method returns.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
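    ///
    /// # Example
    ///
    /// A sketch assuming the hypothetical `MyRawRwLock` implements
    /// `RawRwLockUpgrade`:
    ///
    /// ```ignore
    /// let lock: RwLock<MyRawRwLock, Vec<i32>> = RwLock::new(Vec::new());
    /// let upgradable = lock.upgradable_read(); // coexists with plain readers
    /// if upgradable.is_empty() {
    ///     // Atomically trade upgradable read access for exclusive access.
    ///     let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
    ///     writer.push(1);
    /// }
    /// ```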
    #[inline]
    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<R, T> {
        self.raw.lock_upgradable();
        self.upgradable_guard()
    }

    /// Attempts to acquire this `RwLock` with upgradable read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<R, T>> {
        if self.raw.try_lock_upgradable() {
            Some(self.upgradable_guard())
        } else {
            None
        }
    }
}

impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_upgradable_read_for(
        &self,
        timeout: R::Duration,
    ) -> Option<RwLockUpgradableReadGuard<R, T>> {
        if self.raw.try_lock_upgradable_for(timeout) {
            Some(self.upgradable_guard())
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_upgradable_read_until(
        &self,
        timeout: R::Instant,
    ) -> Option<RwLockUpgradableReadGuard<R, T>> {
        if self.raw.try_lock_upgradable_until(timeout) {
            Some(self.upgradable_guard())
        } else {
            None
        }
    }
}

impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> {
    #[inline]
    fn default() -> RwLock<R, T> {
        RwLock::new(Default::default())
    }
}

impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
    #[inline]
    fn from(t: T) -> RwLock<R, T> {
        RwLock::new(t)
    }
}

impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_read() {
            Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
            None => f.pad("RwLock { <locked> }"),
        }
    }
}

/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use]
pub struct RwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
    rwlock: &'a RwLock<R, T>,
    marker: PhantomData<(&'a T, R::GuardMarker)>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockReadGuard<'a, R, T> {}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
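    ///
    /// # Example
    ///
    /// A sketch assuming the hypothetical `MyRawRwLock`:
    ///
    /// ```ignore
    /// struct Point { x: i32, y: i32 }
    ///
    /// let lock: RwLock<MyRawRwLock, Point> = RwLock::new(Point { x: 1, y: 2 });
    /// let guard = lock.read();
    /// // Narrow the guard down to a single field.
    /// let x = RwLockReadGuard::map(guard, |p| &p.x);
    /// assert_eq!(*x, 1);
    /// ```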
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &*s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
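    ///
    /// # Example
    ///
    /// A sketch assuming the hypothetical `MyRawRwLock`:
    ///
    /// ```ignore
    /// let lock: RwLock<MyRawRwLock, Option<i32>> = RwLock::new(Some(3));
    /// let guard = lock.read();
    /// match RwLockReadGuard::try_map(guard, |v| v.as_ref()) {
    ///     Ok(mapped) => assert_eq!(*mapped, 3),
    ///     Err(_guard) => println!("value was None; original guard returned"),
    /// }
    /// ```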
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &*s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
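    ///
    /// # Example
    ///
    /// A sketch assuming the hypothetical `MyRawRwLock` and a hypothetical
    /// `expensive_io` function:
    ///
    /// ```ignore
    /// let mut guard = lock.read();
    /// let result = RwLockReadGuard::unlocked(&mut guard, || {
    ///     expensive_io() // runs with the lock released
    /// }); // the read lock is re-acquired before this returns
    /// ```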
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_shared();
        defer!(s.rwlock.raw.lock_shared());
        f()
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.rwlock.raw.unlock_shared_fair();
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_shared_fair();
        defer!(s.rwlock.raw.lock_shared());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `read`, however it can be much more efficient in the case where there
    /// are no waiting threads.
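    ///
    /// # Example
    ///
    /// A sketch assuming the hypothetical `MyRawRwLock` and hypothetical
    /// `work_items`/`process` names:
    ///
    /// ```ignore
    /// let mut guard = lock.read();
    /// for item in work_items {
    ///     process(item, &*guard);
    ///     // Give any queued writer a chance to run between items.
    ///     RwLockReadGuard::bump(&mut guard);
    /// }
    /// ```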
    #[inline]
    pub fn bump(s: &mut Self) {
        s.rwlock.raw.bump_shared();
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_shared();
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use]
pub struct RwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
    rwlock: &'a RwLock<R, T>,
    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockWriteGuard<'a, R, T> {}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &mut *s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_exclusive();
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }
}

impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
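    ///
    /// # Example
    ///
    /// A sketch assuming the hypothetical `MyRawRwLock` implements
    /// `RawRwLockDowngrade`:
    ///
    /// ```ignore
    /// let lock: RwLock<MyRawRwLock, i32> = RwLock::new(0);
    /// let mut writer = lock.write();
    /// *writer = 42;
    /// // No writer can acquire the lock between the unlock and the re-lock.
    /// let reader = RwLockWriteGuard::downgrade(writer);
    /// assert_eq!(*reader, 42);
    /// ```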
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
        s.rwlock.raw.downgrade();
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
        s.rwlock.raw.downgrade_to_upgradable();
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockUpgradableReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockWriteGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.rwlock.raw.unlock_exclusive_fair();
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_exclusive_fair();
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `write`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        s.rwlock.raw.bump_exclusive();
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_exclusive();
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}

/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
#[must_use]
pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> {
    rwlock: &'a RwLock<R, T>,
    marker: PhantomData<(&'a T, R::GuardMarker)>,
}

unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
    for RwLockUpgradableReadGuard<'a, R, T>
{}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_upgradable();
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
    /// blocking the current thread until it can be acquired.
    pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
        s.rwlock.raw.upgrade();
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockWriteGuard {
            rwlock,
            marker: PhantomData,
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
    ///
    /// If the access could not be granted at this time, then the current guard is returned.
    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        if s.rwlock.raw.try_upgrade() {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.rwlock.raw.unlock_upgradable_fair();
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_upgradable_fair();
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `upgradable_read`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        s.rwlock.raw.bump_upgradable();
    }
}

impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Atomically downgrades an upgradable read lock into a shared read lock
    /// without allowing any writers to take exclusive access of the lock in the
    /// meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
        s.rwlock.raw.downgrade_upgradable();
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// the current guard is returned.
    pub fn try_upgrade_for(
        s: Self,
        timeout: R::Duration,
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        if s.rwlock.raw.try_upgrade_for(timeout) {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// the current guard is returned.
    #[inline]
    pub fn try_upgrade_until(
        s: Self,
        timeout: R::Instant,
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        if s.rwlock.raw.try_upgrade_until(timeout) {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_upgradable();
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
    for RwLockUpgradableReadGuard<'a, R, T>
{}

/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use]
pub struct MappedRwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
    raw: &'a R,
    data: *const T,
    marker: PhantomData<&'a T>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
    R::GuardMarker: Send
{}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        let data = f(unsafe { &*s.data });
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.raw.unlock_shared_fair();
        mem::forget(s);
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.raw.unlock_shared();
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockReadGuard<'a, R, T>
{}

/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use]
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
    raw: &'a R,
    data: *mut T,
    marker: PhantomData<&'a mut T>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedRwLockWriteGuard<'a, R, T>
{}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
    R::GuardMarker: Send
{}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = s.raw;
        let data = f(unsafe { &mut *s.data });
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &mut *s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}

impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, R, T> {
        s.raw.downgrade();
        let raw = s.raw;
        let data = s.data;
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.raw.unlock_exclusive_fair();
        mem::forget(s);
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        self.raw.unlock_exclusive();
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockWriteGuard<'a, R, T>
{}