// Copyright 2018 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::{
    mutex::{RawMutex, RawMutexFair, RawMutexTimed},
    GuardNoSend,
};
use core::{
    cell::{Cell, UnsafeCell},
    fmt,
    marker::PhantomData,
    mem,
    num::NonZeroUsize,
    ops::Deref,
    sync::atomic::{AtomicUsize, Ordering},
};

#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;

#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// Helper trait which returns a non-zero thread ID.
///
/// The simplest way to implement this trait is to return the address of a
/// thread-local variable.
///
/// # Safety
///
/// Implementations of this trait must ensure that no two active threads share
/// the same thread ID. However, the ID of a thread that has exited can be
/// re-used since that thread is no longer active.
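///
/// # Example
///
/// A minimal sketch of an implementation (not part of this crate) backed by
/// the address of a thread-local variable; `std` is assumed to be available:
///
/// ```ignore
/// use core::num::NonZeroUsize;
///
/// struct StdThreadId;
///
/// unsafe impl GetThreadId for StdThreadId {
///     const INIT: Self = StdThreadId;
///
///     fn nonzero_thread_id(&self) -> NonZeroUsize {
///         // Every live thread has a distinct address for its thread-local
///         // storage, and that address is never null.
///         thread_local!(static KEY: u8 = 0);
///         KEY.with(|key| {
///             NonZeroUsize::new(key as *const u8 as usize)
///                 .expect("thread-local variable address is null")
///         })
///     }
/// }
/// ```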
pub unsafe trait GetThreadId {
    /// Initial value.
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Returns a non-zero thread ID which identifies the current thread of
    /// execution.
    fn nonzero_thread_id(&self) -> NonZeroUsize;
}

/// A raw mutex type that wraps another raw mutex to provide reentrancy.
///
/// Although this has the same methods as the [`RawMutex`] trait, it does
/// not implement it, and should not be used in the same way, since this
/// mutex can successfully acquire a lock multiple times in the same thread.
/// Only use this when you know you want a raw mutex that can be locked
/// reentrantly; you probably want [`ReentrantMutex`] instead.
///
/// [`RawMutex`]: trait.RawMutex.html
/// [`ReentrantMutex`]: struct.ReentrantMutex.html
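///
/// # Example
///
/// A minimal sketch of a static raw reentrant mutex; `RawSpinlock` and
/// `RawThreadId` stand in for types implementing [`RawMutex`] and
/// [`GetThreadId`] and are not defined in this module:
///
/// ```ignore
/// static LOCK: RawReentrantMutex<RawSpinlock, RawThreadId> = RawReentrantMutex::INIT;
///
/// fn recurse(depth: usize) {
///     LOCK.lock();
///     if depth > 0 {
///         // Re-locking on the same thread succeeds instead of deadlocking.
///         recurse(depth - 1);
///     }
///     // Safety: the lock was acquired above on this thread.
///     unsafe { LOCK.unlock() };
/// }
/// ```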
pub struct RawReentrantMutex<R, G> {
    owner: AtomicUsize,
    lock_count: Cell<usize>,
    mutex: R,
    get_thread_id: G,
}

unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}

impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Initial value for an unlocked mutex.
    #[allow(clippy::declare_interior_mutable_const)]
    pub const INIT: Self = RawReentrantMutex {
        owner: AtomicUsize::new(0),
        lock_count: Cell::new(0),
        mutex: R::INIT,
        get_thread_id: G::INIT,
    };

    #[inline]
    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
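        // Relaxed ordering suffices for this check: `owner` can only equal
        // `id` if the current thread itself stored it while holding the inner
        // mutex, so we are merely reading back our own earlier store. All
        // cross-thread synchronization happens through the inner mutex.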
        if self.owner.load(Ordering::Relaxed) == id {
            self.lock_count.set(
                self.lock_count
                    .get()
                    .checked_add(1)
                    .expect("ReentrantMutex lock count overflow"),
            );
        } else {
            if !try_lock() {
                return false;
            }
            self.owner.store(id, Ordering::Relaxed);
            debug_assert_eq!(self.lock_count.get(), 0);
            self.lock_count.set(1);
        }
        true
    }

    /// Acquires this mutex, blocking if it's held by another thread.
    #[inline]
    pub fn lock(&self) {
        self.lock_internal(|| {
            self.mutex.lock();
            true
        });
    }

    /// Attempts to acquire this mutex without blocking. Returns `true`
    /// if the lock was successfully acquired and `false` otherwise.
    #[inline]
    pub fn try_lock(&self) -> bool {
        self.lock_internal(|| self.mutex.try_lock())
    }

    /// Unlocks this mutex. The inner mutex may not be unlocked if
    /// this mutex was acquired previously in the current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock();
        }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.mutex.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        self.owner.load(Ordering::Relaxed) == id
    }
}

impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Unlocks this mutex using a fair unlock protocol. The inner mutex
    /// may not be unlocked if this mutex was acquired previously in the
    /// current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock_fair(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock_fair();
        }
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`; however, it can be much more efficient in the case where there
    /// are no waiting threads.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn bump(&self) {
        if self.lock_count.get() == 1 {
            let id = self.owner.load(Ordering::Relaxed);
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.bump();
            self.owner.store(id, Ordering::Relaxed);
        }
    }
}

impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Attempts to acquire this lock until a timeout is reached.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
        self.lock_internal(|| self.mutex.try_lock_until(timeout))
    }

    /// Attempts to acquire this lock until a timeout is reached.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
        self.lock_internal(|| self.mutex.try_lock_for(timeout))
    }
}

/// A mutex which can be recursively locked by a single thread.
///
/// This type is identical to `Mutex` except for the following points:
///
/// - Locking multiple times from the same thread will work correctly instead of
///   deadlocking.
/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
///   Use a `RefCell` if you need this, as shown in the example below.
///
/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
/// primitive.
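///
/// # Example
///
/// A minimal sketch, assuming `RawSpinlock: RawMutex` and
/// `RawThreadId: GetThreadId` implementations are in scope (neither is
/// defined in this module); a `RefCell` supplies the mutability that the
/// guard itself does not:
///
/// ```ignore
/// use core::cell::RefCell;
///
/// let mutex: ReentrantMutex<RawSpinlock, RawThreadId, RefCell<u32>> =
///     ReentrantMutex::new(RefCell::new(0));
///
/// let guard1 = mutex.lock();
/// // Locking again on the same thread does not deadlock.
/// let guard2 = mutex.lock();
/// *guard2.borrow_mut() += 1;
/// assert_eq!(*guard1.borrow(), 1);
/// ```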
pub struct ReentrantMutex<R, G, T: ?Sized> {
    raw: RawReentrantMutex<R, G>,
    data: UnsafeCell<T>,
}

unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
    for ReentrantMutex<R, G, T>
{
}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
    for ReentrantMutex<R, G, T>
{
}

impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex in an unlocked state ready for use.
    #[cfg(feature = "nightly")]
    #[inline]
    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Creates a new reentrant mutex in an unlocked state ready for use.
    #[cfg(not(feature = "nightly"))]
    #[inline]
    pub fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Consumes this mutex, returning the underlying data.
    #[inline]
    pub fn into_inner(self) -> T {
        self.data.into_inner()
    }
}

impl<R, G, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
    /// helper to get the thread ID.
    ///
    /// This allows creating a reentrant mutex in a constant context on stable
    /// Rust.
    #[inline]
    pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: raw_mutex,
                get_thread_id,
            },
        }
    }
}

impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> {
        ReentrantMutexGuard {
            remutex: &self,
            marker: PhantomData,
        }
    }

    /// Acquires a reentrant mutex, blocking the current thread until it is able
    /// to do so.
    ///
    /// If the mutex is held by another thread then this function will block the
    /// local thread until it is available to acquire the mutex. If the mutex is
    /// already held by the current thread then this function will increment the
    /// lock reference count and return immediately. Upon returning,
    /// the thread is the only thread with the mutex held. An RAII guard is
    /// returned to allow scoped unlock of the lock. When the guard goes out of
    /// scope, the mutex will be unlocked.
    #[inline]
    pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
        self.raw.lock();
        // SAFETY: The lock is held, as required.
        unsafe { self.guard() }
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
    /// guard is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.guard() })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data.get() }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.raw.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        self.raw.is_owned_by_current_thread()
    }

    /// Forcibly unlocks the mutex.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    pub unsafe fn force_unlock(&self) {
        self.raw.unlock();
    }

    /// Returns the underlying raw mutex object.
    ///
    /// Note that you will most likely need to import the `RawMutex` trait from
    /// `lock_api` to be able to call functions on the raw mutex.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking a mutex while
    /// still holding a reference to a `ReentrantMutexGuard`.
    #[inline]
    pub unsafe fn raw(&self) -> &R {
        &self.raw.mutex
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example
    /// when dealing with FFI.
    ///
    /// # Safety
    ///
    /// You must ensure that there are no data races when dereferencing the
    /// returned pointer, for example if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using
    /// `mem::forget`.
    #[inline]
    pub fn data_ptr(&self) -> *mut T {
        self.data.get()
    }
}

impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Forcibly unlocks the mutex using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    pub unsafe fn force_unlock_fair(&self) {
        self.raw.unlock_fair();
    }
}

impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Attempts to acquire this lock until a timeout is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
    /// be unlocked when the guard is dropped.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
    /// be unlocked when the guard is dropped.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.guard() })
        } else {
            None
        }
    }
}

impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
    #[inline]
    fn default() -> ReentrantMutex<R, G, T> {
        ReentrantMutex::new(Default::default())
    }
}

impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
    #[inline]
    fn from(t: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex::new(t)
    }
}

impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.try_lock() {
            Some(guard) => f
                .debug_struct("ReentrantMutex")
                .field("data", &&*guard)
                .finish(),
            None => {
                struct LockedPlaceholder;
                impl fmt::Debug for LockedPlaceholder {
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                        f.write_str("<locked>")
                    }
                }

                f.debug_struct("ReentrantMutex")
                    .field("data", &LockedPlaceholder)
                    .finish()
            }
        }
    }
}

// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Serialize + ?Sized,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.lock().serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Deserialize<'de> + ?Sized,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
    }
}

/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
/// is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    remutex: &'a ReentrantMutex<R, G, T>,
    marker: PhantomData<(&'a T, GuardNoSend)>,
}

unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for ReentrantMutexGuard<'a, R, G, T>
{
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
    /// Returns a reference to the original `ReentrantMutex` object.
    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
        s.remutex
    }

    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
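    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `mutex` is an already-constructed
    /// `ReentrantMutex` wrapping a pair of counters:
    ///
    /// ```ignore
    /// let guard = mutex.lock(); // mutex wraps (0u32, 0u32)
    /// // Narrow the guard down to the first field of the pair.
    /// let first = ReentrantMutexGuard::map(guard, |pair| &pair.0);
    /// assert_eq!(*first, 0);
    /// ```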
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.remutex.raw;
        let data = f(unsafe { &*s.remutex.data.get() });
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.remutex.raw;
        let data = match f(unsafe { &*s.remutex.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
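    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `mutex` is an already-constructed
    /// `ReentrantMutex`:
    ///
    /// ```ignore
    /// let mut guard = mutex.lock();
    /// ReentrantMutexGuard::unlocked(&mut guard, || {
    ///     // The mutex is unlocked here and re-acquired before
    ///     // `unlocked` returns.
    /// });
    /// ```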
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        defer!(s.remutex.raw.lock());
        f()
    }
}

impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    ReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        mem::forget(s);
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// The mutex is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`; however, it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
    for ReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.remutex.data.get() }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for ReentrantMutexGuard<'a, R, G, T>
{
    #[inline]
    fn drop(&mut self) {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            self.remutex.raw.unlock();
        }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for ReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for ReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for ReentrantMutexGuard<'a, R, G, T>
{
}

/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    raw: &'a RawReentrantMutex<R, G>,
    data: *const T,
    marker: PhantomData<&'a T>,
}

unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        let data = f(unsafe { &*s.data });
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}

impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedReentrantMutexGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock
        unsafe {
            s.raw.unlock_fair();
        }
        mem::forget(s);
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    #[inline]
    fn drop(&mut self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock.
        unsafe {
            self.raw.unlock();
        }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}