1 //! A "Manual Arc" which allows manually frobbing the reference count
2 //!
3 //! This module contains a copy of the `Arc` found in the standard library,
4 //! stripped down to the bare bones of what we actually need. The reason this is
5 //! done is for the ability to concretely know the memory layout of the `Inner`
6 //! structure of the arc pointer itself (e.g. `ArcInner` in the standard
7 //! library).
8 //!
9 //! We do some unsafe casting from `*mut OVERLAPPED` to a `FromRawArc<T>` to
10 //! ensure that data lives for the length of an I/O operation, but this means
11 //! that we have to know the layouts of the structures involved. This
12 //! representation primarily guarantees that the data, `T` is at the front of
13 //! the inner pointer always.
14 //!
15 //! Note that we're missing out on some various optimizations implemented in the
16 //! standard library:
17 //!
18 //! * The size of `FromRawArc` is actually two words because of the drop flag
19 //! * The compiler doesn't understand that the pointer in `FromRawArc` is never
20 //!   null, so Option<FromRawArc<T>> is not a nullable pointer.
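//!
//! As a rough sketch of the intended usage (with a hypothetical `State` type;
//! the real call sites thread this pointer through an `OVERLAPPED`-based
//! structure), the round trip looks something like:
//!
//! ```ignore
//! // `State` is a stand-in for whatever per-operation data is kept alive.
//! let arc = FromRawArc::new(State::new());
//!
//! // `data` is the first field of the `#[repr(C)]` inner allocation, so a
//! // pointer to the data doubles as a pointer to the allocation itself.
//! let ptr = &*arc as *const State as *mut State;
//!
//! // Leak one reference for the duration of the I/O operation...
//! mem::forget(arc.clone());
//!
//! // ...and reclaim exactly that reference once the operation completes.
//! let arc = unsafe { FromRawArc::from_raw(ptr) };
//! ```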

use std::ops::Deref;
use std::mem;
use std::sync::atomic::{self, AtomicUsize, Ordering};

pub struct FromRawArc<T> {
    _inner: *mut Inner<T>,
}

unsafe impl<T: Sync + Send> Send for FromRawArc<T> { }
unsafe impl<T: Sync + Send> Sync for FromRawArc<T> { }

#[repr(C)]
struct Inner<T> {
    data: T,
    cnt: AtomicUsize,
}

impl<T> FromRawArc<T> {
    pub fn new(data: T) -> FromRawArc<T> {
        let x = Box::new(Inner {
            data: data,
            cnt: AtomicUsize::new(1),
        });
        FromRawArc { _inner: unsafe { mem::transmute(x) } }
    }

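    /// Converts a raw pointer to the `data` of a live allocation created by
    /// `FromRawArc::new` back into an owning `FromRawArc<T>`.
    ///
    /// The returned value assumes ownership of one reference count, which it
    /// releases when dropped, so each pointer reconstructed this way must be
    /// balanced by exactly one logically leaked reference (e.g. a clone passed
    /// to `mem::forget`).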
    pub unsafe fn from_raw(ptr: *mut T) -> FromRawArc<T> {
        // Note that if `mem::transmute` were guaranteed to recover a libstd
        // `Arc` here then we could just use `std::sync::Arc`, but the lack of
        // that guarantee is the crucial reason this type currently exists.
        FromRawArc { _inner: ptr as *mut Inner<T> }
    }
}

impl<T> Clone for FromRawArc<T> {
    fn clone(&self) -> FromRawArc<T> {
        // Atomic ordering of Relaxed lifted from libstd, but the general idea
        // is that you need synchronization to communicate this increment to
        // another thread, so this itself doesn't need to be synchronized.
        unsafe {
            (*self._inner).cnt.fetch_add(1, Ordering::Relaxed);
        }
        FromRawArc { _inner: self._inner }
    }
}

impl<T> Deref for FromRawArc<T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &(*self._inner).data }
    }
}

impl<T> Drop for FromRawArc<T> {
    fn drop(&mut self) {
        unsafe {
            // Atomic orderings lifted from the standard library
            if (*self._inner).cnt.fetch_sub(1, Ordering::Release) != 1 {
                return
            }
            atomic::fence(Ordering::Acquire);
            // Reconstruct the original `Box<Inner<T>>` so that both the data
            // and the allocation are freed with the correct layout.
            drop(mem::transmute::<_, Box<Inner<T>>>(self._inner));
        }
    }
}

#[cfg(test)]
mod tests {
    use super::FromRawArc;

    #[test]
    fn smoke() {
        let a = FromRawArc::new(1);
        assert_eq!(*a, 1);
        assert_eq!(*a.clone(), 1);
    }

    #[test]
    fn drops() {
        struct A<'a>(&'a mut bool);
        impl<'a> Drop for A<'a> {
            fn drop(&mut self) {
                *self.0 = true;
            }
        }
        let mut a = false;
        {
            let a = FromRawArc::new(A(&mut a));
            let _ = a.clone();
            assert!(!*a.0);
        }
        assert!(a);
    }
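
    // A rough sketch of how this type is meant to be used in practice: leak
    // one reference as a raw pointer to the data (as would be stashed
    // alongside an `OVERLAPPED` for an in-flight I/O operation) and reclaim
    // it later with `from_raw`. The `mem::forget`/`from_raw` pair keeps the
    // reference count balanced.
    #[test]
    fn raw_round_trip() {
        use std::mem;

        let a = FromRawArc::new(42u32);
        // Thanks to `#[repr(C)]` on `Inner<T>`, a pointer to the data is also
        // a pointer to the start of the inner allocation.
        let ptr = &*a as *const u32 as *mut u32;
        mem::forget(a.clone());
        let b = unsafe { FromRawArc::from_raw(ptr) };
        assert_eq!(*a, 42);
        assert_eq!(*b, 42);
    }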
}