// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Fork of Arc for Servo. This has the following advantages over std::sync::Arc:
//!
//! * We don't waste storage on the weak reference count.
//! * We don't do extra read-modify-write (RMW) operations to handle the possibility of weak references.
//! * We can experiment with arena allocation (todo).
//! * We can add methods to support our custom use cases [1].
//! * We have support for dynamically-sized types (see from_header_and_iter).
//! * We have support for thin arcs to unsized types (see ThinArc).
//!
//! [1]: https://bugzilla.mozilla.org/show_bug.cgi?id=1360883
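//!
//! A minimal usage sketch (not from the original docs; it assumes this file is
//! built as the `servo_arc` crate, and is marked `ignore` for that reason):
//!
//! ```ignore
//! use servo_arc::{Arc, HeaderWithLength, ThinArc};
//!
//! // A plain sized value behaves much like std::sync::Arc.
//! let a = Arc::new(42);
//! let b = a.clone();
//! assert!(Arc::ptr_eq(&a, &b));
//!
//! // A header plus a dynamically-sized slice in a single allocation,
//! // stored behind a single-word (thin) pointer.
//! let header = HeaderWithLength::new("numbers", 3);
//! let thin: ThinArc<&str, u32> = Arc::into_thin(
//!     Arc::from_header_and_iter(header, vec![1u32, 2, 3].into_iter()));
//! assert_eq!(thin.slice.len(), 3);
//! ```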

// The semantics of Arc are already documented in the Rust docs, so we don't
// duplicate those here.
#![allow(missing_docs)]

extern crate nodrop;
#[cfg(feature = "servo")] extern crate serde;
extern crate stable_deref_trait;

use nodrop::NoDrop;
#[cfg(feature = "servo")]
use serde::{Deserialize, Serialize};
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::{isize, usize};
use std::borrow;
use std::cmp::Ordering;
use std::convert::From;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::{ExactSizeIterator, Iterator};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::os::raw::c_void;
use std::process;
use std::ptr;
use std::slice;
use std::sync::atomic;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};

// Private macro to get the offset of a struct field in bytes from the address of the struct.
macro_rules! offset_of {
    ($container:path, $field:ident) => {{
        // Make sure the field actually exists. This line ensures that a compile-time error is
        // generated if $field is accessed through a Deref impl.
        let $container { $field: _, .. };

        // Create an (invalid) instance of the container and calculate the offset to its
        // field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to
        // be nullptr deref.
        let invalid: $container = ::std::mem::uninitialized();
        let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize;

        // Do not run destructors on the made up invalid instance.
        ::std::mem::forget(invalid);
        offset as isize
    }};
}
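
// Worked example (editor's note, not in the original code): `offset_of!(ArcInner<u64>, data)`
// evaluates to the byte offset of the `data` field within `ArcInner<u64>`, i.e. the size of
// the leading `AtomicUsize` refcount plus any padding required by `u64`'s alignment
// (8 on typical 64-bit targets, though the exact value is layout-dependent).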

/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// Wrapper type for pointers to get the non-zero optimization. When
/// NonZero/Shared/Unique are stabilized, we should just use Shared
/// here to get the same effect. Gankro is working on this in [1].
///
/// It's unfortunate that this needs to infect all the caller types
/// with 'static. It would be nice to just use a &() and a PhantomData<T>
/// instead, but then the compiler can't determine whether the &() should
/// be thin or fat (which depends on whether or not T is sized). Given
/// that this is all a temporary hack, this restriction is fine for now.
///
/// [1]: https://github.com/rust-lang/rust/issues/27730
// FIXME: remove this and use std::ptr::NonNull when Firefox requires Rust 1.25+
pub struct NonZeroPtrMut<T: ?Sized + 'static>(&'static mut T);
impl<T: ?Sized> NonZeroPtrMut<T> {
    pub fn new(ptr: *mut T) -> Self {
        assert!(!(ptr as *mut u8).is_null());
        NonZeroPtrMut(unsafe { mem::transmute(ptr) })
    }

    pub fn ptr(&self) -> *mut T {
        self.0 as *const T as *mut T
    }
}

impl<T: ?Sized + 'static> Clone for NonZeroPtrMut<T> {
    fn clone(&self) -> Self {
        NonZeroPtrMut::new(self.ptr())
    }
}

impl<T: ?Sized + 'static> fmt::Pointer for NonZeroPtrMut<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

impl<T: ?Sized + 'static> fmt::Debug for NonZeroPtrMut<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        <Self as fmt::Pointer>::fmt(self, f)
    }
}

impl<T: ?Sized + 'static> PartialEq for NonZeroPtrMut<T> {
    fn eq(&self, other: &Self) -> bool {
        self.ptr() == other.ptr()
    }
}

impl<T: ?Sized + 'static> Eq for NonZeroPtrMut<T> {}

impl<T: Sized + 'static> Hash for NonZeroPtrMut<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.ptr().hash(state)
    }
}

#[repr(C)]
pub struct Arc<T: ?Sized + 'static> {
    p: NonZeroPtrMut<ArcInner<T>>,
}

/// An Arc that is known to be uniquely owned
///
/// This lets us build arcs that we can mutate before
/// freezing, without needing to change the allocation
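///
/// A minimal sketch of the intended workflow (illustrative, not from the
/// original docs; assumes this crate is importable as `servo_arc`):
///
/// ```ignore
/// use servo_arc::UniqueArc;
///
/// let mut unique = UniqueArc::new(vec![1, 2, 3]);
/// unique.push(4);                   // mutate freely while uniquely owned
/// let shared = unique.shareable();  // freeze into a plain Arc<Vec<i32>>
/// assert_eq!(shared.len(), 4);
/// ```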
pub struct UniqueArc<T: ?Sized + 'static>(Arc<T>);

impl<T> UniqueArc<T> {
    #[inline]
    /// Construct a new UniqueArc
    pub fn new(data: T) -> Self {
        UniqueArc(Arc::new(data))
    }

    #[inline]
    /// Convert to a shareable Arc<T> once we're done using it
    pub fn shareable(self) -> Arc<T> {
        self.0
    }
}

impl<T> Deref for UniqueArc<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &*self.0
    }
}

impl<T> DerefMut for UniqueArc<T> {
    fn deref_mut(&mut self) -> &mut T {
        // We know this to be uniquely owned
        unsafe { &mut (*self.0.ptr()).data }
    }
}

unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

#[repr(C)]
struct ArcInner<T: ?Sized> {
    count: atomic::AtomicUsize,
    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    #[inline]
    pub fn new(data: T) -> Self {
        let x = Box::new(ArcInner {
            count: atomic::AtomicUsize::new(1),
            data: data,
        });
        Arc { p: NonZeroPtrMut::new(Box::into_raw(x)) }
    }

    #[inline]
    pub fn into_raw(this: Self) -> *const T {
        let ptr = unsafe { &((*this.ptr()).data) as *const _ };
        mem::forget(this);
        ptr
    }

    #[inline]
    unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need
        // to subtract the offset of the `data` field from the pointer.
        let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
        Arc {
            p: NonZeroPtrMut::new(ptr as *mut ArcInner<T>),
        }
    }

    /// Produce a pointer to the data that can be converted back
    /// to an arc
    #[inline]
    pub fn borrow_arc<'a>(&'a self) -> ArcBorrow<'a, T> {
        ArcBorrow(&**self)
    }

    /// Temporarily converts |self| into a bonafide RawOffsetArc and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline(always)]
    pub fn with_raw_offset_arc<F, U>(&self, f: F) -> U
        where F: FnOnce(&RawOffsetArc<T>) -> U
    {
        // Synthesize transient Arc, which never touches the refcount of the ArcInner.
        let transient = unsafe { NoDrop::new(Arc::into_raw_offset(ptr::read(self))) };

        // Expose the transient Arc to the callback, which may clone it if it wants.
        let result = f(&transient);

        // Forget the transient Arc to leave the refcount untouched.
        mem::forget(transient);

        // Forward the result.
        result
    }

    /// Returns the address on the heap of the Arc itself -- not the T within it -- for memory
    /// reporting.
    pub fn heap_ptr(&self) -> *const c_void {
        self.p.ptr() as *const ArcInner<T> as *const c_void
    }
}

impl<T: ?Sized> Arc<T> {
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &*self.ptr() }
    }

    // Non-inlined part of `drop`. Just invokes the destructor.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let _ = Box::from_raw(self.ptr());
    }

    #[inline]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr() == other.ptr()
    }

    fn ptr(&self) -> *mut ArcInner<T> {
        self.p.ptr()
    }
}

impl<T: ?Sized> Clone for Arc<T> {
    #[inline]
    fn clone(&self) -> Self {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], Increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().count.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            process::abort();
        }

        Arc { p: NonZeroPtrMut::new(self.ptr()) }
    }
}

impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    #[inline]
    pub fn make_mut(this: &mut Self) -> &mut T {
        if !this.is_unique() {
            // Another pointer exists; clone
            *this = Arc::new((**this).clone());
        }

        unsafe {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            &mut (*this.ptr()).data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    #[inline]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            unsafe {
                // See make_mut() for documentation of the threadsafety here.
                Some(&mut (*this.ptr()).data)
            }
        } else {
            None
        }
    }

    #[inline]
    pub fn is_unique(&self) -> bool {
        // We can use Relaxed here, but the justification is a bit subtle.
        //
        // The reason to use Acquire would be to synchronize with other threads
        // that are modifying the refcount with Release, i.e. to ensure that
        // their writes to memory guarded by this refcount are flushed. However,
        // we know that threads only modify the contents of the Arc when they
        // observe the refcount to be 1, and no other thread could observe that
        // because we're holding one strong reference here.
        self.inner().count.load(Relaxed) == 1
    }
}

impl<T: ?Sized> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object.
        if self.inner().count.fetch_sub(1, Release) != 1 {
            return;
        }

        // FIXME(bholley): Use the updated comment when [2] is merged.
        //
        // This load is needed to prevent reordering of use of the data and
        // deletion of the data.  Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` load. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this load, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: https://github.com/rust-lang/rust/pull/41714
        self.inner().count.load(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    fn eq(&self, other: &Arc<T>) -> bool {
        Self::ptr_eq(self, other) || *(*self) == *(*other)
    }

    fn ne(&self, other: &Arc<T>) -> bool {
        !Self::ptr_eq(self, other) && *(*self) != *(*other)
    }
}
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}
impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}
impl<T: ?Sized + Eq> Eq for Arc<T> {}

impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

impl<T: Default> Default for Arc<T> {
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

impl<T> From<T> for Arc<T> {
    #[inline]
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    #[inline]
    fn borrow(&self) -> &T {
        &**self
    }
}

impl<T: ?Sized> AsRef<T> for Arc<T> {
    #[inline]
    fn as_ref(&self) -> &T {
        &**self
    }
}

unsafe impl<T: ?Sized> StableDeref for Arc<T> {}
unsafe impl<T: ?Sized> CloneStableDeref for Arc<T> {}

#[cfg(feature = "servo")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T>
{
    fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
    where
        D: ::serde::de::Deserializer<'de>,
    {
        T::deserialize(deserializer).map(Arc::new)
    }
}

#[cfg(feature = "servo")]
impl<T: Serialize> Serialize for Arc<T>
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ::serde::ser::Serializer,
    {
        (**self).serialize(serializer)
    }
}

/// Structure to allow Arc-managing some fixed-sized data and a variably-sized
/// slice in a single allocation.
#[derive(Debug, Eq, PartialEq, PartialOrd)]
pub struct HeaderSlice<H, T: ?Sized> {
    /// The fixed-sized data.
    pub header: H,

    /// The dynamically-sized data.
    pub slice: T,
}

#[inline(always)]
fn divide_rounding_up(dividend: usize, divisor: usize) -> usize {
    (dividend + divisor - 1) / divisor
}

impl<H, T> Arc<HeaderSlice<H, [T]>> {
    /// Creates an Arc for a HeaderSlice using the given header struct and
    /// iterator to generate the slice. The resulting Arc will be fat.
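    ///
    /// A minimal sketch of how this might be called (illustrative, not from
    /// the original docs; assumes this crate is importable as `servo_arc`):
    ///
    /// ```ignore
    /// use servo_arc::{Arc, HeaderSlice};
    ///
    /// // One allocation holding a header and a [u8] slice, behind a fat pointer.
    /// let x: Arc<HeaderSlice<&str, [u8]>> =
    ///     Arc::from_header_and_iter("bytes", vec![1u8, 2, 3].into_iter());
    /// assert_eq!(x.header, "bytes");
    /// assert_eq!(&x.slice, &[1u8, 2, 3][..]);
    /// ```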
    #[inline]
    pub fn from_header_and_iter<I>(header: H, mut items: I) -> Self
        where I: Iterator<Item=T> + ExactSizeIterator
    {
        use ::std::mem::size_of;
        assert_ne!(size_of::<T>(), 0, "Need to think about ZST");

        // Compute the required size for the allocation.
        let num_items = items.len();
        let size = {
            // First, determine the alignment of a hypothetical pointer to a
            // HeaderSlice.
            let fake_slice_ptr_align: usize = mem::align_of::<ArcInner<HeaderSlice<H, [T; 1]>>>();

            // Next, synthesize a totally garbage (but properly aligned) pointer
            // to a sequence of T.
            let fake_slice_ptr = fake_slice_ptr_align as *const T;

            // Convert that sequence to a fat pointer. The address component of
            // the fat pointer will be garbage, but the length will be correct.
            let fake_slice = unsafe { slice::from_raw_parts(fake_slice_ptr, num_items) };

            // Pretend the garbage address points to our allocation target (with
            // a trailing sequence of T), rather than just a sequence of T.
            let fake_ptr = fake_slice as *const [T] as *const ArcInner<HeaderSlice<H, [T]>>;
            let fake_ref: &ArcInner<HeaderSlice<H, [T]>> = unsafe { &*fake_ptr };

            // Use size_of_val, which will combine static information about the
            // type with the length from the fat pointer. The garbage address
            // will not be used.
            mem::size_of_val(fake_ref)
        };

        let ptr: *mut ArcInner<HeaderSlice<H, [T]>>;
        unsafe {
            // Allocate the buffer. We use Vec because the underlying allocation
            // machinery isn't available in stable Rust.
            //
            // To avoid alignment issues, we allocate words rather than bytes,
            // rounding up to the nearest word size.
            let buffer = if mem::align_of::<T>() <= mem::align_of::<usize>() {
                Self::allocate_buffer::<usize>(size)
            } else if mem::align_of::<T>() <= mem::align_of::<u64>() {
                // On 32-bit platforms <T> may have 8-byte alignment while usize has 4-byte alignment.
                // Use u64 to avoid over-alignment.
                // This branch will compile away in optimized builds.
                Self::allocate_buffer::<u64>(size)
            } else {
                panic!("Over-aligned type not handled");
            };

            // Synthesize the fat pointer. We do this by claiming we have a direct
            // pointer to a [T], and then changing the type of the borrow. The key
            // point here is that the length portion of the fat pointer applies
            // only to the number of elements in the dynamically-sized portion of
            // the type, so the value will be the same whether it points to a [T]
            // or something else with a [T] as its last member.
            let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items);
            ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>;

            // Write the data.
            //
            // Note that any panics here (i.e. from the iterator) are safe, since
            // we'll just leak the uninitialized memory.
            ptr::write(&mut ((*ptr).count), atomic::AtomicUsize::new(1));
            ptr::write(&mut ((*ptr).data.header), header);
            let mut current: *mut T = &mut (*ptr).data.slice[0];
            for _ in 0..num_items {
                ptr::write(current, items.next().expect("ExactSizeIterator over-reported length"));
                current = current.offset(1);
            }
            assert!(items.next().is_none(), "ExactSizeIterator under-reported length");

            // We should have consumed the buffer exactly.
            debug_assert_eq!(current as *mut u8, buffer.offset(size as isize));
        }

        // Return the fat Arc.
        assert_eq!(size_of::<Self>(), size_of::<usize>() * 2, "The Arc will be fat");
        Arc { p: NonZeroPtrMut::new(ptr) }
    }

    #[inline]
    unsafe fn allocate_buffer<W>(size: usize) -> *mut u8 {
        let words_to_allocate = divide_rounding_up(size, mem::size_of::<W>());
        let mut vec = Vec::<W>::with_capacity(words_to_allocate);
        vec.set_len(words_to_allocate);
        Box::into_raw(vec.into_boxed_slice()) as *mut W as *mut u8
    }
}

/// Header data with an inline length. Consumers that use HeaderWithLength as the
/// Header type in HeaderSlice can take advantage of ThinArc.
#[derive(Debug, Eq, PartialEq, PartialOrd)]
pub struct HeaderWithLength<H> {
    /// The fixed-sized data.
    pub header: H,

    /// The slice length.
    length: usize,
}

impl<H> HeaderWithLength<H> {
    /// Creates a new HeaderWithLength.
    pub fn new(header: H, length: usize) -> Self {
        HeaderWithLength {
            header: header,
            length: length,
        }
    }
}

type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>;
pub struct ThinArc<H: 'static, T: 'static> {
    ptr: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>,
}

unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {}
unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {}

// Synthesize a fat pointer from a thin pointer.
//
// See the comment around the analogous operation in from_header_and_iter.
fn thin_to_thick<H, T>(thin: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>)
    -> *mut ArcInner<HeaderSliceWithLength<H, [T]>>
{
    let len = unsafe { (*thin).data.header.length };
    let fake_slice: *mut [T] = unsafe {
        slice::from_raw_parts_mut(thin as *mut T, len)
    };

    fake_slice as *mut ArcInner<HeaderSliceWithLength<H, [T]>>
}

impl<H: 'static, T: 'static> ThinArc<H, T> {
    /// Temporarily converts |self| into a bonafide Arc and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline]
    pub fn with_arc<F, U>(&self, f: F) -> U
        where F: FnOnce(&Arc<HeaderSliceWithLength<H, [T]>>) -> U
    {
        // Synthesize transient Arc, which never touches the refcount of the ArcInner.
        let transient = NoDrop::new(Arc {
            p: NonZeroPtrMut::new(thin_to_thick(self.ptr))
        });

        // Expose the transient Arc to the callback, which may clone it if it wants.
        let result = f(&transient);

        // Forget the transient Arc to leave the refcount untouched.
        // XXXManishearth this can be removed when unions stabilize,
        // since then NoDrop becomes zero overhead
        mem::forget(transient);

        // Forward the result.
        result
    }

    /// Returns the address on the heap of the ThinArc itself -- not the T
    /// within it -- for memory reporting.
    #[inline]
    pub fn heap_ptr(&self) -> *const c_void {
        self.ptr as *const ArcInner<T> as *const c_void
    }
}

impl<H, T> Deref for ThinArc<H, T> {
    type Target = HeaderSliceWithLength<H, [T]>;

    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &(*thin_to_thick(self.ptr)).data }
    }
}

impl<H: 'static, T: 'static> Clone for ThinArc<H, T> {
    #[inline]
    fn clone(&self) -> Self {
        ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
    }
}

impl<H: 'static, T: 'static> Drop for ThinArc<H, T> {
    #[inline]
    fn drop(&mut self) {
        let _ = Arc::from_thin(ThinArc { ptr: self.ptr });
    }
}

impl<H: 'static, T: 'static> Arc<HeaderSliceWithLength<H, [T]>> {
    /// Converts an Arc into a ThinArc. This consumes the Arc, so the refcount
    /// is not modified.
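    ///
    /// Illustrative round-trip sketch (not from the original docs; assumes the
    /// crate is importable as `servo_arc`):
    ///
    /// ```ignore
    /// use servo_arc::{Arc, HeaderWithLength, ThinArc};
    ///
    /// let header = HeaderWithLength::new((), 2);
    /// let fat = Arc::from_header_and_iter(header, vec![10u32, 20].into_iter());
    /// let thin: ThinArc<(), u32> = Arc::into_thin(fat); // one word wide
    /// let fat_again = Arc::from_thin(thin);             // back to a fat Arc
    /// assert_eq!(fat_again.slice[1], 20);
    /// ```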
    #[inline]
    pub fn into_thin(a: Self) -> ThinArc<H, T> {
        assert_eq!(a.header.length, a.slice.len(),
                "Length needs to be correct for ThinArc to work");
        let fat_ptr: *mut ArcInner<HeaderSliceWithLength<H, [T]>> = a.ptr();
        mem::forget(a);
        let thin_ptr = fat_ptr as *mut [usize] as *mut usize;
        ThinArc {
            ptr: thin_ptr as *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>
        }
    }

    /// Converts a ThinArc into an Arc. This consumes the ThinArc, so the refcount
    /// is not modified.
    #[inline]
    pub fn from_thin(a: ThinArc<H, T>) -> Self {
        let ptr = thin_to_thick(a.ptr);
        mem::forget(a);
        Arc {
            p: NonZeroPtrMut::new(ptr)
        }
    }
}

impl<H: PartialEq + 'static, T: PartialEq + 'static> PartialEq for ThinArc<H, T> {
    #[inline]
    fn eq(&self, other: &ThinArc<H, T>) -> bool {
        ThinArc::with_arc(self, |a| {
            ThinArc::with_arc(other, |b| {
                *a == *b
            })
        })
    }
}

impl<H: Eq + 'static, T: Eq + 'static> Eq for ThinArc<H, T> {}

/// An Arc, except it holds a pointer to the T instead of to the
/// entire ArcInner.
///
/// ```text
///  Arc<T>    RawOffsetArc<T>
///   |          |
///   v          v
///  ---------------------
/// | RefCount | T (data) | [ArcInner<T>]
///  ---------------------
/// ```
///
/// This means that this is a direct pointer to
/// its contained data (and can be read from by both C++ and Rust),
/// but we can also convert it to a "regular" Arc<T> by removing the offset.
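///
/// A minimal sketch of the conversions involved (illustrative, not from the
/// original docs; assumes the crate is importable as `servo_arc`):
///
/// ```ignore
/// use servo_arc::{Arc, RawOffsetArc};
///
/// let arc = Arc::new(String::from("style data"));
/// let raw: RawOffsetArc<String> = Arc::into_raw_offset(arc); // points at the T
/// assert_eq!(raw.len(), "style data".len());                 // Deref to the String
/// let arc_again: Arc<String> = Arc::from_raw_offset(raw);    // offset restored
/// ```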
#[derive(Eq)]
#[repr(C)]
pub struct RawOffsetArc<T: 'static> {
    ptr: NonZeroPtrMut<T>,
}

unsafe impl<T: 'static + Sync + Send> Send for RawOffsetArc<T> {}
unsafe impl<T: 'static + Sync + Send> Sync for RawOffsetArc<T> {}

impl<T: 'static> Deref for RawOffsetArc<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.ptr.ptr() }
    }
}

impl<T: 'static> Clone for RawOffsetArc<T> {
    #[inline]
    fn clone(&self) -> Self {
        Arc::into_raw_offset(self.clone_arc())
    }
}

impl<T: 'static> Drop for RawOffsetArc<T> {
    fn drop(&mut self) {
        let _ = Arc::from_raw_offset(RawOffsetArc { ptr: self.ptr.clone() });
    }
}

impl<T: fmt::Debug + 'static> fmt::Debug for RawOffsetArc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: PartialEq> PartialEq for RawOffsetArc<T> {
    fn eq(&self, other: &RawOffsetArc<T>) -> bool {
        *(*self) == *(*other)
    }

    fn ne(&self, other: &RawOffsetArc<T>) -> bool {
        *(*self) != *(*other)
    }
}

impl<T: 'static> RawOffsetArc<T> {
    /// Temporarily converts |self| into a bonafide Arc and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline]
    pub fn with_arc<F, U>(&self, f: F) -> U
        where F: FnOnce(&Arc<T>) -> U
    {
        // Synthesize transient Arc, which never touches the refcount of the ArcInner.
        let transient = unsafe { NoDrop::new(Arc::from_raw(self.ptr.ptr())) };

        // Expose the transient Arc to the callback, which may clone it if it wants.
        let result = f(&transient);

        // Forget the transient Arc to leave the refcount untouched.
        // XXXManishearth this can be removed when unions stabilize,
        // since then NoDrop becomes zero overhead
        mem::forget(transient);

        // Forward the result.
        result
    }

    /// If uniquely owned, provide a mutable reference
    /// Else create a copy, and mutate that
    #[inline]
    pub fn make_mut(&mut self) -> &mut T where T: Clone {
        unsafe {
            // extract the RawOffsetArc as an owned variable
            let this = ptr::read(self);
            // treat it as a real Arc
            let mut arc = Arc::from_raw_offset(this);
            // obtain the mutable reference. Cast away the lifetime
            // This may mutate `arc`
            let ret = Arc::make_mut(&mut arc) as *mut _;
            // Store the possibly-mutated arc back inside, after converting
            // it to a RawOffsetArc again
            ptr::write(self, Arc::into_raw_offset(arc));
            &mut *ret
        }
    }

    /// Clone it as an Arc
    #[inline]
    pub fn clone_arc(&self) -> Arc<T> {
        RawOffsetArc::with_arc(self, |a| a.clone())
    }

    /// Produce a pointer to the data that can be converted back
    /// to an arc
    #[inline]
    pub fn borrow_arc<'a>(&'a self) -> ArcBorrow<'a, T> {
        ArcBorrow(&**self)
    }
}

impl<T: 'static> Arc<T> {
    /// Converts an Arc into a RawOffsetArc. This consumes the Arc, so the refcount
    /// is not modified.
    #[inline]
    pub fn into_raw_offset(a: Self) -> RawOffsetArc<T> {
        RawOffsetArc {
            ptr: NonZeroPtrMut::new(Arc::into_raw(a) as *mut T),
        }
    }

    /// Converts a RawOffsetArc into an Arc. This consumes the RawOffsetArc, so the refcount
    /// is not modified.
    #[inline]
    pub fn from_raw_offset(a: RawOffsetArc<T>) -> Self {
        let ptr = a.ptr.ptr();
        mem::forget(a);
        unsafe { Arc::from_raw(ptr) }
    }
}

/// A "borrowed Arc". This is a pointer to
/// a T that is known to have been allocated within an
/// Arc.
///
/// This is equivalent in guarantees to `&Arc<T>`, but it is
/// a bit more flexible. To obtain an `&Arc<T>` you must have
/// an Arc<T> instance somewhere pinned down until we're done with it.
///
/// However, Gecko hands us refcounted things as pointers to T directly,
/// so we have to conjure up a temporary Arc on the stack each time. The
/// same happens when the object is managed by a RawOffsetArc.
///
/// ArcBorrow lets us deal with borrows of known-refcounted objects
/// without needing to worry about how they're actually stored.
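///
/// A minimal sketch (illustrative, not from the original docs; assumes the
/// crate is importable as `servo_arc`):
///
/// ```ignore
/// use servo_arc::Arc;
///
/// let owner = Arc::new(7u32);
/// let borrow = owner.borrow_arc();   // no refcount traffic
/// assert_eq!(*borrow, 7);
/// let second = borrow.clone_arc();   // bumps the refcount
/// assert!(Arc::ptr_eq(&owner, &second));
/// ```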
#[derive(Eq, PartialEq)]
pub struct ArcBorrow<'a, T: 'a>(&'a T);

impl<'a, T> Copy for ArcBorrow<'a, T> {}
impl<'a, T> Clone for ArcBorrow<'a, T> {
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
}

impl<'a, T> ArcBorrow<'a, T> {
    #[inline]
    pub fn clone_arc(&self) -> Arc<T> {
        let arc = unsafe { Arc::from_raw(self.0) };
        // addref it!
        mem::forget(arc.clone());
        arc
    }

    /// For constructing from a reference known to be Arc-backed,
    /// e.g. if we obtain such a reference over FFI
    #[inline]
    pub unsafe fn from_ref(r: &'a T) -> Self {
        ArcBorrow(r)
    }

    #[inline]
    pub fn with_arc<F, U>(&self, f: F) -> U where F: FnOnce(&Arc<T>) -> U, T: 'static {
        // Synthesize transient Arc, which never touches the refcount.
        let transient = unsafe { NoDrop::new(Arc::from_raw(self.0)) };

        // Expose the transient Arc to the callback, which may clone it if it wants.
        let result = f(&transient);

        // Forget the transient Arc to leave the refcount untouched.
        // XXXManishearth this can be removed when unions stabilize,
        // since then NoDrop becomes zero overhead
        mem::forget(transient);

        // Forward the result.
        result
    }
}

impl<'a, T> Deref for ArcBorrow<'a, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &*self.0
    }
}

#[cfg(test)]
mod tests {
    use std::clone::Clone;
    use std::ops::Drop;
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};
    use super::{Arc, HeaderWithLength, ThinArc};

    #[derive(PartialEq)]
    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe { (*self.0).fetch_add(1, SeqCst); }
        }
    }

    #[test]
    fn slices_and_thin() {
        let mut canary = atomic::AtomicUsize::new(0);
        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
        let v = vec![5, 6];
        let header = HeaderWithLength::new(c, v.len());
        {
            let x = Arc::into_thin(Arc::from_header_and_iter(header, v.into_iter()));
            let y = ThinArc::with_arc(&x, |q| q.clone());
            let _ = y.clone();
            let _ = x == x;
            Arc::from_thin(x.clone());
        }
        assert_eq!(canary.load(Acquire), 1);
    }
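
    // Added illustrative test (not part of the original suite): exercises the
    // uniqueness-sensitive APIs on a plain sized Arc.
    #[test]
    fn make_mut_and_ptr_eq() {
        let mut a = Arc::new(10);
        let b = a.clone();
        assert!(Arc::ptr_eq(&a, &b));

        // Two owners exist, so get_mut must refuse to hand out a &mut.
        assert!(Arc::get_mut(&mut a).is_none());

        // make_mut clones the data into a fresh allocation rather than
        // mutating the shared one.
        *Arc::make_mut(&mut a) += 1;
        assert_eq!(*a, 11);
        assert_eq!(*b, 10);
        assert!(!Arc::ptr_eq(&a, &b));

        // Once `b` is gone, `a` is unique again and get_mut succeeds.
        drop(b);
        *Arc::get_mut(&mut a).unwrap() += 1;
        assert_eq!(*a, 12);
    }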
}