use crate::loom::cell::UnsafeCell;
use crate::util::slab::{Address, Entry, Slot, TransferStack, INITIAL_PAGE_SIZE};

use std::fmt;

/// Data accessed only by the thread that owns the shard.
pub(crate) struct Local {
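    /// Index of the first slot on this page's local free list, or the
    /// `Address::NULL` sentinel once the local list has been exhausted.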
    head: UnsafeCell<usize>,
}

/// Data accessed by any thread.
pub(crate) struct Shared<T> {
    remote: TransferStack,
    size: usize,
    prev_sz: usize,
    slab: UnsafeCell<Option<Box<[Slot<T>]>>>,
}
/// Returns the size of the page at index `n`.
pub(super) fn size(n: usize) -> usize {
    INITIAL_PAGE_SIZE << n
}
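
// Page sizes grow geometrically. For example, assuming an initial page size
// of 32, `size(0) == 32`, `size(1) == 64`, `size(2) == 128`, and so on. A
// slot's global index is therefore its offset within its page plus `prev_sz`,
// the combined size of all preceding pages.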

impl Local {
    pub(crate) fn new() -> Self {
        Self {
            head: UnsafeCell::new(0),
        }
    }

    fn head(&self) -> usize {
        self.head.with(|head| unsafe { *head })
    }

    fn set_head(&self, new_head: usize) {
        self.head.with_mut(|head| unsafe {
            *head = new_head;
        })
    }
}
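
// A slot's lifecycle, as sketched by the methods below: the owning thread
// calls `alloc` to reserve a slot and receives an `Address`; `get` resolves
// that address while the entry is live; the slot is released either with
// `remove_local` (called from the owning thread) or `remove_remote` (called
// from any other thread, which goes through the `remote` transfer stack).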
impl<T: Entry> Shared<T> {
    pub(crate) fn new(size: usize, prev_sz: usize) -> Shared<T> {
        Self {
            prev_sz,
            size,
            remote: TransferStack::new(),
            slab: UnsafeCell::new(None),
        }
    }

    /// Allocates storage for this page if it does not already exist.
    ///
    /// This requires unique access to the page (e.g. it is called from the
    /// thread that owns the page, or, in the case of `SingleShard`, while the
    /// lock is held). In order to indicate this, a reference to the page's
    /// `Local` data is taken by this function; the `Local` argument is not
    /// actually used, but requiring it ensures that this is only called when
    /// local access is held.
    #[cold]
    fn alloc_page(&self, _: &Local) {
        debug_assert!(self.slab.with(|s| unsafe { (*s).is_none() }));

        let mut slab = Vec::with_capacity(self.size);
        slab.extend((1..self.size).map(Slot::new));
        slab.push(Slot::new(Address::NULL));
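
        // At this point the free list is threaded through the slots
        // themselves: slot 0's `next` is 1, slot 1's is 2, and so on, with
        // the final slot's `next` set to `Address::NULL` to terminate the
        // list.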

        self.slab.with_mut(|s| {
            // this mut access is safe — it only occurs to initially
            // allocate the page, which only happens on this thread; if the
            // page has not yet been allocated, other threads will not try
            // to access it yet.
            unsafe {
                *s = Some(slab.into_boxed_slice());
            }
        });
    }

    pub(crate) fn alloc(&self, local: &Local) -> Option<Address> {
        let head = local.head();

        // are there any items on the local free list? (fast path)
        let head = if head < self.size {
            head
        } else {
            // if the local free list is empty, pop all the items on the remote
            // free list onto the local free list.
            self.remote.pop_all()?
        };

        // if the head is still null, both the local and remote free lists are
        // empty --- we can't fit any more items on this page.
        if head == Address::NULL {
            return None;
        }

        // do we need to allocate storage for this page?
        let page_needs_alloc = self.slab.with(|s| unsafe { (*s).is_none() });
        if page_needs_alloc {
            self.alloc_page(local);
        }
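
        // Pop the head slot off the local free list, and capture its current
        // generation; the generation is packed into the returned `Address`
        // so that stale addresses can later be rejected.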
        let gen = self.slab.with(|slab| {
            let slab = unsafe { &*(slab) }
                .as_ref()
                .expect("page must have been allocated to alloc!");

            let slot = &slab[head];

            local.set_head(slot.next());
            slot.generation()
        });

        let index = head + self.prev_sz;

        Some(Address::new(index, gen))
    }

    pub(crate) fn get(&self, addr: Address) -> Option<&T> {
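        // Translate the global slot index back into an offset within this
        // page; `prev_sz` is the combined size of all preceding pages.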
        let page_offset = addr.slot() - self.prev_sz;

        self.slab
            .with(|slab| unsafe { &*slab }.as_ref()?.get(page_offset))
            .map(|slot| slot.get())
    }

    pub(crate) fn remove_local(&self, local: &Local, addr: Address) {
        let offset = addr.slot() - self.prev_sz;

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref();

            let slot = if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
                slot
            } else {
                return;
            };

            if slot.reset(addr.generation()) {
                slot.set_next(local.head());
                local.set_head(offset);
            }
        })
    }
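
    // Unlike `remove_local`, a remote removal cannot touch `local.head`
    // directly; instead, the freed slot is pushed onto the `remote` transfer
    // stack, and the owning thread drains it into the local free list the
    // next time `alloc` finds that list empty.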

    pub(crate) fn remove_remote(&self, addr: Address) {
        let offset = addr.slot() - self.prev_sz;

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref();

            let slot = if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
                slot
            } else {
                return;
            };

            if !slot.reset(addr.generation()) {
                return;
            }

            self.remote.push(offset, |next| slot.set_next(next));
        })
    }
}

impl fmt::Debug for Local {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.head.with(|head| {
            let head = unsafe { *head };
            f.debug_struct("Local")
                .field("head", &format_args!("{:#0x}", head))
                .finish()
        })
    }
}

impl<T> fmt::Debug for Shared<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Shared")
            .field("remote", &self.remote)
            .field("prev_sz", &self.prev_sz)
            .field("size", &self.size)
            // .field("slab", &self.slab)
            .finish()
    }
}