use crate::page::{
    slot::{Generation, RefCount},
    Addr,
};
use crate::Pack;
use std::{fmt, marker::PhantomData};

/// Configuration parameters which can be overridden to tune the behavior of a slab.
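///
/// # Examples
///
/// A custom configuration overrides only the constants it cares about; the
/// rest fall back to their defaults. A minimal sketch (the `MyConfig` name
/// and the values chosen here are purely illustrative; whether a given
/// combination is valid depends on the other parameters and the platform's
/// word size):
///
/// ```rust
/// use sharded_slab::{Config, Slab};
///
/// struct MyConfig;
///
/// impl Config for MyConfig {
///     // Larger initial pages, fewer concurrent threads than the default.
///     const INITIAL_PAGE_SIZE: usize = 64;
///     const MAX_THREADS: usize = 64;
/// }
///
/// let slab = Slab::new_with_config::<MyConfig>();
/// let key = slab.insert(42).unwrap();
/// assert_eq!(slab.get(key).unwrap(), 42);
/// ```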
pub trait Config: Sized {
    /// The maximum number of threads which can access the slab.
    ///
    /// This value (rounded to a power of two) determines the number of shards
    /// in the slab. If a thread is created, accesses the slab, and then terminates,
    /// its shard may be reused and thus does not count against the maximum
    /// number of threads once the thread has terminated.
    const MAX_THREADS: usize = DefaultConfig::MAX_THREADS;

    /// The maximum number of pages in each shard in the slab.
    ///
    /// This value, in combination with `INITIAL_PAGE_SIZE`, determines how many
    /// bits of each index are used to represent page addresses.
    const MAX_PAGES: usize = DefaultConfig::MAX_PAGES;

    /// The size of the first page in each shard.
    ///
    /// When a page in a shard has been filled with values, a new page
    /// will be allocated that is twice as large as the previous page. Thus, the
    /// second page will be twice this size, the third will be four times
    /// this size, and so on.
    ///
    /// Note that page sizes must be powers of two. If this value is not a power
    /// of two, it will be rounded up to the next power of two.
    const INITIAL_PAGE_SIZE: usize = DefaultConfig::INITIAL_PAGE_SIZE;

    /// Sets a number of high-order bits in each index which are reserved from
    /// user code.
    ///
    /// Note that these bits are taken from the generation counter; if the page
    /// address and thread IDs are configured to use a large number of bits,
    /// reserving additional bits will decrease the period of the generation
    /// counter. These should thus be used relatively sparingly, to ensure that
    /// generation counters are able to effectively prevent the ABA problem.
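    ///
    /// For example (an illustrative sketch; the `ReservedConfig` name is
    /// arbitrary, and whether a given number of reserved bits fits depends on
    /// the other parameters and the platform's word size; constructing a slab
    /// with an invalid configuration will panic):
    ///
    /// ```rust
    /// use sharded_slab::Config;
    ///
    /// struct ReservedConfig;
    ///
    /// impl Config for ReservedConfig {
    ///     // Reserve the 8 high-order bits of each index; these bits are
    ///     // taken out of the generation counter.
    ///     const RESERVED_BITS: usize = 8;
    /// }
    /// ```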
    const RESERVED_BITS: usize = 0;
}

pub(crate) trait CfgPrivate: Config {
    /// The total number of bits in a packed index used by the slab itself.
    const USED_BITS: usize = Generation::<Self>::LEN + Generation::<Self>::SHIFT;
    /// `INITIAL_PAGE_SIZE`, rounded up to the next power of two.
    const INITIAL_SZ: usize = next_pow2(Self::INITIAL_PAGE_SIZE);
    /// The number of shards, based on `MAX_THREADS` rounded to a power of two.
    const MAX_SHARDS: usize = next_pow2(Self::MAX_THREADS - 1);
    /// The shift applied when mapping an address to the index of its page.
    const ADDR_INDEX_SHIFT: usize = Self::INITIAL_SZ.trailing_zeros() as usize + 1;

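    /// Returns the size of the `n`th page in a shard. Page sizes double with
    /// each successive page, so page `n` holds `INITIAL_SZ * 2^n` slots.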
    fn page_size(n: usize) -> usize {
        Self::INITIAL_SZ * 2usize.pow(n as _)
    }

    fn debug() -> DebugConfig<Self> {
        DebugConfig { _cfg: PhantomData }
    }

    fn validate() {
        assert!(
            Self::INITIAL_SZ.is_power_of_two(),
            "invalid Config: {:#?}",
            Self::debug(),
        );
        assert!(
            Self::INITIAL_SZ <= Addr::<Self>::BITS,
            "invalid Config: {:#?}",
            Self::debug()
        );

        assert!(
            Generation::<Self>::BITS >= 3,
            "invalid Config: {:#?}\ngeneration counter should be at least 3 bits!",
            Self::debug()
        );

        assert!(
            Self::USED_BITS <= WIDTH,
            "invalid Config: {:#?}\ntotal number of bits per index is too large to fit in a word!",
            Self::debug()
        );

        assert!(
            WIDTH - Self::USED_BITS >= Self::RESERVED_BITS,
            "invalid Config: {:#?}\nindices are too large to fit reserved bits!",
            Self::debug()
        );
        assert!(
            RefCount::<Self>::MAX > 1,
            "invalid Config: {:#?}\nmaximum concurrent references would be {}",
            Self::debug(),
            RefCount::<Self>::MAX,
        );
    }

    #[inline(always)]
    fn unpack<A: Pack<Self>>(packed: usize) -> A {
        A::from_packed(packed)
    }

    #[inline(always)]
    fn unpack_addr(packed: usize) -> Addr<Self> {
        Self::unpack(packed)
    }

    #[inline(always)]
    fn unpack_tid(packed: usize) -> crate::Tid<Self> {
        Self::unpack(packed)
    }

    #[inline(always)]
    fn unpack_gen(packed: usize) -> Generation<Self> {
        Self::unpack(packed)
    }
}

impl<C: Config> CfgPrivate for C {}

/// Default slab configuration values.
#[derive(Copy, Clone)]
pub struct DefaultConfig {
    _p: (),
}

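/// A `fmt::Debug` adapter that prints a `Config`'s computed parameters; used
/// by `validate`'s panic messages and `DefaultConfig`'s `Debug` impl.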
pub(crate) struct DebugConfig<C: Config> {
    _cfg: PhantomData<fn(C)>,
}

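/// The width of a `usize` on the target platform, in bits.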
pub(crate) const WIDTH: usize = std::mem::size_of::<usize>() * 8;

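/// Returns `n` rounded up to the next power of two; values which are already
/// powers of two are returned unchanged (e.g. `next_pow2(3) == 4`, while
/// `next_pow2(4) == 4`).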
pub(crate) const fn next_pow2(n: usize) -> usize {
    let pow2 = n.count_ones() == 1;
    let zeros = n.leading_zeros();
    1 << (WIDTH - zeros as usize - pow2 as usize)
}

// === impl DefaultConfig ===

impl Config for DefaultConfig {
    const INITIAL_PAGE_SIZE: usize = 32;

    #[cfg(target_pointer_width = "64")]
    const MAX_THREADS: usize = 4096;
    #[cfg(target_pointer_width = "32")]
    // TODO(eliza): can we find enough bits to give 32-bit platforms more threads?
    const MAX_THREADS: usize = 128;

    const MAX_PAGES: usize = WIDTH / 2;
}

impl fmt::Debug for DefaultConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Self::debug().fmt(f)
    }
}

impl<C: Config> fmt::Debug for DebugConfig<C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct(std::any::type_name::<C>())
            .field("initial_page_size", &C::INITIAL_SZ)
            .field("max_shards", &C::MAX_SHARDS)
            .field("max_pages", &C::MAX_PAGES)
            .field("used_bits", &C::USED_BITS)
            .field("reserved_bits", &C::RESERVED_BITS)
            .field("pointer_width", &WIDTH)
            .field("max_concurrent_references", &RefCount::<C>::MAX)
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_util;
    use crate::Slab;

    #[test]
    #[cfg_attr(loom, ignore)]
    #[should_panic]
    fn validates_max_refs() {
        struct GiantGenConfig;

        // Configure the slab with a very large number of bits for the generation
        // counter. This will only leave 1 bit to use for the slot reference
        // counter, which will fail to validate.
        impl Config for GiantGenConfig {
            const INITIAL_PAGE_SIZE: usize = 1;
            const MAX_THREADS: usize = 1;
            const MAX_PAGES: usize = 1;
        }

        let _slab = Slab::<usize>::new_with_config::<GiantGenConfig>();
    }

    #[test]
    #[cfg_attr(loom, ignore)]
    fn big() {
        let slab = Slab::new();

        for i in 0..10000 {
            println!("{:?}", i);
            let k = slab.insert(i).expect("insert");
            assert_eq!(slab.get(k).expect("get"), i);
        }
    }

    #[test]
    #[cfg_attr(loom, ignore)]
    fn custom_page_sz() {
        let slab = Slab::new_with_config::<test_util::TinyConfig>();

        for i in 0..4096 {
            println!("{}", i);
            let k = slab.insert(i).expect("insert");
            assert_eq!(slab.get(k).expect("get"), i);
        }
    }
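
    // A quick sanity check for `next_pow2`: values which are already powers
    // of two should be returned unchanged, and everything else should be
    // rounded up.
    #[test]
    fn next_pow2_rounds_up() {
        assert_eq!(next_pow2(1), 1);
        assert_eq!(next_pow2(2), 2);
        assert_eq!(next_pow2(3), 4);
        assert_eq!(next_pow2(31), 32);
        assert_eq!(next_pow2(32), 32);
    }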
}