use std::default::Default;
use std::fmt::{Debug, Error as FormatError, Formatter};
use std::iter::repeat;
use std::sync::atomic::{AtomicUsize, Ordering};

use atom::AtomSetOnce;

use util::*;
use {BitSetLike, DrainableBitSet};

/// This is similar to a [`BitSet`] but allows setting of values
/// without unique ownership of the structure.
///
/// An `AtomicBitSet` has the ability to add an item to the set
/// without unique ownership (given that the set is big enough).
/// Removing elements does require unique ownership as an effect
/// of the hierarchy it holds. In the worst case multiple writers
/// set the same bit, but only one of them is told that it did so.
///
/// It is possible to atomically remove from the set, but not at the
/// same time as atomically adding. This is because there is no way
/// to know if layers 1-3 would be left in a consistent state if they
/// were being cleared and set at the same time.
///
/// `AtomicBitSet` resolves this race by disallowing atomic
/// clearing of bits.
///
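/// # Examples
///
/// A minimal sketch of single-owner use; it assumes this type is
/// re-exported from a crate root named `hibitset`:
///
/// ```
/// # use hibitset::AtomicBitSet;
/// let mut set = AtomicBitSet::new();
/// assert!(!set.add(3)); // `false`: the bit was not set before
/// assert!(set.contains(3));
/// assert!(set.remove(3)); // removal requires `&mut self`
/// assert!(!set.contains(3));
/// ```
///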
/// [`BitSet`]: ../struct.BitSet.html
#[derive(Debug)]
pub struct AtomicBitSet {
    layer3: AtomicUsize,
    layer2: Vec<AtomicUsize>,
    layer1: Vec<AtomicBlock>,
}

impl AtomicBitSet {
    /// Creates an empty `AtomicBitSet`.
    pub fn new() -> AtomicBitSet {
        Default::default()
    }

    /// Adds `id` to the `AtomicBitSet`. Returns `true` if the value was
    /// already in the set.
    ///
    /// Because we cannot safely extend an `AtomicBitSet` without unique
    /// ownership, this will panic if `id` is out of range.
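    ///
    /// A hedged sketch of concurrent insertion; the crate name
    /// `hibitset` is an assumption:
    ///
    /// ```
    /// # use hibitset::AtomicBitSet;
    /// use std::sync::Arc;
    /// use std::thread;
    ///
    /// let set = Arc::new(AtomicBitSet::new());
    /// let handles: Vec<_> = (0..4)
    ///     .map(|_| {
    ///         let set = Arc::clone(&set);
    ///         // `add_atomic` needs only `&self`, so it can be called
    ///         // through the shared `Arc` from several threads at once.
    ///         thread::spawn(move || set.add_atomic(42))
    ///     })
    ///     .collect();
    /// // The underlying `fetch_or` guarantees that exactly one thread
    /// // observes the bit as previously unset.
    /// let firsts = handles
    ///     .into_iter()
    ///     .map(|h| h.join().unwrap())
    ///     .filter(|&was_set| !was_set)
    ///     .count();
    /// assert_eq!(firsts, 1);
    /// assert!(set.contains(42));
    /// ```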
    #[inline]
    pub fn add_atomic(&self, id: Index) -> bool {
        let (_, p1, p2) = offsets(id);

        // While it is tempting to check if the bit was set and exit early
        // if it was, this can result in a data race. If this thread and
        // another thread both set the same bit, it is possible for the
        // second thread to exit before layer 3 has been set, leaving the
        // iterator in an incorrect state. The window is small, but it
        // exists.
        let set = self.layer1[p1].add(id);
        self.layer2[p2].fetch_or(id.mask(SHIFT2), Ordering::Relaxed);
        self.layer3.fetch_or(id.mask(SHIFT3), Ordering::Relaxed);
        set
    }

    /// Adds `id` to the `AtomicBitSet`. Returns `true` if the value was
    /// already in the set.
    #[inline]
    pub fn add(&mut self, id: Index) -> bool {
        use std::sync::atomic::Ordering::Relaxed;

        let (_, p1, p2) = offsets(id);
        if self.layer1[p1].add(id) {
            return true;
        }

        self.layer2[p2].store(self.layer2[p2].load(Relaxed) | id.mask(SHIFT2), Relaxed);
        self.layer3
            .store(self.layer3.load(Relaxed) | id.mask(SHIFT3), Relaxed);
        false
    }

    /// Removes `id` from the set. Returns `true` if the value
    /// was removed, and `false` if the value was not set
    /// to begin with.
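    ///
    /// A small usage sketch (the crate name `hibitset` is assumed):
    ///
    /// ```
    /// # use hibitset::AtomicBitSet;
    /// let mut set = AtomicBitSet::new();
    /// set.add(5);
    /// assert!(set.remove(5)); // was present, now removed
    /// assert!(!set.remove(5)); // already gone
    /// ```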
    #[inline]
    pub fn remove(&mut self, id: Index) -> bool {
        use std::sync::atomic::Ordering::Relaxed;
        let (_, p1, p2) = offsets(id);

        // If the bit was set, we need to clear it from layers 0 through 3.
        // A layer above should only be cleared if the bit cleared was the
        // last set bit below it.
        //
        // Plain load/store is used instead of `fetch_and` because we have
        // mutable access to the `AtomicBitSet`, so this is sound (and
        // faster).
        if !self.layer1[p1].remove(id) {
            return false;
        }
        if self.layer1[p1].mask.load(Ordering::Relaxed) != 0 {
            return true;
        }

        let v = self.layer2[p2].load(Relaxed) & !id.mask(SHIFT2);
        self.layer2[p2].store(v, Relaxed);
        if v != 0 {
            return true;
        }

        let v = self.layer3.load(Relaxed) & !id.mask(SHIFT3);
        self.layer3.store(v, Relaxed);
        true
    }

    /// Returns `true` if `id` is in the set.
    #[inline]
    pub fn contains(&self, id: Index) -> bool {
        let i = id.offset(SHIFT2);
        self.layer1[i].contains(id)
    }

    /// Clears all bits in the set.
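    ///
    /// A small sketch (the crate name `hibitset` is assumed):
    ///
    /// ```
    /// # use hibitset::AtomicBitSet;
    /// let mut set = AtomicBitSet::new();
    /// set.add(7);
    /// set.add(12_345);
    /// set.clear();
    /// assert!(!set.contains(7));
    /// assert!(!set.contains(12_345));
    /// ```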
    pub fn clear(&mut self) {
        // This is the same hierarchical striding used in the iterators.
        // Using this technique we can avoid clearing segments of the bitset
        // that are already clear. In the best case, when the set is already
        // cleared, this will only touch the highest layer.

        let (mut m3, mut m2) = (self.layer3.swap(0, Ordering::Relaxed), 0usize);
        let mut offset = 0;

        loop {
            if m2 != 0 {
                let bit = m2.trailing_zeros() as usize;
                m2 &= !(1 << bit);

                // Layers 1 and 0 are cleared unconditionally: it's only
                // 32-64 words, and the extra logic to select the correct
                // words is slower than just clearing them all.
                self.layer1[offset + bit].clear();
                continue;
            }

            if m3 != 0 {
                let bit = m3.trailing_zeros() as usize;
                m3 &= !(1 << bit);
                offset = bit << BITS;
                m2 = self.layer2[bit].swap(0, Ordering::Relaxed);
                continue;
            }
            break;
        }
    }
}

impl BitSetLike for AtomicBitSet {
    #[inline]
    fn layer3(&self) -> usize {
        self.layer3.load(Ordering::Relaxed)
    }
    #[inline]
    fn layer2(&self, i: usize) -> usize {
        self.layer2[i].load(Ordering::Relaxed)
    }
    #[inline]
    fn layer1(&self, i: usize) -> usize {
        self.layer1[i].mask.load(Ordering::Relaxed)
    }
    #[inline]
    fn layer0(&self, i: usize) -> usize {
        let (o1, o0) = (i >> BITS, i & ((1 << BITS) - 1));
        self.layer1[o1]
            .atom
            .get()
            .map(|l0| l0[o0].load(Ordering::Relaxed))
            .unwrap_or(0)
    }
    #[inline]
    fn contains(&self, i: Index) -> bool {
        self.contains(i)
    }
}

impl DrainableBitSet for AtomicBitSet {
    #[inline]
    fn remove(&mut self, i: Index) -> bool {
        self.remove(i)
    }
}

impl Default for AtomicBitSet {
    fn default() -> Self {
        AtomicBitSet {
            layer3: Default::default(),
            layer2: repeat(0)
                .map(|_| AtomicUsize::new(0))
                .take(1 << BITS)
                .collect(),
            layer1: repeat(0)
                .map(|_| AtomicBlock::new())
                .take(1 << (2 * BITS))
                .collect(),
        }
    }
}

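/// A single layer-1 block: `mask` caches which layer-0 words are
/// non-zero, while `atom` holds the layer-0 words themselves and is
/// allocated lazily on first `add`.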
struct AtomicBlock {
    mask: AtomicUsize,
    atom: AtomSetOnce<Box<[AtomicUsize; 1 << BITS]>>,
}

impl AtomicBlock {
    fn new() -> AtomicBlock {
        AtomicBlock {
            mask: AtomicUsize::new(0),
            atom: AtomSetOnce::empty(),
        }
    }

    fn add(&self, id: Index) -> bool {
        if self.atom.is_none() {
            // A zeroed `[AtomicUsize; 1 << BITS]` is a valid value, so
            // `mem::zeroed` is sound here. If another thread raced us and
            // set the atom first, the redundant allocation is dropped.
            let v = Box::new(unsafe { ::std::mem::zeroed() });
            self.atom.set_if_none(v);
        }

        let (i, m) = (id.row(SHIFT1), id.mask(SHIFT0));
        let old = self.atom.get().unwrap()[i].fetch_or(m, Ordering::Relaxed);
        self.mask.fetch_or(id.mask(SHIFT1), Ordering::Relaxed);
        old & m != 0
    }

    fn contains(&self, id: Index) -> bool {
        self.atom
            .get()
            .map(|l0| l0[id.row(SHIFT1)].load(Ordering::Relaxed) & id.mask(SHIFT0) != 0)
            .unwrap_or(false)
    }

    fn remove(&mut self, id: Index) -> bool {
        if let Some(l0) = self.atom.get_mut() {
            let (i, m) = (id.row(SHIFT1), !id.mask(SHIFT0));
            let v = l0[i].load(Ordering::Relaxed);
            l0[i].store(v & m, Ordering::Relaxed);
            // If that was the last bit set in this word, clear the word's
            // bit in the layer-1 mask as well.
            if v & m == 0 {
                let v = self.mask.load(Ordering::Relaxed) & !id.mask(SHIFT1);
                self.mask.store(v, Ordering::Relaxed);
            }
            v & id.mask(SHIFT0) == id.mask(SHIFT0)
        } else {
            false
        }
    }

    fn clear(&mut self) {
        self.mask.store(0, Ordering::Relaxed);
        self.atom.get().map(|l0| {
            for l in &l0[..] {
                l.store(0, Ordering::Relaxed);
            }
        });
    }
}

impl Debug for AtomicBlock {
    fn fmt(&self, f: &mut Formatter) -> Result<(), FormatError> {
        f.debug_struct("AtomicBlock")
            .field("mask", &self.mask)
            // `atom` may not be allocated yet; avoid a panicking `unwrap`.
            .field("atom", &self.atom.get().map(|l0| l0.iter()))
            .finish()
    }
}

#[cfg(test)]
mod atomic_set_test {
    use {AtomicBitSet, BitSetAnd, BitSetLike};

    #[test]
    fn insert() {
        let mut c = AtomicBitSet::new();
        for i in 0..1_000 {
            assert!(!c.add(i));
            assert!(c.add(i));
        }

        for i in 0..1_000 {
            assert!(c.contains(i));
        }
    }

    #[test]
    fn insert_100k() {
        let mut c = AtomicBitSet::new();
        for i in 0..100_000 {
            assert!(!c.add(i));
            assert!(c.add(i));
        }

        for i in 0..100_000 {
            assert!(c.contains(i));
        }
    }

    #[test]
    fn add_atomic() {
        let c = AtomicBitSet::new();
        for i in 0..1_000 {
            assert!(!c.add_atomic(i));
            assert!(c.add_atomic(i));
        }

        for i in 0..1_000 {
            assert!(c.contains(i));
        }
    }

    #[test]
    fn add_atomic_100k() {
        let c = AtomicBitSet::new();
        for i in 0..100_000 {
            assert!(!c.add_atomic(i));
            assert!(c.add_atomic(i));
        }

        for i in 0..100_000 {
            assert!(c.contains(i));
        }
    }

    #[test]
    fn remove() {
        let mut c = AtomicBitSet::new();
        for i in 0..1_000 {
            assert!(!c.add(i));
        }

        for i in 0..1_000 {
            assert!(c.contains(i));
            assert!(c.remove(i));
            assert!(!c.contains(i));
            assert!(!c.remove(i));
        }
    }

    #[test]
    fn iter() {
        let mut c = AtomicBitSet::new();
        for i in 0..100_000 {
            c.add(i);
        }

        let mut count = 0;
        for (idx, i) in c.iter().enumerate() {
            count += 1;
            assert_eq!(idx, i as usize);
        }
        assert_eq!(count, 100_000);
    }

    #[test]
    fn iter_odd_even() {
        let mut odd = AtomicBitSet::new();
        let mut even = AtomicBitSet::new();
        for i in 0..100_000 {
            if i % 2 == 1 {
                odd.add(i);
            } else {
                even.add(i);
            }
        }

        assert_eq!((&odd).iter().count(), 50_000);
        assert_eq!((&even).iter().count(), 50_000);
        assert_eq!(BitSetAnd(&odd, &even).iter().count(), 0);
    }

    #[test]
    fn clear() {
        let mut set = AtomicBitSet::new();
        for i in 0..1_000 {
            set.add(i);
        }

        assert_eq!((&set).iter().sum::<u32>(), 500_500 - 1_000);

        assert_eq!((&set).iter().count(), 1_000);
        set.clear();
        assert_eq!((&set).iter().count(), 0);

        for i in 0..1_000 {
            set.add(i * 64);
        }

        assert_eq!((&set).iter().count(), 1_000);
        set.clear();
        assert_eq!((&set).iter().count(), 0);

        for i in 0..1_000 {
            set.add(i * 1_000);
        }

        assert_eq!((&set).iter().count(), 1_000);
        set.clear();
        assert_eq!((&set).iter().count(), 0);

        for i in 0..100 {
            set.add(i * 10_000);
        }

        assert_eq!((&set).iter().count(), 100);
        set.clear();
        assert_eq!((&set).iter().count(), 0);

        for i in 0..10 {
            set.add(i * 10_000);
        }

        assert_eq!((&set).iter().count(), 10);
        set.clear();
        assert_eq!((&set).iter().count(), 0);
    }
}