1 //! Parallel iterator types for [slices][std::slice]
2 //!
3 //! You will rarely need to interact with this module directly unless you need
4 //! to name one of the iterator types.
5 //!
6 //! [std::slice]: https://doc.rust-lang.org/stable/std/slice/
7 
8 mod mergesort;
9 mod quicksort;
10 
11 mod test;
12 
13 use iter::*;
14 use iter::plumbing::*;
15 use self::mergesort::par_mergesort;
16 use self::quicksort::par_quicksort;
17 use split_producer::*;
18 use std::cmp;
19 use std::cmp::Ordering;
20 use std::fmt::{self, Debug};
21 
22 use super::math::div_round_up;
23 
24 /// Parallel extensions for slices.
25 pub trait ParallelSlice<T: Sync> {
26     /// Returns a plain slice, which is used to implement the rest of the
27     /// parallel methods.
as_parallel_slice(&self) -> &[T]28     fn as_parallel_slice(&self) -> &[T];
29 
30     /// Returns a parallel iterator over subslices separated by elements that
31     /// match the separator.
32     ///
33     /// # Examples
34     ///
35     /// ```
36     /// use rayon::prelude::*;
37     /// let smallest = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9]
38     ///     .par_split(|i| *i == 0)
39     ///     .map(|numbers| numbers.iter().min().unwrap())
40     ///     .min();
41     /// assert_eq!(Some(&1), smallest);
42     /// ```
par_split<P>(&self, separator: P) -> Split<T, P> where P: Fn(&T) -> bool + Sync + Send43     fn par_split<P>(&self, separator: P) -> Split<T, P>
44         where P: Fn(&T) -> bool + Sync + Send
45     {
46         Split {
47             slice: self.as_parallel_slice(),
48             separator: separator,
49         }
50     }
51 
52     /// Returns a parallel iterator over all contiguous windows of length
53     /// `window_size`. The windows overlap.
54     ///
55     /// # Examples
56     ///
57     /// ```
58     /// use rayon::prelude::*;
59     /// let windows: Vec<_> = [1, 2, 3].par_windows(2).collect();
60     /// assert_eq!(vec![[1, 2], [2, 3]], windows);
61     /// ```
par_windows(&self, window_size: usize) -> Windows<T>62     fn par_windows(&self, window_size: usize) -> Windows<T> {
63         Windows {
64             window_size: window_size,
65             slice: self.as_parallel_slice(),
66         }
67     }
68 
69     /// Returns a parallel iterator over at most `chunk_size` elements of
70     /// `self` at a time. The chunks do not overlap.
71     ///
72     /// If the number of elements in the iterator is not divisible by
73     /// `chunk_size`, the last chunk may be shorter than `chunk_size`.  All
74     /// other chunks will have that exact length.
75     ///
76     /// # Examples
77     ///
78     /// ```
79     /// use rayon::prelude::*;
80     /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks(2).collect();
81     /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
82     /// ```
par_chunks(&self, chunk_size: usize) -> Chunks<T>83     fn par_chunks(&self, chunk_size: usize) -> Chunks<T> {
84         assert!(chunk_size != 0, "chunk_size must not be zero");
85         Chunks {
86             chunk_size: chunk_size,
87             slice: self.as_parallel_slice(),
88         }
89     }
90 }
91 
impl<T: Sync> ParallelSlice<T> for [T] {
    #[inline]
    fn as_parallel_slice(&self) -> &[T] {
        // A slice is trivially its own "parallel slice" view.
        self
    }
}
98 
99 
100 /// Parallel extensions for mutable slices.
101 pub trait ParallelSliceMut<T: Send> {
102     /// Returns a plain mutable slice, which is used to implement the rest of
103     /// the parallel methods.
as_parallel_slice_mut(&mut self) -> &mut [T]104     fn as_parallel_slice_mut(&mut self) -> &mut [T];
105 
106     /// Returns a parallel iterator over mutable subslices separated by
107     /// elements that match the separator.
108     ///
109     /// # Examples
110     ///
111     /// ```
112     /// use rayon::prelude::*;
113     /// let mut array = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9];
114     /// array.par_split_mut(|i| *i == 0)
115     ///      .for_each(|slice| slice.reverse());
116     /// assert_eq!(array, [3, 2, 1, 0, 8, 4, 2, 0, 9, 6, 3]);
117     /// ```
par_split_mut<P>(&mut self, separator: P) -> SplitMut<T, P> where P: Fn(&T) -> bool + Sync + Send118     fn par_split_mut<P>(&mut self, separator: P) -> SplitMut<T, P>
119         where P: Fn(&T) -> bool + Sync + Send
120     {
121         SplitMut {
122             slice: self.as_parallel_slice_mut(),
123             separator: separator,
124         }
125     }
126 
127     /// Returns a parallel iterator over at most `chunk_size` elements of
128     /// `self` at a time. The chunks are mutable and do not overlap.
129     ///
130     /// If the number of elements in the iterator is not divisible by
131     /// `chunk_size`, the last chunk may be shorter than `chunk_size`.  All
132     /// other chunks will have that exact length.
133     ///
134     /// # Examples
135     ///
136     /// ```
137     /// use rayon::prelude::*;
138     /// let mut array = [1, 2, 3, 4, 5];
139     /// array.par_chunks_mut(2)
140     ///      .for_each(|slice| slice.reverse());
141     /// assert_eq!(array, [2, 1, 4, 3, 5]);
142     /// ```
par_chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T>143     fn par_chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T> {
144         assert!(chunk_size != 0, "chunk_size must not be zero");
145         ChunksMut {
146             chunk_size: chunk_size,
147             slice: self.as_parallel_slice_mut(),
148         }
149     }
150 
151     /// Sorts the slice in parallel.
152     ///
153     /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
154     ///
155     /// When applicable, unstable sorting is preferred because it is generally faster than stable
156     /// sorting and it doesn't allocate auxiliary memory.
157     /// See [`par_sort_unstable`](#method.par_sort_unstable).
158     ///
159     /// # Current implementation
160     ///
161     /// The current algorithm is an adaptive merge sort inspired by
162     /// [timsort](https://en.wikipedia.org/wiki/Timsort).
163     /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
164     /// two or more sorted sequences concatenated one after another.
165     ///
166     /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
167     /// non-allocating insertion sort is used instead.
168     ///
169     /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
170     /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
171     /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
172     /// parallel subdivision of chunks and parallel merge operation.
173     ///
174     /// # Examples
175     ///
176     /// ```
177     /// use rayon::prelude::*;
178     ///
179     /// let mut v = [-5, 4, 1, -3, 2];
180     ///
181     /// v.par_sort();
182     /// assert_eq!(v, [-5, -3, 1, 2, 4]);
183     /// ```
par_sort(&mut self) where T: Ord,184     fn par_sort(&mut self)
185     where
186         T: Ord,
187     {
188         par_mergesort(self.as_parallel_slice_mut(), |a, b| a.lt(b));
189     }
190 
191     /// Sorts the slice in parallel with a comparator function.
192     ///
193     /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
194     ///
195     /// When applicable, unstable sorting is preferred because it is generally faster than stable
196     /// sorting and it doesn't allocate auxiliary memory.
197     /// See [`par_sort_unstable_by`](#method.par_sort_unstable_by).
198     ///
199     /// # Current implementation
200     ///
201     /// The current algorithm is an adaptive merge sort inspired by
202     /// [timsort](https://en.wikipedia.org/wiki/Timsort).
203     /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
204     /// two or more sorted sequences concatenated one after another.
205     ///
206     /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
207     /// non-allocating insertion sort is used instead.
208     ///
209     /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
210     /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
211     /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
212     /// parallel subdivision of chunks and parallel merge operation.
213     ///
214     /// # Examples
215     ///
216     /// ```
217     /// use rayon::prelude::*;
218     ///
219     /// let mut v = [5, 4, 1, 3, 2];
220     /// v.par_sort_by(|a, b| a.cmp(b));
221     /// assert_eq!(v, [1, 2, 3, 4, 5]);
222     ///
223     /// // reverse sorting
224     /// v.par_sort_by(|a, b| b.cmp(a));
225     /// assert_eq!(v, [5, 4, 3, 2, 1]);
226     /// ```
par_sort_by<F>(&mut self, compare: F) where F: Fn(&T, &T) -> Ordering + Sync,227     fn par_sort_by<F>(&mut self, compare: F)
228     where
229         F: Fn(&T, &T) -> Ordering + Sync,
230     {
231         par_mergesort(self.as_parallel_slice_mut(), |a, b| compare(a, b) == Ordering::Less);
232     }
233 
234     /// Sorts the slice in parallel with a key extraction function.
235     ///
236     /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
237     ///
238     /// When applicable, unstable sorting is preferred because it is generally faster than stable
239     /// sorting and it doesn't allocate auxiliary memory.
240     /// See [`par_sort_unstable_by_key`](#method.par_sort_unstable_by_key).
241     ///
242     /// # Current implementation
243     ///
244     /// The current algorithm is an adaptive merge sort inspired by
245     /// [timsort](https://en.wikipedia.org/wiki/Timsort).
246     /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
247     /// two or more sorted sequences concatenated one after another.
248     ///
249     /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
250     /// non-allocating insertion sort is used instead.
251     ///
252     /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
253     /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
254     /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
255     /// parallel subdivision of chunks and parallel merge operation.
256     ///
257     /// # Examples
258     ///
259     /// ```
260     /// use rayon::prelude::*;
261     ///
262     /// let mut v = [-5i32, 4, 1, -3, 2];
263     ///
264     /// v.par_sort_by_key(|k| k.abs());
265     /// assert_eq!(v, [1, 2, -3, 4, -5]);
266     /// ```
par_sort_by_key<B, F>(&mut self, f: F) where B: Ord, F: Fn(&T) -> B + Sync,267     fn par_sort_by_key<B, F>(&mut self, f: F)
268     where
269         B: Ord,
270         F: Fn(&T) -> B + Sync,
271     {
272         par_mergesort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
273     }
274 
275     /// Sorts the slice in parallel, but may not preserve the order of equal elements.
276     ///
277     /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
278     /// and `O(n log n)` worst-case.
279     ///
280     /// # Current implementation
281     ///
282     /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
283     /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
284     /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
285     /// heapsort on degenerate inputs.
286     ///
287     /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
288     /// slice consists of several concatenated sorted sequences.
289     ///
290     /// All quicksorts work in two stages: partitioning into two halves followed by recursive
291     /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
292     /// parallel.
293     ///
294     /// [pdqsort]: https://github.com/orlp/pdqsort
295     ///
296     /// # Examples
297     ///
298     /// ```
299     /// use rayon::prelude::*;
300     ///
301     /// let mut v = [-5, 4, 1, -3, 2];
302     ///
303     /// v.par_sort_unstable();
304     /// assert_eq!(v, [-5, -3, 1, 2, 4]);
305     /// ```
par_sort_unstable(&mut self) where T: Ord,306     fn par_sort_unstable(&mut self)
307     where
308         T: Ord,
309     {
310         par_quicksort(self.as_parallel_slice_mut(), |a, b| a.lt(b));
311     }
312 
313     /// Sorts the slice in parallel with a comparator function, but may not preserve the order of
314     /// equal elements.
315     ///
316     /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
317     /// and `O(n log n)` worst-case.
318     ///
319     /// # Current implementation
320     ///
321     /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
322     /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
323     /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
324     /// heapsort on degenerate inputs.
325     ///
326     /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
327     /// slice consists of several concatenated sorted sequences.
328     ///
329     /// All quicksorts work in two stages: partitioning into two halves followed by recursive
330     /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
331     /// parallel.
332     ///
333     /// [pdqsort]: https://github.com/orlp/pdqsort
334     ///
335     /// # Examples
336     ///
337     /// ```
338     /// use rayon::prelude::*;
339     ///
340     /// let mut v = [5, 4, 1, 3, 2];
341     /// v.par_sort_unstable_by(|a, b| a.cmp(b));
342     /// assert_eq!(v, [1, 2, 3, 4, 5]);
343     ///
344     /// // reverse sorting
345     /// v.par_sort_unstable_by(|a, b| b.cmp(a));
346     /// assert_eq!(v, [5, 4, 3, 2, 1]);
347     /// ```
par_sort_unstable_by<F>(&mut self, compare: F) where F: Fn(&T, &T) -> Ordering + Sync,348     fn par_sort_unstable_by<F>(&mut self, compare: F)
349     where
350         F: Fn(&T, &T) -> Ordering + Sync,
351     {
352         par_quicksort(self.as_parallel_slice_mut(), |a, b| compare(a, b) == Ordering::Less);
353     }
354 
355     /// Sorts the slice in parallel with a key extraction function, but may not preserve the order
356     /// of equal elements.
357     ///
358     /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
359     /// and `O(n log n)` worst-case.
360     ///
361     /// # Current implementation
362     ///
363     /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
364     /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
365     /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
366     /// heapsort on degenerate inputs.
367     ///
368     /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
369     /// slice consists of several concatenated sorted sequences.
370     ///
371     /// All quicksorts work in two stages: partitioning into two halves followed by recursive
372     /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
373     /// parallel.
374     ///
375     /// [pdqsort]: https://github.com/orlp/pdqsort
376     ///
377     /// # Examples
378     ///
379     /// ```
380     /// use rayon::prelude::*;
381     ///
382     /// let mut v = [-5i32, 4, 1, -3, 2];
383     ///
384     /// v.par_sort_unstable_by_key(|k| k.abs());
385     /// assert_eq!(v, [1, 2, -3, 4, -5]);
386     /// ```
par_sort_unstable_by_key<B, F>(&mut self, f: F) where B: Ord, F: Fn(&T) -> B + Sync,387     fn par_sort_unstable_by_key<B, F>(&mut self, f: F)
388     where
389         B: Ord,
390         F: Fn(&T) -> B + Sync,
391     {
392         par_quicksort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
393     }
394 }
395 
impl<T: Send> ParallelSliceMut<T> for [T] {
    #[inline]
    fn as_parallel_slice_mut(&mut self) -> &mut [T] {
        // A mutable slice is trivially its own "parallel slice" view.
        self
    }
}
402 
403 
// Shared slices iterate in parallel by reference, yielding `&T`.
impl<'data, T: Sync + 'data> IntoParallelIterator for &'data [T] {
    type Item = &'data T;
    type Iter = Iter<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter { slice: self }
    }
}

// `&Vec<T>` delegates to the slice it derefs to.
impl<'data, T: Sync + 'data> IntoParallelIterator for &'data Vec<T> {
    type Item = &'data T;
    type Iter = Iter<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter { slice: self }
    }
}

// Mutable slices iterate in parallel by mutable reference, yielding `&mut T`.
impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut [T] {
    type Item = &'data mut T;
    type Iter = IterMut<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        IterMut { slice: self }
    }
}

// `&mut Vec<T>` delegates to the mutable slice it derefs to.
impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut Vec<T> {
    type Item = &'data mut T;
    type Iter = IterMut<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        IterMut { slice: self }
    }
}
439 
440 
441 /// Parallel iterator over immutable items in a slice
442 #[derive(Debug)]
443 pub struct Iter<'data, T: 'data + Sync> {
444     slice: &'data [T],
445 }
446 
447 impl<'data, T: Sync> Clone for Iter<'data, T> {
clone(&self) -> Self448     fn clone(&self) -> Self {
449         Iter { ..*self }
450     }
451 }
452 
453 impl<'data, T: Sync + 'data> ParallelIterator for Iter<'data, T> {
454     type Item = &'data T;
455 
drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>456     fn drive_unindexed<C>(self, consumer: C) -> C::Result
457         where C: UnindexedConsumer<Self::Item>
458     {
459         bridge(self, consumer)
460     }
461 
opt_len(&self) -> Option<usize>462     fn opt_len(&self) -> Option<usize> {
463         Some(self.len())
464     }
465 }
466 
467 impl<'data, T: Sync + 'data> IndexedParallelIterator for Iter<'data, T> {
drive<C>(self, consumer: C) -> C::Result where C: Consumer<Self::Item>468     fn drive<C>(self, consumer: C) -> C::Result
469         where C: Consumer<Self::Item>
470     {
471         bridge(self, consumer)
472     }
473 
len(&self) -> usize474     fn len(&self) -> usize {
475         self.slice.len()
476     }
477 
with_producer<CB>(self, callback: CB) -> CB::Output where CB: ProducerCallback<Self::Item>478     fn with_producer<CB>(self, callback: CB) -> CB::Output
479         where CB: ProducerCallback<Self::Item>
480     {
481         callback.callback(IterProducer { slice: self.slice })
482     }
483 }
484 
485 struct IterProducer<'data, T: 'data + Sync> {
486     slice: &'data [T],
487 }
488 
489 impl<'data, T: 'data + Sync> Producer for IterProducer<'data, T> {
490     type Item = &'data T;
491     type IntoIter = ::std::slice::Iter<'data, T>;
492 
into_iter(self) -> Self::IntoIter493     fn into_iter(self) -> Self::IntoIter {
494         self.slice.into_iter()
495     }
496 
split_at(self, index: usize) -> (Self, Self)497     fn split_at(self, index: usize) -> (Self, Self) {
498         let (left, right) = self.slice.split_at(index);
499         (IterProducer { slice: left }, IterProducer { slice: right })
500     }
501 }
502 
503 
504 /// Parallel iterator over immutable non-overlapping chunks of a slice
505 #[derive(Debug)]
506 pub struct Chunks<'data, T: 'data + Sync> {
507     chunk_size: usize,
508     slice: &'data [T],
509 }
510 
511 impl<'data, T: Sync> Clone for Chunks<'data, T> {
clone(&self) -> Self512     fn clone(&self) -> Self {
513         Chunks { ..*self }
514     }
515 }
516 
517 impl<'data, T: Sync + 'data> ParallelIterator for Chunks<'data, T> {
518     type Item = &'data [T];
519 
drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>520     fn drive_unindexed<C>(self, consumer: C) -> C::Result
521         where C: UnindexedConsumer<Self::Item>
522     {
523         bridge(self, consumer)
524     }
525 
opt_len(&self) -> Option<usize>526     fn opt_len(&self) -> Option<usize> {
527         Some(self.len())
528     }
529 }
530 
531 impl<'data, T: Sync + 'data> IndexedParallelIterator for Chunks<'data, T> {
drive<C>(self, consumer: C) -> C::Result where C: Consumer<Self::Item>532     fn drive<C>(self, consumer: C) -> C::Result
533         where C: Consumer<Self::Item>
534     {
535         bridge(self, consumer)
536     }
537 
len(&self) -> usize538     fn len(&self) -> usize {
539         div_round_up(self.slice.len(), self.chunk_size)
540     }
541 
with_producer<CB>(self, callback: CB) -> CB::Output where CB: ProducerCallback<Self::Item>542     fn with_producer<CB>(self, callback: CB) -> CB::Output
543         where CB: ProducerCallback<Self::Item>
544     {
545         callback.callback(ChunksProducer {
546                               chunk_size: self.chunk_size,
547                               slice: self.slice,
548                           })
549     }
550 }
551 
552 struct ChunksProducer<'data, T: 'data + Sync> {
553     chunk_size: usize,
554     slice: &'data [T],
555 }
556 
557 impl<'data, T: 'data + Sync> Producer for ChunksProducer<'data, T> {
558     type Item = &'data [T];
559     type IntoIter = ::std::slice::Chunks<'data, T>;
560 
into_iter(self) -> Self::IntoIter561     fn into_iter(self) -> Self::IntoIter {
562         self.slice.chunks(self.chunk_size)
563     }
564 
split_at(self, index: usize) -> (Self, Self)565     fn split_at(self, index: usize) -> (Self, Self) {
566         let elem_index = index * self.chunk_size;
567         let (left, right) = self.slice.split_at(elem_index);
568         (ChunksProducer {
569              chunk_size: self.chunk_size,
570              slice: left,
571          },
572          ChunksProducer {
573              chunk_size: self.chunk_size,
574              slice: right,
575          })
576     }
577 }
578 
579 
580 /// Parallel iterator over immutable overlapping windows of a slice
581 #[derive(Debug)]
582 pub struct Windows<'data, T: 'data + Sync> {
583     window_size: usize,
584     slice: &'data [T],
585 }
586 
587 impl<'data, T: Sync> Clone for Windows<'data, T> {
clone(&self) -> Self588     fn clone(&self) -> Self {
589         Windows { ..*self }
590     }
591 }
592 
593 impl<'data, T: Sync + 'data> ParallelIterator for Windows<'data, T> {
594     type Item = &'data [T];
595 
drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>596     fn drive_unindexed<C>(self, consumer: C) -> C::Result
597         where C: UnindexedConsumer<Self::Item>
598     {
599         bridge(self, consumer)
600     }
601 
opt_len(&self) -> Option<usize>602     fn opt_len(&self) -> Option<usize> {
603         Some(self.len())
604     }
605 }
606 
607 impl<'data, T: Sync + 'data> IndexedParallelIterator for Windows<'data, T> {
drive<C>(self, consumer: C) -> C::Result where C: Consumer<Self::Item>608     fn drive<C>(self, consumer: C) -> C::Result
609         where C: Consumer<Self::Item>
610     {
611         bridge(self, consumer)
612     }
613 
len(&self) -> usize614     fn len(&self) -> usize {
615         assert!(self.window_size >= 1);
616         self.slice.len().saturating_sub(self.window_size - 1)
617     }
618 
with_producer<CB>(self, callback: CB) -> CB::Output where CB: ProducerCallback<Self::Item>619     fn with_producer<CB>(self, callback: CB) -> CB::Output
620         where CB: ProducerCallback<Self::Item>
621     {
622         callback.callback(WindowsProducer {
623                               window_size: self.window_size,
624                               slice: self.slice,
625                           })
626     }
627 }
628 
629 struct WindowsProducer<'data, T: 'data + Sync> {
630     window_size: usize,
631     slice: &'data [T],
632 }
633 
634 impl<'data, T: 'data + Sync> Producer for WindowsProducer<'data, T> {
635     type Item = &'data [T];
636     type IntoIter = ::std::slice::Windows<'data, T>;
637 
into_iter(self) -> Self::IntoIter638     fn into_iter(self) -> Self::IntoIter {
639         self.slice.windows(self.window_size)
640     }
641 
split_at(self, index: usize) -> (Self, Self)642     fn split_at(self, index: usize) -> (Self, Self) {
643         let left_index = cmp::min(self.slice.len(), index + (self.window_size - 1));
644         let left = &self.slice[..left_index];
645         let right = &self.slice[index..];
646         (WindowsProducer {
647              window_size: self.window_size,
648              slice: left,
649          },
650          WindowsProducer {
651              window_size: self.window_size,
652              slice: right,
653          })
654     }
655 }
656 
657 
658 /// Parallel iterator over mutable items in a slice
659 #[derive(Debug)]
660 pub struct IterMut<'data, T: 'data + Send> {
661     slice: &'data mut [T],
662 }
663 
664 impl<'data, T: Send + 'data> ParallelIterator for IterMut<'data, T> {
665     type Item = &'data mut T;
666 
drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>667     fn drive_unindexed<C>(self, consumer: C) -> C::Result
668         where C: UnindexedConsumer<Self::Item>
669     {
670         bridge(self, consumer)
671     }
672 
opt_len(&self) -> Option<usize>673     fn opt_len(&self) -> Option<usize> {
674         Some(self.len())
675     }
676 }
677 
678 impl<'data, T: Send + 'data> IndexedParallelIterator for IterMut<'data, T> {
drive<C>(self, consumer: C) -> C::Result where C: Consumer<Self::Item>679     fn drive<C>(self, consumer: C) -> C::Result
680         where C: Consumer<Self::Item>
681     {
682         bridge(self, consumer)
683     }
684 
len(&self) -> usize685     fn len(&self) -> usize {
686         self.slice.len()
687     }
688 
with_producer<CB>(self, callback: CB) -> CB::Output where CB: ProducerCallback<Self::Item>689     fn with_producer<CB>(self, callback: CB) -> CB::Output
690         where CB: ProducerCallback<Self::Item>
691     {
692         callback.callback(IterMutProducer { slice: self.slice })
693     }
694 }
695 
696 struct IterMutProducer<'data, T: 'data + Send> {
697     slice: &'data mut [T],
698 }
699 
700 impl<'data, T: 'data + Send> Producer for IterMutProducer<'data, T> {
701     type Item = &'data mut T;
702     type IntoIter = ::std::slice::IterMut<'data, T>;
703 
into_iter(self) -> Self::IntoIter704     fn into_iter(self) -> Self::IntoIter {
705         self.slice.into_iter()
706     }
707 
split_at(self, index: usize) -> (Self, Self)708     fn split_at(self, index: usize) -> (Self, Self) {
709         let (left, right) = self.slice.split_at_mut(index);
710         (IterMutProducer { slice: left }, IterMutProducer { slice: right })
711     }
712 }
713 
714 
715 /// Parallel iterator over mutable non-overlapping chunks of a slice
716 #[derive(Debug)]
717 pub struct ChunksMut<'data, T: 'data + Send> {
718     chunk_size: usize,
719     slice: &'data mut [T],
720 }
721 
722 impl<'data, T: Send + 'data> ParallelIterator for ChunksMut<'data, T> {
723     type Item = &'data mut [T];
724 
drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>725     fn drive_unindexed<C>(self, consumer: C) -> C::Result
726         where C: UnindexedConsumer<Self::Item>
727     {
728         bridge(self, consumer)
729     }
730 
opt_len(&self) -> Option<usize>731     fn opt_len(&self) -> Option<usize> {
732         Some(self.len())
733     }
734 }
735 
736 impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksMut<'data, T> {
drive<C>(self, consumer: C) -> C::Result where C: Consumer<Self::Item>737     fn drive<C>(self, consumer: C) -> C::Result
738         where C: Consumer<Self::Item>
739     {
740         bridge(self, consumer)
741     }
742 
len(&self) -> usize743     fn len(&self) -> usize {
744         div_round_up(self.slice.len(), self.chunk_size)
745     }
746 
with_producer<CB>(self, callback: CB) -> CB::Output where CB: ProducerCallback<Self::Item>747     fn with_producer<CB>(self, callback: CB) -> CB::Output
748         where CB: ProducerCallback<Self::Item>
749     {
750         callback.callback(ChunksMutProducer {
751                               chunk_size: self.chunk_size,
752                               slice: self.slice,
753                           })
754     }
755 }
756 
757 struct ChunksMutProducer<'data, T: 'data + Send> {
758     chunk_size: usize,
759     slice: &'data mut [T],
760 }
761 
762 impl<'data, T: 'data + Send> Producer for ChunksMutProducer<'data, T> {
763     type Item = &'data mut [T];
764     type IntoIter = ::std::slice::ChunksMut<'data, T>;
765 
into_iter(self) -> Self::IntoIter766     fn into_iter(self) -> Self::IntoIter {
767         self.slice.chunks_mut(self.chunk_size)
768     }
769 
split_at(self, index: usize) -> (Self, Self)770     fn split_at(self, index: usize) -> (Self, Self) {
771         let elem_index = index * self.chunk_size;
772         let (left, right) = self.slice.split_at_mut(elem_index);
773         (ChunksMutProducer {
774              chunk_size: self.chunk_size,
775              slice: left,
776          },
777          ChunksMutProducer {
778              chunk_size: self.chunk_size,
779              slice: right,
780          })
781     }
782 }
783 
784 
785 /// Parallel iterator over slices separated by a predicate
786 pub struct Split<'data, T: 'data, P> {
787     slice: &'data [T],
788     separator: P,
789 }
790 
791 impl<'data, T, P: Clone> Clone for Split<'data, T, P> {
clone(&self) -> Self792     fn clone(&self) -> Self {
793         Split { separator: self.separator.clone(), ..*self }
794     }
795 }
796 
797 impl<'data, T: Debug, P> Debug for Split<'data, T, P> {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result798     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
799         f.debug_struct("Split")
800             .field("slice", &self.slice)
801             .finish()
802     }
803 }
804 
805 impl<'data, T, P> ParallelIterator for Split<'data, T, P>
806     where P: Fn(&T) -> bool + Sync + Send,
807           T: Sync
808 {
809     type Item = &'data [T];
810 
drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>811     fn drive_unindexed<C>(self, consumer: C) -> C::Result
812         where C: UnindexedConsumer<Self::Item>
813     {
814         let producer = SplitProducer::new(self.slice, &self.separator);
815         bridge_unindexed(producer, consumer)
816     }
817 }
818 
819 /// Implement support for `SplitProducer`.
820 impl<'data, T, P> Fissile<P> for &'data [T]
821     where P: Fn(&T) -> bool
822 {
length(&self) -> usize823     fn length(&self) -> usize {
824         self.len()
825     }
826 
midpoint(&self, end: usize) -> usize827     fn midpoint(&self, end: usize) -> usize {
828         end / 2
829     }
830 
find(&self, separator: &P, start: usize, end: usize) -> Option<usize>831     fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
832         self[start..end].iter().position(separator)
833     }
834 
rfind(&self, separator: &P, end: usize) -> Option<usize>835     fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
836         self[..end].iter().rposition(separator)
837     }
838 
split_once(self, index: usize) -> (Self, Self)839     fn split_once(self, index: usize) -> (Self, Self) {
840         let (left, right) = self.split_at(index);
841         (left, &right[1..]) // skip the separator
842     }
843 
fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F where F: Folder<Self>, Self: Send844     fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
845         where F: Folder<Self>,
846               Self: Send
847     {
848         let mut split = self.split(separator);
849         if skip_last {
850             split.next_back();
851         }
852         folder.consume_iter(split)
853     }
854 }
855 
856 
857 /// Parallel iterator over mutable slices separated by a predicate
858 pub struct SplitMut<'data, T: 'data, P> {
859     slice: &'data mut [T],
860     separator: P,
861 }
862 
863 impl<'data, T: Debug, P> Debug for SplitMut<'data, T, P> {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result864     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
865         f.debug_struct("SplitMut")
866             .field("slice", &self.slice)
867             .finish()
868     }
869 }
870 
871 impl<'data, T, P> ParallelIterator for SplitMut<'data, T, P>
872     where P: Fn(&T) -> bool + Sync + Send,
873           T: Send
874 {
875     type Item = &'data mut [T];
876 
drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>877     fn drive_unindexed<C>(self, consumer: C) -> C::Result
878         where C: UnindexedConsumer<Self::Item>
879     {
880         let producer = SplitProducer::new(self.slice, &self.separator);
881         bridge_unindexed(producer, consumer)
882     }
883 }
884 
885 /// Implement support for `SplitProducer`.
886 impl<'data, T, P> Fissile<P> for &'data mut [T]
887     where P: Fn(&T) -> bool
888 {
length(&self) -> usize889     fn length(&self) -> usize {
890         self.len()
891     }
892 
midpoint(&self, end: usize) -> usize893     fn midpoint(&self, end: usize) -> usize {
894         end / 2
895     }
896 
find(&self, separator: &P, start: usize, end: usize) -> Option<usize>897     fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
898         self[start..end].iter().position(separator)
899     }
900 
rfind(&self, separator: &P, end: usize) -> Option<usize>901     fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
902         self[..end].iter().rposition(separator)
903     }
904 
split_once(self, index: usize) -> (Self, Self)905     fn split_once(self, index: usize) -> (Self, Self) {
906         let (left, right) = self.split_at_mut(index);
907         (left, &mut right[1..]) // skip the separator
908     }
909 
fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F where F: Folder<Self>, Self: Send910     fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
911         where F: Folder<Self>,
912               Self: Send
913     {
914         let mut split = self.split_mut(separator);
915         if skip_last {
916             split.next_back();
917         }
918         folder.consume_iter(split)
919     }
920 }
921