1 //! Traits for writing parallel programs using an iterator-style interface
2 //!
3 //! You will rarely need to interact with this module directly unless you have
4 //! need to name one of the iterator types.
5 //!
6 //! Parallel iterators make it easy to write iterator-like chains that
7 //! execute in parallel: typically all you have to do is convert the
8 //! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9 //! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10 //! example, to compute the sum of the squares of a sequence of
11 //! integers, one might write:
12 //!
13 //! ```rust
14 //! use rayon::prelude::*;
15 //! fn sum_of_squares(input: &[i32]) -> i32 {
16 //!     input.par_iter()
17 //!          .map(|i| i * i)
18 //!          .sum()
19 //! }
20 //! ```
21 //!
22 //! Or, to increment all the integers in a slice, you could write:
23 //!
24 //! ```rust
25 //! use rayon::prelude::*;
26 //! fn increment_all(input: &mut [i32]) {
27 //!     input.par_iter_mut()
28 //!          .for_each(|p| *p += 1);
29 //! }
30 //! ```
31 //!
32 //! To use parallel iterators, first import the traits by adding
33 //! something like `use rayon::prelude::*` to your module. You can
34 //! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35 //! parallel iterator. Like a [regular iterator][], parallel
36 //! iterators work by first constructing a computation and then
37 //! executing it.
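//!
//! For instance, constructing the computation and executing it are two
//! separate steps, as in this small sketch:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! let v = vec![1, 2, 3];
//! // Nothing runs yet: `map` merely describes the computation...
//! let squares = v.par_iter().map(|x| x * x);
//! // ...the work happens once a consuming method like `sum` executes it.
//! let total: i32 = squares.sum();
//! assert_eq!(total, 14);
//! ```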
38 //!
39 //! In addition to `par_iter()` and friends, some types offer other
40 //! ways to create (or consume) parallel iterators:
41 //!
42 //! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43 //!   `par_windows`, as well as various parallel sorting
44 //!   operations. See [the `ParallelSlice` trait] for the full list.
45 //! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46 //!   See [the `ParallelString` trait] for the full list.
47 //! - Various collections offer [`par_extend`], which grows a
48 //!   collection given a parallel iterator. (If you don't have a
49 //!   collection to extend, you can use [`collect()`] to create a new
50 //!   one from scratch.)
51 //!
52 //! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
53 //! [the `ParallelString` trait]: ../str/trait.ParallelString.html
54 //! [`par_extend`]: trait.ParallelExtend.html
55 //! [`collect()`]: trait.ParallelIterator.html#method.collect
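//!
//! For example, a small sketch combining a few of the methods listed above:
//! `par_lines` on strings, [`collect()`], and [`par_extend`] (the input text
//! here is arbitrary):
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! let text = "foo\n\nbar\nbaz\n";
//! // Gather the lengths of the non-empty lines into a new Vec...
//! let mut lengths: Vec<usize> = text.par_lines()
//!     .filter(|line| !line.is_empty())
//!     .map(|line| line.len())
//!     .collect();
//! // ...then grow it with more lengths using `par_extend`.
//! lengths.par_extend("quux\nquuux\n".par_lines().map(|line| line.len()));
//! assert_eq!(lengths, [3, 3, 3, 4, 5]);
//! ```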
56 //!
57 //! To see the full range of methods available on parallel iterators,
58 //! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
59 //! traits.
60 //!
61 //! If you'd like to build a custom parallel iterator, or to write your own
62 //! combinator, then check out the [split] function and the [plumbing] module.
63 //!
64 //! [regular iterator]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
65 //! [`ParallelIterator`]: trait.ParallelIterator.html
66 //! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
67 //! [split]: fn.split.html
68 //! [plumbing]: plumbing/index.html
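//!
//! For a quick taste of [split], here is a sketch that recursively halves a
//! range and then sums the pieces (the threshold of 10 is arbitrary):
//!
//! ```rust
//! use rayon::iter::split;
//! use rayon::prelude::*;
//!
//! let total: u64 = split(0u64..1000, |r| {
//!     if r.end - r.start > 10 {
//!         // Split the range in half; rayon may process the halves in parallel.
//!         let mid = r.start + (r.end - r.start) / 2;
//!         (r.start..mid, Some(mid..r.end))
//!     } else {
//!         // Small enough: stop splitting.
//!         (r, None)
//!     }
//! })
//! .map(|r| r.sum::<u64>())
//! .sum();
//! assert_eq!(total, (0..1000u64).sum::<u64>());
//! ```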
69 //!
70 //! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
71 //! has been deliberately obscured from the public API.  This trait is intended
72 //! to mirror the unstable `std::ops::Try` with implementations for `Option` and
73 //! `Result`, where `Some`/`Ok` values will let those iterators continue, but
74 //! `None`/`Err` values will exit early.
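//!
//! For instance, with `Option` items the short-circuiting of `try_for_each`
//! looks roughly like this:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // A `None` result stops the processing as soon as it is observed.
//! let result = (0..1_000).into_par_iter()
//!     .try_for_each(|x| if x < 100 { Some(()) } else { None });
//! assert_eq!(result, None);
//! ```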
75 //!
76 //! A note about object safety: It is currently _not_ possible to wrap
77 //! a `ParallelIterator` (or any trait that depends on it) using a
78 //! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
79 //! because `ParallelIterator` is **not object-safe**.
80 //! (This keeps the implementation simpler and allows extra optimizations.)
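//!
//! If you need to return one of several iterator types from a function, a
//! common workaround is the `Either` type re-exported from this module, which
//! implements `ParallelIterator` when both of its variants do. A sketch:
//!
//! ```rust
//! use rayon::iter::Either;
//! use rayon::prelude::*;
//!
//! // Both branches are parallel iterators over `u32`, unified into one type.
//! fn numbers(evens_only: bool) -> impl ParallelIterator<Item = u32> {
//!     if evens_only {
//!         Either::Left((0..100u32).into_par_iter().filter(|x| x % 2 == 0))
//!     } else {
//!         Either::Right((0..100u32).into_par_iter())
//!     }
//! }
//!
//! assert_eq!(numbers(true).count(), 50);
//! ```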
81 
82 use self::plumbing::*;
83 use self::private::Try;
84 pub use either::Either;
85 use std::cmp::{self, Ordering};
86 use std::iter::{Product, Sum};
87 use std::ops::{Fn, RangeBounds};
88 
89 pub mod plumbing;
90 
91 #[cfg(test)]
92 mod test;
93 
94 // There is a method to the madness here:
95 //
96 // - These modules are private but expose certain types to the end-user
97 //   (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
98 //   public API surface of the `ParallelIterator` traits.
99 // - In **this** module, those public types are always used unprefixed, which forces
100 //   us to add a `pub use` and helps identify if we missed anything.
101 // - In contrast, items that appear **only** in the body of a method,
102 //   e.g. `find::find()`, are always used **prefixed**, so that they
103 //   can be readily distinguished.
104 
105 mod chain;
106 mod chunks;
107 mod cloned;
108 mod collect;
109 mod copied;
110 mod empty;
111 mod enumerate;
112 mod extend;
113 mod filter;
114 mod filter_map;
115 mod find;
116 mod find_first_last;
117 mod flat_map;
118 mod flat_map_iter;
119 mod flatten;
120 mod flatten_iter;
121 mod fold;
122 mod for_each;
123 mod from_par_iter;
124 mod inspect;
125 mod interleave;
126 mod interleave_shortest;
127 mod intersperse;
128 mod len;
129 mod map;
130 mod map_with;
131 mod multizip;
132 mod noop;
133 mod once;
134 mod panic_fuse;
135 mod par_bridge;
136 mod positions;
137 mod product;
138 mod reduce;
139 mod repeat;
140 mod rev;
141 mod skip;
142 mod splitter;
143 mod sum;
144 mod take;
145 mod try_fold;
146 mod try_reduce;
147 mod try_reduce_with;
148 mod unzip;
149 mod update;
150 mod while_some;
151 mod zip;
152 mod zip_eq;
153 
154 pub use self::{
155     chain::Chain,
156     chunks::Chunks,
157     cloned::Cloned,
158     copied::Copied,
159     empty::{empty, Empty},
160     enumerate::Enumerate,
161     filter::Filter,
162     filter_map::FilterMap,
163     flat_map::FlatMap,
164     flat_map_iter::FlatMapIter,
165     flatten::Flatten,
166     flatten_iter::FlattenIter,
167     fold::{Fold, FoldWith},
168     inspect::Inspect,
169     interleave::Interleave,
170     interleave_shortest::InterleaveShortest,
171     intersperse::Intersperse,
172     len::{MaxLen, MinLen},
173     map::Map,
174     map_with::{MapInit, MapWith},
175     multizip::MultiZip,
176     once::{once, Once},
177     panic_fuse::PanicFuse,
178     par_bridge::{IterBridge, ParallelBridge},
179     positions::Positions,
180     repeat::{repeat, repeatn, Repeat, RepeatN},
181     rev::Rev,
182     skip::Skip,
183     splitter::{split, Split},
184     take::Take,
185     try_fold::{TryFold, TryFoldWith},
186     update::Update,
187     while_some::WhileSome,
188     zip::Zip,
189     zip_eq::ZipEq,
190 };
191 
192 mod step_by;
193 #[cfg(step_by)]
194 pub use self::step_by::StepBy;
195 
196 /// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
197 ///
198 /// By implementing `IntoParallelIterator` for a type, you define how it will
/// be transformed into an iterator. This is a parallel version of the standard
200 /// library's [`std::iter::IntoIterator`] trait.
201 ///
202 /// [`ParallelIterator`]: trait.ParallelIterator.html
203 /// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
204 pub trait IntoParallelIterator {
205     /// The parallel iterator type that will be created.
206     type Iter: ParallelIterator<Item = Self::Item>;
207 
208     /// The type of item that the parallel iterator will produce.
209     type Item: Send;
210 
211     /// Converts `self` into a parallel iterator.
212     ///
213     /// # Examples
214     ///
215     /// ```
216     /// use rayon::prelude::*;
217     ///
218     /// println!("counting in parallel:");
219     /// (0..100).into_par_iter()
220     ///     .for_each(|i| println!("{}", i));
221     /// ```
222     ///
223     /// This conversion is often implicit for arguments to methods like [`zip`].
224     ///
225     /// ```
226     /// use rayon::prelude::*;
227     ///
228     /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
229     /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
230     /// ```
231     ///
232     /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
    fn into_par_iter(self) -> Self::Iter;
234 }
235 
236 /// `IntoParallelRefIterator` implements the conversion to a
237 /// [`ParallelIterator`], providing shared references to the data.
238 ///
239 /// This is a parallel version of the `iter()` method
240 /// defined by various collections.
241 ///
242 /// This trait is automatically implemented
243 /// `for I where &I: IntoParallelIterator`. In most cases, users
244 /// will want to implement [`IntoParallelIterator`] rather than implement
245 /// this trait directly.
246 ///
247 /// [`ParallelIterator`]: trait.ParallelIterator.html
248 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
249 pub trait IntoParallelRefIterator<'data> {
250     /// The type of the parallel iterator that will be returned.
251     type Iter: ParallelIterator<Item = Self::Item>;
252 
253     /// The type of item that the parallel iterator will produce.
254     /// This will typically be an `&'data T` reference type.
255     type Item: Send + 'data;
256 
257     /// Converts `self` into a parallel iterator.
258     ///
259     /// # Examples
260     ///
261     /// ```
262     /// use rayon::prelude::*;
263     ///
264     /// let v: Vec<_> = (0..100).collect();
265     /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
266     ///
267     /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
268     /// // producing the exact same references.
269     /// assert!(v.par_iter().zip(&v)
270     ///          .all(|(a, b)| std::ptr::eq(a, b)));
271     /// ```
    fn par_iter(&'data self) -> Self::Iter;
273 }
274 
275 impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
276 where
277     &'data I: IntoParallelIterator,
278 {
279     type Iter = <&'data I as IntoParallelIterator>::Iter;
280     type Item = <&'data I as IntoParallelIterator>::Item;
281 
    fn par_iter(&'data self) -> Self::Iter {
283         self.into_par_iter()
284     }
285 }
286 
287 /// `IntoParallelRefMutIterator` implements the conversion to a
288 /// [`ParallelIterator`], providing mutable references to the data.
289 ///
290 /// This is a parallel version of the `iter_mut()` method
291 /// defined by various collections.
292 ///
293 /// This trait is automatically implemented
294 /// `for I where &mut I: IntoParallelIterator`. In most cases, users
295 /// will want to implement [`IntoParallelIterator`] rather than implement
296 /// this trait directly.
297 ///
298 /// [`ParallelIterator`]: trait.ParallelIterator.html
299 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
300 pub trait IntoParallelRefMutIterator<'data> {
301     /// The type of iterator that will be created.
302     type Iter: ParallelIterator<Item = Self::Item>;
303 
304     /// The type of item that will be produced; this is typically an
305     /// `&'data mut T` reference.
306     type Item: Send + 'data;
307 
308     /// Creates the parallel iterator from `self`.
309     ///
310     /// # Examples
311     ///
312     /// ```
313     /// use rayon::prelude::*;
314     ///
315     /// let mut v = vec![0usize; 5];
316     /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
317     /// assert_eq!(v, [0, 1, 2, 3, 4]);
318     /// ```
    fn par_iter_mut(&'data mut self) -> Self::Iter;
320 }
321 
322 impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
323 where
324     &'data mut I: IntoParallelIterator,
325 {
326     type Iter = <&'data mut I as IntoParallelIterator>::Iter;
327     type Item = <&'data mut I as IntoParallelIterator>::Item;
328 
    fn par_iter_mut(&'data mut self) -> Self::Iter {
330         self.into_par_iter()
331     }
332 }
333 
334 /// Parallel version of the standard iterator trait.
335 ///
336 /// The combinators on this trait are available on **all** parallel
337 /// iterators.  Additional methods can be found on the
338 /// [`IndexedParallelIterator`] trait: those methods are only
339 /// available for parallel iterators where the number of items is
340 /// known in advance (so, e.g., after invoking `filter`, those methods
341 /// become unavailable).
342 ///
343 /// For examples of using parallel iterators, see [the docs on the
344 /// `iter` module][iter].
345 ///
346 /// [iter]: index.html
347 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
348 pub trait ParallelIterator: Sized + Send {
349     /// The type of item that this parallel iterator produces.
350     /// For example, if you use the [`for_each`] method, this is the type of
351     /// item that your closure will be invoked with.
352     ///
353     /// [`for_each`]: #method.for_each
354     type Item: Send;
355 
356     /// Executes `OP` on each item produced by the iterator, in parallel.
357     ///
358     /// # Examples
359     ///
360     /// ```
361     /// use rayon::prelude::*;
362     ///
363     /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
364     /// ```
    fn for_each<OP>(self, op: OP)
366     where
367         OP: Fn(Self::Item) + Sync + Send,
368     {
369         for_each::for_each(self, &op)
370     }
371 
372     /// Executes `OP` on the given `init` value with each item produced by
373     /// the iterator, in parallel.
374     ///
375     /// The `init` value will be cloned only as needed to be paired with
376     /// the group of items in each rayon job.  It does not require the type
377     /// to be `Sync`.
378     ///
379     /// # Examples
380     ///
381     /// ```
382     /// use std::sync::mpsc::channel;
383     /// use rayon::prelude::*;
384     ///
385     /// let (sender, receiver) = channel();
386     ///
387     /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
388     ///
389     /// let mut res: Vec<_> = receiver.iter().collect();
390     ///
391     /// res.sort();
392     ///
393     /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
394     /// ```
    fn for_each_with<OP, T>(self, init: T, op: OP)
396     where
397         OP: Fn(&mut T, Self::Item) + Sync + Send,
398         T: Send + Clone,
399     {
400         self.map_with(init, op).collect()
401     }
402 
403     /// Executes `OP` on a value returned by `init` with each item produced by
404     /// the iterator, in parallel.
405     ///
406     /// The `init` function will be called only as needed for a value to be
407     /// paired with the group of items in each rayon job.  There is no
408     /// constraint on that returned type at all!
409     ///
410     /// # Examples
411     ///
412     /// ```
413     /// use rand::Rng;
414     /// use rayon::prelude::*;
415     ///
416     /// let mut v = vec![0u8; 1_000_000];
417     ///
418     /// v.par_chunks_mut(1000)
419     ///     .for_each_init(
420     ///         || rand::thread_rng(),
421     ///         |rng, chunk| rng.fill(chunk),
422     ///     );
423     ///
424     /// // There's a remote chance that this will fail...
425     /// for i in 0u8..=255 {
426     ///     assert!(v.contains(&i));
427     /// }
428     /// ```
    fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
430     where
431         OP: Fn(&mut T, Self::Item) + Sync + Send,
432         INIT: Fn() -> T + Sync + Send,
433     {
434         self.map_init(init, op).collect()
435     }
436 
437     /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
438     ///
439     /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
440     /// stop processing the rest of the items in the iterator as soon as
441     /// possible, and we will return that terminating value.  Otherwise, we will
442     /// return an empty `Result::Ok(())` or `Option::Some(())`.  If there are
443     /// multiple errors in parallel, it is not specified which will be returned.
444     ///
445     /// # Examples
446     ///
447     /// ```
448     /// use rayon::prelude::*;
449     /// use std::io::{self, Write};
450     ///
451     /// // This will stop iteration early if there's any write error, like
452     /// // having piped output get closed on the other end.
453     /// (0..100).into_par_iter()
454     ///     .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
455     ///     .expect("expected no write errors");
456     /// ```
    fn try_for_each<OP, R>(self, op: OP) -> R
458     where
459         OP: Fn(Self::Item) -> R + Sync + Send,
460         R: Try<Ok = ()> + Send,
461     {
462         fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R {
463             R::from_ok(())
464         }
465 
466         self.map(op).try_reduce(<()>::default, ok)
467     }
468 
469     /// Executes a fallible `OP` on the given `init` value with each item
470     /// produced by the iterator, in parallel.
471     ///
472     /// This combines the `init` semantics of [`for_each_with()`] and the
473     /// failure semantics of [`try_for_each()`].
474     ///
475     /// [`for_each_with()`]: #method.for_each_with
476     /// [`try_for_each()`]: #method.try_for_each
477     ///
478     /// # Examples
479     ///
480     /// ```
481     /// use std::sync::mpsc::channel;
482     /// use rayon::prelude::*;
483     ///
484     /// let (sender, receiver) = channel();
485     ///
486     /// (0..5).into_par_iter()
487     ///     .try_for_each_with(sender, |s, x| s.send(x))
488     ///     .expect("expected no send errors");
489     ///
490     /// let mut res: Vec<_> = receiver.iter().collect();
491     ///
492     /// res.sort();
493     ///
494     /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
495     /// ```
    fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
497     where
498         OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
499         T: Send + Clone,
500         R: Try<Ok = ()> + Send,
501     {
502         fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R {
503             R::from_ok(())
504         }
505 
506         self.map_with(init, op).try_reduce(<()>::default, ok)
507     }
508 
509     /// Executes a fallible `OP` on a value returned by `init` with each item
510     /// produced by the iterator, in parallel.
511     ///
512     /// This combines the `init` semantics of [`for_each_init()`] and the
513     /// failure semantics of [`try_for_each()`].
514     ///
515     /// [`for_each_init()`]: #method.for_each_init
516     /// [`try_for_each()`]: #method.try_for_each
517     ///
518     /// # Examples
519     ///
520     /// ```
521     /// use rand::Rng;
522     /// use rayon::prelude::*;
523     ///
524     /// let mut v = vec![0u8; 1_000_000];
525     ///
526     /// v.par_chunks_mut(1000)
527     ///     .try_for_each_init(
528     ///         || rand::thread_rng(),
529     ///         |rng, chunk| rng.try_fill(chunk),
530     ///     )
531     ///     .expect("expected no rand errors");
532     ///
533     /// // There's a remote chance that this will fail...
534     /// for i in 0u8..=255 {
535     ///     assert!(v.contains(&i));
536     /// }
537     /// ```
    fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
539     where
540         OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
541         INIT: Fn() -> T + Sync + Send,
542         R: Try<Ok = ()> + Send,
543     {
544         fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R {
545             R::from_ok(())
546         }
547 
548         self.map_init(init, op).try_reduce(<()>::default, ok)
549     }
550 
551     /// Counts the number of items in this parallel iterator.
552     ///
553     /// # Examples
554     ///
555     /// ```
556     /// use rayon::prelude::*;
557     ///
558     /// let count = (0..100).into_par_iter().count();
559     ///
560     /// assert_eq!(count, 100);
561     /// ```
    fn count(self) -> usize {
563         fn one<T>(_: T) -> usize {
564             1
565         }
566 
567         self.map(one).sum()
568     }
569 
570     /// Applies `map_op` to each item of this iterator, producing a new
571     /// iterator with the results.
572     ///
573     /// # Examples
574     ///
575     /// ```
576     /// use rayon::prelude::*;
577     ///
578     /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
579     ///
580     /// let doubles: Vec<_> = par_iter.collect();
581     ///
582     /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
583     /// ```
    fn map<F, R>(self, map_op: F) -> Map<Self, F>
585     where
586         F: Fn(Self::Item) -> R + Sync + Send,
587         R: Send,
588     {
589         Map::new(self, map_op)
590     }
591 
592     /// Applies `map_op` to the given `init` value with each item of this
593     /// iterator, producing a new iterator with the results.
594     ///
595     /// The `init` value will be cloned only as needed to be paired with
596     /// the group of items in each rayon job.  It does not require the type
597     /// to be `Sync`.
598     ///
599     /// # Examples
600     ///
601     /// ```
602     /// use std::sync::mpsc::channel;
603     /// use rayon::prelude::*;
604     ///
605     /// let (sender, receiver) = channel();
606     ///
607     /// let a: Vec<_> = (0..5)
608     ///                 .into_par_iter()            // iterating over i32
609     ///                 .map_with(sender, |s, x| {
610     ///                     s.send(x).unwrap();     // sending i32 values through the channel
611     ///                     x                       // returning i32
612     ///                 })
613     ///                 .collect();                 // collecting the returned values into a vector
614     ///
615     /// let mut b: Vec<_> = receiver.iter()         // iterating over the values in the channel
616     ///                             .collect();     // and collecting them
617     /// b.sort();
618     ///
619     /// assert_eq!(a, b);
620     /// ```
    fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
622     where
623         F: Fn(&mut T, Self::Item) -> R + Sync + Send,
624         T: Send + Clone,
625         R: Send,
626     {
627         MapWith::new(self, init, map_op)
628     }
629 
630     /// Applies `map_op` to a value returned by `init` with each item of this
631     /// iterator, producing a new iterator with the results.
632     ///
633     /// The `init` function will be called only as needed for a value to be
634     /// paired with the group of items in each rayon job.  There is no
635     /// constraint on that returned type at all!
636     ///
637     /// # Examples
638     ///
639     /// ```
640     /// use rand::Rng;
641     /// use rayon::prelude::*;
642     ///
643     /// let a: Vec<_> = (1i32..1_000_000)
644     ///     .into_par_iter()
645     ///     .map_init(
646     ///         || rand::thread_rng(),  // get the thread-local RNG
647     ///         |rng, x| if rng.gen() { // randomly negate items
648     ///             -x
649     ///         } else {
650     ///             x
651     ///         },
652     ///     ).collect();
653     ///
654     /// // There's a remote chance that this will fail...
655     /// assert!(a.iter().any(|&x| x < 0));
656     /// assert!(a.iter().any(|&x| x > 0));
657     /// ```
    fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
659     where
660         F: Fn(&mut T, Self::Item) -> R + Sync + Send,
661         INIT: Fn() -> T + Sync + Send,
662         R: Send,
663     {
664         MapInit::new(self, init, map_op)
665     }
666 
667     /// Creates an iterator which clones all of its elements.  This may be
668     /// useful when you have an iterator over `&T`, but you need `T`, and
669     /// that type implements `Clone`. See also [`copied()`].
670     ///
671     /// [`copied()`]: #method.copied
672     ///
673     /// # Examples
674     ///
675     /// ```
676     /// use rayon::prelude::*;
677     ///
678     /// let a = [1, 2, 3];
679     ///
680     /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
681     ///
682     /// // cloned is the same as .map(|&x| x), for integers
683     /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
684     ///
685     /// assert_eq!(v_cloned, vec![1, 2, 3]);
686     /// assert_eq!(v_map, vec![1, 2, 3]);
687     /// ```
    fn cloned<'a, T>(self) -> Cloned<Self>
689     where
690         T: 'a + Clone + Send,
691         Self: ParallelIterator<Item = &'a T>,
692     {
693         Cloned::new(self)
694     }
695 
696     /// Creates an iterator which copies all of its elements.  This may be
697     /// useful when you have an iterator over `&T`, but you need `T`, and
698     /// that type implements `Copy`. See also [`cloned()`].
699     ///
700     /// [`cloned()`]: #method.cloned
701     ///
702     /// # Examples
703     ///
704     /// ```
705     /// use rayon::prelude::*;
706     ///
707     /// let a = [1, 2, 3];
708     ///
709     /// let v_copied: Vec<_> = a.par_iter().copied().collect();
710     ///
711     /// // copied is the same as .map(|&x| x), for integers
712     /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
713     ///
714     /// assert_eq!(v_copied, vec![1, 2, 3]);
715     /// assert_eq!(v_map, vec![1, 2, 3]);
716     /// ```
    fn copied<'a, T>(self) -> Copied<Self>
718     where
719         T: 'a + Copy + Send,
720         Self: ParallelIterator<Item = &'a T>,
721     {
722         Copied::new(self)
723     }
724 
725     /// Applies `inspect_op` to a reference to each item of this iterator,
726     /// producing a new iterator passing through the original items.  This is
727     /// often useful for debugging to see what's happening in iterator stages.
728     ///
729     /// # Examples
730     ///
731     /// ```
732     /// use rayon::prelude::*;
733     ///
734     /// let a = [1, 4, 2, 3];
735     ///
736     /// // this iterator sequence is complex.
737     /// let sum = a.par_iter()
738     ///             .cloned()
739     ///             .filter(|&x| x % 2 == 0)
740     ///             .reduce(|| 0, |sum, i| sum + i);
741     ///
742     /// println!("{}", sum);
743     ///
744     /// // let's add some inspect() calls to investigate what's happening
745     /// let sum = a.par_iter()
746     ///             .cloned()
747     ///             .inspect(|x| println!("about to filter: {}", x))
748     ///             .filter(|&x| x % 2 == 0)
749     ///             .inspect(|x| println!("made it through filter: {}", x))
750     ///             .reduce(|| 0, |sum, i| sum + i);
751     ///
752     /// println!("{}", sum);
753     /// ```
    fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
755     where
756         OP: Fn(&Self::Item) + Sync + Send,
757     {
758         Inspect::new(self, inspect_op)
759     }
760 
761     /// Mutates each item of this iterator before yielding it.
762     ///
763     /// # Examples
764     ///
765     /// ```
766     /// use rayon::prelude::*;
767     ///
768     /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
769     ///
770     /// let doubles: Vec<_> = par_iter.collect();
771     ///
772     /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
773     /// ```
    fn update<F>(self, update_op: F) -> Update<Self, F>
775     where
776         F: Fn(&mut Self::Item) + Sync + Send,
777     {
778         Update::new(self, update_op)
779     }
780 
781     /// Applies `filter_op` to each item of this iterator, producing a new
782     /// iterator with only the items that gave `true` results.
783     ///
784     /// # Examples
785     ///
786     /// ```
787     /// use rayon::prelude::*;
788     ///
789     /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
790     ///
791     /// let even_numbers: Vec<_> = par_iter.collect();
792     ///
793     /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
794     /// ```
    fn filter<P>(self, filter_op: P) -> Filter<Self, P>
796     where
797         P: Fn(&Self::Item) -> bool + Sync + Send,
798     {
799         Filter::new(self, filter_op)
800     }
801 
802     /// Applies `filter_op` to each item of this iterator to get an `Option`,
803     /// producing a new iterator with only the items from `Some` results.
804     ///
805     /// # Examples
806     ///
807     /// ```
808     /// use rayon::prelude::*;
809     ///
810     /// let mut par_iter = (0..10).into_par_iter()
811     ///                         .filter_map(|x| {
812     ///                             if x % 2 == 0 { Some(x * 3) }
813     ///                             else { None }
814     ///                         });
815     ///
816     /// let even_numbers: Vec<_> = par_iter.collect();
817     ///
818     /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
819     /// ```
    fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
821     where
822         P: Fn(Self::Item) -> Option<R> + Sync + Send,
823         R: Send,
824     {
825         FilterMap::new(self, filter_op)
826     }
827 
828     /// Applies `map_op` to each item of this iterator to get nested parallel iterators,
829     /// producing a new parallel iterator that flattens these back into one.
830     ///
831     /// See also [`flat_map_iter`](#method.flat_map_iter).
832     ///
833     /// # Examples
834     ///
835     /// ```
836     /// use rayon::prelude::*;
837     ///
838     /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
839     ///
840     /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
841     ///
842     /// let vec: Vec<_> = par_iter.collect();
843     ///
844     /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
845     /// ```
    fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
847     where
848         F: Fn(Self::Item) -> PI + Sync + Send,
849         PI: IntoParallelIterator,
850     {
851         FlatMap::new(self, map_op)
852     }
853 
854     /// Applies `map_op` to each item of this iterator to get nested serial iterators,
855     /// producing a new parallel iterator that flattens these back into one.
856     ///
857     /// # `flat_map_iter` versus `flat_map`
858     ///
859     /// These two methods are similar but behave slightly differently. With [`flat_map`],
860     /// each of the nested iterators must be a parallel iterator, and they will be further
861     /// split up with nested parallelism. With `flat_map_iter`, each nested iterator is a
862     /// sequential `Iterator`, and we only parallelize _between_ them, while the items
863     /// produced by each nested iterator are processed sequentially.
864     ///
865     /// When choosing between these methods, consider whether nested parallelism suits the
866     /// potential iterators at hand. If there's little computation involved, or its length
867     /// is much less than the outer parallel iterator, then it may perform better to avoid
868     /// the overhead of parallelism, just flattening sequentially with `flat_map_iter`.
869     /// If there is a lot of computation, potentially outweighing the outer parallel
870     /// iterator, then the nested parallelism of `flat_map` may be worthwhile.
871     ///
872     /// [`flat_map`]: #method.flat_map
873     ///
874     /// # Examples
875     ///
876     /// ```
877     /// use rayon::prelude::*;
878     /// use std::cell::RefCell;
879     ///
880     /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
881     ///
882     /// let par_iter = a.par_iter().flat_map_iter(|a| {
883     ///     // The serial iterator doesn't have to be thread-safe, just its items.
884     ///     let cell_iter = RefCell::new(a.iter().cloned());
885     ///     std::iter::from_fn(move || cell_iter.borrow_mut().next())
886     /// });
887     ///
888     /// let vec: Vec<_> = par_iter.collect();
889     ///
890     /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
891     /// ```
    fn flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F>
893     where
894         F: Fn(Self::Item) -> SI + Sync + Send,
895         SI: IntoIterator,
896         SI::Item: Send,
897     {
898         FlatMapIter::new(self, map_op)
899     }
900 
901     /// An adaptor that flattens parallel-iterable `Item`s into one large iterator.
902     ///
903     /// See also [`flatten_iter`](#method.flatten_iter).
904     ///
905     /// # Examples
906     ///
907     /// ```
908     /// use rayon::prelude::*;
909     ///
910     /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
911     /// let y: Vec<_> = x.into_par_iter().flatten().collect();
912     ///
913     /// assert_eq!(y, vec![1, 2, 3, 4]);
914     /// ```
    fn flatten(self) -> Flatten<Self>
916     where
917         Self::Item: IntoParallelIterator,
918     {
919         Flatten::new(self)
920     }
921 
922     /// An adaptor that flattens serial-iterable `Item`s into one large iterator.
923     ///
    /// See also [`flatten`](#method.flatten) and the analogous comparison of
925     /// [`flat_map_iter` versus `flat_map`](#flat_map_iter-versus-flat_map).
926     ///
927     /// # Examples
928     ///
929     /// ```
930     /// use rayon::prelude::*;
931     ///
932     /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
933     /// let iters: Vec<_> = x.into_iter().map(Vec::into_iter).collect();
934     /// let y: Vec<_> = iters.into_par_iter().flatten_iter().collect();
935     ///
936     /// assert_eq!(y, vec![1, 2, 3, 4]);
937     /// ```
    fn flatten_iter(self) -> FlattenIter<Self>
939     where
940         Self::Item: IntoIterator,
941         <Self::Item as IntoIterator>::Item: Send,
942     {
943         FlattenIter::new(self)
944     }
945 
946     /// Reduces the items in the iterator into one item using `op`.
    /// The argument `identity` should be a closure that can produce an
948     /// "identity" value which may be inserted into the sequence as
949     /// needed to create opportunities for parallel execution. So, for
950     /// example, if you are doing a summation, then `identity()` ought
951     /// to produce something that represents the zero for your type
952     /// (but consider just calling `sum()` in that case).
953     ///
954     /// # Examples
955     ///
956     /// ```
957     /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
958     /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
959     /// // where the first/second elements are summed separately.
960     /// use rayon::prelude::*;
961     /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
962     ///            .par_iter()        // iterating over &(i32, i32)
963     ///            .cloned()          // iterating over (i32, i32)
964     ///            .reduce(|| (0, 0), // the "identity" is 0 in both columns
965     ///                    |a, b| (a.0 + b.0, a.1 + b.1));
966     /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
967     /// ```
968     ///
969     /// **Note:** unlike a sequential `fold` operation, the order in
970     /// which `op` will be applied to reduce the result is not fully
971     /// specified. So `op` should be [associative] or else the results
972     /// will be non-deterministic. And of course `identity()` should
973     /// produce a true identity.
974     ///
975     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
    fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
977     where
978         OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
979         ID: Fn() -> Self::Item + Sync + Send,
980     {
981         reduce::reduce(self, identity, op)
982     }
983 
984     /// Reduces the items in the iterator into one item using `op`.
985     /// If the iterator is empty, `None` is returned; otherwise,
986     /// `Some` is returned.
987     ///
988     /// This version of `reduce` is simple but somewhat less
989     /// efficient. If possible, it is better to call `reduce()`, which
990     /// requires an identity element.
991     ///
992     /// # Examples
993     ///
994     /// ```
995     /// use rayon::prelude::*;
996     /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
997     ///            .par_iter()        // iterating over &(i32, i32)
998     ///            .cloned()          // iterating over (i32, i32)
999     ///            .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
1000     ///            .unwrap();
1001     /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
1002     /// ```
1003     ///
1004     /// **Note:** unlike a sequential `fold` operation, the order in
1005     /// which `op` will be applied to reduce the result is not fully
1006     /// specified. So `op` should be [associative] or else the results
1007     /// will be non-deterministic.
1008     ///
1009     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
    fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
1011     where
1012         OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
1013     {
1014         fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
1015             move |opt_a, b| match opt_a {
1016                 Some(a) => Some(op(a, b)),
1017                 None => Some(b),
1018             }
1019         }
1020 
1021         fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
1022             move |opt_a, opt_b| match (opt_a, opt_b) {
1023                 (Some(a), Some(b)) => Some(op(a, b)),
1024                 (Some(v), None) | (None, Some(v)) => Some(v),
1025                 (None, None) => None,
1026             }
1027         }
1028 
1029         self.fold(<_>::default, opt_fold(&op))
1030             .reduce(<_>::default, opt_reduce(&op))
1031     }
1032 
1033     /// Reduces the items in the iterator into one item using a fallible `op`.
1034     /// The `identity` argument is used the same way as in [`reduce()`].
1035     ///
1036     /// [`reduce()`]: #method.reduce
1037     ///
1038     /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
1039     /// to one, we will attempt to stop processing the rest of the items in the
1040     /// iterator as soon as possible, and we will return that terminating value.
1041     /// Otherwise, we will return the final reduced `Result::Ok(T)` or
1042     /// `Option::Some(T)`.  If there are multiple errors in parallel, it is not
1043     /// specified which will be returned.
1044     ///
1045     /// # Examples
1046     ///
1047     /// ```
1048     /// use rayon::prelude::*;
1049     ///
1050     /// // Compute the sum of squares, being careful about overflow.
1051     /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
1052     ///     iter.into_par_iter()
1053     ///         .map(|i| i.checked_mul(i))            // square each item,
1054     ///         .try_reduce(|| 0, i32::checked_add)   // and add them up!
1055     /// }
1056     /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
1057     ///
1058     /// // The sum might overflow
1059     /// assert_eq!(sum_squares(0..10_000), None);
1060     ///
1061     /// // Or the squares might overflow before it even reaches `try_reduce`
1062     /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
1063     /// ```
    fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
1065     where
1066         OP: Fn(T, T) -> Self::Item + Sync + Send,
1067         ID: Fn() -> T + Sync + Send,
1068         Self::Item: Try<Ok = T>,
1069     {
1070         try_reduce::try_reduce(self, identity, op)
1071     }
1072 
1073     /// Reduces the items in the iterator into one item using a fallible `op`.
1074     ///
1075     /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
1076     /// otherwise, `Some` is returned.  Beyond that, it behaves like
1077     /// [`try_reduce()`] for handling `Err`/`None`.
1078     ///
1079     /// [`reduce_with()`]: #method.reduce_with
1080     /// [`try_reduce()`]: #method.try_reduce
1081     ///
1082     /// For instance, with `Option` items, the return value may be:
1083     /// - `None`, the iterator was empty
1084     /// - `Some(None)`, we stopped after encountering `None`.
1085     /// - `Some(Some(x))`, the entire iterator reduced to `x`.
1086     ///
1087     /// With `Result` items, the nesting is more obvious:
1088     /// - `None`, the iterator was empty
1089     /// - `Some(Err(e))`, we stopped after encountering an error `e`.
1090     /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
1091     ///
1092     /// # Examples
1093     ///
1094     /// ```
1095     /// use rayon::prelude::*;
1096     ///
1097     /// let files = ["/dev/null", "/does/not/exist"];
1098     ///
1099     /// // Find the biggest file
1100     /// files.into_par_iter()
1101     ///     .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
1102     ///     .try_reduce_with(|a, b| {
1103     ///         Ok(if a.1 >= b.1 { a } else { b })
1104     ///     })
1105     ///     .expect("Some value, since the iterator is not empty")
1106     ///     .expect_err("not found");
1107     /// ```
    fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
1109     where
1110         OP: Fn(T, T) -> Self::Item + Sync + Send,
1111         Self::Item: Try<Ok = T>,
1112     {
1113         try_reduce_with::try_reduce_with(self, op)
1114     }
1115 
1116     /// Parallel fold is similar to sequential fold except that the
1117     /// sequence of items may be subdivided before it is
1118     /// folded. Consider a list of numbers like `22 3 77 89 46`. If
    /// you used sequential fold to add them (`fold(0, |a,b| a+b)`),
1120     /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
1121     /// 77, and so forth. The **parallel fold** works similarly except
1122     /// that it first breaks up your list into sublists, and hence
1123     /// instead of yielding up a single sum at the end, it yields up
1124     /// multiple sums. The number of results is nondeterministic, as
1125     /// is the point where the breaks occur.
1126     ///
    /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
1128     /// our example list, we might wind up with a sequence of two numbers,
1129     /// like so:
1130     ///
1131     /// ```notrust
1132     /// 22 3 77 89 46
1133     ///       |     |
1134     ///     102   135
1135     /// ```
1136     ///
1137     /// Or perhaps these three numbers:
1138     ///
1139     /// ```notrust
1140     /// 22 3 77 89 46
1141     ///       |  |  |
1142     ///     102 89 46
1143     /// ```
1144     ///
1145     /// In general, Rayon will attempt to find good breaking points
1146     /// that keep all of your cores busy.
1147     ///
1148     /// ### Fold versus reduce
1149     ///
1150     /// The `fold()` and `reduce()` methods each take an identity element
1151     /// and a combining function, but they operate rather differently.
1152     ///
1153     /// `reduce()` requires that the identity function has the same
1154     /// type as the things you are iterating over, and it fully
1155     /// reduces the list of items into a single item. So, for example,
1156     /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1157     /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1158     /// u8| a + b)`, we would get an overflow. This is because `0`,
1159     /// `a`, and `b` here are all bytes, just like the numbers in the
1160     /// list (I wrote the types explicitly above, but those are the
1161     /// only types you can use). To avoid the overflow, we would need
1162     /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1163     /// b| a + b)`, in which case our result would be `256`.
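    ///
    /// For instance, a sketch of that map/reduce workaround, using a vector as
    /// the stand-in for `bytes`:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let bytes = vec![128_u8, 64_u8, 64_u8];
    /// // Widen each byte to `u32` before reducing, so the sum cannot overflow.
    /// let sum: u32 = bytes.par_iter().map(|&b| b as u32).reduce(|| 0, |a, b| a + b);
    /// assert_eq!(sum, 256);
    /// ```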
1164     ///
1165     /// In contrast, with `fold()`, the identity function does not
1166     /// have to have the same type as the things you are iterating
1167     /// over, and you potentially get back many results. So, if we
1168     /// continue with the `bytes` example from the previous paragraph,
1169     /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1170     /// convert our bytes into `u32`. And of course we might not get
1171     /// back a single sum.
1172     ///
1173     /// There is a more subtle distinction as well, though it's
1174     /// actually implied by the above points. When you use `reduce()`,
1175     /// your reduction function is sometimes called with values that
1176     /// were never part of your original parallel iterator (for
1177     /// example, both the left and right might be a partial sum). With
1178     /// `fold()`, in contrast, the left value in the fold function is
1179     /// always the accumulator, and the right value is always from
1180     /// your original sequence.
1181     ///
1182     /// ### Fold vs Map/Reduce
1183     ///
1184     /// Fold makes sense if you have some operation where it is
1185     /// cheaper to create groups of elements at a time. For example,
1186     /// imagine collecting characters into a string. If you were going
1187     /// to use map/reduce, you might try this:
1188     ///
1189     /// ```
1190     /// use rayon::prelude::*;
1191     ///
1192     /// let s =
1193     ///     ['a', 'b', 'c', 'd', 'e']
1194     ///     .par_iter()
1195     ///     .map(|c: &char| format!("{}", c))
1196     ///     .reduce(|| String::new(),
1197     ///             |mut a: String, b: String| { a.push_str(&b); a });
1198     ///
1199     /// assert_eq!(s, "abcde");
1200     /// ```
1201     ///
1202     /// Because reduce produces the same type of element as its input,
1203     /// you have to first map each character into a string, and then
1204     /// you can reduce them. This means we create one string per
1205     /// element in our iterator -- not so great. Using `fold`, we can
1206     /// do this instead:
1207     ///
1208     /// ```
1209     /// use rayon::prelude::*;
1210     ///
1211     /// let s =
1212     ///     ['a', 'b', 'c', 'd', 'e']
1213     ///     .par_iter()
1214     ///     .fold(|| String::new(),
1215     ///             |mut s: String, c: &char| { s.push(*c); s })
1216     ///     .reduce(|| String::new(),
1217     ///             |mut a: String, b: String| { a.push_str(&b); a });
1218     ///
1219     /// assert_eq!(s, "abcde");
1220     /// ```
1221     ///
1222     /// Now `fold` will process groups of our characters at a time,
1223     /// and we only make one string per group. We should wind up with
1224     /// some small-ish number of strings roughly proportional to the
1225     /// number of CPUs you have (it will ultimately depend on how busy
1226     /// your processors are). Note that we still need to do a reduce
1227     /// afterwards to combine those groups of strings into a single
1228     /// string.
1229     ///
1230     /// You could use a similar trick to save partial results (e.g., a
1231     /// cache) or something similar.
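    ///
    /// For instance, a sketch of that caching idea: fold word counts into one
    /// `HashMap` per job, then merge the per-job maps with `reduce`:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::collections::HashMap;
    ///
    /// let words = vec!["a", "b", "a", "c", "b", "a"];
    /// let counts: HashMap<&str, u32> = words
    ///     .par_iter()
    ///     .fold(HashMap::new, |mut map, &word| {
    ///         // Accumulate counts locally within each rayon job...
    ///         *map.entry(word).or_insert(0) += 1;
    ///         map
    ///     })
    ///     .reduce(HashMap::new, |mut a, b| {
    ///         // ...then merge the partial maps pairwise.
    ///         for (word, n) in b {
    ///             *a.entry(word).or_insert(0) += n;
    ///         }
    ///         a
    ///     });
    /// assert_eq!(counts["a"], 3);
    /// ```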
1232     ///
1233     /// ### Combining fold with other operations
1234     ///
1235     /// You can combine `fold` with `reduce` if you want to produce a
1236     /// single value. This is then roughly equivalent to a map/reduce
1237     /// combination in effect:
1238     ///
1239     /// ```
1240     /// use rayon::prelude::*;
1241     ///
1242     /// let bytes = 0..22_u8;
1243     /// let sum = bytes.into_par_iter()
1244     ///                .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1245     ///                .sum::<u32>();
1246     ///
1247     /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1248     /// ```
    fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
1250     where
1251         F: Fn(T, Self::Item) -> T + Sync + Send,
1252         ID: Fn() -> T + Sync + Send,
1253         T: Send,
1254     {
1255         Fold::new(self, identity, fold_op)
1256     }
1257 
1258     /// Applies `fold_op` to the given `init` value with each item of this
1259     /// iterator, finally producing the value for further use.
1260     ///
1261     /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1262     /// it doesn't require the `init` type to be `Sync`, nor any other form
1263     /// of added synchronization.
1264     ///
1265     /// # Examples
1266     ///
1267     /// ```
1268     /// use rayon::prelude::*;
1269     ///
1270     /// let bytes = 0..22_u8;
1271     /// let sum = bytes.into_par_iter()
1272     ///                .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1273     ///                .sum::<u32>();
1274     ///
1275     /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1276     /// ```
    fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
1278     where
1279         F: Fn(T, Self::Item) -> T + Sync + Send,
1280         T: Send + Clone,
1281     {
1282         FoldWith::new(self, init, fold_op)
1283     }
1284 
1285     /// Performs a fallible parallel fold.
1286     ///
1287     /// This is a variation of [`fold()`] for operations which can fail with
1288     /// `Option::None` or `Result::Err`.  The first such failure stops
1289     /// processing the local set of items, without affecting other folds in the
1290     /// iterator's subdivisions.
1291     ///
1292     /// Often, `try_fold()` will be followed by [`try_reduce()`]
1293     /// for a final reduction and global short-circuiting effect.
1294     ///
1295     /// [`fold()`]: #method.fold
1296     /// [`try_reduce()`]: #method.try_reduce
1297     ///
1298     /// # Examples
1299     ///
1300     /// ```
1301     /// use rayon::prelude::*;
1302     ///
1303     /// let bytes = 0..22_u8;
1304     /// let sum = bytes.into_par_iter()
1305     ///                .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1306     ///                .try_reduce(|| 0, u32::checked_add);
1307     ///
1308     /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1309     /// ```
    fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
1311     where
1312         F: Fn(T, Self::Item) -> R + Sync + Send,
1313         ID: Fn() -> T + Sync + Send,
1314         R: Try<Ok = T> + Send,
1315     {
1316         TryFold::new(self, identity, fold_op)
1317     }
1318 
1319     /// Performs a fallible parallel fold with a cloneable `init` value.
1320     ///
1321     /// This combines the `init` semantics of [`fold_with()`] and the failure
1322     /// semantics of [`try_fold()`].
1323     ///
1324     /// [`fold_with()`]: #method.fold_with
1325     /// [`try_fold()`]: #method.try_fold
1326     ///
1327     /// ```
1328     /// use rayon::prelude::*;
1329     ///
1330     /// let bytes = 0..22_u8;
1331     /// let sum = bytes.into_par_iter()
1332     ///                .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1333     ///                .try_reduce(|| 0, u32::checked_add);
1334     ///
1335     /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1336     /// ```
    fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
1338     where
1339         F: Fn(T, Self::Item) -> R + Sync + Send,
1340         R: Try<Ok = T> + Send,
1341         T: Clone + Send,
1342     {
1343         TryFoldWith::new(self, init, fold_op)
1344     }
1345 
1346     /// Sums up the items in the iterator.
1347     ///
    /// Note that the order in which items will be reduced is not specified,
1349     /// so if the `+` operator is not truly [associative] \(as is the
1350     /// case for floating point numbers), then the results are not
1351     /// fully deterministic.
1352     ///
1353     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1354     ///
1355     /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1356     /// except that the type of `0` and the `+` operation may vary
1357     /// depending on the type of value being produced.
1358     ///
1359     /// # Examples
1360     ///
1361     /// ```
1362     /// use rayon::prelude::*;
1363     ///
1364     /// let a = [1, 5, 7];
1365     ///
1366     /// let sum: i32 = a.par_iter().sum();
1367     ///
1368     /// assert_eq!(sum, 13);
1369     /// ```
    fn sum<S>(self) -> S
1371     where
1372         S: Send + Sum<Self::Item> + Sum<S>,
1373     {
1374         sum::sum(self)
1375     }
1376 
1377     /// Multiplies all the items in the iterator.
1378     ///
    /// Note that the order in which items will be reduced is not specified,
1380     /// so if the `*` operator is not truly [associative] \(as is the
1381     /// case for floating point numbers), then the results are not
1382     /// fully deterministic.
1383     ///
1384     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1385     ///
1386     /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1387     /// except that the type of `1` and the `*` operation may vary
1388     /// depending on the type of value being produced.
1389     ///
1390     /// # Examples
1391     ///
1392     /// ```
1393     /// use rayon::prelude::*;
1394     ///
1395     /// fn factorial(n: u32) -> u32 {
1396     ///    (1..n+1).into_par_iter().product()
1397     /// }
1398     ///
1399     /// assert_eq!(factorial(0), 1);
1400     /// assert_eq!(factorial(1), 1);
1401     /// assert_eq!(factorial(5), 120);
1402     /// ```
1403     fn product<P>(self) -> P
1404     where
1405         P: Send + Product<Self::Item> + Product<P>,
1406     {
1407         product::product(self)
1408     }
1409 
1410     /// Computes the minimum of all the items in the iterator. If the
1411     /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1412     /// is returned.
1413     ///
1414     /// Note that the order in which the items will be reduced is not
1415     /// specified, so if the `Ord` impl is not truly associative, then
1416     /// the results are not deterministic.
1417     ///
1418     /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`.
1419     ///
1420     /// # Examples
1421     ///
1422     /// ```
1423     /// use rayon::prelude::*;
1424     ///
1425     /// let a = [45, 74, 32];
1426     ///
1427     /// assert_eq!(a.par_iter().min(), Some(&32));
1428     ///
1429     /// let b: [i32; 0] = [];
1430     ///
1431     /// assert_eq!(b.par_iter().min(), None);
1432     /// ```
1433     fn min(self) -> Option<Self::Item>
1434     where
1435         Self::Item: Ord,
1436     {
1437         self.reduce_with(cmp::min)
1438     }
1439 
1440     /// Computes the minimum of all the items in the iterator with respect to
1441     /// the given comparison function. If the iterator is empty, `None` is
1442     /// returned; otherwise, `Some(min)` is returned.
1443     ///
1444     /// Note that the order in which the items will be reduced is not
1445     /// specified, so if the comparison function is not associative, then
1446     /// the results are not deterministic.
1447     ///
1448     /// # Examples
1449     ///
1450     /// ```
1451     /// use rayon::prelude::*;
1452     ///
1453     /// let a = [-3_i32, 77, 53, 240, -1];
1454     ///
1455     /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1456     /// ```
1457     fn min_by<F>(self, f: F) -> Option<Self::Item>
1458     where
1459         F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1460     {
1461         fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1462             move |a, b| match f(&a, &b) {
1463                 Ordering::Greater => b,
1464                 _ => a,
1465             }
1466         }
1467 
1468         self.reduce_with(min(f))
1469     }
1470 
1471     /// Computes the item that yields the minimum value for the given
1472     /// function. If the iterator is empty, `None` is returned;
1473     /// otherwise, `Some(item)` is returned.
1474     ///
1475     /// Note that the order in which the items will be reduced is not
1476     /// specified, so if the `Ord` impl is not truly associative, then
1477     /// the results are not deterministic.
1478     ///
1479     /// # Examples
1480     ///
1481     /// ```
1482     /// use rayon::prelude::*;
1483     ///
1484     /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1485     ///
1486     /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1487     /// ```
1488     fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1489     where
1490         K: Ord + Send,
1491         F: Sync + Send + Fn(&Self::Item) -> K,
1492     {
1493         fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1494             move |x| (f(&x), x)
1495         }
1496 
1497         fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1498             match (a.0).cmp(&b.0) {
1499                 Ordering::Greater => b,
1500                 _ => a,
1501             }
1502         }
1503 
1504         let (_, x) = self.map(key(f)).reduce_with(min_key)?;
1505         Some(x)
1506     }
1507 
1508     /// Computes the maximum of all the items in the iterator. If the
1509     /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1510     /// is returned.
1511     ///
1512     /// Note that the order in which the items will be reduced is not
1513     /// specified, so if the `Ord` impl is not truly associative, then
1514     /// the results are not deterministic.
1515     ///
1516     /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`.
1517     ///
1518     /// # Examples
1519     ///
1520     /// ```
1521     /// use rayon::prelude::*;
1522     ///
1523     /// let a = [45, 74, 32];
1524     ///
1525     /// assert_eq!(a.par_iter().max(), Some(&74));
1526     ///
1527     /// let b: [i32; 0] = [];
1528     ///
1529     /// assert_eq!(b.par_iter().max(), None);
1530     /// ```
1531     fn max(self) -> Option<Self::Item>
1532     where
1533         Self::Item: Ord,
1534     {
1535         self.reduce_with(cmp::max)
1536     }
1537 
1538     /// Computes the maximum of all the items in the iterator with respect to
1539     /// the given comparison function. If the iterator is empty, `None` is
1540     /// returned; otherwise, `Some(max)` is returned.
1541     ///
1542     /// Note that the order in which the items will be reduced is not
1543     /// specified, so if the comparison function is not associative, then
1544     /// the results are not deterministic.
1545     ///
1546     /// # Examples
1547     ///
1548     /// ```
1549     /// use rayon::prelude::*;
1550     ///
1551     /// let a = [-3_i32, 77, 53, 240, -1];
1552     ///
1553     /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1554     /// ```
1555     fn max_by<F>(self, f: F) -> Option<Self::Item>
1556     where
1557         F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1558     {
1559         fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1560             move |a, b| match f(&a, &b) {
1561                 Ordering::Greater => a,
1562                 _ => b,
1563             }
1564         }
1565 
1566         self.reduce_with(max(f))
1567     }
1568 
1569     /// Computes the item that yields the maximum value for the given
1570     /// function. If the iterator is empty, `None` is returned;
1571     /// otherwise, `Some(item)` is returned.
1572     ///
1573     /// Note that the order in which the items will be reduced is not
1574     /// specified, so if the `Ord` impl is not truly associative, then
1575     /// the results are not deterministic.
1576     ///
1577     /// # Examples
1578     ///
1579     /// ```
1580     /// use rayon::prelude::*;
1581     ///
1582     /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1583     ///
1584     /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1585     /// ```
1586     fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1587     where
1588         K: Ord + Send,
1589         F: Sync + Send + Fn(&Self::Item) -> K,
1590     {
1591         fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1592             move |x| (f(&x), x)
1593         }
1594 
1595         fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1596             match (a.0).cmp(&b.0) {
1597                 Ordering::Greater => a,
1598                 _ => b,
1599             }
1600         }
1601 
1602         let (_, x) = self.map(key(f)).reduce_with(max_key)?;
1603         Some(x)
1604     }
1605 
1606     /// Takes two iterators and creates a new iterator over both.
1607     ///
1608     /// # Examples
1609     ///
1610     /// ```
1611     /// use rayon::prelude::*;
1612     ///
1613     /// let a = [0, 1, 2];
1614     /// let b = [9, 8, 7];
1615     ///
1616     /// let par_iter = a.par_iter().chain(b.par_iter());
1617     ///
1618     /// let chained: Vec<_> = par_iter.cloned().collect();
1619     ///
1620     /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
1621     /// ```
1622     fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
1623     where
1624         C: IntoParallelIterator<Item = Self::Item>,
1625     {
1626         Chain::new(self, chain.into_par_iter())
1627     }
1628 
1629     /// Searches for **some** item in the parallel iterator that
1630     /// matches the given predicate and returns it. This operation
1631     /// is similar to [`find` on sequential iterators][find] but
1632     /// the item returned may not be the **first** one in the parallel
1633     /// sequence which matches, since we search the entire sequence in parallel.
1634     ///
1635     /// Once a match is found, we will attempt to stop processing
1636     /// the rest of the items in the iterator as soon as possible
1637     /// (just as `find` stops iterating once a match is found).
1638     ///
1639     /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
1640     ///
1641     /// # Examples
1642     ///
1643     /// ```
1644     /// use rayon::prelude::*;
1645     ///
1646     /// let a = [1, 2, 3, 3];
1647     ///
1648     /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1649     ///
1650     /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
1651     /// ```
1652     fn find_any<P>(self, predicate: P) -> Option<Self::Item>
1653     where
1654         P: Fn(&Self::Item) -> bool + Sync + Send,
1655     {
1656         find::find(self, predicate)
1657     }
1658 
1659     /// Searches for the sequentially **first** item in the parallel iterator
1660     /// that matches the given predicate and returns it.
1661     ///
1662     /// Once a match is found, all attempts to the right of the match
1663     /// will be stopped, while attempts to the left must continue in case
1664     /// an earlier match is found.
1665     ///
1666     /// Note that not all parallel iterators have a useful order, much like
1667     /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
1668     /// just want the first match discovered anywhere in the iterator,
1669     /// `find_any` is a better choice.
1670     ///
1671     /// # Examples
1672     ///
1673     /// ```
1674     /// use rayon::prelude::*;
1675     ///
1676     /// let a = [1, 2, 3, 3];
1677     ///
1678     /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1679     ///
1680     /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
1681     /// ```
1682     fn find_first<P>(self, predicate: P) -> Option<Self::Item>
1683     where
1684         P: Fn(&Self::Item) -> bool + Sync + Send,
1685     {
1686         find_first_last::find_first(self, predicate)
1687     }
1688 
1689     /// Searches for the sequentially **last** item in the parallel iterator
1690     /// that matches the given predicate and returns it.
1691     ///
1692     /// Once a match is found, all attempts to the left of the match
1693     /// will be stopped, while attempts to the right must continue in case
1694     /// a later match is found.
1695     ///
1696     /// Note that not all parallel iterators have a useful order, much like
1697     /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
1698     /// order doesn't actually matter to you, `find_any` is a better choice.
1699     ///
1700     /// # Examples
1701     ///
1702     /// ```
1703     /// use rayon::prelude::*;
1704     ///
1705     /// let a = [1, 2, 3, 3];
1706     ///
1707     /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1708     ///
1709     /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
1710     /// ```
1711     fn find_last<P>(self, predicate: P) -> Option<Self::Item>
1712     where
1713         P: Fn(&Self::Item) -> bool + Sync + Send,
1714     {
1715         find_first_last::find_last(self, predicate)
1716     }
1717 
1718     /// Applies the given predicate to the items in the parallel iterator
1719     /// and returns **any** non-None result of the map operation.
1720     ///
1721     /// Once a non-None value is produced from the map operation, we will
1722     /// attempt to stop processing the rest of the items in the iterator
1723     /// as soon as possible.
1724     ///
1725     /// Note that this method only returns **some** non-None value produced
1726     /// by the map operation. The value returned
1727     /// may not be the **first** non-None value produced in the parallel
1728     /// sequence, since the entire sequence is mapped over in parallel.
1729     ///
1730     /// # Examples
1731     ///
1732     /// ```
1733     /// use rayon::prelude::*;
1734     ///
1735     /// let c = ["lol", "NaN", "5", "5"];
1736     ///
1737     /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1738     ///
1739     /// assert_eq!(found_number, Some(5));
1740     /// ```
1741     fn find_map_any<P, R>(self, predicate: P) -> Option<R>
1742     where
1743         P: Fn(Self::Item) -> Option<R> + Sync + Send,
1744         R: Send,
1745     {
1746         fn yes<T>(_: &T) -> bool {
1747             true
1748         }
1749         self.filter_map(predicate).find_any(yes)
1750     }
1751 
1752     /// Applies the given predicate to the items in the parallel iterator and
1753     /// returns the sequentially **first** non-None result of the map operation.
1754     ///
1755     /// Once a non-None value is produced from the map operation, all attempts
1756     /// to the right of the match will be stopped, while attempts to the left
1757     /// must continue in case an earlier match is found.
1758     ///
1759     /// Note that not all parallel iterators have a useful order, much like
1760     /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1761     /// just want the first non-None value discovered anywhere in the iterator,
1762     /// `find_map_any` is a better choice.
1763     ///
1764     /// # Examples
1765     ///
1766     /// ```
1767     /// use rayon::prelude::*;
1768     ///
1769     /// let c = ["lol", "NaN", "2", "5"];
1770     ///
1771     /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1772     ///
1773     /// assert_eq!(first_number, Some(2));
1774     /// ```
1775     fn find_map_first<P, R>(self, predicate: P) -> Option<R>
1776     where
1777         P: Fn(Self::Item) -> Option<R> + Sync + Send,
1778         R: Send,
1779     {
1780         fn yes<T>(_: &T) -> bool {
1781             true
1782         }
1783         self.filter_map(predicate).find_first(yes)
1784     }
1785 
1786     /// Applies the given predicate to the items in the parallel iterator and
1787     /// returns the sequentially **last** non-None result of the map operation.
1788     ///
1789     /// Once a non-None value is produced from the map operation, all attempts
1790     /// to the left of the match will be stopped, while attempts to the right
1791     /// must continue in case a later match is found.
1792     ///
1793     /// Note that not all parallel iterators have a useful order, much like
1794     /// sequential `HashMap` iteration, so "last" may be nebulous. When the
1795     /// order doesn't actually matter to you, `find_map_any` is a better
1796     /// choice.
1797     ///
1798     /// # Examples
1799     ///
1800     /// ```
1801     /// use rayon::prelude::*;
1802     ///
1803     /// let c = ["lol", "NaN", "2", "5"];
1804     ///
1805     /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
1806     ///
1807     /// assert_eq!(last_number, Some(5));
1808     /// ```
1809     fn find_map_last<P, R>(self, predicate: P) -> Option<R>
1810     where
1811         P: Fn(Self::Item) -> Option<R> + Sync + Send,
1812         R: Send,
1813     {
1814         fn yes<T>(_: &T) -> bool {
1815             true
1816         }
1817         self.filter_map(predicate).find_last(yes)
1818     }
1819 
1820     #[doc(hidden)]
1821     #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \
1822                          `find_first`, or `find_last`")]
1823     fn find<P>(self, predicate: P) -> Option<Self::Item>
1824     where
1825         P: Fn(&Self::Item) -> bool + Sync + Send,
1826     {
1827         self.find_any(predicate)
1828     }
1829 
1830     /// Searches for **some** item in the parallel iterator that
1831     /// matches the given predicate, and if so returns true.  Once
1832     /// a match is found, we'll attempt to stop processing the rest
1833     /// of the items.  Proving that there's no match, returning false,
1834     /// does require visiting every item.
1835     ///
1836     /// # Examples
1837     ///
1838     /// ```
1839     /// use rayon::prelude::*;
1840     ///
1841     /// let a = [0, 12, 3, 4, 0, 23, 0];
1842     ///
1843     /// let is_valid = a.par_iter().any(|&x| x > 10);
1844     ///
1845     /// assert!(is_valid);
1846     /// ```
1847     fn any<P>(self, predicate: P) -> bool
1848     where
1849         P: Fn(Self::Item) -> bool + Sync + Send,
1850     {
1851         self.map(predicate).find_any(bool::clone).is_some()
1852     }
1853 
1854     /// Tests that every item in the parallel iterator matches the given
1855     /// predicate, and if so returns true.  If a counter-example is found,
1856     /// we'll attempt to stop processing more items, then return false.
1857     ///
1858     /// # Examples
1859     ///
1860     /// ```
1861     /// use rayon::prelude::*;
1862     ///
1863     /// let a = [0, 12, 3, 4, 0, 23, 0];
1864     ///
1865     /// let is_valid = a.par_iter().all(|&x| x > 10);
1866     ///
1867     /// assert!(!is_valid);
1868     /// ```
1869     fn all<P>(self, predicate: P) -> bool
1870     where
1871         P: Fn(Self::Item) -> bool + Sync + Send,
1872     {
1873         #[inline]
1874         fn is_false(x: &bool) -> bool {
1875             !x
1876         }
1877 
1878         self.map(predicate).find_any(is_false).is_none()
1879     }
1880 
1881     /// Creates an iterator over the `Some` items of this iterator, halting
1882     /// as soon as any `None` is found.
1883     ///
1884     /// # Examples
1885     ///
1886     /// ```
1887     /// use rayon::prelude::*;
1888     /// use std::sync::atomic::{AtomicUsize, Ordering};
1889     ///
1890     /// let counter = AtomicUsize::new(0);
1891     /// let value = (0_i32..2048)
1892     ///     .into_par_iter()
1893     ///     .map(|x| {
1894     ///              counter.fetch_add(1, Ordering::SeqCst);
1895     ///              if x < 1024 { Some(x) } else { None }
1896     ///          })
1897     ///     .while_some()
1898     ///     .max();
1899     ///
1900     /// assert!(value < Some(1024));
1901     /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
1902     /// ```
1903     fn while_some<T>(self) -> WhileSome<Self>
1904     where
1905         Self: ParallelIterator<Item = Option<T>>,
1906         T: Send,
1907     {
1908         WhileSome::new(self)
1909     }
1910 
1911     /// Wraps an iterator with a fuse in case of panics, to halt all threads
1912     /// as soon as possible.
1913     ///
1914     /// Panics within parallel iterators are always propagated to the caller,
1915     /// but they don't always halt the rest of the iterator right away, due to
1916     /// the internal semantics of [`join`]. This adaptor makes a greater effort
1917     /// to stop processing other items sooner, with the cost of additional
1918     /// synchronization overhead, which may also inhibit some optimizations.
1919     ///
1920     /// [`join`]: ../fn.join.html#panics
1921     ///
1922     /// # Examples
1923     ///
1924     /// If this code didn't use `panic_fuse()`, it would continue processing
1925     /// many more items in other threads (with long sleep delays) before the
1926     /// panic is finally propagated.
1927     ///
1928     /// ```should_panic
1929     /// use rayon::prelude::*;
1930     /// use std::{thread, time};
1931     ///
1932     /// (0..1_000_000)
1933     ///     .into_par_iter()
1934     ///     .panic_fuse()
1935     ///     .for_each(|i| {
1936     ///         // simulate some work
1937     ///         thread::sleep(time::Duration::from_secs(1));
1938     ///         assert!(i > 0); // oops!
1939     ///     });
1940     /// ```
1941     fn panic_fuse(self) -> PanicFuse<Self> {
1942         PanicFuse::new(self)
1943     }
1944 
1945     /// Creates a fresh collection containing all the elements produced
1946     /// by this parallel iterator.
1947     ///
1948     /// You may prefer [`collect_into_vec()`] implemented on
1949     /// [`IndexedParallelIterator`], if your underlying iterator also implements
1950     /// it. [`collect_into_vec()`] allocates efficiently with precise knowledge
1951     /// of how many elements the iterator contains, and even allows you to reuse
1952     /// an existing vector's backing store rather than allocating a fresh vector.
1953     ///
1954     /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
1955     /// [`collect_into_vec()`]:
1956     ///     trait.IndexedParallelIterator.html#method.collect_into_vec
1957     ///
1958     /// # Examples
1959     ///
1960     /// ```
1961     /// use rayon::prelude::*;
1962     ///
1963     /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
1964     ///
1965     /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
1966     ///
1967     /// assert_eq!(sync_vec, async_vec);
1968     /// ```
1969     ///
1970     /// You can collect a pair of collections like [`unzip`](#method.unzip)
1971     /// for paired items:
1972     ///
1973     /// ```
1974     /// use rayon::prelude::*;
1975     ///
1976     /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
1977     /// let (first, second): (Vec<_>, Vec<_>) = a.into_par_iter().collect();
1978     ///
1979     /// assert_eq!(first, [0, 1, 2, 3]);
1980     /// assert_eq!(second, [1, 2, 3, 4]);
1981     /// ```
1982     ///
1983     /// Or like [`partition_map`](#method.partition_map) for `Either` items:
1984     ///
1985     /// ```
1986     /// use rayon::prelude::*;
1987     /// use rayon::iter::Either;
1988     ///
1989     /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().map(|x| {
1990     ///     if x % 2 == 0 {
1991     ///         Either::Left(x * 4)
1992     ///     } else {
1993     ///         Either::Right(x * 3)
1994     ///     }
1995     /// }).collect();
1996     ///
1997     /// assert_eq!(left, [0, 8, 16, 24]);
1998     /// assert_eq!(right, [3, 9, 15, 21]);
1999     /// ```
2000     ///
2001     /// You can even collect an arbitrarily-nested combination of pairs and `Either`:
2002     ///
2003     /// ```
2004     /// use rayon::prelude::*;
2005     /// use rayon::iter::Either;
2006     ///
2007     /// let (first, (left, right)): (Vec<_>, (Vec<_>, Vec<_>))
2008     ///     = (0..8).into_par_iter().map(|x| {
2009     ///         if x % 2 == 0 {
2010     ///             (x, Either::Left(x * 4))
2011     ///         } else {
2012     ///             (-x, Either::Right(x * 3))
2013     ///         }
2014     ///     }).collect();
2015     ///
2016     /// assert_eq!(first, [0, -1, 2, -3, 4, -5, 6, -7]);
2017     /// assert_eq!(left, [0, 8, 16, 24]);
2018     /// assert_eq!(right, [3, 9, 15, 21]);
2019     /// ```
2020     ///
2021     /// All of that can _also_ be combined with short-circuiting collection of
2022     /// `Result` or `Option` types:
2023     ///
2024     /// ```
2025     /// use rayon::prelude::*;
2026     /// use rayon::iter::Either;
2027     ///
2028     /// let result: Result<(Vec<_>, (Vec<_>, Vec<_>)), _>
2029     ///     = (0..8).into_par_iter().map(|x| {
2030     ///         if x > 5 {
2031     ///             Err(x)
2032     ///         } else if x % 2 == 0 {
2033     ///             Ok((x, Either::Left(x * 4)))
2034     ///         } else {
2035     ///             Ok((-x, Either::Right(x * 3)))
2036     ///         }
2037     ///     }).collect();
2038     ///
2039     /// let error = result.unwrap_err();
2040     /// assert!(error == 6 || error == 7);
2041     /// ```
2042     fn collect<C>(self) -> C
2043     where
2044         C: FromParallelIterator<Self::Item>,
2045     {
2046         C::from_par_iter(self)
2047     }
2048 
2049     /// Unzips the items of a parallel iterator into a pair of arbitrary
2050     /// `ParallelExtend` containers.
2051     ///
2052     /// You may prefer to use `unzip_into_vecs()`, which allocates more
2053     /// efficiently with precise knowledge of how many elements the
2054     /// iterator contains, and even allows you to reuse existing
2055     /// vectors' backing stores rather than allocating fresh vectors.
2056     ///
2057     /// # Examples
2058     ///
2059     /// ```
2060     /// use rayon::prelude::*;
2061     ///
2062     /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
2063     ///
2064     /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
2065     ///
2066     /// assert_eq!(left, [0, 1, 2, 3]);
2067     /// assert_eq!(right, [1, 2, 3, 4]);
2068     /// ```
2069     ///
2070     /// Nested pairs can be unzipped too.
2071     ///
2072     /// ```
2073     /// use rayon::prelude::*;
2074     ///
2075     /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
2076     ///     .map(|i| (i, (i * i, i * i * i)))
2077     ///     .unzip();
2078     ///
2079     /// assert_eq!(values, [0, 1, 2, 3]);
2080     /// assert_eq!(squares, [0, 1, 4, 9]);
2081     /// assert_eq!(cubes, [0, 1, 8, 27]);
2082     /// ```
2083     fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
2084     where
2085         Self: ParallelIterator<Item = (A, B)>,
2086         FromA: Default + Send + ParallelExtend<A>,
2087         FromB: Default + Send + ParallelExtend<B>,
2088         A: Send,
2089         B: Send,
2090     {
2091         unzip::unzip(self)
2092     }
2093 
2094     /// Partitions the items of a parallel iterator into a pair of arbitrary
2095     /// `ParallelExtend` containers.  Items for which the `predicate` returns
2096     /// true go into the first container, and the rest go into the second.
2097     ///
2098     /// Note: unlike the standard `Iterator::partition`, this allows distinct
2099     /// collection types for the left and right items.  This is more flexible,
2100     /// but may require new type annotations when converting sequential code
2101     /// that used type inference assuming the two were the same.
2102     ///
2103     /// # Examples
2104     ///
2105     /// ```
2106     /// use rayon::prelude::*;
2107     ///
2108     /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
2109     ///
2110     /// assert_eq!(left, [0, 2, 4, 6]);
2111     /// assert_eq!(right, [1, 3, 5, 7]);
2112     /// ```
2113     fn partition<A, B, P>(self, predicate: P) -> (A, B)
2114     where
2115         A: Default + Send + ParallelExtend<Self::Item>,
2116         B: Default + Send + ParallelExtend<Self::Item>,
2117         P: Fn(&Self::Item) -> bool + Sync + Send,
2118     {
2119         unzip::partition(self, predicate)
2120     }
2121 
2122     /// Partitions and maps the items of a parallel iterator into a pair of
2123     /// arbitrary `ParallelExtend` containers.  `Either::Left` items go into
2124     /// the first container, and `Either::Right` items go into the second.
2125     ///
2126     /// # Examples
2127     ///
2128     /// ```
2129     /// use rayon::prelude::*;
2130     /// use rayon::iter::Either;
2131     ///
2132     /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
2133     ///     .partition_map(|x| {
2134     ///         if x % 2 == 0 {
2135     ///             Either::Left(x * 4)
2136     ///         } else {
2137     ///             Either::Right(x * 3)
2138     ///         }
2139     ///     });
2140     ///
2141     /// assert_eq!(left, [0, 8, 16, 24]);
2142     /// assert_eq!(right, [3, 9, 15, 21]);
2143     /// ```
2144     ///
2145     /// Nested `Either` enums can be split as well.
2146     ///
2147     /// ```
2148     /// use rayon::prelude::*;
2149     /// use rayon::iter::Either::*;
2150     ///
2151     /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
2152     ///     .into_par_iter()
2153     ///     .partition_map(|x| match (x % 3, x % 5) {
2154     ///         (0, 0) => Left(Left(x)),
2155     ///         (0, _) => Left(Right(x)),
2156     ///         (_, 0) => Right(Left(x)),
2157     ///         (_, _) => Right(Right(x)),
2158     ///     });
2159     ///
2160     /// assert_eq!(fizzbuzz, [15]);
2161     /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
2162     /// assert_eq!(buzz, [5, 10]);
2163     /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
2164     /// ```
2165     fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
2166     where
2167         A: Default + Send + ParallelExtend<L>,
2168         B: Default + Send + ParallelExtend<R>,
2169         P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
2170         L: Send,
2171         R: Send,
2172     {
2173         unzip::partition_map(self, predicate)
2174     }
2175 
2176     /// Intersperses clones of an element between items of this iterator.
2177     ///
2178     /// # Examples
2179     ///
2180     /// ```
2181     /// use rayon::prelude::*;
2182     ///
2183     /// let x = vec![1, 2, 3];
2184     /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
2185     ///
2186     /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
2187     /// ```
2188     fn intersperse(self, element: Self::Item) -> Intersperse<Self>
2189     where
2190         Self::Item: Clone,
2191     {
2192         Intersperse::new(self, element)
2193     }
2194 
2195     /// Internal method used to define the behavior of this parallel
2196     /// iterator. You should not need to call this directly.
2197     ///
2198     /// This method causes the iterator `self` to start producing
2199     /// items and to feed them to the consumer `consumer` one by one.
2200     /// It may split the consumer before doing so to create the
2201     /// opportunity to produce in parallel.
2202     ///
2203     /// See the [README] for more details on the internals of parallel
2204     /// iterators.
2205     ///
2206     /// [README]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
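    ///
    /// As an illustration only, here is a minimal sketch of a pass-through
    /// combinator (the `PassThrough` name is purely hypothetical) that simply
    /// forwards its consumer to an inner iterator:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use rayon::iter::plumbing::UnindexedConsumer;
    ///
    /// // A wrapper that adds no behavior of its own.
    /// struct PassThrough<I>(I);
    ///
    /// impl<I: ParallelIterator> ParallelIterator for PassThrough<I> {
    ///     type Item = I::Item;
    ///
    ///     fn drive_unindexed<C>(self, consumer: C) -> C::Result
    ///     where
    ///         C: UnindexedConsumer<Self::Item>,
    ///     {
    ///         // Delegate: the inner iterator produces the items and feeds
    ///         // them to (splits of) the consumer.
    ///         self.0.drive_unindexed(consumer)
    ///     }
    /// }
    ///
    /// let sum: i32 = PassThrough((1..4).into_par_iter()).sum();
    /// assert_eq!(sum, 6);
    /// ```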
2207     fn drive_unindexed<C>(self, consumer: C) -> C::Result
2208     where
2209         C: UnindexedConsumer<Self::Item>;
2210 
2211     /// Internal method used to define the behavior of this parallel
2212     /// iterator. You should not need to call this directly.
2213     ///
2214     /// Returns the number of items produced by this iterator, if known
2215     /// statically. This can be used by consumers to trigger special fast
2216     /// paths. Therefore, if `Some(_)` is returned, this iterator must only
2217     /// use the (indexed) `Consumer` methods when driving a consumer, such
2218     /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
2219     /// other `UnindexedConsumer` methods -- or returning an inaccurate
2220     /// value -- may result in panics.
2221     ///
2222     /// This method is currently used to optimize `collect` for want
2223     /// of true Rust specialization; it may be removed when
2224     /// specialization is stable.
2225     fn opt_len(&self) -> Option<usize> {
2226         None
2227     }
2228 }
2229 
2230 impl<T: ParallelIterator> IntoParallelIterator for T {
2231     type Iter = T;
2232     type Item = T::Item;
2233 
2234     fn into_par_iter(self) -> T {
2235         self
2236     }
2237 }
2238 
2239 /// An iterator that supports "random access" to its data, meaning
2240 /// that you can split it at arbitrary indices and draw data from
2241 /// those points.
2242 ///
2243 /// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
2244 pub trait IndexedParallelIterator: ParallelIterator {
2245     /// Collects the results of the iterator into the specified
2246     /// vector. The vector is always truncated before execution
2247     /// begins. If possible, reusing the vector across calls can lead
2248     /// to better performance since it reuses the same backing buffer.
2249     ///
2250     /// # Examples
2251     ///
2252     /// ```
2253     /// use rayon::prelude::*;
2254     ///
2255     /// // any prior data will be truncated
2256     /// let mut vec = vec![-1, -2, -3];
2257     ///
2258     /// (0..5).into_par_iter()
2259     ///     .collect_into_vec(&mut vec);
2260     ///
2261     /// assert_eq!(vec, [0, 1, 2, 3, 4]);
2262     /// ```
2263     fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
2264         collect::collect_into_vec(self, target);
2265     }
2266 
2267     /// Unzips the results of the iterator into the specified
2268     /// vectors. The vectors are always truncated before execution
2269     /// begins. If possible, reusing the vectors across calls can lead
2270     /// to better performance since they reuse the same backing buffer.
2271     ///
2272     /// # Examples
2273     ///
2274     /// ```
2275     /// use rayon::prelude::*;
2276     ///
2277     /// // any prior data will be truncated
2278     /// let mut left = vec![42; 10];
2279     /// let mut right = vec![-1; 10];
2280     ///
2281     /// (10..15).into_par_iter()
2282     ///     .enumerate()
2283     ///     .unzip_into_vecs(&mut left, &mut right);
2284     ///
2285     /// assert_eq!(left, [0, 1, 2, 3, 4]);
2286     /// assert_eq!(right, [10, 11, 12, 13, 14]);
2287     /// ```
2288     fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
2289     where
2290         Self: IndexedParallelIterator<Item = (A, B)>,
2291         A: Send,
2292         B: Send,
2293     {
2294         collect::unzip_into_vecs(self, left, right);
2295     }
2296 
2297     /// Iterates over tuples `(A, B)`, where the items `A` are from
2298     /// this iterator and `B` are from the iterator given as argument.
2299     /// Like the `zip` method on ordinary iterators, if the two
2300     /// iterators are of unequal length, you only get the items they
2301     /// have in common.
2302     ///
2303     /// # Examples
2304     ///
2305     /// ```
2306     /// use rayon::prelude::*;
2307     ///
2308     /// let result: Vec<_> = (1..4)
2309     ///     .into_par_iter()
2310     ///     .zip(vec!['a', 'b', 'c'])
2311     ///     .collect();
2312     ///
2313     /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
2314     /// ```
2315     fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
2316     where
2317         Z: IntoParallelIterator,
2318         Z::Iter: IndexedParallelIterator,
2319     {
2320         Zip::new(self, zip_op.into_par_iter())
2321     }
2322 
2323     /// The same as `Zip`, but requires that both iterators have the same length.
2324     ///
2325     /// # Panics
2326     /// Will panic if `self` and `zip_op` are not the same length.
2327     ///
2328     /// ```should_panic
2329     /// use rayon::prelude::*;
2330     ///
2331     /// let one = [1u8];
2332     /// let two = [2u8, 2];
2333     /// let one_iter = one.par_iter();
2334     /// let two_iter = two.par_iter();
2335     ///
2336     /// // this will panic
2337     /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2338     ///
2339     /// // we should never get here
2340     /// assert_eq!(1, zipped.len());
2341     /// ```
2342     fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
2343     where
2344         Z: IntoParallelIterator,
2345         Z::Iter: IndexedParallelIterator,
2346     {
2347         let zip_op_iter = zip_op.into_par_iter();
2348         assert_eq!(self.len(), zip_op_iter.len());
2349         ZipEq::new(self, zip_op_iter)
2350     }
2351 
2352     /// Interleaves elements of this iterator and the other given
2353     /// iterator. Alternately yields elements from this iterator and
2354     /// the given iterator, until both are exhausted. If one iterator
2355     /// is exhausted before the other, the last elements are provided
2356     /// from the other.
2357     ///
2358     /// # Examples
2359     ///
2360     /// ```
2361     /// use rayon::prelude::*;
2362     /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2363     /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2364     /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
2365     /// ```
2366     fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
2367     where
2368         I: IntoParallelIterator<Item = Self::Item>,
2369         I::Iter: IndexedParallelIterator<Item = Self::Item>,
2370     {
2371         Interleave::new(self, other.into_par_iter())
2372     }
2373 
2374     /// Interleaves elements of this iterator and the other given
2375     /// iterator, until one is exhausted.
2376     ///
2377     /// # Examples
2378     ///
2379     /// ```
2380     /// use rayon::prelude::*;
2381     /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2382     /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2383     /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2384     /// ```
2385     fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
2386     where
2387         I: IntoParallelIterator<Item = Self::Item>,
2388         I::Iter: IndexedParallelIterator<Item = Self::Item>,
2389     {
2390         InterleaveShortest::new(self, other.into_par_iter())
2391     }
2392 
2393     /// Splits an iterator up into fixed-size chunks.
2394     ///
2395     /// Returns an iterator that returns `Vec`s of the given number of elements.
2396     /// If the number of elements in the iterator is not divisible by `chunk_size`,
2397     /// the last chunk may be shorter than `chunk_size`.
2398     ///
2399     /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2400     /// slices, without having to allocate intermediate `Vec`s for the chunks.
2401     ///
2402     /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks
2403     /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut
2404     ///
2405     /// # Examples
2406     ///
2407     /// ```
2408     /// use rayon::prelude::*;
2409     /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2410     /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2411     /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2412     /// ```
2413     fn chunks(self, chunk_size: usize) -> Chunks<Self> {
2414         assert!(chunk_size != 0, "chunk_size must not be zero");
2415         Chunks::new(self, chunk_size)
2416     }
2417 
2418     /// Lexicographically compares the elements of this `ParallelIterator` with those of
2419     /// another.
2420     ///
2421     /// # Examples
2422     ///
2423     /// ```
2424     /// use rayon::prelude::*;
2425     /// use std::cmp::Ordering::*;
2426     ///
2427     /// let x = vec![1, 2, 3];
2428     /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2429     /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2430     /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2431     /// ```
2432     fn cmp<I>(self, other: I) -> Ordering
2433     where
2434         I: IntoParallelIterator<Item = Self::Item>,
2435         I::Iter: IndexedParallelIterator,
2436         Self::Item: Ord,
2437     {
2438         #[inline]
2439         fn ordering<T: Ord>((x, y): (T, T)) -> Ordering {
2440             Ord::cmp(&x, &y)
2441         }
2442 
2443         #[inline]
2444         fn inequal(&ord: &Ordering) -> bool {
2445             ord != Ordering::Equal
2446         }
2447 
2448         let other = other.into_par_iter();
2449         let ord_len = self.len().cmp(&other.len());
2450         self.zip(other)
2451             .map(ordering)
2452             .find_first(inequal)
2453             .unwrap_or(ord_len)
2454     }
2455 
2456     /// Lexicographically compares the elements of this `ParallelIterator` with those of
2457     /// another.
2458     ///
2459     /// # Examples
2460     ///
2461     /// ```
2462     /// use rayon::prelude::*;
2463     /// use std::cmp::Ordering::*;
2464     /// use std::f64::NAN;
2465     ///
2466     /// let x = vec![1.0, 2.0, 3.0];
2467     /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2468     /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2469     /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2470     /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
2471     /// ```
2472     fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2473     where
2474         I: IntoParallelIterator,
2475         I::Iter: IndexedParallelIterator,
2476         Self::Item: PartialOrd<I::Item>,
2477     {
2478         #[inline]
2479         fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> {
2480             PartialOrd::partial_cmp(&x, &y)
2481         }
2482 
2483         #[inline]
2484         fn inequal(&ord: &Option<Ordering>) -> bool {
2485             ord != Some(Ordering::Equal)
2486         }
2487 
2488         let other = other.into_par_iter();
2489         let ord_len = self.len().cmp(&other.len());
2490         self.zip(other)
2491             .map(ordering)
2492             .find_first(inequal)
2493             .unwrap_or(Some(ord_len))
2494     }
2495 
2496     /// Determines if the elements of this `ParallelIterator`
2497     /// are equal to those of another
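    ///
    /// # Examples
    ///
    /// A brief illustrative sketch; iterators of unequal length never
    /// compare equal:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().eq(&vec![1, 2, 3]));
    /// assert!(!x.par_iter().eq(&vec![1, 2, 4]));
    /// assert!(!x.par_iter().eq(&vec![1, 2]));
    /// ```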
2498     fn eq<I>(self, other: I) -> bool
2499     where
2500         I: IntoParallelIterator,
2501         I::Iter: IndexedParallelIterator,
2502         Self::Item: PartialEq<I::Item>,
2503     {
2504         #[inline]
2505         fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool {
2506             PartialEq::eq(&x, &y)
2507         }
2508 
2509         let other = other.into_par_iter();
2510         self.len() == other.len() && self.zip(other).all(eq)
2511     }
2512 
2513     /// Determines if the elements of this `ParallelIterator`
2514     /// are unequal to those of another
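    ///
    /// # Examples
    ///
    /// A brief illustrative sketch (the negation of [`eq()`](#method.eq)):
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().ne(&vec![1, 2, 4]));
    /// assert!(!x.par_iter().ne(&vec![1, 2, 3]));
    /// ```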
2515     fn ne<I>(self, other: I) -> bool
2516     where
2517         I: IntoParallelIterator,
2518         I::Iter: IndexedParallelIterator,
2519         Self::Item: PartialEq<I::Item>,
2520     {
2521         !self.eq(other)
2522     }
2523 
2524     /// Determines if the elements of this `ParallelIterator`
2525     /// are lexicographically less than those of another.
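    ///
    /// # Examples
    ///
    /// An illustrative sketch of the lexicographic ordering:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().lt(&vec![1, 3, 0]));  // decided by 2 < 3
    /// assert!(!x.par_iter().lt(&vec![1, 2, 3])); // equal is not less
    /// ```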
2526     fn lt<I>(self, other: I) -> bool
2527     where
2528         I: IntoParallelIterator,
2529         I::Iter: IndexedParallelIterator,
2530         Self::Item: PartialOrd<I::Item>,
2531     {
2532         self.partial_cmp(other) == Some(Ordering::Less)
2533     }
2534 
2535     /// Determines if the elements of this `ParallelIterator`
2536     /// are less or equal to those of another.
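    ///
    /// # Examples
    ///
    /// An illustrative sketch:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().le(&vec![1, 2, 3]));  // equal
    /// assert!(x.par_iter().le(&vec![1, 2, 4]));  // less
    /// assert!(!x.par_iter().le(&vec![1, 2, 2]));
    /// ```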
2537     fn le<I>(self, other: I) -> bool
2538     where
2539         I: IntoParallelIterator,
2540         I::Iter: IndexedParallelIterator,
2541         Self::Item: PartialOrd<I::Item>,
2542     {
2543         let ord = self.partial_cmp(other);
2544         ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2545     }
2546 
2547     /// Determines if the elements of this `ParallelIterator`
2548     /// are lexicographically greater than those of another.
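    ///
    /// # Examples
    ///
    /// An illustrative sketch:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().gt(&vec![1, 2, 2]));
    /// assert!(!x.par_iter().gt(&vec![1, 2, 3])); // equal is not greater
    /// ```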
2549     fn gt<I>(self, other: I) -> bool
2550     where
2551         I: IntoParallelIterator,
2552         I::Iter: IndexedParallelIterator,
2553         Self::Item: PartialOrd<I::Item>,
2554     {
2555         self.partial_cmp(other) == Some(Ordering::Greater)
2556     }
2557 
2558     /// Determines if the elements of this `ParallelIterator`
2559     /// are greater or equal to those of another.
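    ///
    /// # Examples
    ///
    /// An illustrative sketch:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().ge(&vec![1, 2, 3]));  // equal
    /// assert!(x.par_iter().ge(&vec![1, 2, 2]));  // greater
    /// assert!(!x.par_iter().ge(&vec![1, 2, 4]));
    /// ```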
2560     fn ge<I>(self, other: I) -> bool
2561     where
2562         I: IntoParallelIterator,
2563         I::Iter: IndexedParallelIterator,
2564         Self::Item: PartialOrd<I::Item>,
2565     {
2566         let ord = self.partial_cmp(other);
2567         ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2568     }
2569 
2570     /// Yields an index along with each item.
2571     ///
2572     /// # Examples
2573     ///
2574     /// ```
2575     /// use rayon::prelude::*;
2576     ///
2577     /// let chars = vec!['a', 'b', 'c'];
2578     /// let result: Vec<_> = chars
2579     ///     .into_par_iter()
2580     ///     .enumerate()
2581     ///     .collect();
2582     ///
2583     /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2584     /// ```
2585     fn enumerate(self) -> Enumerate<Self> {
2586         Enumerate::new(self)
2587     }
2588 
2589     /// Creates an iterator that steps by the given amount
2590     ///
2591     /// # Examples
2592     ///
2593     /// ```
2594     /// use rayon::prelude::*;
2595     ///
2596     /// let range = (3..10);
2597     /// let result: Vec<i32> = range
2598     ///    .into_par_iter()
2599     ///    .step_by(3)
2600     ///    .collect();
2601     ///
2602     /// assert_eq!(result, [3, 6, 9])
2603     /// ```
2604     ///
2605     /// # Compatibility
2606     ///
2607     /// This method is only available on Rust 1.38 or greater.
2608     #[cfg(step_by)]
2609     fn step_by(self, step: usize) -> StepBy<Self> {
2610         StepBy::new(self, step)
2611     }
2612 
2613     /// Creates an iterator that skips the first `n` elements.
2614     ///
2615     /// # Examples
2616     ///
2617     /// ```
2618     /// use rayon::prelude::*;
2619     ///
2620     /// let result: Vec<_> = (0..100)
2621     ///     .into_par_iter()
2622     ///     .skip(95)
2623     ///     .collect();
2624     ///
2625     /// assert_eq!(result, [95, 96, 97, 98, 99]);
2626     /// ```
2627     fn skip(self, n: usize) -> Skip<Self> {
2628         Skip::new(self, n)
2629     }
2630 
2631     /// Creates an iterator that yields the first `n` elements.
2632     ///
2633     /// # Examples
2634     ///
2635     /// ```
2636     /// use rayon::prelude::*;
2637     ///
2638     /// let result: Vec<_> = (0..100)
2639     ///     .into_par_iter()
2640     ///     .take(5)
2641     ///     .collect();
2642     ///
2643     /// assert_eq!(result, [0, 1, 2, 3, 4]);
2644     /// ```
2645     fn take(self, n: usize) -> Take<Self> {
2646         Take::new(self, n)
2647     }
2648 
2649     /// Searches for **some** item in the parallel iterator that
2650     /// matches the given predicate, and returns its index.  Like
2651     /// `ParallelIterator::find_any`, the parallel search will not
2652     /// necessarily find the **first** match, and once a match is
2653     /// found we'll attempt to stop processing any more.
2654     ///
2655     /// # Examples
2656     ///
2657     /// ```
2658     /// use rayon::prelude::*;
2659     ///
2660     /// let a = [1, 2, 3, 3];
2661     ///
2662     /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
2663     /// assert!(i == 2 || i == 3);
2664     ///
2665     /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
2666     /// ```
2667     fn position_any<P>(self, predicate: P) -> Option<usize>
2668     where
2669         P: Fn(Self::Item) -> bool + Sync + Send,
2670     {
2671         #[inline]
2672         fn check(&(_, p): &(usize, bool)) -> bool {
2673             p
2674         }
2675 
2676         let (i, _) = self.map(predicate).enumerate().find_any(check)?;
2677         Some(i)
2678     }
2679 
2680     /// Searches for the sequentially **first** item in the parallel iterator
2681     /// that matches the given predicate, and returns its index.
2682     ///
2683     /// Like `ParallelIterator::find_first`, once a match is found,
2684     /// all attempts to the right of the match will be stopped, while
2685     /// attempts to the left must continue in case an earlier match
2686     /// is found.
2687     ///
2688     /// Note that not all parallel iterators have a useful order, much like
2689     /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
2690     /// just want the first match discovered anywhere in the iterator,
2691     /// `position_any` is a better choice.
2692     ///
2693     /// # Examples
2694     ///
2695     /// ```
2696     /// use rayon::prelude::*;
2697     ///
2698     /// let a = [1, 2, 3, 3];
2699     ///
2700     /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
2701     ///
2702     /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
2703     /// ```
2704     fn position_first<P>(self, predicate: P) -> Option<usize>
2705     where
2706         P: Fn(Self::Item) -> bool + Sync + Send,
2707     {
2708         #[inline]
2709         fn check(&(_, p): &(usize, bool)) -> bool {
2710             p
2711         }
2712 
2713         let (i, _) = self.map(predicate).enumerate().find_first(check)?;
2714         Some(i)
2715     }
2716 
2717     /// Searches for the sequentially **last** item in the parallel iterator
2718     /// that matches the given predicate, and returns its index.
2719     ///
2720     /// Like `ParallelIterator::find_last`, once a match is found,
2721     /// all attempts to the left of the match will be stopped, while
2722     /// attempts to the right must continue in case a later match
2723     /// is found.
2724     ///
2725     /// Note that not all parallel iterators have a useful order, much like
2726     /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
2727     /// order doesn't actually matter to you, `position_any` is a better
2728     /// choice.
2729     ///
2730     /// # Examples
2731     ///
2732     /// ```
2733     /// use rayon::prelude::*;
2734     ///
2735     /// let a = [1, 2, 3, 3];
2736     ///
2737     /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
2738     ///
2739     /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
2740     /// ```
2741     fn position_last<P>(self, predicate: P) -> Option<usize>
2742     where
2743         P: Fn(Self::Item) -> bool + Sync + Send,
2744     {
2745         #[inline]
2746         fn check(&(_, p): &(usize, bool)) -> bool {
2747             p
2748         }
2749 
2750         let (i, _) = self.map(predicate).enumerate().find_last(check)?;
2751         Some(i)
2752     }
2753 
2754     #[doc(hidden)]
2755     #[deprecated(
2756         note = "parallel `position` does not search in order -- use `position_any`, \
2757                 `position_first`, or `position_last`"
2758     )]
2759     fn position<P>(self, predicate: P) -> Option<usize>
2760     where
2761         P: Fn(Self::Item) -> bool + Sync + Send,
2762     {
2763         self.position_any(predicate)
2764     }
2765 
2766     /// Searches for items in the parallel iterator that match the given
2767     /// predicate, and returns their indices.
2768     ///
2769     /// # Examples
2770     ///
2771     /// ```
2772     /// use rayon::prelude::*;
2773     ///
2774     /// let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
2775     ///
2776     /// // Find the positions of primes congruent to 1 modulo 6
2777     /// let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
2778     /// assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19
2779     ///
2780     /// // Find the positions of primes congruent to 5 modulo 6
2781     /// let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
2782     /// assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29
2783     /// ```
2784     fn positions<P>(self, predicate: P) -> Positions<Self, P>
2785     where
2786         P: Fn(Self::Item) -> bool + Sync + Send,
2787     {
2788         Positions::new(self, predicate)
2789     }
2790 
2791     /// Produces a new iterator with the elements of this iterator in
2792     /// reverse order.
2793     ///
2794     /// # Examples
2795     ///
2796     /// ```
2797     /// use rayon::prelude::*;
2798     ///
2799     /// let result: Vec<_> = (0..5)
2800     ///     .into_par_iter()
2801     ///     .rev()
2802     ///     .collect();
2803     ///
2804     /// assert_eq!(result, [4, 3, 2, 1, 0]);
2805     /// ```
rev(self) -> Rev<Self>2806     fn rev(self) -> Rev<Self> {
2807         Rev::new(self)
2808     }

    /// Sets the minimum length of iterators desired to process in each
    /// thread.  Rayon will not split any smaller than this length, but
    /// of course an iterator could already be smaller to begin with.
    ///
    /// Producers like `zip` and `interleave` will use the greater of the two
    /// minimums.
    /// Chained iterators and iterators inside `flat_map` may each use
    /// their own minimum length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let min = (0..1_000_000)
    ///     .into_par_iter()
    ///     .with_min_len(1234)
    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
    ///     .min().unwrap();
    ///
    /// assert!(min >= 1234);
    /// ```
    fn with_min_len(self, min: usize) -> MinLen<Self> {
        MinLen::new(self, min)
    }

    /// Sets the maximum length of iterators desired to process in each
    /// thread.  Rayon will try to split at least below this length,
    /// unless that would put it below the length from `with_min_len()`.
    /// For example, given min=10 and max=15, a length of 16 will not be
    /// split any further.
    ///
    /// Producers like `zip` and `interleave` will use the lesser of the two
    /// maximums.
    /// Chained iterators and iterators inside `flat_map` may each use
    /// their own maximum length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let max = (0..1_000_000)
    ///     .into_par_iter()
    ///     .with_max_len(1234)
    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
    ///     .max().unwrap();
    ///
    /// assert!(max <= 1234);
    /// ```
    fn with_max_len(self, max: usize) -> MaxLen<Self> {
        MaxLen::new(self, max)
    }

    /// Produces an exact count of how many items this iterator will
    /// produce, presuming no panic occurs.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
    /// assert_eq!(par_iter.len(), 10);
    ///
    /// let vec: Vec<_> = par_iter.collect();
    /// assert_eq!(vec.len(), 10);
    /// ```
    fn len(&self) -> usize;

    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method causes the iterator `self` to start producing
    /// items and to feed them to the consumer `consumer` one by one.
    /// It may split the consumer before doing so to create the
    /// opportunity to produce in parallel. If a split does happen, it
    /// will inform the consumer of the index where the split should
    /// occur (unlike `ParallelIterator::drive_unindexed()`).
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
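    ///
    /// # Examples
    ///
    /// As a rough sketch (not rayon's own implementation), a hypothetical
    /// `Passthrough` adaptor that merely wraps an inner indexed iterator
    /// could implement `drive` (and the other required methods) by
    /// delegating to that inner iterator:
    ///
    /// ```
    /// use rayon::iter::plumbing::{bridge, Consumer, ProducerCallback, UnindexedConsumer};
    /// use rayon::prelude::*;
    ///
    /// // Hypothetical adaptor: not part of rayon's API.
    /// struct Passthrough<I>(I);
    ///
    /// impl<I: IndexedParallelIterator> ParallelIterator for Passthrough<I> {
    ///     type Item = I::Item;
    ///
    ///     fn drive_unindexed<C>(self, consumer: C) -> C::Result
    ///     where
    ///         C: UnindexedConsumer<Self::Item>,
    ///     {
    ///         // `bridge` adapts an indexed iterator to an unindexed consumer.
    ///         bridge(self, consumer)
    ///     }
    ///
    ///     fn opt_len(&self) -> Option<usize> {
    ///         Some(self.0.len())
    ///     }
    /// }
    ///
    /// impl<I: IndexedParallelIterator> IndexedParallelIterator for Passthrough<I> {
    ///     fn len(&self) -> usize {
    ///         self.0.len()
    ///     }
    ///
    ///     fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
    ///         // Forward to the inner iterator, which already knows how to
    ///         // split the consumer and feed it items.
    ///         self.0.drive(consumer)
    ///     }
    ///
    ///     fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
    ///         self.0.with_producer(callback)
    ///     }
    /// }
    ///
    /// let doubled: Vec<i32> = Passthrough((0..10).into_par_iter().map(|i| i * 2)).collect();
    /// assert_eq!(doubled, (0..10).map(|i| i * 2).collect::<Vec<i32>>());
    /// ```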
    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;

    /// Internal method used to define the behavior of this parallel
    /// iterator. You should not need to call this directly.
    ///
    /// This method converts the iterator into a producer P and then
    /// invokes `callback.callback()` with P. Note that the type of
    /// this producer is not defined as part of the API, since
    /// `callback` must be defined generically for all producers. This
    /// allows the producer type to contain references; it also means
    /// that parallel iterators can adjust that type without causing a
    /// breaking change.
    ///
    /// See the [README] for more details on the internals of parallel
    /// iterators.
    ///
    /// [README]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
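    ///
    /// # Examples
    ///
    /// You normally won't call this method yourself, but as a rough
    /// illustration, a hypothetical `ProducerCallback` (here named
    /// `SumSequentially`, not part of rayon) can consume whatever
    /// producer it is handed, for instance by iterating it sequentially:
    ///
    /// ```
    /// use rayon::iter::plumbing::{Producer, ProducerCallback};
    /// use rayon::prelude::*;
    ///
    /// // Hypothetical callback that drains the producer on the current thread.
    /// struct SumSequentially;
    ///
    /// impl ProducerCallback<i32> for SumSequentially {
    ///     type Output = i32;
    ///
    ///     fn callback<P: Producer<Item = i32>>(self, producer: P) -> i32 {
    ///         // A `Producer` can always be turned into a sequential iterator.
    ///         producer.into_iter().sum()
    ///     }
    /// }
    ///
    /// let sum = (1..=100).into_par_iter().with_producer(SumSequentially);
    /// assert_eq!(sum, 5050);
    /// ```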
    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
}

/// `FromParallelIterator` implements the creation of a collection
/// from a [`ParallelIterator`]. By implementing
/// `FromParallelIterator` for a given type, you define how it will be
/// created from an iterator.
///
/// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
/// [`collect()`]: trait.ParallelIterator.html#method.collect
///
/// # Examples
///
/// Implementing `FromParallelIterator` for your type:
///
/// ```
/// use rayon::prelude::*;
/// use std::mem;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> FromParallelIterator<T> for BlackHole {
///     fn from_par_iter<I>(par_iter: I) -> Self
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         BlackHole {
///             mass: par_iter.count() * mem::size_of::<T>(),
///         }
///     }
/// }
///
/// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
/// assert_eq!(bh.mass, 4000);
/// ```
pub trait FromParallelIterator<T>
where
    T: Send,
{
    /// Creates an instance of the collection from the parallel iterator `par_iter`.
    ///
    /// If your collection is not naturally parallel, the easiest (and
    /// fastest) way to do this is often to collect `par_iter` into a
    /// [`LinkedList`] or other intermediate data structure and then
    /// sequentially extend your collection. However, a more 'native'
    /// technique is to use the [`par_iter.fold`] or
    /// [`par_iter.fold_with`] methods to create the collection.
    /// Alternatively, if your collection is 'natively' parallel, you
    /// can use `par_iter.for_each` to process each element in turn.
    ///
    /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html
    /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold
    /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with
    /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each
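    ///
    /// # Examples
    ///
    /// A rough sketch of the `LinkedList` technique described above (the
    /// `MyVec` wrapper here is hypothetical, not part of rayon):
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::collections::LinkedList;
    ///
    /// // Hypothetical collection that we build sequentially at the end.
    /// struct MyVec(Vec<i32>);
    ///
    /// impl FromParallelIterator<i32> for MyVec {
    ///     fn from_par_iter<I>(par_iter: I) -> Self
    ///     where
    ///         I: IntoParallelIterator<Item = i32>,
    ///     {
    ///         // Collect in parallel into a `LinkedList` (which rayon knows
    ///         // how to build from a parallel iterator), then extend the
    ///         // final collection sequentially.
    ///         let list: LinkedList<i32> = par_iter.into_par_iter().collect();
    ///         let mut vec = Vec::with_capacity(list.len());
    ///         vec.extend(list);
    ///         MyVec(vec)
    ///     }
    /// }
    ///
    /// let v: MyVec = (0..100).into_par_iter().collect();
    /// assert_eq!(v.0, (0..100).collect::<Vec<i32>>());
    /// ```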
    fn from_par_iter<I>(par_iter: I) -> Self
    where
        I: IntoParallelIterator<Item = T>;
}

/// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
///
/// # Examples
///
/// Implementing `ParallelExtend` for your type:
///
/// ```
/// use rayon::prelude::*;
/// use std::mem;
///
/// struct BlackHole {
///     mass: usize,
/// }
///
/// impl<T: Send> ParallelExtend<T> for BlackHole {
///     fn par_extend<I>(&mut self, par_iter: I)
///         where I: IntoParallelIterator<Item = T>
///     {
///         let par_iter = par_iter.into_par_iter();
///         self.mass += par_iter.count() * mem::size_of::<T>();
///     }
/// }
///
/// let mut bh = BlackHole { mass: 0 };
/// bh.par_extend(0i32..1000);
/// assert_eq!(bh.mass, 4000);
/// bh.par_extend(0i64..10);
/// assert_eq!(bh.mass, 4080);
/// ```
pub trait ParallelExtend<T>
where
    T: Send,
{
    /// Extends an instance of the collection with the elements drawn
    /// from the parallel iterator `par_iter`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut vec = vec![];
    /// vec.par_extend(0..5);
    /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
    /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
    /// ```
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = T>;
}

/// `ParallelDrainFull` creates a parallel iterator that moves all items
/// from a collection while retaining the original capacity.
///
/// Types which are indexable typically implement [`ParallelDrainRange`]
/// instead, where you can drain fully with `par_drain(..)`.
///
/// [`ParallelDrainRange`]: trait.ParallelDrainRange.html
pub trait ParallelDrainFull {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over an entire collection.
    ///
    /// When the iterator is dropped, all items are removed, even if the
    /// iterator was not fully consumed. If the iterator is leaked, for example
    /// using `std::mem::forget`, it is unspecified how many items are removed.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::collections::{BinaryHeap, HashSet};
    ///
    /// let squares: HashSet<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// let mut heap: BinaryHeap<_> = squares.iter().copied().collect();
    /// assert_eq!(
    ///     // heaps are drained in arbitrary order
    ///     heap.par_drain()
    ///         .inspect(|x| assert!(squares.contains(x)))
    ///         .count(),
    ///     squares.len(),
    /// );
    /// assert!(heap.is_empty());
    /// assert!(heap.capacity() >= squares.len());
    /// ```
    fn par_drain(self) -> Self::Iter;
}

/// `ParallelDrainRange` creates a parallel iterator that moves a range of items
/// from a collection while retaining the original capacity.
///
/// Types which are not indexable may implement [`ParallelDrainFull`] instead.
///
/// [`ParallelDrainFull`]: trait.ParallelDrainFull.html
pub trait ParallelDrainRange<Idx = usize> {
    /// The draining parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This is usually the same as `IntoParallelIterator::Item`.
    type Item: Send;

    /// Returns a draining parallel iterator over a range of the collection.
    ///
    /// When the iterator is dropped, all items in the range are removed, even
    /// if the iterator was not fully consumed. If the iterator is leaked, for
    /// example using `std::mem::forget`, it is unspecified how many items are
    /// removed.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let squares: Vec<i32> = (0..10).map(|x| x * x).collect();
    ///
    /// println!("RangeFull");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..)
    ///            .eq(squares.par_iter().copied()));
    /// assert!(vec.is_empty());
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeFrom");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(5..)
    ///            .eq(squares[5..].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[..5]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeTo");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..5)
    ///            .eq(squares[..5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[5..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeToInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(..=5)
    ///            .eq(squares[..=5].par_iter().copied()));
    /// assert_eq!(&vec[..], &squares[6..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("Range");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..7)
    ///            .eq(squares[3..7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[7..]);
    /// assert!(vec.capacity() >= squares.len());
    ///
    /// println!("RangeInclusive");
    /// let mut vec = squares.clone();
    /// assert!(vec.par_drain(3..=7)
    ///            .eq(squares[3..=7].par_iter().copied()));
    /// assert_eq!(&vec[..3], &squares[..3]);
    /// assert_eq!(&vec[3..], &squares[8..]);
    /// assert!(vec.capacity() >= squares.len());
    /// ```
    fn par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter;
}

/// We hide the `Try` trait in a private module, as it's only meant to be a
/// stable clone of the standard library's `Try` trait, as yet unstable.
mod private {
    /// Clone of `std::ops::Try`.
    ///
    /// Implementing this trait is not permitted outside of `rayon`.
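    ///
    /// As a quick illustration of the behavior this trait enables (this
    /// example only exercises the public combinators, not the private API
    /// itself): `Ok`/`Some` values let the computation continue, while
    /// `Err`/`None` values cause an early exit.
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// // Only `x == 42` produces an error, so that error is what we get back.
    /// let r: Result<(), i32> = (0..1_000_000).into_par_iter().try_for_each(|x| {
    ///     if x == 42 { Err(x) } else { Ok(()) }
    /// });
    /// assert_eq!(r, Err(42));
    /// ```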
    pub trait Try {
        private_decl! {}

        type Ok;
        type Error;
        fn into_result(self) -> Result<Self::Ok, Self::Error>;
        fn from_ok(v: Self::Ok) -> Self;
        fn from_error(v: Self::Error) -> Self;
    }

    impl<T> Try for Option<T> {
        private_impl! {}

        type Ok = T;
        type Error = ();

        fn into_result(self) -> Result<T, ()> {
            self.ok_or(())
        }
        fn from_ok(v: T) -> Self {
            Some(v)
        }
        fn from_error(_: ()) -> Self {
            None
        }
    }

    impl<T, E> Try for Result<T, E> {
        private_impl! {}

        type Ok = T;
        type Error = E;

        fn into_result(self) -> Result<T, E> {
            self
        }
        fn from_ok(v: T) -> Self {
            Ok(v)
        }
        fn from_error(v: E) -> Self {
            Err(v)
        }
    }
}