//! Traits for writing parallel programs using an iterator-style interface
//!
//! You will rarely need to interact with this module directly unless you have
//! need to name one of the iterator types.
//!
//! Parallel iterators make it easy to write iterator-like chains that
//! execute in parallel: typically all you have to do is convert the
//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
//! example, to compute the sum of the squares of a sequence of
//! integers, one might write:
//!
//! ```rust
//! use rayon::prelude::*;
//! fn sum_of_squares(input: &[i32]) -> i32 {
//!     input.par_iter()
//!          .map(|i| i * i)
//!          .sum()
//! }
//! ```
//!
//! Or, to increment all the integers in a slice, you could write:
//!
//! ```rust
//! use rayon::prelude::*;
//! fn increment_all(input: &mut [i32]) {
//!     input.par_iter_mut()
//!          .for_each(|p| *p += 1);
//! }
//! ```
//!
//! To use parallel iterators, first import the traits by adding
//! something like `use rayon::prelude::*` to your module. You can
//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
//! parallel iterator. Like a [regular iterator][], parallel
//! iterators work by first constructing a computation and then
//! executing it.
//!
//! In addition to `par_iter()` and friends, some types offer other
//! ways to create (or consume) parallel iterators:
//!
//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
//!   `par_windows`, as well as various parallel sorting
//!   operations. See [the `ParallelSlice` trait] for the full list.
//! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
//!   See [the `ParallelString` trait] for the full list.
//! - Various collections offer [`par_extend`], which grows a
//!   collection given a parallel iterator. (If you don't have a
(If you don't have a 49 //! collection to extend, you can use [`collect()`] to create a new 50 //! one from scratch.) 51 //! 52 //! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html 53 //! [the `ParallelString` trait]: ../str/trait.ParallelString.html 54 //! [`par_extend`]: trait.ParallelExtend.html 55 //! [`collect()`]: trait.ParallelIterator.html#method.collect 56 //! 57 //! To see the full range of methods available on parallel iterators, 58 //! check out the [`ParallelIterator`] and [`IndexedParallelIterator`] 59 //! traits. 60 //! 61 //! If you'd like to offer parallel iterators for your own collector, 62 //! or write your own combinator, then check out the [plumbing] 63 //! module. 64 //! 65 //! [regular iterator]: http://doc.rust-lang.org/std/iter/trait.Iterator.html 66 //! [`ParallelIterator`]: trait.ParallelIterator.html 67 //! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 68 //! [plumbing]: plumbing 69 70 pub use either::Either; 71 use std::cmp::{self, Ordering}; 72 use std::iter::{Sum, Product}; 73 use std::ops::Fn; 74 use self::plumbing::*; 75 76 // There is a method to the madness here: 77 // 78 // - Most of these modules are private but expose certain types to the end-user 79 // (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the 80 // public API surface of the `ParallelIterator` traits. 81 // - In **this** module, those public types are always used unprefixed, which forces 82 // us to add a `pub use` and helps identify if we missed anything. 83 // - In contrast, items that appear **only** in the body of a method, 84 // e.g. `find::find()`, are always used **prefixed**, so that they 85 // can be readily distinguished. 
86 87 mod find; 88 mod find_first_last; 89 mod chain; 90 pub use self::chain::Chain; 91 mod chunks; 92 pub use self::chunks::Chunks; 93 mod collect; 94 mod enumerate; 95 pub use self::enumerate::Enumerate; 96 mod filter; 97 pub use self::filter::Filter; 98 mod filter_map; 99 pub use self::filter_map::FilterMap; 100 mod flat_map; 101 pub use self::flat_map::FlatMap; 102 mod flatten; 103 pub use self::flatten::Flatten; 104 mod from_par_iter; 105 pub mod plumbing; 106 mod for_each; 107 mod fold; 108 pub use self::fold::{Fold, FoldWith}; 109 mod reduce; 110 mod skip; 111 pub use self::skip::Skip; 112 mod splitter; 113 pub use self::splitter::{split, Split}; 114 mod take; 115 pub use self::take::Take; 116 mod map; 117 pub use self::map::Map; 118 mod map_with; 119 pub use self::map_with::MapWith; 120 mod zip; 121 pub use self::zip::Zip; 122 mod zip_eq; 123 pub use self::zip_eq::ZipEq; 124 mod interleave; 125 pub use self::interleave::Interleave; 126 mod interleave_shortest; 127 pub use self::interleave_shortest::InterleaveShortest; 128 mod intersperse; 129 pub use self::intersperse::Intersperse; 130 mod update; 131 pub use self::update::Update; 132 133 mod noop; 134 mod rev; 135 pub use self::rev::Rev; 136 mod len; 137 pub use self::len::{MinLen, MaxLen}; 138 mod sum; 139 mod product; 140 mod cloned; 141 pub use self::cloned::Cloned; 142 mod inspect; 143 pub use self::inspect::Inspect; 144 mod while_some; 145 pub use self::while_some::WhileSome; 146 mod extend; 147 mod unzip; 148 mod repeat; 149 pub use self::repeat::{Repeat, repeat}; 150 pub use self::repeat::{RepeatN, repeatn}; 151 152 mod empty; 153 pub use self::empty::{Empty, empty}; 154 mod once; 155 pub use self::once::{Once, once}; 156 157 #[cfg(test)] 158 mod test; 159 160 /// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`]. 161 /// 162 /// By implementing `IntoParallelIterator` for a type, you define how it will 163 /// transformed into an iterator. 
This is a parallel version of the standard 164 /// library's [`std::iter::IntoIterator`] trait. 165 /// 166 /// [`ParallelIterator`]: trait.ParallelIterator.html 167 /// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html 168 pub trait IntoParallelIterator { 169 /// The parallel iterator type that will be created. 170 type Iter: ParallelIterator<Item = Self::Item>; 171 172 /// The type of item that the parallel iterator will produce. 173 type Item: Send; 174 175 /// Converts `self` into a parallel iterator. 176 /// 177 /// # Examples 178 /// 179 /// ``` 180 /// use rayon::prelude::*; 181 /// 182 /// println!("counting in parallel:"); 183 /// (0..100).into_par_iter() 184 /// .for_each(|i| println!("{}", i)); 185 /// ``` 186 /// 187 /// This conversion is often implicit for arguments to methods like [`zip`]. 188 /// 189 /// ``` 190 /// use rayon::prelude::*; 191 /// 192 /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect(); 193 /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]); 194 /// ``` 195 /// 196 /// [`zip`]: trait.IndexedParallelIterator.html#method.zip into_par_iter(self) -> Self::Iter197 fn into_par_iter(self) -> Self::Iter; 198 } 199 200 /// `IntoParallelRefIterator` implements the conversion to a 201 /// [`ParallelIterator`], providing shared references to the data. 202 /// 203 /// This is a parallel version of the `iter()` method 204 /// defined by various collections. 205 /// 206 /// This trait is automatically implemented 207 /// `for I where &I: IntoParallelIterator`. In most cases, users 208 /// will want to implement [`IntoParallelIterator`] rather than implement 209 /// this trait directly. 210 /// 211 /// [`ParallelIterator`]: trait.ParallelIterator.html 212 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html 213 pub trait IntoParallelRefIterator<'data> { 214 /// The type of the parallel iterator that will be returned. 
215 type Iter: ParallelIterator<Item = Self::Item>; 216 217 /// The type of item that the parallel iterator will produce. 218 /// This will typically be an `&'data T` reference type. 219 type Item: Send + 'data; 220 221 /// Converts `self` into a parallel iterator. 222 /// 223 /// # Examples 224 /// 225 /// ``` 226 /// use rayon::prelude::*; 227 /// 228 /// let v: Vec<_> = (0..100).collect(); 229 /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2); 230 /// 231 /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`, 232 /// // producing the exact same references. 233 /// assert!(v.par_iter().zip(&v) 234 /// .all(|(a, b)| std::ptr::eq(a, b))); 235 /// ``` par_iter(&'data self) -> Self::Iter236 fn par_iter(&'data self) -> Self::Iter; 237 } 238 239 impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I 240 where &'data I: IntoParallelIterator 241 { 242 type Iter = <&'data I as IntoParallelIterator>::Iter; 243 type Item = <&'data I as IntoParallelIterator>::Item; 244 par_iter(&'data self) -> Self::Iter245 fn par_iter(&'data self) -> Self::Iter { 246 self.into_par_iter() 247 } 248 } 249 250 251 /// `IntoParallelRefMutIterator` implements the conversion to a 252 /// [`ParallelIterator`], providing mutable references to the data. 253 /// 254 /// This is a parallel version of the `iter_mut()` method 255 /// defined by various collections. 256 /// 257 /// This trait is automatically implemented 258 /// `for I where &mut I: IntoParallelIterator`. In most cases, users 259 /// will want to implement [`IntoParallelIterator`] rather than implement 260 /// this trait directly. 261 /// 262 /// [`ParallelIterator`]: trait.ParallelIterator.html 263 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html 264 pub trait IntoParallelRefMutIterator<'data> { 265 /// The type of iterator that will be created. 
266 type Iter: ParallelIterator<Item = Self::Item>; 267 268 /// The type of item that will be produced; this is typically an 269 /// `&'data mut T` reference. 270 type Item: Send + 'data; 271 272 /// Creates the parallel iterator from `self`. 273 /// 274 /// # Examples 275 /// 276 /// ``` 277 /// use rayon::prelude::*; 278 /// 279 /// let mut v = vec![0usize; 5]; 280 /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i); 281 /// assert_eq!(v, [0, 1, 2, 3, 4]); 282 /// ``` par_iter_mut(&'data mut self) -> Self::Iter283 fn par_iter_mut(&'data mut self) -> Self::Iter; 284 } 285 286 impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I 287 where &'data mut I: IntoParallelIterator 288 { 289 type Iter = <&'data mut I as IntoParallelIterator>::Iter; 290 type Item = <&'data mut I as IntoParallelIterator>::Item; 291 par_iter_mut(&'data mut self) -> Self::Iter292 fn par_iter_mut(&'data mut self) -> Self::Iter { 293 self.into_par_iter() 294 } 295 } 296 297 /// Parallel version of the standard iterator trait. 298 /// 299 /// The combinators on this trait are available on **all** parallel 300 /// iterators. Additional methods can be found on the 301 /// [`IndexedParallelIterator`] trait: those methods are only 302 /// available for parallel iterators where the number of items is 303 /// known in advance (so, e.g., after invoking `filter`, those methods 304 /// become unavailable). 305 /// 306 /// For examples of using parallel iterators, see [the docs on the 307 /// `iter` module][iter]. 308 /// 309 /// [iter]: index.html 310 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 311 pub trait ParallelIterator: Sized + Send { 312 /// The type of item that this parallel iterator produces. 313 /// For example, if you use the [`for_each`] method, this is the type of 314 /// item that your closure will be invoked with. 
315 /// 316 /// [`for_each`]: #method.for_each 317 type Item: Send; 318 319 /// Executes `OP` on each item produced by the iterator, in parallel. 320 /// 321 /// # Examples 322 /// 323 /// ``` 324 /// use rayon::prelude::*; 325 /// 326 /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x)); 327 /// ``` for_each<OP>(self, op: OP) where OP: Fn(Self::Item) + Sync + Send328 fn for_each<OP>(self, op: OP) 329 where OP: Fn(Self::Item) + Sync + Send 330 { 331 for_each::for_each(self, &op) 332 } 333 334 /// Executes `OP` on the given `init` value with each item produced by 335 /// the iterator, in parallel. 336 /// 337 /// The `init` value will be cloned only as needed to be paired with 338 /// the group of items in each rayon job. It does not require the type 339 /// to be `Sync`. 340 /// 341 /// # Examples 342 /// 343 /// ``` 344 /// use std::sync::mpsc::channel; 345 /// use rayon::prelude::*; 346 /// 347 /// let (sender, receiver) = channel(); 348 /// 349 /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap()); 350 /// 351 /// let mut res: Vec<_> = receiver.iter().collect(); 352 /// 353 /// res.sort(); 354 /// 355 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4]) 356 /// ``` for_each_with<OP, T>(self, init: T, op: OP) where OP: Fn(&mut T, Self::Item) + Sync + Send, T: Send + Clone357 fn for_each_with<OP, T>(self, init: T, op: OP) 358 where OP: Fn(&mut T, Self::Item) + Sync + Send, 359 T: Send + Clone 360 { 361 self.map_with(init, op).for_each(|()| ()) 362 } 363 364 /// Counts the number of items in this parallel iterator. 365 /// 366 /// # Examples 367 /// 368 /// ``` 369 /// use rayon::prelude::*; 370 /// 371 /// let count = (0..100).into_par_iter().count(); 372 /// 373 /// assert_eq!(count, 100); 374 /// ``` count(self) -> usize375 fn count(self) -> usize { 376 self.map(|_| 1).sum() 377 } 378 379 /// Applies `map_op` to each item of this iterator, producing a new 380 /// iterator with the results. 
381 /// 382 /// # Examples 383 /// 384 /// ``` 385 /// use rayon::prelude::*; 386 /// 387 /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2); 388 /// 389 /// let doubles: Vec<_> = par_iter.collect(); 390 /// 391 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]); 392 /// ``` map<F, R>(self, map_op: F) -> Map<Self, F> where F: Fn(Self::Item) -> R + Sync + Send, R: Send393 fn map<F, R>(self, map_op: F) -> Map<Self, F> 394 where F: Fn(Self::Item) -> R + Sync + Send, 395 R: Send 396 { 397 map::new(self, map_op) 398 } 399 400 /// Applies `map_op` to the given `init` value with each item of this 401 /// iterator, producing a new iterator with the results. 402 /// 403 /// The `init` value will be cloned only as needed to be paired with 404 /// the group of items in each rayon job. It does not require the type 405 /// to be `Sync`. 406 /// 407 /// # Examples 408 /// 409 /// ``` 410 /// use std::sync::mpsc::channel; 411 /// use rayon::prelude::*; 412 /// 413 /// let (sender, receiver) = channel(); 414 /// 415 /// let a: Vec<_> = (0..5) 416 /// .into_par_iter() // iterating over i32 417 /// .map_with(sender, |s, x| { 418 /// s.send(x).unwrap(); // sending i32 values through the channel 419 /// x // returning i32 420 /// }) 421 /// .collect(); // collecting the returned values into a vector 422 /// 423 /// let mut b: Vec<_> = receiver.iter() // iterating over the values in the channel 424 /// .collect(); // and collecting them 425 /// b.sort(); 426 /// 427 /// assert_eq!(a, b); 428 /// ``` map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F> where F: Fn(&mut T, Self::Item) -> R + Sync + Send, T: Send + Clone, R: Send429 fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F> 430 where F: Fn(&mut T, Self::Item) -> R + Sync + Send, 431 T: Send + Clone, 432 R: Send 433 { 434 map_with::new(self, init, map_op) 435 } 436 437 /// Creates an iterator which clones all of its elements. 
This may be 438 /// useful when you have an iterator over `&T`, but you need `T`. 439 /// 440 /// # Examples 441 /// 442 /// ``` 443 /// use rayon::prelude::*; 444 /// 445 /// let a = [1, 2, 3]; 446 /// 447 /// let v_cloned: Vec<_> = a.par_iter().cloned().collect(); 448 /// 449 /// // cloned is the same as .map(|&x| x), for integers 450 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect(); 451 /// 452 /// assert_eq!(v_cloned, vec![1, 2, 3]); 453 /// assert_eq!(v_map, vec![1, 2, 3]); 454 /// ``` cloned<'a, T>(self) -> Cloned<Self> where T: 'a + Clone + Send, Self: ParallelIterator<Item = &'a T>455 fn cloned<'a, T>(self) -> Cloned<Self> 456 where T: 'a + Clone + Send, 457 Self: ParallelIterator<Item = &'a T> 458 { 459 cloned::new(self) 460 } 461 462 /// Applies `inspect_op` to a reference to each item of this iterator, 463 /// producing a new iterator passing through the original items. This is 464 /// often useful for debugging to see what's happening in iterator stages. 465 /// 466 /// # Examples 467 /// 468 /// ``` 469 /// use rayon::prelude::*; 470 /// 471 /// let a = [1, 4, 2, 3]; 472 /// 473 /// // this iterator sequence is complex. 
474 /// let sum = a.par_iter() 475 /// .cloned() 476 /// .filter(|&x| x % 2 == 0) 477 /// .reduce(|| 0, |sum, i| sum + i); 478 /// 479 /// println!("{}", sum); 480 /// 481 /// // let's add some inspect() calls to investigate what's happening 482 /// let sum = a.par_iter() 483 /// .cloned() 484 /// .inspect(|x| println!("about to filter: {}", x)) 485 /// .filter(|&x| x % 2 == 0) 486 /// .inspect(|x| println!("made it through filter: {}", x)) 487 /// .reduce(|| 0, |sum, i| sum + i); 488 /// 489 /// println!("{}", sum); 490 /// ``` inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP> where OP: Fn(&Self::Item) + Sync + Send491 fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP> 492 where OP: Fn(&Self::Item) + Sync + Send 493 { 494 inspect::new(self, inspect_op) 495 } 496 497 /// Mutates each item of this iterator before yielding it. 498 /// 499 /// # Examples 500 /// 501 /// ``` 502 /// use rayon::prelude::*; 503 /// 504 /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;}); 505 /// 506 /// let doubles: Vec<_> = par_iter.collect(); 507 /// 508 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]); 509 /// ``` update<F>(self, update_op: F) -> Update<Self, F> where F: Fn(&mut Self::Item) + Sync + Send510 fn update<F>(self, update_op: F) -> Update<Self, F> 511 where F: Fn(&mut Self::Item) + Sync + Send 512 { 513 update::new(self, update_op) 514 } 515 516 /// Applies `filter_op` to each item of this iterator, producing a new 517 /// iterator with only the items that gave `true` results. 
518 /// 519 /// # Examples 520 /// 521 /// ``` 522 /// use rayon::prelude::*; 523 /// 524 /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0); 525 /// 526 /// let even_numbers: Vec<_> = par_iter.collect(); 527 /// 528 /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]); 529 /// ``` filter<P>(self, filter_op: P) -> Filter<Self, P> where P: Fn(&Self::Item) -> bool + Sync + Send530 fn filter<P>(self, filter_op: P) -> Filter<Self, P> 531 where P: Fn(&Self::Item) -> bool + Sync + Send 532 { 533 filter::new(self, filter_op) 534 } 535 536 /// Applies `filter_op` to each item of this iterator to get an `Option`, 537 /// producing a new iterator with only the items from `Some` results. 538 /// 539 /// # Examples 540 /// 541 /// ``` 542 /// use rayon::prelude::*; 543 /// 544 /// let mut par_iter = (0..10).into_par_iter() 545 /// .filter_map(|x| { 546 /// if x % 2 == 0 { Some(x * 3) } 547 /// else { None } 548 /// }); 549 /// 550 /// let even_numbers: Vec<_> = par_iter.collect(); 551 /// 552 /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]); 553 /// ``` filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P> where P: Fn(Self::Item) -> Option<R> + Sync + Send, R: Send554 fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P> 555 where P: Fn(Self::Item) -> Option<R> + Sync + Send, 556 R: Send 557 { 558 filter_map::new(self, filter_op) 559 } 560 561 /// Applies `map_op` to each item of this iterator to get nested iterators, 562 /// producing a new iterator that flattens these back into one. 
563 /// 564 /// # Examples 565 /// 566 /// ``` 567 /// use rayon::prelude::*; 568 /// 569 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]]; 570 /// 571 /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec()); 572 /// 573 /// let vec: Vec<_> = par_iter.collect(); 574 /// 575 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]); 576 /// ``` flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F> where F: Fn(Self::Item) -> PI + Sync + Send, PI: IntoParallelIterator577 fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F> 578 where F: Fn(Self::Item) -> PI + Sync + Send, 579 PI: IntoParallelIterator 580 { 581 flat_map::new(self, map_op) 582 } 583 584 /// An adaptor that flattens iterable `Item`s into one large iterator 585 /// 586 /// # Examples 587 /// 588 /// ``` 589 /// use rayon::prelude::*; 590 /// 591 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]]; 592 /// let y: Vec<_> = x.into_par_iter().flatten().collect(); 593 /// 594 /// assert_eq!(y, vec![1, 2, 3, 4]); 595 /// ``` flatten(self) -> Flatten<Self> where Self::Item: IntoParallelIterator596 fn flatten(self) -> Flatten<Self> 597 where Self::Item: IntoParallelIterator 598 { 599 flatten::new(self) 600 } 601 602 /// Reduces the items in the iterator into one item using `op`. 603 /// The argument `identity` should be a closure that can produce 604 /// "identity" value which may be inserted into the sequence as 605 /// needed to create opportunities for parallel execution. So, for 606 /// example, if you are doing a summation, then `identity()` ought 607 /// to produce something that represents the zero for your type 608 /// (but consider just calling `sum()` in that case). 609 /// 610 /// # Examples 611 /// 612 /// ``` 613 /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)` 614 /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)` 615 /// // where the first/second elements are summed separately. 
616 /// use rayon::prelude::*; 617 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)] 618 /// .par_iter() // iterating over &(i32, i32) 619 /// .cloned() // iterating over (i32, i32) 620 /// .reduce(|| (0, 0), // the "identity" is 0 in both columns 621 /// |a, b| (a.0 + b.0, a.1 + b.1)); 622 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9)); 623 /// ``` 624 /// 625 /// **Note:** unlike a sequential `fold` operation, the order in 626 /// which `op` will be applied to reduce the result is not fully 627 /// specified. So `op` should be [associative] or else the results 628 /// will be non-deterministic. And of course `identity()` should 629 /// produce a true identity. 630 /// 631 /// [associative]: https://en.wikipedia.org/wiki/Associative_property reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send, ID: Fn() -> Self::Item + Sync + Send632 fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item 633 where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send, 634 ID: Fn() -> Self::Item + Sync + Send 635 { 636 reduce::reduce(self, identity, op) 637 } 638 639 /// Reduces the items in the iterator into one item using `op`. 640 /// If the iterator is empty, `None` is returned; otherwise, 641 /// `Some` is returned. 642 /// 643 /// This version of `reduce` is simple but somewhat less 644 /// efficient. If possible, it is better to call `reduce()`, which 645 /// requires an identity element. 
646 /// 647 /// # Examples 648 /// 649 /// ``` 650 /// use rayon::prelude::*; 651 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)] 652 /// .par_iter() // iterating over &(i32, i32) 653 /// .cloned() // iterating over (i32, i32) 654 /// .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1)) 655 /// .unwrap(); 656 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9)); 657 /// ``` 658 /// 659 /// **Note:** unlike a sequential `fold` operation, the order in 660 /// which `op` will be applied to reduce the result is not fully 661 /// specified. So `op` should be [associative] or else the results 662 /// will be non-deterministic. 663 /// 664 /// [associative]: https://en.wikipedia.org/wiki/Associative_property reduce_with<OP>(self, op: OP) -> Option<Self::Item> where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send665 fn reduce_with<OP>(self, op: OP) -> Option<Self::Item> 666 where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send 667 { 668 self.fold(|| None, |opt_a, b| match opt_a { 669 Some(a) => Some(op(a, b)), 670 None => Some(b), 671 }) 672 .reduce(|| None, |opt_a, opt_b| match (opt_a, opt_b) { 673 (Some(a), Some(b)) => Some(op(a, b)), 674 (Some(v), None) | (None, Some(v)) => Some(v), 675 (None, None) => None, 676 }) 677 } 678 679 /// Parallel fold is similar to sequential fold except that the 680 /// sequence of items may be subdivided before it is 681 /// folded. Consider a list of numbers like `22 3 77 89 46`. If 682 /// you used sequential fold to add them (`fold(0, |a,b| a+b)`, 683 /// you would wind up first adding 0 + 22, then 22 + 3, then 25 + 684 /// 77, and so forth. The **parallel fold** works similarly except 685 /// that it first breaks up your list into sublists, and hence 686 /// instead of yielding up a single sum at the end, it yields up 687 /// multiple sums. The number of results is nondeterministic, as 688 /// is the point where the breaks occur. 
689 /// 690 /// So if did the same parallel fold (`fold(0, |a,b| a+b)`) on 691 /// our example list, we might wind up with a sequence of two numbers, 692 /// like so: 693 /// 694 /// ```notrust 695 /// 22 3 77 89 46 696 /// | | 697 /// 102 135 698 /// ``` 699 /// 700 /// Or perhaps these three numbers: 701 /// 702 /// ```notrust 703 /// 22 3 77 89 46 704 /// | | | 705 /// 102 89 46 706 /// ``` 707 /// 708 /// In general, Rayon will attempt to find good breaking points 709 /// that keep all of your cores busy. 710 /// 711 /// ### Fold versus reduce 712 /// 713 /// The `fold()` and `reduce()` methods each take an identity element 714 /// and a combining function, but they operate rather differently. 715 /// 716 /// `reduce()` requires that the identity function has the same 717 /// type as the things you are iterating over, and it fully 718 /// reduces the list of items into a single item. So, for example, 719 /// imagine we are iterating over a list of bytes `bytes: [128_u8, 720 /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b: 721 /// u8| a + b)`, we would get an overflow. This is because `0`, 722 /// `a`, and `b` here are all bytes, just like the numbers in the 723 /// list (I wrote the types explicitly above, but those are the 724 /// only types you can use). To avoid the overflow, we would need 725 /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a, 726 /// b| a + b)`, in which case our result would be `256`. 727 /// 728 /// In contrast, with `fold()`, the identity function does not 729 /// have to have the same type as the things you are iterating 730 /// over, and you potentially get back many results. So, if we 731 /// continue with the `bytes` example from the previous paragraph, 732 /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to 733 /// convert our bytes into `u32`. And of course we might not get 734 /// back a single sum. 
735 /// 736 /// There is a more subtle distinction as well, though it's 737 /// actually implied by the above points. When you use `reduce()`, 738 /// your reduction function is sometimes called with values that 739 /// were never part of your original parallel iterator (for 740 /// example, both the left and right might be a partial sum). With 741 /// `fold()`, in contrast, the left value in the fold function is 742 /// always the accumulator, and the right value is always from 743 /// your original sequence. 744 /// 745 /// ### Fold vs Map/Reduce 746 /// 747 /// Fold makes sense if you have some operation where it is 748 /// cheaper to groups of elements at a time. For example, imagine 749 /// collecting characters into a string. If you were going to use 750 /// map/reduce, you might try this: 751 /// 752 /// ``` 753 /// use rayon::prelude::*; 754 /// 755 /// let s = 756 /// ['a', 'b', 'c', 'd', 'e'] 757 /// .par_iter() 758 /// .map(|c: &char| format!("{}", c)) 759 /// .reduce(|| String::new(), 760 /// |mut a: String, b: String| { a.push_str(&b); a }); 761 /// 762 /// assert_eq!(s, "abcde"); 763 /// ``` 764 /// 765 /// Because reduce produces the same type of element as its input, 766 /// you have to first map each character into a string, and then 767 /// you can reduce them. This means we create one string per 768 /// element in ou iterator -- not so great. Using `fold`, we can 769 /// do this instead: 770 /// 771 /// ``` 772 /// use rayon::prelude::*; 773 /// 774 /// let s = 775 /// ['a', 'b', 'c', 'd', 'e'] 776 /// .par_iter() 777 /// .fold(|| String::new(), 778 /// |mut s: String, c: &char| { s.push(*c); s }) 779 /// .reduce(|| String::new(), 780 /// |mut a: String, b: String| { a.push_str(&b); a }); 781 /// 782 /// assert_eq!(s, "abcde"); 783 /// ``` 784 /// 785 /// Now `fold` will process groups of our characters at a time, 786 /// and we only make one string per group. 
We should wind up with 787 /// some small-ish number of strings roughly proportional to the 788 /// number of CPUs you have (it will ultimately depend on how busy 789 /// your processors are). Note that we still need to do a reduce 790 /// afterwards to combine those groups of strings into a single 791 /// string. 792 /// 793 /// You could use a similar trick to save partial results (e.g., a 794 /// cache) or something similar. 795 /// 796 /// ### Combining fold with other operations 797 /// 798 /// You can combine `fold` with `reduce` if you want to produce a 799 /// single value. This is then roughly equivalent to a map/reduce 800 /// combination in effect: 801 /// 802 /// ``` 803 /// use rayon::prelude::*; 804 /// 805 /// let bytes = 0..22_u8; 806 /// let sum = bytes.into_par_iter() 807 /// .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32)) 808 /// .sum::<u32>(); 809 /// 810 /// assert_eq!(sum, (0..22).sum()); // compare to sequential 811 /// ``` fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F> where F: Fn(T, Self::Item) -> T + Sync + Send, ID: Fn() -> T + Sync + Send, T: Send812 fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F> 813 where F: Fn(T, Self::Item) -> T + Sync + Send, 814 ID: Fn() -> T + Sync + Send, 815 T: Send 816 { 817 fold::fold(self, identity, fold_op) 818 } 819 820 /// Applies `fold_op` to the given `init` value with each item of this 821 /// iterator, finally producing the value for further use. 822 /// 823 /// This works essentially like `fold(|| init.clone(), fold_op)`, except 824 /// it doesn't require the `init` type to be `Sync`, nor any other form 825 /// of added synchronization. 
826 /// 827 /// # Examples 828 /// 829 /// ``` 830 /// use rayon::prelude::*; 831 /// 832 /// let bytes = 0..22_u8; 833 /// let sum = bytes.into_par_iter() 834 /// .fold_with(0_u32, |a: u32, b: u8| a + (b as u32)) 835 /// .sum::<u32>(); 836 /// 837 /// assert_eq!(sum, (0..22).sum()); // compare to sequential 838 /// ``` fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F> where F: Fn(T, Self::Item) -> T + Sync + Send, T: Send + Clone839 fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F> 840 where F: Fn(T, Self::Item) -> T + Sync + Send, 841 T: Send + Clone 842 { 843 fold::fold_with(self, init, fold_op) 844 } 845 846 /// Sums up the items in the iterator. 847 /// 848 /// Note that the order in items will be reduced is not specified, 849 /// so if the `+` operator is not truly [associative] \(as is the 850 /// case for floating point numbers), then the results are not 851 /// fully deterministic. 852 /// 853 /// [associative]: https://en.wikipedia.org/wiki/Associative_property 854 /// 855 /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`, 856 /// except that the type of `0` and the `+` operation may vary 857 /// depending on the type of value being produced. 858 /// 859 /// # Examples 860 /// 861 /// ``` 862 /// use rayon::prelude::*; 863 /// 864 /// let a = [1, 5, 7]; 865 /// 866 /// let sum: i32 = a.par_iter().sum(); 867 /// 868 /// assert_eq!(sum, 13); 869 /// ``` sum<S>(self) -> S where S: Send + Sum<Self::Item> + Sum<S>870 fn sum<S>(self) -> S 871 where S: Send + Sum<Self::Item> + Sum<S> 872 { 873 sum::sum(self) 874 } 875 876 /// Multiplies all the items in the iterator. 877 /// 878 /// Note that the order in items will be reduced is not specified, 879 /// so if the `*` operator is not truly [associative] \(as is the 880 /// case for floating point numbers), then the results are not 881 /// fully deterministic. 
882 /// 883 /// [associative]: https://en.wikipedia.org/wiki/Associative_property 884 /// 885 /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`, 886 /// except that the type of `1` and the `*` operation may vary 887 /// depending on the type of value being produced. 888 /// 889 /// # Examples 890 /// 891 /// ``` 892 /// use rayon::prelude::*; 893 /// 894 /// fn factorial(n: u32) -> u32 { 895 /// (1..n+1).into_par_iter().product() 896 /// } 897 /// 898 /// assert_eq!(factorial(0), 1); 899 /// assert_eq!(factorial(1), 1); 900 /// assert_eq!(factorial(5), 120); 901 /// ``` product<P>(self) -> P where P: Send + Product<Self::Item> + Product<P>902 fn product<P>(self) -> P 903 where P: Send + Product<Self::Item> + Product<P> 904 { 905 product::product(self) 906 } 907 908 /// Computes the minimum of all the items in the iterator. If the 909 /// iterator is empty, `None` is returned; otherwise, `Some(min)` 910 /// is returned. 911 /// 912 /// Note that the order in which the items will be reduced is not 913 /// specified, so if the `Ord` impl is not truly associative, then 914 /// the results are not deterministic. 915 /// 916 /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`. 917 /// 918 /// # Examples 919 /// 920 /// ``` 921 /// use rayon::prelude::*; 922 /// 923 /// let a = [45, 74, 32]; 924 /// 925 /// assert_eq!(a.par_iter().min(), Some(&32)); 926 /// 927 /// let b: [i32; 0] = []; 928 /// 929 /// assert_eq!(b.par_iter().min(), None); 930 /// ``` min(self) -> Option<Self::Item> where Self::Item: Ord931 fn min(self) -> Option<Self::Item> 932 where Self::Item: Ord 933 { 934 self.reduce_with(cmp::min) 935 } 936 937 /// Computes the minimum of all the items in the iterator with respect to 938 /// the given comparison function. If the iterator is empty, `None` is 939 /// returned; otherwise, `Some(min)` is returned. 
940 /// 941 /// Note that the order in which the items will be reduced is not 942 /// specified, so if the comparison function is not associative, then 943 /// the results are not deterministic. 944 /// 945 /// # Examples 946 /// 947 /// ``` 948 /// use rayon::prelude::*; 949 /// 950 /// let a = [-3_i32, 77, 53, 240, -1]; 951 /// 952 /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3)); 953 /// ``` min_by<F>(self, f: F) -> Option<Self::Item> where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering954 fn min_by<F>(self, f: F) -> Option<Self::Item> 955 where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering 956 { 957 self.reduce_with(|a, b| match f(&a, &b) { 958 Ordering::Greater => b, 959 _ => a, 960 }) 961 } 962 963 /// Computes the item that yields the minimum value for the given 964 /// function. If the iterator is empty, `None` is returned; 965 /// otherwise, `Some(item)` is returned. 966 /// 967 /// Note that the order in which the items will be reduced is not 968 /// specified, so if the `Ord` impl is not truly associative, then 969 /// the results are not deterministic. 970 /// 971 /// # Examples 972 /// 973 /// ``` 974 /// use rayon::prelude::*; 975 /// 976 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23]; 977 /// 978 /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2)); 979 /// ``` min_by_key<K, F>(self, f: F) -> Option<Self::Item> where K: Ord + Send, F: Sync + Send + Fn(&Self::Item) -> K980 fn min_by_key<K, F>(self, f: F) -> Option<Self::Item> 981 where K: Ord + Send, 982 F: Sync + Send + Fn(&Self::Item) -> K 983 { 984 self.map(|x| (f(&x), x)) 985 .min_by(|a, b| (a.0).cmp(&b.0)) 986 .map(|(_, x)| x) 987 } 988 989 /// Computes the maximum of all the items in the iterator. If the 990 /// iterator is empty, `None` is returned; otherwise, `Some(max)` 991 /// is returned. 
    ///
    /// Note that the order in which the items will be reduced is not
    /// specified, so if the `Ord` impl is not truly associative, then
    /// the results are not deterministic.
    ///
    /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [45, 74, 32];
    ///
    /// assert_eq!(a.par_iter().max(), Some(&74));
    ///
    /// let b: [i32; 0] = [];
    ///
    /// assert_eq!(b.par_iter().max(), None);
    /// ```
    fn max(self) -> Option<Self::Item>
        where Self::Item: Ord
    {
        self.reduce_with(cmp::max)
    }

    /// Computes the maximum of all the items in the iterator with respect to
    /// the given comparison function. If the iterator is empty, `None` is
    /// returned; otherwise, `Some(max)` is returned.
    ///
    /// Note that the order in which the items will be reduced is not
    /// specified, so if the comparison function is not associative, then
    /// the results are not deterministic.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [-3_i32, 77, 53, 240, -1];
    ///
    /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
    /// ```
    fn max_by<F>(self, f: F) -> Option<Self::Item>
        where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering
    {
        self.reduce_with(|a, b| match f(&a, &b) {
                             Ordering::Greater => a,
                             _ => b,
                         })
    }

    /// Computes the item that yields the maximum value for the given
    /// function. If the iterator is empty, `None` is returned;
    /// otherwise, `Some(item)` is returned.
1047 /// 1048 /// Note that the order in which the items will be reduced is not 1049 /// specified, so if the `Ord` impl is not truly associative, then 1050 /// the results are not deterministic. 1051 /// 1052 /// # Examples 1053 /// 1054 /// ``` 1055 /// use rayon::prelude::*; 1056 /// 1057 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23]; 1058 /// 1059 /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34)); 1060 /// ``` max_by_key<K, F>(self, f: F) -> Option<Self::Item> where K: Ord + Send, F: Sync + Send + Fn(&Self::Item) -> K1061 fn max_by_key<K, F>(self, f: F) -> Option<Self::Item> 1062 where K: Ord + Send, 1063 F: Sync + Send + Fn(&Self::Item) -> K 1064 { 1065 self.map(|x| (f(&x), x)) 1066 .max_by(|a, b| (a.0).cmp(&b.0)) 1067 .map(|(_, x)| x) 1068 } 1069 1070 /// Takes two iterators and creates a new iterator over both. 1071 /// 1072 /// # Examples 1073 /// 1074 /// ``` 1075 /// use rayon::prelude::*; 1076 /// 1077 /// let a = [0, 1, 2]; 1078 /// let b = [9, 8, 7]; 1079 /// 1080 /// let par_iter = a.par_iter().chain(b.par_iter()); 1081 /// 1082 /// let chained: Vec<_> = par_iter.cloned().collect(); 1083 /// 1084 /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]); 1085 /// ``` chain<C>(self, chain: C) -> Chain<Self, C::Iter> where C: IntoParallelIterator<Item = Self::Item>1086 fn chain<C>(self, chain: C) -> Chain<Self, C::Iter> 1087 where C: IntoParallelIterator<Item = Self::Item> 1088 { 1089 chain::new(self, chain.into_par_iter()) 1090 } 1091 1092 /// Searches for **some** item in the parallel iterator that 1093 /// matches the given predicate and returns it. This operation 1094 /// is similar to [`find` on sequential iterators][find] but 1095 /// the item returned may not be the **first** one in the parallel 1096 /// sequence which matches, since we search the entire sequence in parallel. 
    ///
    /// Once a match is found, we will attempt to stop processing
    /// the rest of the items in the iterator as soon as possible
    /// (just as `find` stops iterating once a match is found).
    ///
    /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3, 3];
    ///
    /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
    ///
    /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
    /// ```
    fn find_any<P>(self, predicate: P) -> Option<Self::Item>
        where P: Fn(&Self::Item) -> bool + Sync + Send
    {
        find::find(self, predicate)
    }

    /// Searches for the sequentially **first** item in the parallel iterator
    /// that matches the given predicate and returns it.
    ///
    /// Once a match is found, all attempts to the right of the match
    /// will be stopped, while attempts to the left must continue in case
    /// an earlier match is found.
    ///
    /// Note that not all parallel iterators have a useful order, much like
    /// sequential `HashMap` iteration, so "first" may be nebulous. If you
    /// just want the first match that is discovered anywhere in the iterator,
    /// `find_any` is a better choice.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3, 3];
    ///
    /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
    ///
    /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
    /// ```
    fn find_first<P>(self, predicate: P) -> Option<Self::Item>
        where P: Fn(&Self::Item) -> bool + Sync + Send
    {
        find_first_last::find_first(self, predicate)
    }

    /// Searches for the sequentially **last** item in the parallel iterator
    /// that matches the given predicate and returns it.
    ///
    /// Once a match is found, all attempts to the left of the match
    /// will be stopped, while attempts to the right must continue in case
    /// a later match is found.
    ///
    /// Note that not all parallel iterators have a useful order, much like
    /// sequential `HashMap` iteration, so "last" may be nebulous. When the
    /// order doesn't actually matter to you, `find_any` is a better choice.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3, 3];
    ///
    /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
    ///
    /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
    /// ```
    fn find_last<P>(self, predicate: P) -> Option<Self::Item>
        where P: Fn(&Self::Item) -> bool + Sync + Send
    {
        find_first_last::find_last(self, predicate)
    }

    #[doc(hidden)]
    #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \
                         `find_first`, or `find_last`")]
    fn find<P>(self, predicate: P) -> Option<Self::Item>
        where P: Fn(&Self::Item) -> bool + Sync + Send
    {
        self.find_any(predicate)
    }

    /// Searches for **some** item in the parallel iterator that
    /// matches the given predicate, and if so returns true. Once
    /// a match is found, we'll attempt to stop processing the rest
    /// of the items. Proving that there's no match, returning false,
    /// does require visiting every item.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [0, 12, 3, 4, 0, 23, 0];
    ///
    /// let is_valid = a.par_iter().any(|&x| x > 10);
    ///
    /// assert!(is_valid);
    /// ```
    fn any<P>(self, predicate: P) -> bool
        where P: Fn(Self::Item) -> bool + Sync + Send
    {
        self.map(predicate).find_any(|&p| p).is_some()
    }

    /// Tests that every item in the parallel iterator matches the given
    /// predicate, and if so returns true.
    /// If a counter-example is found,
    /// we'll attempt to stop processing more items, then return false.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [0, 12, 3, 4, 0, 23, 0];
    ///
    /// let is_valid = a.par_iter().all(|&x| x > 10);
    ///
    /// assert!(!is_valid);
    /// ```
    fn all<P>(self, predicate: P) -> bool
        where P: Fn(Self::Item) -> bool + Sync + Send
    {
        self.map(predicate).find_any(|&p| !p).is_none()
    }

    /// Creates an iterator over the `Some` items of this iterator, halting
    /// as soon as any `None` is found.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let counter = AtomicUsize::new(0);
    /// let value = (0_i32..2048)
    ///     .into_par_iter()
    ///     .map(|x| {
    ///              counter.fetch_add(1, Ordering::SeqCst);
    ///              if x < 1024 { Some(x) } else { None }
    ///          })
    ///     .while_some()
    ///     .max();
    ///
    /// assert!(value < Some(1024));
    /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
    /// ```
    fn while_some<T>(self) -> WhileSome<Self>
        where Self: ParallelIterator<Item = Option<T>>,
              T: Send
    {
        while_some::new(self)
    }

    /// Creates a fresh collection containing all the elements produced
    /// by this parallel iterator.
    ///
    /// You may prefer to use `collect_into_vec()`, which allocates more
    /// efficiently with precise knowledge of how many elements the
    /// iterator contains, and even allows you to reuse an existing
    /// vector's backing store rather than allocating a fresh vector.
1267 /// 1268 /// # Examples 1269 /// 1270 /// ``` 1271 /// use rayon::prelude::*; 1272 /// 1273 /// let sync_vec: Vec<_> = (0..100).into_iter().collect(); 1274 /// 1275 /// let async_vec: Vec<_> = (0..100).into_par_iter().collect(); 1276 /// 1277 /// assert_eq!(sync_vec, async_vec); 1278 /// ``` collect<C>(self) -> C where C: FromParallelIterator<Self::Item>1279 fn collect<C>(self) -> C 1280 where C: FromParallelIterator<Self::Item> 1281 { 1282 C::from_par_iter(self) 1283 } 1284 1285 /// Unzips the items of a parallel iterator into a pair of arbitrary 1286 /// `ParallelExtend` containers. 1287 /// 1288 /// You may prefer to use `unzip_into_vecs()`, which allocates more 1289 /// efficiently with precise knowledge of how many elements the 1290 /// iterator contains, and even allows you to reuse existing 1291 /// vectors' backing stores rather than allocating fresh vectors. 1292 /// 1293 /// # Examples 1294 /// 1295 /// ``` 1296 /// use rayon::prelude::*; 1297 /// 1298 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)]; 1299 /// 1300 /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip(); 1301 /// 1302 /// assert_eq!(left, [0, 1, 2, 3]); 1303 /// assert_eq!(right, [1, 2, 3, 4]); 1304 /// ``` unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) where Self: ParallelIterator<Item = (A, B)>, FromA: Default + Send + ParallelExtend<A>, FromB: Default + Send + ParallelExtend<B>, A: Send, B: Send1305 fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) 1306 where Self: ParallelIterator<Item = (A, B)>, 1307 FromA: Default + Send + ParallelExtend<A>, 1308 FromB: Default + Send + ParallelExtend<B>, 1309 A: Send, 1310 B: Send 1311 { 1312 unzip::unzip(self) 1313 } 1314 1315 /// Partitions the items of a parallel iterator into a pair of arbitrary 1316 /// `ParallelExtend` containers. Items for which the `predicate` returns 1317 /// true go into the first container, and the rest go into the second. 
    ///
    /// Note: unlike the standard `Iterator::partition`, this allows distinct
    /// collection types for the left and right items. This is more flexible,
    /// but may require new type annotations when converting sequential code
    /// that used type inference assuming the two were the same.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
    ///
    /// assert_eq!(left, [0, 2, 4, 6]);
    /// assert_eq!(right, [1, 3, 5, 7]);
    /// ```
    fn partition<A, B, P>(self, predicate: P) -> (A, B)
        where A: Default + Send + ParallelExtend<Self::Item>,
              B: Default + Send + ParallelExtend<Self::Item>,
              P: Fn(&Self::Item) -> bool + Sync + Send
    {
        unzip::partition(self, predicate)
    }

    /// Partitions and maps the items of a parallel iterator into a pair of
    /// arbitrary `ParallelExtend` containers. `Either::Left` items go into
    /// the first container, and `Either::Right` items go into the second.
1345 /// 1346 /// # Examples 1347 /// 1348 /// ``` 1349 /// use rayon::prelude::*; 1350 /// use rayon::iter::Either; 1351 /// 1352 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter() 1353 /// .partition_map(|x| { 1354 /// if x % 2 == 0 { 1355 /// Either::Left(x * 4) 1356 /// } else { 1357 /// Either::Right(x * 3) 1358 /// } 1359 /// }); 1360 /// 1361 /// assert_eq!(left, [0, 8, 16, 24]); 1362 /// assert_eq!(right, [3, 9, 15, 21]); 1363 /// ``` partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B) where A: Default + Send + ParallelExtend<L>, B: Default + Send + ParallelExtend<R>, P: Fn(Self::Item) -> Either<L, R> + Sync + Send, L: Send, R: Send1364 fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B) 1365 where A: Default + Send + ParallelExtend<L>, 1366 B: Default + Send + ParallelExtend<R>, 1367 P: Fn(Self::Item) -> Either<L, R> + Sync + Send, 1368 L: Send, 1369 R: Send 1370 { 1371 unzip::partition_map(self, predicate) 1372 } 1373 1374 /// Intersperses clones of an element between items of this iterator. 1375 /// 1376 /// # Examples 1377 /// 1378 /// ``` 1379 /// use rayon::prelude::*; 1380 /// 1381 /// let x = vec![1, 2, 3]; 1382 /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect(); 1383 /// 1384 /// assert_eq!(r, vec![1, -1, 2, -1, 3]); 1385 /// ``` intersperse(self, element: Self::Item) -> Intersperse<Self> where Self::Item: Clone1386 fn intersperse(self, element: Self::Item) -> Intersperse<Self> 1387 where Self::Item: Clone 1388 { 1389 intersperse::new(self, element) 1390 } 1391 1392 /// Internal method used to define the behavior of this parallel 1393 /// iterator. You should not need to call this directly. 1394 /// 1395 /// This method causes the iterator `self` to start producing 1396 /// items and to feed them to the consumer `consumer` one by one. 1397 /// It may split the consumer before doing so to create the 1398 /// opportunity to produce in parallel. 
1399 /// 1400 /// See the [README] for more details on the internals of parallel 1401 /// iterators. 1402 /// 1403 /// [README]: README.md drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>1404 fn drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>; 1405 1406 1407 /// Internal method used to define the behavior of this parallel 1408 /// iterator. You should not need to call this directly. 1409 /// 1410 /// Returns the number of items produced by this iterator, if known 1411 /// statically. This can be used by consumers to trigger special fast 1412 /// paths. Therefore, if `Some(_)` is returned, this iterator must only 1413 /// use the (indexed) `Consumer` methods when driving a consumer, such 1414 /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or 1415 /// other `UnindexedConsumer` methods -- or returning an inaccurate 1416 /// value -- may result in panics. 1417 /// 1418 /// This method is currently used to optimize `collect` for want 1419 /// of true Rust specialization; it may be removed when 1420 /// specialization is stable. opt_len(&self) -> Option<usize>1421 fn opt_len(&self) -> Option<usize> { 1422 None 1423 } 1424 } 1425 1426 impl<T: ParallelIterator> IntoParallelIterator for T { 1427 type Iter = T; 1428 type Item = T::Item; 1429 into_par_iter(self) -> T1430 fn into_par_iter(self) -> T { 1431 self 1432 } 1433 } 1434 1435 /// An iterator that supports "random access" to its data, meaning 1436 /// that you can split it at arbitrary indices and draw data from 1437 /// those points. 1438 /// 1439 /// **Note:** Not implemented for `u64` and `i64` ranges 1440 pub trait IndexedParallelIterator: ParallelIterator { 1441 /// Collects the results of the iterator into the specified 1442 /// vector. The vector is always truncated before execution 1443 /// begins. 
If possible, reusing the vector across calls can lead 1444 /// to better performance since it reuses the same backing buffer. 1445 /// 1446 /// # Examples 1447 /// 1448 /// ``` 1449 /// use rayon::prelude::*; 1450 /// 1451 /// // any prior data will be truncated 1452 /// let mut vec = vec![-1, -2, -3]; 1453 /// 1454 /// (0..5).into_par_iter() 1455 /// .collect_into_vec(&mut vec); 1456 /// 1457 /// assert_eq!(vec, [0, 1, 2, 3, 4]); 1458 /// ``` collect_into_vec(self, target: &mut Vec<Self::Item>)1459 fn collect_into_vec(self, target: &mut Vec<Self::Item>) { 1460 collect::collect_into_vec(self, target); 1461 } 1462 1463 /// Unzips the results of the iterator into the specified 1464 /// vectors. The vectors are always truncated before execution 1465 /// begins. If possible, reusing the vectors across calls can lead 1466 /// to better performance since they reuse the same backing buffer. 1467 /// 1468 /// # Examples 1469 /// 1470 /// ``` 1471 /// use rayon::prelude::*; 1472 /// 1473 /// // any prior data will be truncated 1474 /// let mut left = vec![42; 10]; 1475 /// let mut right = vec![-1; 10]; 1476 /// 1477 /// (10..15).into_par_iter() 1478 /// .enumerate() 1479 /// .unzip_into_vecs(&mut left, &mut right); 1480 /// 1481 /// assert_eq!(left, [0, 1, 2, 3, 4]); 1482 /// assert_eq!(right, [10, 11, 12, 13, 14]); 1483 /// ``` unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>) where Self: IndexedParallelIterator<Item = (A, B)>, A: Send, B: Send1484 fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>) 1485 where Self: IndexedParallelIterator<Item = (A, B)>, 1486 A: Send, 1487 B: Send 1488 { 1489 collect::unzip_into_vecs(self, left, right); 1490 } 1491 1492 /// Iterate over tuples `(A, B)`, where the items `A` are from 1493 /// this iterator and `B` are from the iterator given as argument. 
    /// Like the `zip` method on ordinary iterators, if the two
    /// iterators are of unequal length, you only get the items they
    /// have in common.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let result: Vec<_> = (1..4)
    ///     .into_par_iter()
    ///     .zip(vec!['a', 'b', 'c'])
    ///     .collect();
    ///
    /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
    /// ```
    fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
        where Z: IntoParallelIterator,
              Z::Iter: IndexedParallelIterator
    {
        zip::new(self, zip_op.into_par_iter())
    }

    /// The same as `zip`, but requires that both iterators have the same length.
    ///
    /// # Panics
    /// Will panic if `self` and `zip_op` are not the same length.
    ///
    /// ```should_panic
    /// use rayon::prelude::*;
    ///
    /// let one = [1u8];
    /// let two = [2u8, 2];
    /// let one_iter = one.par_iter();
    /// let two_iter = two.par_iter();
    ///
    /// // this will panic
    /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
    ///
    /// // we should never get here
    /// assert_eq!(1, zipped.len());
    /// ```
    fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
        where Z: IntoParallelIterator,
              Z::Iter: IndexedParallelIterator
    {
        let zip_op_iter = zip_op.into_par_iter();
        assert_eq!(self.len(), zip_op_iter.len());
        zip_eq::new(self, zip_op_iter)
    }

    /// Interleave elements of this iterator and the other given
    /// iterator. Alternately yields elements from this iterator and
    /// the given iterator, until both are exhausted.
If one iterator 1548 /// is exhausted before the other, the last elements are provided 1549 /// from the other. 1550 /// 1551 /// # Examples 1552 /// 1553 /// ``` 1554 /// use rayon::prelude::*; 1555 /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]); 1556 /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect(); 1557 /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]); 1558 /// ``` interleave<I>(self, other: I) -> Interleave<Self, I::Iter> where I: IntoParallelIterator<Item = Self::Item>, I::Iter: IndexedParallelIterator<Item = Self::Item>1559 fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter> 1560 where I: IntoParallelIterator<Item = Self::Item>, 1561 I::Iter: IndexedParallelIterator<Item = Self::Item> 1562 { 1563 interleave::new(self, other.into_par_iter()) 1564 } 1565 1566 /// Interleave elements of this iterator and the other given 1567 /// iterator, until one is exhausted. 1568 /// 1569 /// # Examples 1570 /// 1571 /// ``` 1572 /// use rayon::prelude::*; 1573 /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]); 1574 /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect(); 1575 /// assert_eq!(r, vec![1, 5, 2, 6, 3]); 1576 /// ``` interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter> where I: IntoParallelIterator<Item = Self::Item>, I::Iter: IndexedParallelIterator<Item = Self::Item>1577 fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter> 1578 where I: IntoParallelIterator<Item = Self::Item>, 1579 I::Iter: IndexedParallelIterator<Item = Self::Item> 1580 { 1581 interleave_shortest::new(self, other.into_par_iter()) 1582 } 1583 1584 /// Split an iterator up into fixed-size chunks. 1585 /// 1586 /// Returns an iterator that returns `Vec`s of the given number of elements. 1587 /// If the number of elements in the iterator is not divisible by `chunk_size`, 1588 /// the last chunk may be shorter than `chunk_size`. 
1589 /// 1590 /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on 1591 /// slices, without having to allocate intermediate `Vec`s for the chunks. 1592 /// 1593 /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks 1594 /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut 1595 /// 1596 /// # Examples 1597 /// 1598 /// ``` 1599 /// use rayon::prelude::*; 1600 /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; 1601 /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect(); 1602 /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]); 1603 /// ``` chunks(self, chunk_size: usize) -> Chunks<Self>1604 fn chunks(self, chunk_size: usize) -> Chunks<Self> { 1605 assert!(chunk_size != 0, "chunk_size must not be zero"); 1606 chunks::new(self, chunk_size) 1607 } 1608 1609 /// Lexicographically compares the elements of this `ParallelIterator` with those of 1610 /// another. 1611 /// 1612 /// # Examples 1613 /// 1614 /// ``` 1615 /// use rayon::prelude::*; 1616 /// use std::cmp::Ordering::*; 1617 /// 1618 /// let x = vec![1, 2, 3]; 1619 /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less); 1620 /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal); 1621 /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater); 1622 /// ``` cmp<I>(self, other: I) -> Ordering where I: IntoParallelIterator<Item = Self::Item>, I::Iter: IndexedParallelIterator, Self::Item: Ord1623 fn cmp<I>(self, other: I) -> Ordering 1624 where I: IntoParallelIterator<Item = Self::Item>, 1625 I::Iter: IndexedParallelIterator, 1626 Self::Item: Ord 1627 { 1628 let other = other.into_par_iter(); 1629 let ord_len = self.len().cmp(&other.len()); 1630 self.zip(other) 1631 .map(|(x, y)| Ord::cmp(&x, &y)) 1632 .find_first(|&ord| ord != Ordering::Equal) 1633 .unwrap_or(ord_len) 1634 } 1635 1636 /// Lexicographically compares the elements of this `ParallelIterator` with those of 1637 /// another. 
1638 /// 1639 /// # Examples 1640 /// 1641 /// ``` 1642 /// use rayon::prelude::*; 1643 /// use std::cmp::Ordering::*; 1644 /// use std::f64::NAN; 1645 /// 1646 /// let x = vec![1.0, 2.0, 3.0]; 1647 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less)); 1648 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal)); 1649 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater)); 1650 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None); 1651 /// ``` partial_cmp<I>(self, other: I) -> Option<Ordering> where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>1652 fn partial_cmp<I>(self, other: I) -> Option<Ordering> 1653 where I: IntoParallelIterator, 1654 I::Iter: IndexedParallelIterator, 1655 Self::Item: PartialOrd<I::Item> 1656 { 1657 let other = other.into_par_iter(); 1658 let ord_len = self.len().cmp(&other.len()); 1659 self.zip(other) 1660 .map(|(x, y)| PartialOrd::partial_cmp(&x, &y)) 1661 .find_first(|&ord| ord != Some(Ordering::Equal)) 1662 .unwrap_or(Some(ord_len)) 1663 } 1664 1665 /// Determines if the elements of this `ParallelIterator` 1666 /// are equal to those of another eq<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialEq<I::Item>1667 fn eq<I>(self, other: I) -> bool 1668 where I: IntoParallelIterator, 1669 I::Iter: IndexedParallelIterator, 1670 Self::Item: PartialEq<I::Item> 1671 { 1672 let other = other.into_par_iter(); 1673 self.len() == other.len() && self.zip(other).all(|(x, y)| x.eq(&y)) 1674 } 1675 1676 /// Determines if the elements of this `ParallelIterator` 1677 /// are unequal to those of another ne<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialEq<I::Item>1678 fn ne<I>(self, other: I) -> bool 1679 where I: IntoParallelIterator, 1680 I::Iter: IndexedParallelIterator, 1681 Self::Item: PartialEq<I::Item> 1682 { 
        !self.eq(other)
    }

    /// Determines if the elements of this `ParallelIterator`
    /// are lexicographically less than those of another.
    fn lt<I>(self, other: I) -> bool
        where I: IntoParallelIterator,
              I::Iter: IndexedParallelIterator,
              Self::Item: PartialOrd<I::Item>
    {
        self.partial_cmp(other) == Some(Ordering::Less)
    }

    /// Determines if the elements of this `ParallelIterator`
    /// are less or equal to those of another.
    fn le<I>(self, other: I) -> bool
        where I: IntoParallelIterator,
              I::Iter: IndexedParallelIterator,
              Self::Item: PartialOrd<I::Item>
    {
        let ord = self.partial_cmp(other);
        ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
    }

    /// Determines if the elements of this `ParallelIterator`
    /// are lexicographically greater than those of another.
    fn gt<I>(self, other: I) -> bool
        where I: IntoParallelIterator,
              I::Iter: IndexedParallelIterator,
              Self::Item: PartialOrd<I::Item>
    {
        self.partial_cmp(other) == Some(Ordering::Greater)
    }

    /// Determines if the elements of this `ParallelIterator`
    /// are greater or equal to those of another.
ge<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>1719 fn ge<I>(self, other: I) -> bool 1720 where I: IntoParallelIterator, 1721 I::Iter: IndexedParallelIterator, 1722 Self::Item: PartialOrd<I::Item> 1723 { 1724 let ord = self.partial_cmp(other); 1725 ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater) 1726 } 1727 1728 /// Yields an index along with each item. 1729 /// 1730 /// # Examples 1731 /// 1732 /// ``` 1733 /// use rayon::prelude::*; 1734 /// 1735 /// let chars = vec!['a', 'b', 'c']; 1736 /// let result: Vec<_> = chars 1737 /// .into_par_iter() 1738 /// .enumerate() 1739 /// .collect(); 1740 /// 1741 /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]); 1742 /// ``` enumerate(self) -> Enumerate<Self>1743 fn enumerate(self) -> Enumerate<Self> { 1744 enumerate::new(self) 1745 } 1746 1747 /// Creates an iterator that skips the first `n` elements. 1748 /// 1749 /// # Examples 1750 /// 1751 /// ``` 1752 /// use rayon::prelude::*; 1753 /// 1754 /// let result: Vec<_> = (0..100) 1755 /// .into_par_iter() 1756 /// .skip(95) 1757 /// .collect(); 1758 /// 1759 /// assert_eq!(result, [95, 96, 97, 98, 99]); 1760 /// ``` skip(self, n: usize) -> Skip<Self>1761 fn skip(self, n: usize) -> Skip<Self> { 1762 skip::new(self, n) 1763 } 1764 1765 /// Creates an iterator that yields the first `n` elements. 1766 /// 1767 /// # Examples 1768 /// 1769 /// ``` 1770 /// use rayon::prelude::*; 1771 /// 1772 /// let result: Vec<_> = (0..100) 1773 /// .into_par_iter() 1774 /// .take(5) 1775 /// .collect(); 1776 /// 1777 /// assert_eq!(result, [0, 1, 2, 3, 4]); 1778 /// ``` take(self, n: usize) -> Take<Self>1779 fn take(self, n: usize) -> Take<Self> { 1780 take::new(self, n) 1781 } 1782 1783 /// Searches for **some** item in the parallel iterator that 1784 /// matches the given predicate, and returns its index. 
Like 1785 /// `ParallelIterator::find_any`, the parallel search will not 1786 /// necessarily find the **first** match, and once a match is 1787 /// found we'll attempt to stop processing any more. 1788 /// 1789 /// # Examples 1790 /// 1791 /// ``` 1792 /// use rayon::prelude::*; 1793 /// 1794 /// let a = [1, 2, 3, 3]; 1795 /// 1796 /// let i = a.par_iter().position_any(|&x| x == 3).expect("found"); 1797 /// assert!(i == 2 || i == 3); 1798 /// 1799 /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None); 1800 /// ``` position_any<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send1801 fn position_any<P>(self, predicate: P) -> Option<usize> 1802 where P: Fn(Self::Item) -> bool + Sync + Send 1803 { 1804 self.map(predicate) 1805 .enumerate() 1806 .find_any(|&(_, p)| p) 1807 .map(|(i, _)| i) 1808 } 1809 1810 /// Searches for the sequentially **first** item in the parallel iterator 1811 /// that matches the given predicate, and returns its index. 1812 /// 1813 /// Like `ParallelIterator::find_first`, once a match is found, 1814 /// all attempts to the right of the match will be stopped, while 1815 /// attempts to the left must continue in case an earlier match 1816 /// is found. 1817 /// 1818 /// Note that not all parallel iterators have a useful order, much like 1819 /// sequential `HashMap` iteration, so "first" may be nebulous. If you 1820 /// just want the first match that discovered anywhere in the iterator, 1821 /// `position_any` is a better choice. 
1822 /// 1823 /// # Examples 1824 /// 1825 /// ``` 1826 /// use rayon::prelude::*; 1827 /// 1828 /// let a = [1, 2, 3, 3]; 1829 /// 1830 /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2)); 1831 /// 1832 /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None); 1833 /// ``` position_first<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send1834 fn position_first<P>(self, predicate: P) -> Option<usize> 1835 where P: Fn(Self::Item) -> bool + Sync + Send 1836 { 1837 self.map(predicate) 1838 .enumerate() 1839 .find_first(|&(_, p)| p) 1840 .map(|(i, _)| i) 1841 } 1842 1843 /// Searches for the sequentially **last** item in the parallel iterator 1844 /// that matches the given predicate, and returns its index. 1845 /// 1846 /// Like `ParallelIterator::find_last`, once a match is found, 1847 /// all attempts to the left of the match will be stopped, while 1848 /// attempts to the right must continue in case a later match 1849 /// is found. 1850 /// 1851 /// Note that not all parallel iterators have a useful order, much like 1852 /// sequential `HashMap` iteration, so "last" may be nebulous. When the 1853 /// order doesn't actually matter to you, `position_any` is a better 1854 /// choice. 
1855 /// 1856 /// # Examples 1857 /// 1858 /// ``` 1859 /// use rayon::prelude::*; 1860 /// 1861 /// let a = [1, 2, 3, 3]; 1862 /// 1863 /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3)); 1864 /// 1865 /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None); 1866 /// ``` position_last<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send1867 fn position_last<P>(self, predicate: P) -> Option<usize> 1868 where P: Fn(Self::Item) -> bool + Sync + Send 1869 { 1870 self.map(predicate) 1871 .enumerate() 1872 .find_last(|&(_, p)| p) 1873 .map(|(i, _)| i) 1874 } 1875 1876 #[doc(hidden)] 1877 #[deprecated(note = "parallel `position` does not search in order -- use `position_any`, \\ 1878 `position_first`, or `position_last`")] position<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send1879 fn position<P>(self, predicate: P) -> Option<usize> 1880 where P: Fn(Self::Item) -> bool + Sync + Send 1881 { 1882 self.position_any(predicate) 1883 } 1884 1885 /// Produces a new iterator with the elements of this iterator in 1886 /// reverse order. 1887 /// 1888 /// # Examples 1889 /// 1890 /// ``` 1891 /// use rayon::prelude::*; 1892 /// 1893 /// let result: Vec<_> = (0..5) 1894 /// .into_par_iter() 1895 /// .rev() 1896 /// .collect(); 1897 /// 1898 /// assert_eq!(result, [4, 3, 2, 1, 0]); 1899 /// ``` rev(self) -> Rev<Self>1900 fn rev(self) -> Rev<Self> { 1901 rev::new(self) 1902 } 1903 1904 /// Sets the minimum length of iterators desired to process in each 1905 /// thread. Rayon will not split any smaller than this length, but 1906 /// of course an iterator could already be smaller to begin with. 1907 /// 1908 /// Producers like `zip` and `interleave` will use greater of the two 1909 /// minimums. 1910 /// Chained iterators and iterators inside `flat_map` may each use 1911 /// their own minimum length. 
1912 /// 1913 /// # Examples 1914 /// 1915 /// ``` 1916 /// use rayon::prelude::*; 1917 /// 1918 /// let min = (0..1_000_000) 1919 /// .into_par_iter() 1920 /// .with_min_len(1234) 1921 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment 1922 /// .min().unwrap(); 1923 /// 1924 /// assert!(min >= 1234); 1925 /// ``` with_min_len(self, min: usize) -> MinLen<Self>1926 fn with_min_len(self, min: usize) -> MinLen<Self> { 1927 len::new_min_len(self, min) 1928 } 1929 1930 /// Sets the maximum length of iterators desired to process in each 1931 /// thread. Rayon will try to split at least below this length, 1932 /// unless that would put it below the length from `with_min_len()`. 1933 /// For example, given min=10 and max=15, a length of 16 will not be 1934 /// split any further. 1935 /// 1936 /// Producers like `zip` and `interleave` will use lesser of the two 1937 /// maximums. 1938 /// Chained iterators and iterators inside `flat_map` may each use 1939 /// their own maximum length. 1940 /// 1941 /// # Examples 1942 /// 1943 /// ``` 1944 /// use rayon::prelude::*; 1945 /// 1946 /// let max = (0..1_000_000) 1947 /// .into_par_iter() 1948 /// .with_max_len(1234) 1949 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment 1950 /// .max().unwrap(); 1951 /// 1952 /// assert!(max <= 1234); 1953 /// ``` with_max_len(self, max: usize) -> MaxLen<Self>1954 fn with_max_len(self, max: usize) -> MaxLen<Self> { 1955 len::new_max_len(self, max) 1956 } 1957 1958 /// Produces an exact count of how many items this iterator will 1959 /// produce, presuming no panic occurs. 
1960 /// 1961 /// # Examples 1962 /// 1963 /// ``` 1964 /// use rayon::prelude::*; 1965 /// 1966 /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]); 1967 /// assert_eq!(par_iter.len(), 10); 1968 /// 1969 /// let vec: Vec<_> = par_iter.collect(); 1970 /// assert_eq!(vec.len(), 10); 1971 /// ``` len(&self) -> usize1972 fn len(&self) -> usize; 1973 1974 /// Internal method used to define the behavior of this parallel 1975 /// iterator. You should not need to call this directly. 1976 /// 1977 /// This method causes the iterator `self` to start producing 1978 /// items and to feed them to the consumer `consumer` one by one. 1979 /// It may split the consumer before doing so to create the 1980 /// opportunity to produce in parallel. If a split does happen, it 1981 /// will inform the consumer of the index where the split should 1982 /// occur (unlike `ParallelIterator::drive_unindexed()`). 1983 /// 1984 /// See the [README] for more details on the internals of parallel 1985 /// iterators. 1986 /// 1987 /// [README]: README.md drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result1988 fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result; 1989 1990 /// Internal method used to define the behavior of this parallel 1991 /// iterator. You should not need to call this directly. 1992 /// 1993 /// This method converts the iterator into a producer P and then 1994 /// invokes `callback.callback()` with P. Note that the type of 1995 /// this producer is not defined as part of the API, since 1996 /// `callback` must be defined generically for all producers. This 1997 /// allows the producer type to contain references; it also means 1998 /// that parallel iterators can adjust that type without causing a 1999 /// breaking change. 2000 /// 2001 /// See the [README] for more details on the internals of parallel 2002 /// iterators. 
2003 /// 2004 /// [README]: README.md with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output2005 fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output; 2006 } 2007 2008 /// `FromParallelIterator` implements the creation of a collection 2009 /// from a [`ParallelIterator`]. By implementing 2010 /// `FromParallelIterator` for a given type, you define how it will be 2011 /// created from an iterator. 2012 /// 2013 /// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method. 2014 /// 2015 /// [`ParallelIterator`]: trait.ParallelIterator.html 2016 /// [`collect()`]: trait.ParallelIterator.html#method.collect 2017 /// 2018 /// # Examples 2019 /// 2020 /// Implementing `FromParallelIterator` for your type: 2021 /// 2022 /// ``` 2023 /// use rayon::prelude::*; 2024 /// use std::mem; 2025 /// 2026 /// struct BlackHole { 2027 /// mass: usize, 2028 /// } 2029 /// 2030 /// impl<T: Send> FromParallelIterator<T> for BlackHole { 2031 /// fn from_par_iter<I>(par_iter: I) -> Self 2032 /// where I: IntoParallelIterator<Item = T> 2033 /// { 2034 /// let par_iter = par_iter.into_par_iter(); 2035 /// BlackHole { 2036 /// mass: par_iter.count() * mem::size_of::<T>(), 2037 /// } 2038 /// } 2039 /// } 2040 /// 2041 /// let bh: BlackHole = (0i32..1000).into_par_iter().collect(); 2042 /// assert_eq!(bh.mass, 4000); 2043 /// ``` 2044 pub trait FromParallelIterator<T> 2045 where T: Send 2046 { 2047 /// Creates an instance of the collection from the parallel iterator `par_iter`. 2048 /// 2049 /// If your collection is not naturally parallel, the easiest (and 2050 /// fastest) way to do this is often to collect `par_iter` into a 2051 /// [`LinkedList`] or other intermediate data structure and then 2052 /// sequentially extend your collection. However, a more 'native' 2053 /// technique is to use the [`par_iter.fold`] or 2054 /// [`par_iter.fold_with`] methods to create the collection. 
2055 /// Alternatively, if your collection is 'natively' parallel, you 2056 /// can use `par_iter.for_each` to process each element in turn. 2057 /// 2058 /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html 2059 /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold 2060 /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with 2061 /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each from_par_iter<I>(par_iter: I) -> Self where I: IntoParallelIterator<Item = T>2062 fn from_par_iter<I>(par_iter: I) -> Self where I: IntoParallelIterator<Item = T>; 2063 } 2064 2065 /// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`]. 2066 /// 2067 /// [`ParallelIterator`]: trait.ParallelIterator.html 2068 /// 2069 /// # Examples 2070 /// 2071 /// Implementing `ParallelExtend` for your type: 2072 /// 2073 /// ``` 2074 /// use rayon::prelude::*; 2075 /// use std::mem; 2076 /// 2077 /// struct BlackHole { 2078 /// mass: usize, 2079 /// } 2080 /// 2081 /// impl<T: Send> ParallelExtend<T> for BlackHole { 2082 /// fn par_extend<I>(&mut self, par_iter: I) 2083 /// where I: IntoParallelIterator<Item = T> 2084 /// { 2085 /// let par_iter = par_iter.into_par_iter(); 2086 /// self.mass += par_iter.count() * mem::size_of::<T>(); 2087 /// } 2088 /// } 2089 /// 2090 /// let mut bh = BlackHole { mass: 0 }; 2091 /// bh.par_extend(0i32..1000); 2092 /// assert_eq!(bh.mass, 4000); 2093 /// bh.par_extend(0i64..10); 2094 /// assert_eq!(bh.mass, 4080); 2095 /// ``` 2096 pub trait ParallelExtend<T> 2097 where T: Send 2098 { 2099 /// Extends an instance of the collection with the elements drawn 2100 /// from the parallel iterator `par_iter`. 
2101 /// 2102 /// # Examples 2103 /// 2104 /// ``` 2105 /// use rayon::prelude::*; 2106 /// 2107 /// let mut vec = vec![]; 2108 /// vec.par_extend(0..5); 2109 /// vec.par_extend((0..5).into_par_iter().map(|i| i * i)); 2110 /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]); 2111 /// ``` par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = T>2112 fn par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = T>; 2113 } 2114