//! Traits for writing parallel programs using an iterator-style interface
//!
//! You will rarely need to interact with this module directly unless you have
//! need to name one of the iterator types.
//!
//! Parallel iterators make it easy to write iterator-like chains that
//! execute in parallel: typically all you have to do is convert the
//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
//! example, to compute the sum of the squares of a sequence of
//! integers, one might write:
//!
//! ```rust
//! use rayon::prelude::*;
//! fn sum_of_squares(input: &[i32]) -> i32 {
//!     input.par_iter()
//!          .map(|i| i * i)
//!          .sum()
//! }
//! ```
//!
//! Or, to increment all the integers in a slice, you could write:
//!
//! ```rust
//! use rayon::prelude::*;
//! fn increment_all(input: &mut [i32]) {
//!     input.par_iter_mut()
//!          .for_each(|p| *p += 1);
//! }
//! ```
//!
//! To use parallel iterators, first import the traits by adding
//! something like `use rayon::prelude::*` to your module. You can
//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
//! parallel iterator. Like a [regular iterator][], parallel
//! iterators work by first constructing a computation and then
//! executing it.
//!
//! In addition to `par_iter()` and friends, some types offer other
//! ways to create (or consume) parallel iterators:
//!
//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
//!   `par_windows`, as well as various parallel sorting
//!   operations. See [the `ParallelSlice` trait] for the full list.
//! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
//!   See [the `ParallelString` trait] for the full list.
//! - Various collections offer [`par_extend`], which grows a
//!   collection given a parallel iterator. (If you don't have a
//!   collection to extend, you can use [`collect()`] to create a new
//!   one from scratch.)
//!
//! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
//! [the `ParallelString` trait]: ../str/trait.ParallelString.html
//! [`par_extend`]: trait.ParallelExtend.html
//! [`collect()`]: trait.ParallelIterator.html#method.collect
//!
//! To see the full range of methods available on parallel iterators,
//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
//! traits.
//!
//! If you'd like to build a custom parallel iterator, or to write your own
//! combinator, then check out the [split] function and the [plumbing] module.
//!
//! [regular iterator]: http://doc.rust-lang.org/std/iter/trait.Iterator.html
//! [`ParallelIterator`]: trait.ParallelIterator.html
//! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
//! [split]: fn.split.html
//! [plumbing]: plumbing/index.html
//!
//! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
//! has been deliberately obscured from the public API. This trait is intended
//! to mirror the unstable `std::ops::Try` with implementations for `Option` and
//! `Result`, where `Some`/`Ok` values will let those iterators continue, but
//! `None`/`Err` values will exit early.
//!
//! A note about object safety: It is currently _not_ possible to wrap
//! a `ParallelIterator` (or any trait that depends on it) using a
//! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
//! because `ParallelIterator` is **not object-safe**.
//! (This keeps the implementation simpler and allows extra optimizations.)

use self::plumbing::*;
use self::private::Try;
pub use either::Either;
use std::cmp::{self, Ordering};
use std::iter::{Product, Sum};
use std::ops::Fn;

pub mod plumbing;

#[cfg(test)]
mod test;

// There is a method to the madness here:
//
// - These modules are private but expose certain types to the end-user
//   (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
//   public API surface of the `ParallelIterator` traits.
// - In **this** module, those public types are always used unprefixed, which forces
//   us to add a `pub use` and helps identify if we missed anything.
// - In contrast, items that appear **only** in the body of a method,
//   e.g. `find::find()`, are always used **prefixed**, so that they
//   can be readily distinguished.

mod chain;
mod chunks;
mod cloned;
mod collect;
mod copied;
mod empty;
mod enumerate;
mod extend;
mod filter;
mod filter_map;
mod find;
mod find_first_last;
mod flat_map;
mod flatten;
mod fold;
mod for_each;
mod from_par_iter;
mod inspect;
mod interleave;
mod interleave_shortest;
mod intersperse;
mod len;
mod map;
mod map_with;
mod multizip;
mod noop;
mod once;
mod panic_fuse;
mod par_bridge;
mod product;
mod reduce;
mod repeat;
mod rev;
mod skip;
mod splitter;
mod sum;
mod take;
mod try_fold;
mod try_reduce;
mod try_reduce_with;
mod unzip;
mod update;
mod while_some;
mod zip;
mod zip_eq;

pub use self::{
    chain::Chain,
    chunks::Chunks,
    cloned::Cloned,
    copied::Copied,
    empty::{empty, Empty},
    enumerate::Enumerate,
    filter::Filter,
    filter_map::FilterMap,
    flat_map::FlatMap,
    flatten::Flatten,
    fold::{Fold, FoldWith},
    inspect::Inspect,
    interleave::Interleave,
    interleave_shortest::InterleaveShortest,
    intersperse::Intersperse,
    len::{MaxLen, MinLen},
    map::Map,
    map_with::{MapInit, MapWith},
    multizip::MultiZip,
    once::{once, Once},
    panic_fuse::PanicFuse,
    par_bridge::{IterBridge, ParallelBridge},
    repeat::{repeat, repeatn, Repeat, RepeatN},
    rev::Rev,
    skip::Skip,
    splitter::{split, Split},
    take::Take,
    try_fold::{TryFold, TryFoldWith},
    update::Update,
    while_some::WhileSome,
    zip::Zip,
    zip_eq::ZipEq,
};

mod step_by;
#[cfg(step_by)]
pub use self::step_by::StepBy;

/// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
///
/// By implementing `IntoParallelIterator` for a type, you define how it will be
/// transformed into an iterator. This is a parallel version of the standard
/// library's [`std::iter::IntoIterator`] trait.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
/// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
pub trait IntoParallelIterator {
    /// The parallel iterator type that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    type Item: Send;

    /// Converts `self` into a parallel iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// println!("counting in parallel:");
    /// (0..100).into_par_iter()
    ///     .for_each(|i| println!("{}", i));
    /// ```
    ///
    /// This conversion is often implicit for arguments to methods like [`zip`].
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
    /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
    /// ```
    ///
    /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
    fn into_par_iter(self) -> Self::Iter;
}

/// `IntoParallelRefIterator` implements the conversion to a
/// [`ParallelIterator`], providing shared references to the data.
///
/// This is a parallel version of the `iter()` method
/// defined by various collections.
///
/// This trait is automatically implemented
/// `for I where &I: IntoParallelIterator`. In most cases, users
/// will want to implement [`IntoParallelIterator`] rather than implement
/// this trait directly.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
pub trait IntoParallelRefIterator<'data> {
    /// The type of the parallel iterator that will be returned.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that the parallel iterator will produce.
    /// This will typically be an `&'data T` reference type.
    type Item: Send + 'data;

    /// Converts `self` into a parallel iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let v: Vec<_> = (0..100).collect();
    /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
    ///
    /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
    /// // producing the exact same references.
    /// assert!(v.par_iter().zip(&v)
    ///            .all(|(a, b)| std::ptr::eq(a, b)));
    /// ```
    fn par_iter(&'data self) -> Self::Iter;
}

impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
where
    &'data I: IntoParallelIterator,
{
    type Iter = <&'data I as IntoParallelIterator>::Iter;
    type Item = <&'data I as IntoParallelIterator>::Item;

    fn par_iter(&'data self) -> Self::Iter {
        // Delegates to the `IntoParallelIterator` impl for `&I`.
        self.into_par_iter()
    }
}

/// `IntoParallelRefMutIterator` implements the conversion to a
/// [`ParallelIterator`], providing mutable references to the data.
///
/// This is a parallel version of the `iter_mut()` method
/// defined by various collections.
///
/// This trait is automatically implemented
/// `for I where &mut I: IntoParallelIterator`. In most cases, users
/// will want to implement [`IntoParallelIterator`] rather than implement
/// this trait directly.
///
/// [`ParallelIterator`]: trait.ParallelIterator.html
/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
pub trait IntoParallelRefMutIterator<'data> {
    /// The type of iterator that will be created.
    type Iter: ParallelIterator<Item = Self::Item>;

    /// The type of item that will be produced; this is typically an
    /// `&'data mut T` reference.
    type Item: Send + 'data;

    /// Creates the parallel iterator from `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = vec![0usize; 5];
    /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
    /// assert_eq!(v, [0, 1, 2, 3, 4]);
    /// ```
    fn par_iter_mut(&'data mut self) -> Self::Iter;
}

impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
where
    &'data mut I: IntoParallelIterator,
{
    type Iter = <&'data mut I as IntoParallelIterator>::Iter;
    type Item = <&'data mut I as IntoParallelIterator>::Item;

    fn par_iter_mut(&'data mut self) -> Self::Iter {
        // Delegates to the `IntoParallelIterator` impl for `&mut I`.
        self.into_par_iter()
    }
}

/// Parallel version of the standard iterator trait.
///
/// The combinators on this trait are available on **all** parallel
/// iterators. Additional methods can be found on the
/// [`IndexedParallelIterator`] trait: those methods are only
/// available for parallel iterators where the number of items is
/// known in advance (so, e.g., after invoking `filter`, those methods
/// become unavailable).
///
/// For examples of using parallel iterators, see [the docs on the
/// `iter` module][iter].
///
/// [iter]: index.html
/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
pub trait ParallelIterator: Sized + Send {
    /// The type of item that this parallel iterator produces.
    /// For example, if you use the [`for_each`] method, this is the type of
    /// item that your closure will be invoked with.
    ///
    /// [`for_each`]: #method.for_each
    type Item: Send;

    /// Executes `OP` on each item produced by the iterator, in parallel.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
    /// ```
    fn for_each<OP>(self, op: OP)
    where
        OP: Fn(Self::Item) + Sync + Send,
    {
        for_each::for_each(self, &op)
    }

    /// Executes `OP` on the given `init` value with each item produced by
    /// the iterator, in parallel.
    ///
    /// The `init` value will be cloned only as needed to be paired with
    /// the group of items in each rayon job. It does not require the type
    /// to be `Sync`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::mpsc::channel;
    /// use rayon::prelude::*;
    ///
    /// let (sender, receiver) = channel();
    ///
    /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
    ///
    /// let mut res: Vec<_> = receiver.iter().collect();
    ///
    /// res.sort();
    ///
    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
    /// ```
    fn for_each_with<OP, T>(self, init: T, op: OP)
    where
        OP: Fn(&mut T, Self::Item) + Sync + Send,
        T: Send + Clone,
    {
        self.map_with(init, op).collect()
    }

    /// Executes `OP` on a value returned by `init` with each item produced by
    /// the iterator, in parallel.
    ///
    /// The `init` function will be called only as needed for a value to be
    /// paired with the group of items in each rayon job. There is no
    /// constraint on that returned type at all!
    ///
    /// # Examples
    ///
    /// ```
    /// use rand::Rng;
    /// use rayon::prelude::*;
    ///
    /// let mut v = vec![0u8; 1_000_000];
    ///
    /// v.par_chunks_mut(1000)
    ///     .for_each_init(
    ///         || rand::thread_rng(),
    ///         |rng, chunk| rng.fill(chunk),
    ///     );
    ///
    /// // There's a remote chance that this will fail...
    /// for i in 0u8..=255 {
    ///     assert!(v.contains(&i));
    /// }
    /// ```
    fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
    where
        OP: Fn(&mut T, Self::Item) + Sync + Send,
        INIT: Fn() -> T + Sync + Send,
    {
        self.map_init(init, op).collect()
    }

    /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
    ///
    /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
    /// stop processing the rest of the items in the iterator as soon as
    /// possible, and we will return that terminating value. Otherwise, we will
    /// return an empty `Result::Ok(())` or `Option::Some(())`. If there are
    /// multiple errors in parallel, it is not specified which will be returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::io::{self, Write};
    ///
    /// // This will stop iteration early if there's any write error, like
    /// // having piped output get closed on the other end.
    /// (0..100).into_par_iter()
    ///     .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
    ///     .expect("expected no write errors");
    /// ```
    fn try_for_each<OP, R>(self, op: OP) -> R
    where
        OP: Fn(Self::Item) -> R + Sync + Send,
        R: Try<Ok = ()> + Send,
    {
        // Combines unit values into the `Try` type's success case.
        fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R {
            R::from_ok(())
        }

        self.map(op).try_reduce(<()>::default, ok)
    }

    /// Executes a fallible `OP` on the given `init` value with each item
    /// produced by the iterator, in parallel.
    ///
    /// This combines the `init` semantics of [`for_each_with()`] and the
    /// failure semantics of [`try_for_each()`].
    ///
    /// [`for_each_with()`]: #method.for_each_with
    /// [`try_for_each()`]: #method.try_for_each
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::mpsc::channel;
    /// use rayon::prelude::*;
    ///
    /// let (sender, receiver) = channel();
    ///
    /// (0..5).into_par_iter()
    ///     .try_for_each_with(sender, |s, x| s.send(x))
    ///     .expect("expected no send errors");
    ///
    /// let mut res: Vec<_> = receiver.iter().collect();
    ///
    /// res.sort();
    ///
    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
    /// ```
    fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
    where
        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
        T: Send + Clone,
        R: Try<Ok = ()> + Send,
    {
        // Combines unit values into the `Try` type's success case.
        fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R {
            R::from_ok(())
        }

        self.map_with(init, op).try_reduce(<()>::default, ok)
    }

    /// Executes a fallible `OP` on a value returned by `init` with each item
    /// produced by the iterator, in parallel.
    ///
    /// This combines the `init` semantics of [`for_each_init()`] and the
    /// failure semantics of [`try_for_each()`].
    ///
    /// [`for_each_init()`]: #method.for_each_init
    /// [`try_for_each()`]: #method.try_for_each
    ///
    /// # Examples
    ///
    /// ```
    /// use rand::Rng;
    /// use rayon::prelude::*;
    ///
    /// let mut v = vec![0u8; 1_000_000];
    ///
    /// v.par_chunks_mut(1000)
    ///     .try_for_each_init(
    ///         || rand::thread_rng(),
    ///         |rng, chunk| rng.try_fill(chunk),
    ///     )
    ///     .expect("expected no rand errors");
    ///
    /// // There's a remote chance that this will fail...
    /// for i in 0u8..=255 {
    ///     assert!(v.contains(&i));
    /// }
    /// ```
    fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
    where
        OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
        INIT: Fn() -> T + Sync + Send,
        R: Try<Ok = ()> + Send,
    {
        // Combines unit values into the `Try` type's success case.
        fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R {
            R::from_ok(())
        }

        self.map_init(init, op).try_reduce(<()>::default, ok)
    }

    /// Counts the number of items in this parallel iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let count = (0..100).into_par_iter().count();
    ///
    /// assert_eq!(count, 100);
    /// ```
    fn count(self) -> usize {
        // Maps every item to 1 and sums; the sum is parallelized.
        fn one<T>(_: T) -> usize {
            1
        }

        self.map(one).sum()
    }

    /// Applies `map_op` to each item of this iterator, producing a new
    /// iterator with the results.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
    ///
    /// let doubles: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
    /// ```
    fn map<F, R>(self, map_op: F) -> Map<Self, F>
    where
        F: Fn(Self::Item) -> R + Sync + Send,
        R: Send,
    {
        Map::new(self, map_op)
    }

    /// Applies `map_op` to the given `init` value with each item of this
    /// iterator, producing a new iterator with the results.
    ///
    /// The `init` value will be cloned only as needed to be paired with
    /// the group of items in each rayon job. It does not require the type
    /// to be `Sync`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::mpsc::channel;
    /// use rayon::prelude::*;
    ///
    /// let (sender, receiver) = channel();
    ///
    /// let a: Vec<_> = (0..5)
    ///     .into_par_iter()            // iterating over i32
    ///     .map_with(sender, |s, x| {
    ///         s.send(x).unwrap();     // sending i32 values through the channel
    ///         x                       // returning i32
    ///     })
    ///     .collect();                 // collecting the returned values into a vector
    ///
    /// let mut b: Vec<_> = receiver.iter() // iterating over the values in the channel
    ///     .collect();                     // and collecting them
    /// b.sort();
    ///
    /// assert_eq!(a, b);
    /// ```
    fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
    where
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
        T: Send + Clone,
        R: Send,
    {
        MapWith::new(self, init, map_op)
    }

    /// Applies `map_op` to a value returned by `init` with each item of this
    /// iterator, producing a new iterator with the results.
    ///
    /// The `init` function will be called only as needed for a value to be
    /// paired with the group of items in each rayon job. There is no
    /// constraint on that returned type at all!
    ///
    /// # Examples
    ///
    /// ```
    /// use rand::Rng;
    /// use rayon::prelude::*;
    ///
    /// let a: Vec<_> = (1i32..1_000_000)
    ///     .into_par_iter()
    ///     .map_init(
    ///         || rand::thread_rng(),  // get the thread-local RNG
    ///         |rng, x| if rng.gen() { // randomly negate items
    ///             -x
    ///         } else {
    ///             x
    ///         },
    ///     ).collect();
    ///
    /// // There's a remote chance that this will fail...
    /// assert!(a.iter().any(|&x| x < 0));
    /// assert!(a.iter().any(|&x| x > 0));
    /// ```
    fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
    where
        F: Fn(&mut T, Self::Item) -> R + Sync + Send,
        INIT: Fn() -> T + Sync + Send,
        R: Send,
    {
        MapInit::new(self, init, map_op)
    }

    /// Creates an iterator which clones all of its elements. This may be
    /// useful when you have an iterator over `&T`, but you need `T`, and
    /// that type implements `Clone`. See also [`copied()`].
    ///
    /// [`copied()`]: #method.copied
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3];
    ///
    /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
    ///
    /// // cloned is the same as .map(|&x| x), for integers
    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
    ///
    /// assert_eq!(v_cloned, vec![1, 2, 3]);
    /// assert_eq!(v_map, vec![1, 2, 3]);
    /// ```
    fn cloned<'a, T>(self) -> Cloned<Self>
    where
        T: 'a + Clone + Send,
        Self: ParallelIterator<Item = &'a T>,
    {
        Cloned::new(self)
    }

    /// Creates an iterator which copies all of its elements. This may be
    /// useful when you have an iterator over `&T`, but you need `T`, and
    /// that type implements `Copy`. See also [`cloned()`].
    ///
    /// [`cloned()`]: #method.cloned
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3];
    ///
    /// let v_copied: Vec<_> = a.par_iter().copied().collect();
    ///
    /// // copied is the same as .map(|&x| x), for integers
    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
    ///
    /// assert_eq!(v_copied, vec![1, 2, 3]);
    /// assert_eq!(v_map, vec![1, 2, 3]);
    /// ```
    fn copied<'a, T>(self) -> Copied<Self>
    where
        T: 'a + Copy + Send,
        Self: ParallelIterator<Item = &'a T>,
    {
        Copied::new(self)
    }

    /// Applies `inspect_op` to a reference to each item of this iterator,
    /// producing a new iterator passing through the original items. This is
    /// often useful for debugging to see what's happening in iterator stages.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 4, 2, 3];
    ///
    /// // this iterator sequence is complex.
    /// let sum = a.par_iter()
    ///     .cloned()
    ///     .filter(|&x| x % 2 == 0)
    ///     .reduce(|| 0, |sum, i| sum + i);
    ///
    /// println!("{}", sum);
    ///
    /// // let's add some inspect() calls to investigate what's happening
    /// let sum = a.par_iter()
    ///     .cloned()
    ///     .inspect(|x| println!("about to filter: {}", x))
    ///     .filter(|&x| x % 2 == 0)
    ///     .inspect(|x| println!("made it through filter: {}", x))
    ///     .reduce(|| 0, |sum, i| sum + i);
    ///
    /// println!("{}", sum);
    /// ```
    fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
    where
        OP: Fn(&Self::Item) + Sync + Send,
    {
        Inspect::new(self, inspect_op)
    }

    /// Mutates each item of this iterator before yielding it.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
    ///
    /// let doubles: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
    /// ```
    fn update<F>(self, update_op: F) -> Update<Self, F>
    where
        F: Fn(&mut Self::Item) + Sync + Send,
    {
        Update::new(self, update_op)
    }

    /// Applies `filter_op` to each item of this iterator, producing a new
    /// iterator with only the items that gave `true` results.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
    ///
    /// let even_numbers: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
    /// ```
    fn filter<P>(self, filter_op: P) -> Filter<Self, P>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        Filter::new(self, filter_op)
    }

    /// Applies `filter_op` to each item of this iterator to get an `Option`,
    /// producing a new iterator with only the items from `Some` results.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut par_iter = (0..10).into_par_iter()
    ///     .filter_map(|x| {
    ///         if x % 2 == 0 { Some(x * 3) }
    ///         else { None }
    ///     });
    ///
    /// let even_numbers: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
    /// ```
    fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
    where
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
        R: Send,
    {
        FilterMap::new(self, filter_op)
    }

    /// Applies `map_op` to each item of this iterator to get nested iterators,
    /// producing a new iterator that flattens these back into one.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
    ///
    /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
    ///
    /// let vec: Vec<_> = par_iter.collect();
    ///
    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
    /// ```
    fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
    where
        F: Fn(Self::Item) -> PI + Sync + Send,
        PI: IntoParallelIterator,
    {
        FlatMap::new(self, map_op)
    }

    /// An adaptor that flattens iterable `Item`s into one large iterator
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
    /// let y: Vec<_> = x.into_par_iter().flatten().collect();
    ///
    /// assert_eq!(y, vec![1, 2, 3, 4]);
    /// ```
    fn flatten(self) -> Flatten<Self>
    where
        Self::Item: IntoParallelIterator,
    {
        Flatten::new(self)
    }

    /// Reduces the items in the iterator into one item using `op`.
    /// The argument `identity` should be a closure that can produce an
    /// "identity" value which may be inserted into the sequence as
    /// needed to create opportunities for parallel execution. So, for
    /// example, if you are doing a summation, then `identity()` ought
    /// to produce something that represents the zero for your type
    /// (but consider just calling `sum()` in that case).
    ///
    /// # Examples
    ///
    /// ```
    /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
    /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
    /// // where the first/second elements are summed separately.
    /// use rayon::prelude::*;
    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
    ///     .par_iter()        // iterating over &(i32, i32)
    ///     .cloned()          // iterating over (i32, i32)
    ///     .reduce(|| (0, 0), // the "identity" is 0 in both columns
    ///             |a, b| (a.0 + b.0, a.1 + b.1));
    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
    /// ```
    ///
    /// **Note:** unlike a sequential `fold` operation, the order in
    /// which `op` will be applied to reduce the result is not fully
    /// specified. So `op` should be [associative] or else the results
    /// will be non-deterministic. And of course `identity()` should
    /// produce a true identity.
    ///
    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
    fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
    where
        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
        ID: Fn() -> Self::Item + Sync + Send,
    {
        reduce::reduce(self, identity, op)
    }

    /// Reduces the items in the iterator into one item using `op`.
    /// If the iterator is empty, `None` is returned; otherwise,
    /// `Some` is returned.
    ///
    /// This version of `reduce` is simple but somewhat less
    /// efficient. If possible, it is better to call `reduce()`, which
    /// requires an identity element.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
    ///     .par_iter()        // iterating over &(i32, i32)
    ///     .cloned()          // iterating over (i32, i32)
    ///     .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
    ///     .unwrap();
    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
    /// ```
    ///
    /// **Note:** unlike a sequential `fold` operation, the order in
    /// which `op` will be applied to reduce the result is not fully
    /// specified. So `op` should be [associative] or else the results
    /// will be non-deterministic.
    ///
    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
    fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
    where
        OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
    {
        // Folds one more item into an optional accumulator.
        fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
            move |opt_a, b| match opt_a {
                Some(a) => Some(op(a, b)),
                None => Some(b),
            }
        }

        // Merges two optional accumulators, keeping whichever side is `Some`.
        fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
            move |opt_a, opt_b| match (opt_a, opt_b) {
                (Some(a), Some(b)) => Some(op(a, b)),
                (Some(v), None) | (None, Some(v)) => Some(v),
                (None, None) => None,
            }
        }

        self.fold(<_>::default, opt_fold(&op))
            .reduce(<_>::default, opt_reduce(&op))
    }

    /// Reduces the items in the iterator into one item using a fallible `op`.
    /// The `identity` argument is used the same way as in [`reduce()`].
    ///
    /// [`reduce()`]: #method.reduce
    ///
    /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
    /// to one, we will attempt to stop processing the rest of the items in the
    /// iterator as soon as possible, and we will return that terminating value.
960 /// Otherwise, we will return the final reduced `Result::Ok(T)` or 961 /// `Option::Some(T)`. If there are multiple errors in parallel, it is not 962 /// specified which will be returned. 963 /// 964 /// # Examples 965 /// 966 /// ``` 967 /// use rayon::prelude::*; 968 /// 969 /// // Compute the sum of squares, being careful about overflow. 970 /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> { 971 /// iter.into_par_iter() 972 /// .map(|i| i.checked_mul(i)) // square each item, 973 /// .try_reduce(|| 0, i32::checked_add) // and add them up! 974 /// } 975 /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16)); 976 /// 977 /// // The sum might overflow 978 /// assert_eq!(sum_squares(0..10_000), None); 979 /// 980 /// // Or the squares might overflow before it even reaches `try_reduce` 981 /// assert_eq!(sum_squares(1_000_000..1_000_001), None); 982 /// ``` try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item where OP: Fn(T, T) -> Self::Item + Sync + Send, ID: Fn() -> T + Sync + Send, Self::Item: Try<Ok = T>,983 fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item 984 where 985 OP: Fn(T, T) -> Self::Item + Sync + Send, 986 ID: Fn() -> T + Sync + Send, 987 Self::Item: Try<Ok = T>, 988 { 989 try_reduce::try_reduce(self, identity, op) 990 } 991 992 /// Reduces the items in the iterator into one item using a fallible `op`. 993 /// 994 /// Like [`reduce_with()`], if the iterator is empty, `None` is returned; 995 /// otherwise, `Some` is returned. Beyond that, it behaves like 996 /// [`try_reduce()`] for handling `Err`/`None`. 997 /// 998 /// [`reduce_with()`]: #method.reduce_with 999 /// [`try_reduce()`]: #method.try_reduce 1000 /// 1001 /// For instance, with `Option` items, the return value may be: 1002 /// - `None`, the iterator was empty 1003 /// - `Some(None)`, we stopped after encountering `None`. 1004 /// - `Some(Some(x))`, the entire iterator reduced to `x`. 
1005 /// 1006 /// With `Result` items, the nesting is more obvious: 1007 /// - `None`, the iterator was empty 1008 /// - `Some(Err(e))`, we stopped after encountering an error `e`. 1009 /// - `Some(Ok(x))`, the entire iterator reduced to `x`. 1010 /// 1011 /// # Examples 1012 /// 1013 /// ``` 1014 /// use rayon::prelude::*; 1015 /// 1016 /// let files = ["/dev/null", "/does/not/exist"]; 1017 /// 1018 /// // Find the biggest file 1019 /// files.into_par_iter() 1020 /// .map(|path| std::fs::metadata(path).map(|m| (path, m.len()))) 1021 /// .try_reduce_with(|a, b| { 1022 /// Ok(if a.1 >= b.1 { a } else { b }) 1023 /// }) 1024 /// .expect("Some value, since the iterator is not empty") 1025 /// .expect_err("not found"); 1026 /// ``` try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item> where OP: Fn(T, T) -> Self::Item + Sync + Send, Self::Item: Try<Ok = T>,1027 fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item> 1028 where 1029 OP: Fn(T, T) -> Self::Item + Sync + Send, 1030 Self::Item: Try<Ok = T>, 1031 { 1032 try_reduce_with::try_reduce_with(self, op) 1033 } 1034 1035 /// Parallel fold is similar to sequential fold except that the 1036 /// sequence of items may be subdivided before it is 1037 /// folded. Consider a list of numbers like `22 3 77 89 46`. If 1038 /// you used sequential fold to add them (`fold(0, |a,b| a+b)`, 1039 /// you would wind up first adding 0 + 22, then 22 + 3, then 25 + 1040 /// 77, and so forth. The **parallel fold** works similarly except 1041 /// that it first breaks up your list into sublists, and hence 1042 /// instead of yielding up a single sum at the end, it yields up 1043 /// multiple sums. The number of results is nondeterministic, as 1044 /// is the point where the breaks occur. 
1045 /// 1046 /// So if did the same parallel fold (`fold(0, |a,b| a+b)`) on 1047 /// our example list, we might wind up with a sequence of two numbers, 1048 /// like so: 1049 /// 1050 /// ```notrust 1051 /// 22 3 77 89 46 1052 /// | | 1053 /// 102 135 1054 /// ``` 1055 /// 1056 /// Or perhaps these three numbers: 1057 /// 1058 /// ```notrust 1059 /// 22 3 77 89 46 1060 /// | | | 1061 /// 102 89 46 1062 /// ``` 1063 /// 1064 /// In general, Rayon will attempt to find good breaking points 1065 /// that keep all of your cores busy. 1066 /// 1067 /// ### Fold versus reduce 1068 /// 1069 /// The `fold()` and `reduce()` methods each take an identity element 1070 /// and a combining function, but they operate rather differently. 1071 /// 1072 /// `reduce()` requires that the identity function has the same 1073 /// type as the things you are iterating over, and it fully 1074 /// reduces the list of items into a single item. So, for example, 1075 /// imagine we are iterating over a list of bytes `bytes: [128_u8, 1076 /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b: 1077 /// u8| a + b)`, we would get an overflow. This is because `0`, 1078 /// `a`, and `b` here are all bytes, just like the numbers in the 1079 /// list (I wrote the types explicitly above, but those are the 1080 /// only types you can use). To avoid the overflow, we would need 1081 /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a, 1082 /// b| a + b)`, in which case our result would be `256`. 1083 /// 1084 /// In contrast, with `fold()`, the identity function does not 1085 /// have to have the same type as the things you are iterating 1086 /// over, and you potentially get back many results. So, if we 1087 /// continue with the `bytes` example from the previous paragraph, 1088 /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to 1089 /// convert our bytes into `u32`. And of course we might not get 1090 /// back a single sum. 
1091 /// 1092 /// There is a more subtle distinction as well, though it's 1093 /// actually implied by the above points. When you use `reduce()`, 1094 /// your reduction function is sometimes called with values that 1095 /// were never part of your original parallel iterator (for 1096 /// example, both the left and right might be a partial sum). With 1097 /// `fold()`, in contrast, the left value in the fold function is 1098 /// always the accumulator, and the right value is always from 1099 /// your original sequence. 1100 /// 1101 /// ### Fold vs Map/Reduce 1102 /// 1103 /// Fold makes sense if you have some operation where it is 1104 /// cheaper to create groups of elements at a time. For example, 1105 /// imagine collecting characters into a string. If you were going 1106 /// to use map/reduce, you might try this: 1107 /// 1108 /// ``` 1109 /// use rayon::prelude::*; 1110 /// 1111 /// let s = 1112 /// ['a', 'b', 'c', 'd', 'e'] 1113 /// .par_iter() 1114 /// .map(|c: &char| format!("{}", c)) 1115 /// .reduce(|| String::new(), 1116 /// |mut a: String, b: String| { a.push_str(&b); a }); 1117 /// 1118 /// assert_eq!(s, "abcde"); 1119 /// ``` 1120 /// 1121 /// Because reduce produces the same type of element as its input, 1122 /// you have to first map each character into a string, and then 1123 /// you can reduce them. This means we create one string per 1124 /// element in our iterator -- not so great. Using `fold`, we can 1125 /// do this instead: 1126 /// 1127 /// ``` 1128 /// use rayon::prelude::*; 1129 /// 1130 /// let s = 1131 /// ['a', 'b', 'c', 'd', 'e'] 1132 /// .par_iter() 1133 /// .fold(|| String::new(), 1134 /// |mut s: String, c: &char| { s.push(*c); s }) 1135 /// .reduce(|| String::new(), 1136 /// |mut a: String, b: String| { a.push_str(&b); a }); 1137 /// 1138 /// assert_eq!(s, "abcde"); 1139 /// ``` 1140 /// 1141 /// Now `fold` will process groups of our characters at a time, 1142 /// and we only make one string per group. 
We should wind up with 1143 /// some small-ish number of strings roughly proportional to the 1144 /// number of CPUs you have (it will ultimately depend on how busy 1145 /// your processors are). Note that we still need to do a reduce 1146 /// afterwards to combine those groups of strings into a single 1147 /// string. 1148 /// 1149 /// You could use a similar trick to save partial results (e.g., a 1150 /// cache) or something similar. 1151 /// 1152 /// ### Combining fold with other operations 1153 /// 1154 /// You can combine `fold` with `reduce` if you want to produce a 1155 /// single value. This is then roughly equivalent to a map/reduce 1156 /// combination in effect: 1157 /// 1158 /// ``` 1159 /// use rayon::prelude::*; 1160 /// 1161 /// let bytes = 0..22_u8; 1162 /// let sum = bytes.into_par_iter() 1163 /// .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32)) 1164 /// .sum::<u32>(); 1165 /// 1166 /// assert_eq!(sum, (0..22).sum()); // compare to sequential 1167 /// ``` fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F> where F: Fn(T, Self::Item) -> T + Sync + Send, ID: Fn() -> T + Sync + Send, T: Send,1168 fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F> 1169 where 1170 F: Fn(T, Self::Item) -> T + Sync + Send, 1171 ID: Fn() -> T + Sync + Send, 1172 T: Send, 1173 { 1174 Fold::new(self, identity, fold_op) 1175 } 1176 1177 /// Applies `fold_op` to the given `init` value with each item of this 1178 /// iterator, finally producing the value for further use. 1179 /// 1180 /// This works essentially like `fold(|| init.clone(), fold_op)`, except 1181 /// it doesn't require the `init` type to be `Sync`, nor any other form 1182 /// of added synchronization. 
1183 /// 1184 /// # Examples 1185 /// 1186 /// ``` 1187 /// use rayon::prelude::*; 1188 /// 1189 /// let bytes = 0..22_u8; 1190 /// let sum = bytes.into_par_iter() 1191 /// .fold_with(0_u32, |a: u32, b: u8| a + (b as u32)) 1192 /// .sum::<u32>(); 1193 /// 1194 /// assert_eq!(sum, (0..22).sum()); // compare to sequential 1195 /// ``` fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F> where F: Fn(T, Self::Item) -> T + Sync + Send, T: Send + Clone,1196 fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F> 1197 where 1198 F: Fn(T, Self::Item) -> T + Sync + Send, 1199 T: Send + Clone, 1200 { 1201 FoldWith::new(self, init, fold_op) 1202 } 1203 1204 /// Performs a fallible parallel fold. 1205 /// 1206 /// This is a variation of [`fold()`] for operations which can fail with 1207 /// `Option::None` or `Result::Err`. The first such failure stops 1208 /// processing the local set of items, without affecting other folds in the 1209 /// iterator's subdivisions. 1210 /// 1211 /// Often, `try_fold()` will be followed by [`try_reduce()`] 1212 /// for a final reduction and global short-circuiting effect. 
1213 /// 1214 /// [`fold()`]: #method.fold 1215 /// [`try_reduce()`]: #method.try_reduce 1216 /// 1217 /// # Examples 1218 /// 1219 /// ``` 1220 /// use rayon::prelude::*; 1221 /// 1222 /// let bytes = 0..22_u8; 1223 /// let sum = bytes.into_par_iter() 1224 /// .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32)) 1225 /// .try_reduce(|| 0, u32::checked_add); 1226 /// 1227 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential 1228 /// ``` try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F> where F: Fn(T, Self::Item) -> R + Sync + Send, ID: Fn() -> T + Sync + Send, R: Try<Ok = T> + Send,1229 fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F> 1230 where 1231 F: Fn(T, Self::Item) -> R + Sync + Send, 1232 ID: Fn() -> T + Sync + Send, 1233 R: Try<Ok = T> + Send, 1234 { 1235 TryFold::new(self, identity, fold_op) 1236 } 1237 1238 /// Performs a fallible parallel fold with a cloneable `init` value. 1239 /// 1240 /// This combines the `init` semantics of [`fold_with()`] and the failure 1241 /// semantics of [`try_fold()`]. 
1242 /// 1243 /// [`fold_with()`]: #method.fold_with 1244 /// [`try_fold()`]: #method.try_fold 1245 /// 1246 /// ``` 1247 /// use rayon::prelude::*; 1248 /// 1249 /// let bytes = 0..22_u8; 1250 /// let sum = bytes.into_par_iter() 1251 /// .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32)) 1252 /// .try_reduce(|| 0, u32::checked_add); 1253 /// 1254 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential 1255 /// ``` try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F> where F: Fn(T, Self::Item) -> R + Sync + Send, R: Try<Ok = T> + Send, T: Clone + Send,1256 fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F> 1257 where 1258 F: Fn(T, Self::Item) -> R + Sync + Send, 1259 R: Try<Ok = T> + Send, 1260 T: Clone + Send, 1261 { 1262 TryFoldWith::new(self, init, fold_op) 1263 } 1264 1265 /// Sums up the items in the iterator. 1266 /// 1267 /// Note that the order in items will be reduced is not specified, 1268 /// so if the `+` operator is not truly [associative] \(as is the 1269 /// case for floating point numbers), then the results are not 1270 /// fully deterministic. 1271 /// 1272 /// [associative]: https://en.wikipedia.org/wiki/Associative_property 1273 /// 1274 /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`, 1275 /// except that the type of `0` and the `+` operation may vary 1276 /// depending on the type of value being produced. 1277 /// 1278 /// # Examples 1279 /// 1280 /// ``` 1281 /// use rayon::prelude::*; 1282 /// 1283 /// let a = [1, 5, 7]; 1284 /// 1285 /// let sum: i32 = a.par_iter().sum(); 1286 /// 1287 /// assert_eq!(sum, 13); 1288 /// ``` sum<S>(self) -> S where S: Send + Sum<Self::Item> + Sum<S>,1289 fn sum<S>(self) -> S 1290 where 1291 S: Send + Sum<Self::Item> + Sum<S>, 1292 { 1293 sum::sum(self) 1294 } 1295 1296 /// Multiplies all the items in the iterator. 
1297 /// 1298 /// Note that the order in items will be reduced is not specified, 1299 /// so if the `*` operator is not truly [associative] \(as is the 1300 /// case for floating point numbers), then the results are not 1301 /// fully deterministic. 1302 /// 1303 /// [associative]: https://en.wikipedia.org/wiki/Associative_property 1304 /// 1305 /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`, 1306 /// except that the type of `1` and the `*` operation may vary 1307 /// depending on the type of value being produced. 1308 /// 1309 /// # Examples 1310 /// 1311 /// ``` 1312 /// use rayon::prelude::*; 1313 /// 1314 /// fn factorial(n: u32) -> u32 { 1315 /// (1..n+1).into_par_iter().product() 1316 /// } 1317 /// 1318 /// assert_eq!(factorial(0), 1); 1319 /// assert_eq!(factorial(1), 1); 1320 /// assert_eq!(factorial(5), 120); 1321 /// ``` product<P>(self) -> P where P: Send + Product<Self::Item> + Product<P>,1322 fn product<P>(self) -> P 1323 where 1324 P: Send + Product<Self::Item> + Product<P>, 1325 { 1326 product::product(self) 1327 } 1328 1329 /// Computes the minimum of all the items in the iterator. If the 1330 /// iterator is empty, `None` is returned; otherwise, `Some(min)` 1331 /// is returned. 1332 /// 1333 /// Note that the order in which the items will be reduced is not 1334 /// specified, so if the `Ord` impl is not truly associative, then 1335 /// the results are not deterministic. 1336 /// 1337 /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`. 
1338 /// 1339 /// # Examples 1340 /// 1341 /// ``` 1342 /// use rayon::prelude::*; 1343 /// 1344 /// let a = [45, 74, 32]; 1345 /// 1346 /// assert_eq!(a.par_iter().min(), Some(&32)); 1347 /// 1348 /// let b: [i32; 0] = []; 1349 /// 1350 /// assert_eq!(b.par_iter().min(), None); 1351 /// ``` min(self) -> Option<Self::Item> where Self::Item: Ord,1352 fn min(self) -> Option<Self::Item> 1353 where 1354 Self::Item: Ord, 1355 { 1356 self.reduce_with(cmp::min) 1357 } 1358 1359 /// Computes the minimum of all the items in the iterator with respect to 1360 /// the given comparison function. If the iterator is empty, `None` is 1361 /// returned; otherwise, `Some(min)` is returned. 1362 /// 1363 /// Note that the order in which the items will be reduced is not 1364 /// specified, so if the comparison function is not associative, then 1365 /// the results are not deterministic. 1366 /// 1367 /// # Examples 1368 /// 1369 /// ``` 1370 /// use rayon::prelude::*; 1371 /// 1372 /// let a = [-3_i32, 77, 53, 240, -1]; 1373 /// 1374 /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3)); 1375 /// ``` min_by<F>(self, f: F) -> Option<Self::Item> where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,1376 fn min_by<F>(self, f: F) -> Option<Self::Item> 1377 where 1378 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering, 1379 { 1380 fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T { 1381 move |a, b| match f(&a, &b) { 1382 Ordering::Greater => b, 1383 _ => a, 1384 } 1385 } 1386 1387 self.reduce_with(min(f)) 1388 } 1389 1390 /// Computes the item that yields the minimum value for the given 1391 /// function. If the iterator is empty, `None` is returned; 1392 /// otherwise, `Some(item)` is returned. 1393 /// 1394 /// Note that the order in which the items will be reduced is not 1395 /// specified, so if the `Ord` impl is not truly associative, then 1396 /// the results are not deterministic. 
1397 /// 1398 /// # Examples 1399 /// 1400 /// ``` 1401 /// use rayon::prelude::*; 1402 /// 1403 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23]; 1404 /// 1405 /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2)); 1406 /// ``` min_by_key<K, F>(self, f: F) -> Option<Self::Item> where K: Ord + Send, F: Sync + Send + Fn(&Self::Item) -> K,1407 fn min_by_key<K, F>(self, f: F) -> Option<Self::Item> 1408 where 1409 K: Ord + Send, 1410 F: Sync + Send + Fn(&Self::Item) -> K, 1411 { 1412 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) { 1413 move |x| (f(&x), x) 1414 } 1415 1416 fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) { 1417 match (a.0).cmp(&b.0) { 1418 Ordering::Greater => b, 1419 _ => a, 1420 } 1421 } 1422 1423 let (_, x) = self.map(key(f)).reduce_with(min_key)?; 1424 Some(x) 1425 } 1426 1427 /// Computes the maximum of all the items in the iterator. If the 1428 /// iterator is empty, `None` is returned; otherwise, `Some(max)` 1429 /// is returned. 1430 /// 1431 /// Note that the order in which the items will be reduced is not 1432 /// specified, so if the `Ord` impl is not truly associative, then 1433 /// the results are not deterministic. 1434 /// 1435 /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`. 1436 /// 1437 /// # Examples 1438 /// 1439 /// ``` 1440 /// use rayon::prelude::*; 1441 /// 1442 /// let a = [45, 74, 32]; 1443 /// 1444 /// assert_eq!(a.par_iter().max(), Some(&74)); 1445 /// 1446 /// let b: [i32; 0] = []; 1447 /// 1448 /// assert_eq!(b.par_iter().max(), None); 1449 /// ``` max(self) -> Option<Self::Item> where Self::Item: Ord,1450 fn max(self) -> Option<Self::Item> 1451 where 1452 Self::Item: Ord, 1453 { 1454 self.reduce_with(cmp::max) 1455 } 1456 1457 /// Computes the maximum of all the items in the iterator with respect to 1458 /// the given comparison function. If the iterator is empty, `None` is 1459 /// returned; otherwise, `Some(min)` is returned. 
1460 /// 1461 /// Note that the order in which the items will be reduced is not 1462 /// specified, so if the comparison function is not associative, then 1463 /// the results are not deterministic. 1464 /// 1465 /// # Examples 1466 /// 1467 /// ``` 1468 /// use rayon::prelude::*; 1469 /// 1470 /// let a = [-3_i32, 77, 53, 240, -1]; 1471 /// 1472 /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240)); 1473 /// ``` max_by<F>(self, f: F) -> Option<Self::Item> where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,1474 fn max_by<F>(self, f: F) -> Option<Self::Item> 1475 where 1476 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering, 1477 { 1478 fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T { 1479 move |a, b| match f(&a, &b) { 1480 Ordering::Greater => a, 1481 _ => b, 1482 } 1483 } 1484 1485 self.reduce_with(max(f)) 1486 } 1487 1488 /// Computes the item that yields the maximum value for the given 1489 /// function. If the iterator is empty, `None` is returned; 1490 /// otherwise, `Some(item)` is returned. 1491 /// 1492 /// Note that the order in which the items will be reduced is not 1493 /// specified, so if the `Ord` impl is not truly associative, then 1494 /// the results are not deterministic. 
1495 /// 1496 /// # Examples 1497 /// 1498 /// ``` 1499 /// use rayon::prelude::*; 1500 /// 1501 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23]; 1502 /// 1503 /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34)); 1504 /// ``` max_by_key<K, F>(self, f: F) -> Option<Self::Item> where K: Ord + Send, F: Sync + Send + Fn(&Self::Item) -> K,1505 fn max_by_key<K, F>(self, f: F) -> Option<Self::Item> 1506 where 1507 K: Ord + Send, 1508 F: Sync + Send + Fn(&Self::Item) -> K, 1509 { 1510 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) { 1511 move |x| (f(&x), x) 1512 } 1513 1514 fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) { 1515 match (a.0).cmp(&b.0) { 1516 Ordering::Greater => a, 1517 _ => b, 1518 } 1519 } 1520 1521 let (_, x) = self.map(key(f)).reduce_with(max_key)?; 1522 Some(x) 1523 } 1524 1525 /// Takes two iterators and creates a new iterator over both. 1526 /// 1527 /// # Examples 1528 /// 1529 /// ``` 1530 /// use rayon::prelude::*; 1531 /// 1532 /// let a = [0, 1, 2]; 1533 /// let b = [9, 8, 7]; 1534 /// 1535 /// let par_iter = a.par_iter().chain(b.par_iter()); 1536 /// 1537 /// let chained: Vec<_> = par_iter.cloned().collect(); 1538 /// 1539 /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]); 1540 /// ``` chain<C>(self, chain: C) -> Chain<Self, C::Iter> where C: IntoParallelIterator<Item = Self::Item>,1541 fn chain<C>(self, chain: C) -> Chain<Self, C::Iter> 1542 where 1543 C: IntoParallelIterator<Item = Self::Item>, 1544 { 1545 Chain::new(self, chain.into_par_iter()) 1546 } 1547 1548 /// Searches for **some** item in the parallel iterator that 1549 /// matches the given predicate and returns it. This operation 1550 /// is similar to [`find` on sequential iterators][find] but 1551 /// the item returned may not be the **first** one in the parallel 1552 /// sequence which matches, since we search the entire sequence in parallel. 
1553 /// 1554 /// Once a match is found, we will attempt to stop processing 1555 /// the rest of the items in the iterator as soon as possible 1556 /// (just as `find` stops iterating once a match is found). 1557 /// 1558 /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find 1559 /// 1560 /// # Examples 1561 /// 1562 /// ``` 1563 /// use rayon::prelude::*; 1564 /// 1565 /// let a = [1, 2, 3, 3]; 1566 /// 1567 /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3)); 1568 /// 1569 /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None); 1570 /// ``` find_any<P>(self, predicate: P) -> Option<Self::Item> where P: Fn(&Self::Item) -> bool + Sync + Send,1571 fn find_any<P>(self, predicate: P) -> Option<Self::Item> 1572 where 1573 P: Fn(&Self::Item) -> bool + Sync + Send, 1574 { 1575 find::find(self, predicate) 1576 } 1577 1578 /// Searches for the sequentially **first** item in the parallel iterator 1579 /// that matches the given predicate and returns it. 1580 /// 1581 /// Once a match is found, all attempts to the right of the match 1582 /// will be stopped, while attempts to the left must continue in case 1583 /// an earlier match is found. 1584 /// 1585 /// Note that not all parallel iterators have a useful order, much like 1586 /// sequential `HashMap` iteration, so "first" may be nebulous. If you 1587 /// just want the first match that discovered anywhere in the iterator, 1588 /// `find_any` is a better choice. 
1589 /// 1590 /// # Examples 1591 /// 1592 /// ``` 1593 /// use rayon::prelude::*; 1594 /// 1595 /// let a = [1, 2, 3, 3]; 1596 /// 1597 /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3)); 1598 /// 1599 /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None); 1600 /// ``` find_first<P>(self, predicate: P) -> Option<Self::Item> where P: Fn(&Self::Item) -> bool + Sync + Send,1601 fn find_first<P>(self, predicate: P) -> Option<Self::Item> 1602 where 1603 P: Fn(&Self::Item) -> bool + Sync + Send, 1604 { 1605 find_first_last::find_first(self, predicate) 1606 } 1607 1608 /// Searches for the sequentially **last** item in the parallel iterator 1609 /// that matches the given predicate and returns it. 1610 /// 1611 /// Once a match is found, all attempts to the left of the match 1612 /// will be stopped, while attempts to the right must continue in case 1613 /// a later match is found. 1614 /// 1615 /// Note that not all parallel iterators have a useful order, much like 1616 /// sequential `HashMap` iteration, so "last" may be nebulous. When the 1617 /// order doesn't actually matter to you, `find_any` is a better choice. 1618 /// 1619 /// # Examples 1620 /// 1621 /// ``` 1622 /// use rayon::prelude::*; 1623 /// 1624 /// let a = [1, 2, 3, 3]; 1625 /// 1626 /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3)); 1627 /// 1628 /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None); 1629 /// ``` find_last<P>(self, predicate: P) -> Option<Self::Item> where P: Fn(&Self::Item) -> bool + Sync + Send,1630 fn find_last<P>(self, predicate: P) -> Option<Self::Item> 1631 where 1632 P: Fn(&Self::Item) -> bool + Sync + Send, 1633 { 1634 find_first_last::find_last(self, predicate) 1635 } 1636 1637 /// Applies the given predicate to the items in the parallel iterator 1638 /// and returns **any** non-None result of the map operation. 
1639 /// 1640 /// Once a non-None value is produced from the map operation, we will 1641 /// attempt to stop processing the rest of the items in the iterator 1642 /// as soon as possible. 1643 /// 1644 /// Note that this method only returns **some** item in the parallel 1645 /// iterator that is not None from the map predicate. The item returned 1646 /// may not be the **first** non-None value produced in the parallel 1647 /// sequence, since the entire sequence is mapped over in parallel. 1648 /// 1649 /// # Examples 1650 /// 1651 /// ``` 1652 /// use rayon::prelude::*; 1653 /// 1654 /// let c = ["lol", "NaN", "5", "5"]; 1655 /// 1656 /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok()); 1657 /// 1658 /// assert_eq!(found_number, Some(5)); 1659 /// ``` find_map_any<P, R>(self, predicate: P) -> Option<R> where P: Fn(Self::Item) -> Option<R> + Sync + Send, R: Send,1660 fn find_map_any<P, R>(self, predicate: P) -> Option<R> 1661 where 1662 P: Fn(Self::Item) -> Option<R> + Sync + Send, 1663 R: Send, 1664 { 1665 fn yes<T>(_: &T) -> bool { 1666 true 1667 } 1668 self.filter_map(predicate).find_any(yes) 1669 } 1670 1671 /// Applies the given predicate to the items in the parallel iterator and 1672 /// returns the sequentially **first** non-None result of the map operation. 1673 /// 1674 /// Once a non-None value is produced from the map operation, all attempts 1675 /// to the right of the match will be stopped, while attempts to the left 1676 /// must continue in case an earlier match is found. 1677 /// 1678 /// Note that not all parallel iterators have a useful order, much like 1679 /// sequential `HashMap` iteration, so "first" may be nebulous. If you 1680 /// just want the first non-None value discovered anywhere in the iterator, 1681 /// `find_map_any` is a better choice. 
1682 /// 1683 /// # Examples 1684 /// 1685 /// ``` 1686 /// use rayon::prelude::*; 1687 /// 1688 /// let c = ["lol", "NaN", "2", "5"]; 1689 /// 1690 /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok()); 1691 /// 1692 /// assert_eq!(first_number, Some(2)); 1693 /// ``` find_map_first<P, R>(self, predicate: P) -> Option<R> where P: Fn(Self::Item) -> Option<R> + Sync + Send, R: Send,1694 fn find_map_first<P, R>(self, predicate: P) -> Option<R> 1695 where 1696 P: Fn(Self::Item) -> Option<R> + Sync + Send, 1697 R: Send, 1698 { 1699 fn yes<T>(_: &T) -> bool { 1700 true 1701 } 1702 self.filter_map(predicate).find_first(yes) 1703 } 1704 1705 /// Applies the given predicate to the items in the parallel iterator and 1706 /// returns the sequentially **last** non-None result of the map operation. 1707 /// 1708 /// Once a non-None value is produced from the map operation, all attempts 1709 /// to the left of the match will be stopped, while attempts to the right 1710 /// must continue in case a later match is found. 1711 /// 1712 /// Note that not all parallel iterators have a useful order, much like 1713 /// sequential `HashMap` iteration, so "first" may be nebulous. If you 1714 /// just want the first non-None value discovered anywhere in the iterator, 1715 /// `find_map_any` is a better choice. 
1716 /// 1717 /// # Examples 1718 /// 1719 /// ``` 1720 /// use rayon::prelude::*; 1721 /// 1722 /// let c = ["lol", "NaN", "2", "5"]; 1723 /// 1724 /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok()); 1725 /// 1726 /// assert_eq!(last_number, Some(5)); 1727 /// ``` find_map_last<P, R>(self, predicate: P) -> Option<R> where P: Fn(Self::Item) -> Option<R> + Sync + Send, R: Send,1728 fn find_map_last<P, R>(self, predicate: P) -> Option<R> 1729 where 1730 P: Fn(Self::Item) -> Option<R> + Sync + Send, 1731 R: Send, 1732 { 1733 fn yes<T>(_: &T) -> bool { 1734 true 1735 } 1736 self.filter_map(predicate).find_last(yes) 1737 } 1738 1739 #[doc(hidden)] 1740 #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \\ 1741 `find_first`, or `find_last`")] find<P>(self, predicate: P) -> Option<Self::Item> where P: Fn(&Self::Item) -> bool + Sync + Send,1742 fn find<P>(self, predicate: P) -> Option<Self::Item> 1743 where 1744 P: Fn(&Self::Item) -> bool + Sync + Send, 1745 { 1746 self.find_any(predicate) 1747 } 1748 1749 /// Searches for **some** item in the parallel iterator that 1750 /// matches the given predicate, and if so returns true. Once 1751 /// a match is found, we'll attempt to stop process the rest 1752 /// of the items. Proving that there's no match, returning false, 1753 /// does require visiting every item. 1754 /// 1755 /// # Examples 1756 /// 1757 /// ``` 1758 /// use rayon::prelude::*; 1759 /// 1760 /// let a = [0, 12, 3, 4, 0, 23, 0]; 1761 /// 1762 /// let is_valid = a.par_iter().any(|&x| x > 10); 1763 /// 1764 /// assert!(is_valid); 1765 /// ``` any<P>(self, predicate: P) -> bool where P: Fn(Self::Item) -> bool + Sync + Send,1766 fn any<P>(self, predicate: P) -> bool 1767 where 1768 P: Fn(Self::Item) -> bool + Sync + Send, 1769 { 1770 self.map(predicate).find_any(bool::clone).is_some() 1771 } 1772 1773 /// Tests that every item in the parallel iterator matches the given 1774 /// predicate, and if so returns true. 
If a counter-example is found, 1775 /// we'll attempt to stop processing more items, then return false. 1776 /// 1777 /// # Examples 1778 /// 1779 /// ``` 1780 /// use rayon::prelude::*; 1781 /// 1782 /// let a = [0, 12, 3, 4, 0, 23, 0]; 1783 /// 1784 /// let is_valid = a.par_iter().all(|&x| x > 10); 1785 /// 1786 /// assert!(!is_valid); 1787 /// ``` all<P>(self, predicate: P) -> bool where P: Fn(Self::Item) -> bool + Sync + Send,1788 fn all<P>(self, predicate: P) -> bool 1789 where 1790 P: Fn(Self::Item) -> bool + Sync + Send, 1791 { 1792 #[inline] 1793 fn is_false(x: &bool) -> bool { 1794 !x 1795 } 1796 1797 self.map(predicate).find_any(is_false).is_none() 1798 } 1799 1800 /// Creates an iterator over the `Some` items of this iterator, halting 1801 /// as soon as any `None` is found. 1802 /// 1803 /// # Examples 1804 /// 1805 /// ``` 1806 /// use rayon::prelude::*; 1807 /// use std::sync::atomic::{AtomicUsize, Ordering}; 1808 /// 1809 /// let counter = AtomicUsize::new(0); 1810 /// let value = (0_i32..2048) 1811 /// .into_par_iter() 1812 /// .map(|x| { 1813 /// counter.fetch_add(1, Ordering::SeqCst); 1814 /// if x < 1024 { Some(x) } else { None } 1815 /// }) 1816 /// .while_some() 1817 /// .max(); 1818 /// 1819 /// assert!(value < Some(1024)); 1820 /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one 1821 /// ``` while_some<T>(self) -> WhileSome<Self> where Self: ParallelIterator<Item = Option<T>>, T: Send,1822 fn while_some<T>(self) -> WhileSome<Self> 1823 where 1824 Self: ParallelIterator<Item = Option<T>>, 1825 T: Send, 1826 { 1827 WhileSome::new(self) 1828 } 1829 1830 /// Wraps an iterator with a fuse in case of panics, to halt all threads 1831 /// as soon as possible. 1832 /// 1833 /// Panics within parallel iterators are always propagated to the caller, 1834 /// but they don't always halt the rest of the iterator right away, due to 1835 /// the internal semantics of [`join`]. 
This adaptor makes a greater effort 1836 /// to stop processing other items sooner, with the cost of additional 1837 /// synchronization overhead, which may also inhibit some optimizations. 1838 /// 1839 /// [`join`]: ../fn.join.html#panics 1840 /// 1841 /// # Examples 1842 /// 1843 /// If this code didn't use `panic_fuse()`, it would continue processing 1844 /// many more items in other threads (with long sleep delays) before the 1845 /// panic is finally propagated. 1846 /// 1847 /// ```should_panic 1848 /// use rayon::prelude::*; 1849 /// use std::{thread, time}; 1850 /// 1851 /// (0..1_000_000) 1852 /// .into_par_iter() 1853 /// .panic_fuse() 1854 /// .for_each(|i| { 1855 /// // simulate some work 1856 /// thread::sleep(time::Duration::from_secs(1)); 1857 /// assert!(i > 0); // oops! 1858 /// }); 1859 /// ``` panic_fuse(self) -> PanicFuse<Self>1860 fn panic_fuse(self) -> PanicFuse<Self> { 1861 PanicFuse::new(self) 1862 } 1863 1864 /// Creates a fresh collection containing all the elements produced 1865 /// by this parallel iterator. 1866 /// 1867 /// You may prefer to use `collect_into_vec()`, which allocates more 1868 /// efficiently with precise knowledge of how many elements the 1869 /// iterator contains, and even allows you to reuse an existing 1870 /// vector's backing store rather than allocating a fresh vector. 1871 /// 1872 /// # Examples 1873 /// 1874 /// ``` 1875 /// use rayon::prelude::*; 1876 /// 1877 /// let sync_vec: Vec<_> = (0..100).into_iter().collect(); 1878 /// 1879 /// let async_vec: Vec<_> = (0..100).into_par_iter().collect(); 1880 /// 1881 /// assert_eq!(sync_vec, async_vec); 1882 /// ``` collect<C>(self) -> C where C: FromParallelIterator<Self::Item>,1883 fn collect<C>(self) -> C 1884 where 1885 C: FromParallelIterator<Self::Item>, 1886 { 1887 C::from_par_iter(self) 1888 } 1889 1890 /// Unzips the items of a parallel iterator into a pair of arbitrary 1891 /// `ParallelExtend` containers. 
1892 /// 1893 /// You may prefer to use `unzip_into_vecs()`, which allocates more 1894 /// efficiently with precise knowledge of how many elements the 1895 /// iterator contains, and even allows you to reuse existing 1896 /// vectors' backing stores rather than allocating fresh vectors. 1897 /// 1898 /// # Examples 1899 /// 1900 /// ``` 1901 /// use rayon::prelude::*; 1902 /// 1903 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)]; 1904 /// 1905 /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip(); 1906 /// 1907 /// assert_eq!(left, [0, 1, 2, 3]); 1908 /// assert_eq!(right, [1, 2, 3, 4]); 1909 /// ``` 1910 /// 1911 /// Nested pairs can be unzipped too. 1912 /// 1913 /// ``` 1914 /// use rayon::prelude::*; 1915 /// 1916 /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter() 1917 /// .map(|i| (i, (i * i, i * i * i))) 1918 /// .unzip(); 1919 /// 1920 /// assert_eq!(values, [0, 1, 2, 3]); 1921 /// assert_eq!(squares, [0, 1, 4, 9]); 1922 /// assert_eq!(cubes, [0, 1, 8, 27]); 1923 /// ``` unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) where Self: ParallelIterator<Item = (A, B)>, FromA: Default + Send + ParallelExtend<A>, FromB: Default + Send + ParallelExtend<B>, A: Send, B: Send,1924 fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) 1925 where 1926 Self: ParallelIterator<Item = (A, B)>, 1927 FromA: Default + Send + ParallelExtend<A>, 1928 FromB: Default + Send + ParallelExtend<B>, 1929 A: Send, 1930 B: Send, 1931 { 1932 unzip::unzip(self) 1933 } 1934 1935 /// Partitions the items of a parallel iterator into a pair of arbitrary 1936 /// `ParallelExtend` containers. Items for which the `predicate` returns 1937 /// true go into the first container, and the rest go into the second. 1938 /// 1939 /// Note: unlike the standard `Iterator::partition`, this allows distinct 1940 /// collection types for the left and right items. 
This is more flexible, 1941 /// but may require new type annotations when converting sequential code 1942 /// that used type inferrence assuming the two were the same. 1943 /// 1944 /// # Examples 1945 /// 1946 /// ``` 1947 /// use rayon::prelude::*; 1948 /// 1949 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0); 1950 /// 1951 /// assert_eq!(left, [0, 2, 4, 6]); 1952 /// assert_eq!(right, [1, 3, 5, 7]); 1953 /// ``` partition<A, B, P>(self, predicate: P) -> (A, B) where A: Default + Send + ParallelExtend<Self::Item>, B: Default + Send + ParallelExtend<Self::Item>, P: Fn(&Self::Item) -> bool + Sync + Send,1954 fn partition<A, B, P>(self, predicate: P) -> (A, B) 1955 where 1956 A: Default + Send + ParallelExtend<Self::Item>, 1957 B: Default + Send + ParallelExtend<Self::Item>, 1958 P: Fn(&Self::Item) -> bool + Sync + Send, 1959 { 1960 unzip::partition(self, predicate) 1961 } 1962 1963 /// Partitions and maps the items of a parallel iterator into a pair of 1964 /// arbitrary `ParallelExtend` containers. `Either::Left` items go into 1965 /// the first container, and `Either::Right` items go into the second. 1966 /// 1967 /// # Examples 1968 /// 1969 /// ``` 1970 /// use rayon::prelude::*; 1971 /// use rayon::iter::Either; 1972 /// 1973 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter() 1974 /// .partition_map(|x| { 1975 /// if x % 2 == 0 { 1976 /// Either::Left(x * 4) 1977 /// } else { 1978 /// Either::Right(x * 3) 1979 /// } 1980 /// }); 1981 /// 1982 /// assert_eq!(left, [0, 8, 16, 24]); 1983 /// assert_eq!(right, [3, 9, 15, 21]); 1984 /// ``` 1985 /// 1986 /// Nested `Either` enums can be split as well. 
1987 /// 1988 /// ``` 1989 /// use rayon::prelude::*; 1990 /// use rayon::iter::Either::*; 1991 /// 1992 /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20) 1993 /// .into_par_iter() 1994 /// .partition_map(|x| match (x % 3, x % 5) { 1995 /// (0, 0) => Left(Left(x)), 1996 /// (0, _) => Left(Right(x)), 1997 /// (_, 0) => Right(Left(x)), 1998 /// (_, _) => Right(Right(x)), 1999 /// }); 2000 /// 2001 /// assert_eq!(fizzbuzz, [15]); 2002 /// assert_eq!(fizz, [3, 6, 9, 12, 18]); 2003 /// assert_eq!(buzz, [5, 10]); 2004 /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]); 2005 /// ``` partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B) where A: Default + Send + ParallelExtend<L>, B: Default + Send + ParallelExtend<R>, P: Fn(Self::Item) -> Either<L, R> + Sync + Send, L: Send, R: Send,2006 fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B) 2007 where 2008 A: Default + Send + ParallelExtend<L>, 2009 B: Default + Send + ParallelExtend<R>, 2010 P: Fn(Self::Item) -> Either<L, R> + Sync + Send, 2011 L: Send, 2012 R: Send, 2013 { 2014 unzip::partition_map(self, predicate) 2015 } 2016 2017 /// Intersperses clones of an element between items of this iterator. 2018 /// 2019 /// # Examples 2020 /// 2021 /// ``` 2022 /// use rayon::prelude::*; 2023 /// 2024 /// let x = vec![1, 2, 3]; 2025 /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect(); 2026 /// 2027 /// assert_eq!(r, vec![1, -1, 2, -1, 3]); 2028 /// ``` intersperse(self, element: Self::Item) -> Intersperse<Self> where Self::Item: Clone,2029 fn intersperse(self, element: Self::Item) -> Intersperse<Self> 2030 where 2031 Self::Item: Clone, 2032 { 2033 Intersperse::new(self, element) 2034 } 2035 2036 /// Internal method used to define the behavior of this parallel 2037 /// iterator. You should not need to call this directly. 
2038 /// 2039 /// This method causes the iterator `self` to start producing 2040 /// items and to feed them to the consumer `consumer` one by one. 2041 /// It may split the consumer before doing so to create the 2042 /// opportunity to produce in parallel. 2043 /// 2044 /// See the [README] for more details on the internals of parallel 2045 /// iterators. 2046 /// 2047 /// [README]: README.md drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>2048 fn drive_unindexed<C>(self, consumer: C) -> C::Result 2049 where 2050 C: UnindexedConsumer<Self::Item>; 2051 2052 /// Internal method used to define the behavior of this parallel 2053 /// iterator. You should not need to call this directly. 2054 /// 2055 /// Returns the number of items produced by this iterator, if known 2056 /// statically. This can be used by consumers to trigger special fast 2057 /// paths. Therefore, if `Some(_)` is returned, this iterator must only 2058 /// use the (indexed) `Consumer` methods when driving a consumer, such 2059 /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or 2060 /// other `UnindexedConsumer` methods -- or returning an inaccurate 2061 /// value -- may result in panics. 2062 /// 2063 /// This method is currently used to optimize `collect` for want 2064 /// of true Rust specialization; it may be removed when 2065 /// specialization is stable. opt_len(&self) -> Option<usize>2066 fn opt_len(&self) -> Option<usize> { 2067 None 2068 } 2069 } 2070 2071 impl<T: ParallelIterator> IntoParallelIterator for T { 2072 type Iter = T; 2073 type Item = T::Item; 2074 into_par_iter(self) -> T2075 fn into_par_iter(self) -> T { 2076 self 2077 } 2078 } 2079 2080 /// An iterator that supports "random access" to its data, meaning 2081 /// that you can split it at arbitrary indices and draw data from 2082 /// those points. 
2083 /// 2084 /// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges 2085 pub trait IndexedParallelIterator: ParallelIterator { 2086 /// Collects the results of the iterator into the specified 2087 /// vector. The vector is always truncated before execution 2088 /// begins. If possible, reusing the vector across calls can lead 2089 /// to better performance since it reuses the same backing buffer. 2090 /// 2091 /// # Examples 2092 /// 2093 /// ``` 2094 /// use rayon::prelude::*; 2095 /// 2096 /// // any prior data will be truncated 2097 /// let mut vec = vec![-1, -2, -3]; 2098 /// 2099 /// (0..5).into_par_iter() 2100 /// .collect_into_vec(&mut vec); 2101 /// 2102 /// assert_eq!(vec, [0, 1, 2, 3, 4]); 2103 /// ``` collect_into_vec(self, target: &mut Vec<Self::Item>)2104 fn collect_into_vec(self, target: &mut Vec<Self::Item>) { 2105 collect::collect_into_vec(self, target); 2106 } 2107 2108 /// Unzips the results of the iterator into the specified 2109 /// vectors. The vectors are always truncated before execution 2110 /// begins. If possible, reusing the vectors across calls can lead 2111 /// to better performance since they reuse the same backing buffer. 
2112 /// 2113 /// # Examples 2114 /// 2115 /// ``` 2116 /// use rayon::prelude::*; 2117 /// 2118 /// // any prior data will be truncated 2119 /// let mut left = vec![42; 10]; 2120 /// let mut right = vec![-1; 10]; 2121 /// 2122 /// (10..15).into_par_iter() 2123 /// .enumerate() 2124 /// .unzip_into_vecs(&mut left, &mut right); 2125 /// 2126 /// assert_eq!(left, [0, 1, 2, 3, 4]); 2127 /// assert_eq!(right, [10, 11, 12, 13, 14]); 2128 /// ``` unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>) where Self: IndexedParallelIterator<Item = (A, B)>, A: Send, B: Send,2129 fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>) 2130 where 2131 Self: IndexedParallelIterator<Item = (A, B)>, 2132 A: Send, 2133 B: Send, 2134 { 2135 collect::unzip_into_vecs(self, left, right); 2136 } 2137 2138 /// Iterates over tuples `(A, B)`, where the items `A` are from 2139 /// this iterator and `B` are from the iterator given as argument. 2140 /// Like the `zip` method on ordinary iterators, if the two 2141 /// iterators are of unequal length, you only get the items they 2142 /// have in common. 2143 /// 2144 /// # Examples 2145 /// 2146 /// ``` 2147 /// use rayon::prelude::*; 2148 /// 2149 /// let result: Vec<_> = (1..4) 2150 /// .into_par_iter() 2151 /// .zip(vec!['a', 'b', 'c']) 2152 /// .collect(); 2153 /// 2154 /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]); 2155 /// ``` zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter> where Z: IntoParallelIterator, Z::Iter: IndexedParallelIterator,2156 fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter> 2157 where 2158 Z: IntoParallelIterator, 2159 Z::Iter: IndexedParallelIterator, 2160 { 2161 Zip::new(self, zip_op.into_par_iter()) 2162 } 2163 2164 /// The same as `Zip`, but requires that both iterators have the same length. 2165 /// 2166 /// # Panics 2167 /// Will panic if `self` and `zip_op` are not the same length. 
2168 /// 2169 /// ```should_panic 2170 /// use rayon::prelude::*; 2171 /// 2172 /// let one = [1u8]; 2173 /// let two = [2u8, 2]; 2174 /// let one_iter = one.par_iter(); 2175 /// let two_iter = two.par_iter(); 2176 /// 2177 /// // this will panic 2178 /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect(); 2179 /// 2180 /// // we should never get here 2181 /// assert_eq!(1, zipped.len()); 2182 /// ``` zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter> where Z: IntoParallelIterator, Z::Iter: IndexedParallelIterator,2183 fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter> 2184 where 2185 Z: IntoParallelIterator, 2186 Z::Iter: IndexedParallelIterator, 2187 { 2188 let zip_op_iter = zip_op.into_par_iter(); 2189 assert_eq!(self.len(), zip_op_iter.len()); 2190 ZipEq::new(self, zip_op_iter) 2191 } 2192 2193 /// Interleaves elements of this iterator and the other given 2194 /// iterator. Alternately yields elements from this iterator and 2195 /// the given iterator, until both are exhausted. If one iterator 2196 /// is exhausted before the other, the last elements are provided 2197 /// from the other. 2198 /// 2199 /// # Examples 2200 /// 2201 /// ``` 2202 /// use rayon::prelude::*; 2203 /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]); 2204 /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect(); 2205 /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]); 2206 /// ``` interleave<I>(self, other: I) -> Interleave<Self, I::Iter> where I: IntoParallelIterator<Item = Self::Item>, I::Iter: IndexedParallelIterator<Item = Self::Item>,2207 fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter> 2208 where 2209 I: IntoParallelIterator<Item = Self::Item>, 2210 I::Iter: IndexedParallelIterator<Item = Self::Item>, 2211 { 2212 Interleave::new(self, other.into_par_iter()) 2213 } 2214 2215 /// Interleaves elements of this iterator and the other given 2216 /// iterator, until one is exhausted. 
2217 /// 2218 /// # Examples 2219 /// 2220 /// ``` 2221 /// use rayon::prelude::*; 2222 /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]); 2223 /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect(); 2224 /// assert_eq!(r, vec![1, 5, 2, 6, 3]); 2225 /// ``` interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter> where I: IntoParallelIterator<Item = Self::Item>, I::Iter: IndexedParallelIterator<Item = Self::Item>,2226 fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter> 2227 where 2228 I: IntoParallelIterator<Item = Self::Item>, 2229 I::Iter: IndexedParallelIterator<Item = Self::Item>, 2230 { 2231 InterleaveShortest::new(self, other.into_par_iter()) 2232 } 2233 2234 /// Splits an iterator up into fixed-size chunks. 2235 /// 2236 /// Returns an iterator that returns `Vec`s of the given number of elements. 2237 /// If the number of elements in the iterator is not divisible by `chunk_size`, 2238 /// the last chunk may be shorter than `chunk_size`. 2239 /// 2240 /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on 2241 /// slices, without having to allocate intermediate `Vec`s for the chunks. 2242 /// 2243 /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks 2244 /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut 2245 /// 2246 /// # Examples 2247 /// 2248 /// ``` 2249 /// use rayon::prelude::*; 2250 /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; 2251 /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect(); 2252 /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]); 2253 /// ``` chunks(self, chunk_size: usize) -> Chunks<Self>2254 fn chunks(self, chunk_size: usize) -> Chunks<Self> { 2255 assert!(chunk_size != 0, "chunk_size must not be zero"); 2256 Chunks::new(self, chunk_size) 2257 } 2258 2259 /// Lexicographically compares the elements of this `ParallelIterator` with those of 2260 /// another. 
2261 /// 2262 /// # Examples 2263 /// 2264 /// ``` 2265 /// use rayon::prelude::*; 2266 /// use std::cmp::Ordering::*; 2267 /// 2268 /// let x = vec![1, 2, 3]; 2269 /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less); 2270 /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal); 2271 /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater); 2272 /// ``` cmp<I>(self, other: I) -> Ordering where I: IntoParallelIterator<Item = Self::Item>, I::Iter: IndexedParallelIterator, Self::Item: Ord,2273 fn cmp<I>(self, other: I) -> Ordering 2274 where 2275 I: IntoParallelIterator<Item = Self::Item>, 2276 I::Iter: IndexedParallelIterator, 2277 Self::Item: Ord, 2278 { 2279 #[inline] 2280 fn ordering<T: Ord>((x, y): (T, T)) -> Ordering { 2281 Ord::cmp(&x, &y) 2282 } 2283 2284 #[inline] 2285 fn inequal(&ord: &Ordering) -> bool { 2286 ord != Ordering::Equal 2287 } 2288 2289 let other = other.into_par_iter(); 2290 let ord_len = self.len().cmp(&other.len()); 2291 self.zip(other) 2292 .map(ordering) 2293 .find_first(inequal) 2294 .unwrap_or(ord_len) 2295 } 2296 2297 /// Lexicographically compares the elements of this `ParallelIterator` with those of 2298 /// another. 
2299 /// 2300 /// # Examples 2301 /// 2302 /// ``` 2303 /// use rayon::prelude::*; 2304 /// use std::cmp::Ordering::*; 2305 /// use std::f64::NAN; 2306 /// 2307 /// let x = vec![1.0, 2.0, 3.0]; 2308 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less)); 2309 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal)); 2310 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater)); 2311 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None); 2312 /// ``` partial_cmp<I>(self, other: I) -> Option<Ordering> where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2313 fn partial_cmp<I>(self, other: I) -> Option<Ordering> 2314 where 2315 I: IntoParallelIterator, 2316 I::Iter: IndexedParallelIterator, 2317 Self::Item: PartialOrd<I::Item>, 2318 { 2319 #[inline] 2320 fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> { 2321 PartialOrd::partial_cmp(&x, &y) 2322 } 2323 2324 #[inline] 2325 fn inequal(&ord: &Option<Ordering>) -> bool { 2326 ord != Some(Ordering::Equal) 2327 } 2328 2329 let other = other.into_par_iter(); 2330 let ord_len = self.len().cmp(&other.len()); 2331 self.zip(other) 2332 .map(ordering) 2333 .find_first(inequal) 2334 .unwrap_or(Some(ord_len)) 2335 } 2336 2337 /// Determines if the elements of this `ParallelIterator` 2338 /// are equal to those of another eq<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialEq<I::Item>,2339 fn eq<I>(self, other: I) -> bool 2340 where 2341 I: IntoParallelIterator, 2342 I::Iter: IndexedParallelIterator, 2343 Self::Item: PartialEq<I::Item>, 2344 { 2345 #[inline] 2346 fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool { 2347 PartialEq::eq(&x, &y) 2348 } 2349 2350 let other = other.into_par_iter(); 2351 self.len() == other.len() && self.zip(other).all(eq) 2352 } 2353 2354 /// Determines if the elements of this `ParallelIterator` 2355 /// are unequal to 
those of another ne<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialEq<I::Item>,2356 fn ne<I>(self, other: I) -> bool 2357 where 2358 I: IntoParallelIterator, 2359 I::Iter: IndexedParallelIterator, 2360 Self::Item: PartialEq<I::Item>, 2361 { 2362 !self.eq(other) 2363 } 2364 2365 /// Determines if the elements of this `ParallelIterator` 2366 /// are lexicographically less than those of another. lt<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2367 fn lt<I>(self, other: I) -> bool 2368 where 2369 I: IntoParallelIterator, 2370 I::Iter: IndexedParallelIterator, 2371 Self::Item: PartialOrd<I::Item>, 2372 { 2373 self.partial_cmp(other) == Some(Ordering::Less) 2374 } 2375 2376 /// Determines if the elements of this `ParallelIterator` 2377 /// are less or equal to those of another. le<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2378 fn le<I>(self, other: I) -> bool 2379 where 2380 I: IntoParallelIterator, 2381 I::Iter: IndexedParallelIterator, 2382 Self::Item: PartialOrd<I::Item>, 2383 { 2384 let ord = self.partial_cmp(other); 2385 ord == Some(Ordering::Equal) || ord == Some(Ordering::Less) 2386 } 2387 2388 /// Determines if the elements of this `ParallelIterator` 2389 /// are lexicographically greater than those of another. gt<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2390 fn gt<I>(self, other: I) -> bool 2391 where 2392 I: IntoParallelIterator, 2393 I::Iter: IndexedParallelIterator, 2394 Self::Item: PartialOrd<I::Item>, 2395 { 2396 self.partial_cmp(other) == Some(Ordering::Greater) 2397 } 2398 2399 /// Determines if the elements of this `ParallelIterator` 2400 /// are less or equal to those of another. 
ge<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2401 fn ge<I>(self, other: I) -> bool 2402 where 2403 I: IntoParallelIterator, 2404 I::Iter: IndexedParallelIterator, 2405 Self::Item: PartialOrd<I::Item>, 2406 { 2407 let ord = self.partial_cmp(other); 2408 ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater) 2409 } 2410 2411 /// Yields an index along with each item. 2412 /// 2413 /// # Examples 2414 /// 2415 /// ``` 2416 /// use rayon::prelude::*; 2417 /// 2418 /// let chars = vec!['a', 'b', 'c']; 2419 /// let result: Vec<_> = chars 2420 /// .into_par_iter() 2421 /// .enumerate() 2422 /// .collect(); 2423 /// 2424 /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]); 2425 /// ``` enumerate(self) -> Enumerate<Self>2426 fn enumerate(self) -> Enumerate<Self> { 2427 Enumerate::new(self) 2428 } 2429 2430 /// Creates an iterator that steps by the given amount 2431 /// 2432 /// # Examples 2433 /// 2434 /// ``` 2435 ///use rayon::prelude::*; 2436 /// 2437 /// let range = (3..10); 2438 /// let result: Vec<i32> = range 2439 /// .into_par_iter() 2440 /// .step_by(3) 2441 /// .collect(); 2442 /// 2443 /// assert_eq!(result, [3, 6, 9]) 2444 /// ``` 2445 /// 2446 /// # Compatibility 2447 /// 2448 /// This method is only available on Rust 1.38 or greater. 2449 #[cfg(step_by)] step_by(self, step: usize) -> StepBy<Self>2450 fn step_by(self, step: usize) -> StepBy<Self> { 2451 StepBy::new(self, step) 2452 } 2453 2454 /// Creates an iterator that skips the first `n` elements. 
2455 /// 2456 /// # Examples 2457 /// 2458 /// ``` 2459 /// use rayon::prelude::*; 2460 /// 2461 /// let result: Vec<_> = (0..100) 2462 /// .into_par_iter() 2463 /// .skip(95) 2464 /// .collect(); 2465 /// 2466 /// assert_eq!(result, [95, 96, 97, 98, 99]); 2467 /// ``` skip(self, n: usize) -> Skip<Self>2468 fn skip(self, n: usize) -> Skip<Self> { 2469 Skip::new(self, n) 2470 } 2471 2472 /// Creates an iterator that yields the first `n` elements. 2473 /// 2474 /// # Examples 2475 /// 2476 /// ``` 2477 /// use rayon::prelude::*; 2478 /// 2479 /// let result: Vec<_> = (0..100) 2480 /// .into_par_iter() 2481 /// .take(5) 2482 /// .collect(); 2483 /// 2484 /// assert_eq!(result, [0, 1, 2, 3, 4]); 2485 /// ``` take(self, n: usize) -> Take<Self>2486 fn take(self, n: usize) -> Take<Self> { 2487 Take::new(self, n) 2488 } 2489 2490 /// Searches for **some** item in the parallel iterator that 2491 /// matches the given predicate, and returns its index. Like 2492 /// `ParallelIterator::find_any`, the parallel search will not 2493 /// necessarily find the **first** match, and once a match is 2494 /// found we'll attempt to stop processing any more. 
2495 /// 2496 /// # Examples 2497 /// 2498 /// ``` 2499 /// use rayon::prelude::*; 2500 /// 2501 /// let a = [1, 2, 3, 3]; 2502 /// 2503 /// let i = a.par_iter().position_any(|&x| x == 3).expect("found"); 2504 /// assert!(i == 2 || i == 3); 2505 /// 2506 /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None); 2507 /// ``` position_any<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send,2508 fn position_any<P>(self, predicate: P) -> Option<usize> 2509 where 2510 P: Fn(Self::Item) -> bool + Sync + Send, 2511 { 2512 #[inline] 2513 fn check(&(_, p): &(usize, bool)) -> bool { 2514 p 2515 } 2516 2517 let (i, _) = self.map(predicate).enumerate().find_any(check)?; 2518 Some(i) 2519 } 2520 2521 /// Searches for the sequentially **first** item in the parallel iterator 2522 /// that matches the given predicate, and returns its index. 2523 /// 2524 /// Like `ParallelIterator::find_first`, once a match is found, 2525 /// all attempts to the right of the match will be stopped, while 2526 /// attempts to the left must continue in case an earlier match 2527 /// is found. 2528 /// 2529 /// Note that not all parallel iterators have a useful order, much like 2530 /// sequential `HashMap` iteration, so "first" may be nebulous. If you 2531 /// just want the first match that discovered anywhere in the iterator, 2532 /// `position_any` is a better choice. 
2533 /// 2534 /// # Examples 2535 /// 2536 /// ``` 2537 /// use rayon::prelude::*; 2538 /// 2539 /// let a = [1, 2, 3, 3]; 2540 /// 2541 /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2)); 2542 /// 2543 /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None); 2544 /// ``` position_first<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send,2545 fn position_first<P>(self, predicate: P) -> Option<usize> 2546 where 2547 P: Fn(Self::Item) -> bool + Sync + Send, 2548 { 2549 #[inline] 2550 fn check(&(_, p): &(usize, bool)) -> bool { 2551 p 2552 } 2553 2554 let (i, _) = self.map(predicate).enumerate().find_first(check)?; 2555 Some(i) 2556 } 2557 2558 /// Searches for the sequentially **last** item in the parallel iterator 2559 /// that matches the given predicate, and returns its index. 2560 /// 2561 /// Like `ParallelIterator::find_last`, once a match is found, 2562 /// all attempts to the left of the match will be stopped, while 2563 /// attempts to the right must continue in case a later match 2564 /// is found. 2565 /// 2566 /// Note that not all parallel iterators have a useful order, much like 2567 /// sequential `HashMap` iteration, so "last" may be nebulous. When the 2568 /// order doesn't actually matter to you, `position_any` is a better 2569 /// choice. 
2570 /// 2571 /// # Examples 2572 /// 2573 /// ``` 2574 /// use rayon::prelude::*; 2575 /// 2576 /// let a = [1, 2, 3, 3]; 2577 /// 2578 /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3)); 2579 /// 2580 /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None); 2581 /// ``` position_last<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send,2582 fn position_last<P>(self, predicate: P) -> Option<usize> 2583 where 2584 P: Fn(Self::Item) -> bool + Sync + Send, 2585 { 2586 #[inline] 2587 fn check(&(_, p): &(usize, bool)) -> bool { 2588 p 2589 } 2590 2591 let (i, _) = self.map(predicate).enumerate().find_last(check)?; 2592 Some(i) 2593 } 2594 2595 #[doc(hidden)] 2596 #[deprecated( 2597 note = "parallel `position` does not search in order -- use `position_any`, \\ 2598 `position_first`, or `position_last`" 2599 )] position<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send,2600 fn position<P>(self, predicate: P) -> Option<usize> 2601 where 2602 P: Fn(Self::Item) -> bool + Sync + Send, 2603 { 2604 self.position_any(predicate) 2605 } 2606 2607 /// Produces a new iterator with the elements of this iterator in 2608 /// reverse order. 2609 /// 2610 /// # Examples 2611 /// 2612 /// ``` 2613 /// use rayon::prelude::*; 2614 /// 2615 /// let result: Vec<_> = (0..5) 2616 /// .into_par_iter() 2617 /// .rev() 2618 /// .collect(); 2619 /// 2620 /// assert_eq!(result, [4, 3, 2, 1, 0]); 2621 /// ``` rev(self) -> Rev<Self>2622 fn rev(self) -> Rev<Self> { 2623 Rev::new(self) 2624 } 2625 2626 /// Sets the minimum length of iterators desired to process in each 2627 /// thread. Rayon will not split any smaller than this length, but 2628 /// of course an iterator could already be smaller to begin with. 2629 /// 2630 /// Producers like `zip` and `interleave` will use greater of the two 2631 /// minimums. 
2632 /// Chained iterators and iterators inside `flat_map` may each use 2633 /// their own minimum length. 2634 /// 2635 /// # Examples 2636 /// 2637 /// ``` 2638 /// use rayon::prelude::*; 2639 /// 2640 /// let min = (0..1_000_000) 2641 /// .into_par_iter() 2642 /// .with_min_len(1234) 2643 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment 2644 /// .min().unwrap(); 2645 /// 2646 /// assert!(min >= 1234); 2647 /// ``` with_min_len(self, min: usize) -> MinLen<Self>2648 fn with_min_len(self, min: usize) -> MinLen<Self> { 2649 MinLen::new(self, min) 2650 } 2651 2652 /// Sets the maximum length of iterators desired to process in each 2653 /// thread. Rayon will try to split at least below this length, 2654 /// unless that would put it below the length from `with_min_len()`. 2655 /// For example, given min=10 and max=15, a length of 16 will not be 2656 /// split any further. 2657 /// 2658 /// Producers like `zip` and `interleave` will use lesser of the two 2659 /// maximums. 2660 /// Chained iterators and iterators inside `flat_map` may each use 2661 /// their own maximum length. 2662 /// 2663 /// # Examples 2664 /// 2665 /// ``` 2666 /// use rayon::prelude::*; 2667 /// 2668 /// let max = (0..1_000_000) 2669 /// .into_par_iter() 2670 /// .with_max_len(1234) 2671 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment 2672 /// .max().unwrap(); 2673 /// 2674 /// assert!(max <= 1234); 2675 /// ``` with_max_len(self, max: usize) -> MaxLen<Self>2676 fn with_max_len(self, max: usize) -> MaxLen<Self> { 2677 MaxLen::new(self, max) 2678 } 2679 2680 /// Produces an exact count of how many items this iterator will 2681 /// produce, presuming no panic occurs. 
2682 /// 2683 /// # Examples 2684 /// 2685 /// ``` 2686 /// use rayon::prelude::*; 2687 /// 2688 /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]); 2689 /// assert_eq!(par_iter.len(), 10); 2690 /// 2691 /// let vec: Vec<_> = par_iter.collect(); 2692 /// assert_eq!(vec.len(), 10); 2693 /// ``` len(&self) -> usize2694 fn len(&self) -> usize; 2695 2696 /// Internal method used to define the behavior of this parallel 2697 /// iterator. You should not need to call this directly. 2698 /// 2699 /// This method causes the iterator `self` to start producing 2700 /// items and to feed them to the consumer `consumer` one by one. 2701 /// It may split the consumer before doing so to create the 2702 /// opportunity to produce in parallel. If a split does happen, it 2703 /// will inform the consumer of the index where the split should 2704 /// occur (unlike `ParallelIterator::drive_unindexed()`). 2705 /// 2706 /// See the [README] for more details on the internals of parallel 2707 /// iterators. 2708 /// 2709 /// [README]: README.md drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result2710 fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result; 2711 2712 /// Internal method used to define the behavior of this parallel 2713 /// iterator. You should not need to call this directly. 2714 /// 2715 /// This method converts the iterator into a producer P and then 2716 /// invokes `callback.callback()` with P. Note that the type of 2717 /// this producer is not defined as part of the API, since 2718 /// `callback` must be defined generically for all producers. This 2719 /// allows the producer type to contain references; it also means 2720 /// that parallel iterators can adjust that type without causing a 2721 /// breaking change. 2722 /// 2723 /// See the [README] for more details on the internals of parallel 2724 /// iterators. 
2725 /// 2726 /// [README]: README.md with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output2727 fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output; 2728 } 2729 2730 /// `FromParallelIterator` implements the creation of a collection 2731 /// from a [`ParallelIterator`]. By implementing 2732 /// `FromParallelIterator` for a given type, you define how it will be 2733 /// created from an iterator. 2734 /// 2735 /// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method. 2736 /// 2737 /// [`ParallelIterator`]: trait.ParallelIterator.html 2738 /// [`collect()`]: trait.ParallelIterator.html#method.collect 2739 /// 2740 /// # Examples 2741 /// 2742 /// Implementing `FromParallelIterator` for your type: 2743 /// 2744 /// ``` 2745 /// use rayon::prelude::*; 2746 /// use std::mem; 2747 /// 2748 /// struct BlackHole { 2749 /// mass: usize, 2750 /// } 2751 /// 2752 /// impl<T: Send> FromParallelIterator<T> for BlackHole { 2753 /// fn from_par_iter<I>(par_iter: I) -> Self 2754 /// where I: IntoParallelIterator<Item = T> 2755 /// { 2756 /// let par_iter = par_iter.into_par_iter(); 2757 /// BlackHole { 2758 /// mass: par_iter.count() * mem::size_of::<T>(), 2759 /// } 2760 /// } 2761 /// } 2762 /// 2763 /// let bh: BlackHole = (0i32..1000).into_par_iter().collect(); 2764 /// assert_eq!(bh.mass, 4000); 2765 /// ``` 2766 pub trait FromParallelIterator<T> 2767 where 2768 T: Send, 2769 { 2770 /// Creates an instance of the collection from the parallel iterator `par_iter`. 2771 /// 2772 /// If your collection is not naturally parallel, the easiest (and 2773 /// fastest) way to do this is often to collect `par_iter` into a 2774 /// [`LinkedList`] or other intermediate data structure and then 2775 /// sequentially extend your collection. However, a more 'native' 2776 /// technique is to use the [`par_iter.fold`] or 2777 /// [`par_iter.fold_with`] methods to create the collection. 
2778 /// Alternatively, if your collection is 'natively' parallel, you 2779 /// can use `par_iter.for_each` to process each element in turn. 2780 /// 2781 /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html 2782 /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold 2783 /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with 2784 /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each from_par_iter<I>(par_iter: I) -> Self where I: IntoParallelIterator<Item = T>2785 fn from_par_iter<I>(par_iter: I) -> Self 2786 where 2787 I: IntoParallelIterator<Item = T>; 2788 } 2789 2790 /// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`]. 2791 /// 2792 /// [`ParallelIterator`]: trait.ParallelIterator.html 2793 /// 2794 /// # Examples 2795 /// 2796 /// Implementing `ParallelExtend` for your type: 2797 /// 2798 /// ``` 2799 /// use rayon::prelude::*; 2800 /// use std::mem; 2801 /// 2802 /// struct BlackHole { 2803 /// mass: usize, 2804 /// } 2805 /// 2806 /// impl<T: Send> ParallelExtend<T> for BlackHole { 2807 /// fn par_extend<I>(&mut self, par_iter: I) 2808 /// where I: IntoParallelIterator<Item = T> 2809 /// { 2810 /// let par_iter = par_iter.into_par_iter(); 2811 /// self.mass += par_iter.count() * mem::size_of::<T>(); 2812 /// } 2813 /// } 2814 /// 2815 /// let mut bh = BlackHole { mass: 0 }; 2816 /// bh.par_extend(0i32..1000); 2817 /// assert_eq!(bh.mass, 4000); 2818 /// bh.par_extend(0i64..10); 2819 /// assert_eq!(bh.mass, 4080); 2820 /// ``` 2821 pub trait ParallelExtend<T> 2822 where 2823 T: Send, 2824 { 2825 /// Extends an instance of the collection with the elements drawn 2826 /// from the parallel iterator `par_iter`. 
2827 /// 2828 /// # Examples 2829 /// 2830 /// ``` 2831 /// use rayon::prelude::*; 2832 /// 2833 /// let mut vec = vec![]; 2834 /// vec.par_extend(0..5); 2835 /// vec.par_extend((0..5).into_par_iter().map(|i| i * i)); 2836 /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]); 2837 /// ``` par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = T>2838 fn par_extend<I>(&mut self, par_iter: I) 2839 where 2840 I: IntoParallelIterator<Item = T>; 2841 } 2842 2843 /// We hide the `Try` trait in a private module, as it's only meant to be a 2844 /// stable clone of the standard library's `Try` trait, as yet unstable. 2845 mod private { 2846 /// Clone of `std::ops::Try`. 2847 /// 2848 /// Implementing this trait is not permitted outside of `rayon`. 2849 pub trait Try { 2850 private_decl! {} 2851 2852 type Ok; 2853 type Error; into_result(self) -> Result<Self::Ok, Self::Error>2854 fn into_result(self) -> Result<Self::Ok, Self::Error>; from_ok(v: Self::Ok) -> Self2855 fn from_ok(v: Self::Ok) -> Self; from_error(v: Self::Error) -> Self2856 fn from_error(v: Self::Error) -> Self; 2857 } 2858 2859 impl<T> Try for Option<T> { 2860 private_impl! {} 2861 2862 type Ok = T; 2863 type Error = (); 2864 into_result(self) -> Result<T, ()>2865 fn into_result(self) -> Result<T, ()> { 2866 self.ok_or(()) 2867 } from_ok(v: T) -> Self2868 fn from_ok(v: T) -> Self { 2869 Some(v) 2870 } from_error(_: ()) -> Self2871 fn from_error(_: ()) -> Self { 2872 None 2873 } 2874 } 2875 2876 impl<T, E> Try for Result<T, E> { 2877 private_impl! {} 2878 2879 type Ok = T; 2880 type Error = E; 2881 into_result(self) -> Result<T, E>2882 fn into_result(self) -> Result<T, E> { 2883 self 2884 } from_ok(v: T) -> Self2885 fn from_ok(v: T) -> Self { 2886 Ok(v) 2887 } from_error(v: E) -> Self2888 fn from_error(v: E) -> Self { 2889 Err(v) 2890 } 2891 } 2892 } 2893