1 //! Traits for writing parallel programs using an iterator-style interface
3 //! You will rarely need to interact with this module directly unless you have
4 //! need to name one of the iterator types.
6 //! Parallel iterators make it easy to write iterator-like chains that
7 //! execute in parallel: typically all you have to do is convert the
8 //! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9 //! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10 //! example, to compute the sum of the squares of a sequence of
11 //! integers, one might write:
14 //! use rayon::prelude::*;
//! fn sum_of_squares(input: &[i32]) -> i32 {
//!     input.par_iter()
//!          .map(|&i| i * i)
//!          .sum()
//! }
22 //! Or, to increment all the integers in a slice, you could write:
25 //! use rayon::prelude::*;
//! fn increment_all(input: &mut [i32]) {
//!     input.par_iter_mut()
//!          .for_each(|p| *p += 1);
//! }
32 //! To use parallel iterators, first import the traits by adding
33 //! something like `use rayon::prelude::*` to your module. You can
34 //! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
//! parallel iterator. Like a [regular iterator][], parallel
//! iterators work by first constructing a computation and then
//! executing it.
39 //! In addition to `par_iter()` and friends, some types offer other
40 //! ways to create (or consume) parallel iterators:
42 //! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43 //! `par_windows`, as well as various parallel sorting
44 //! operations. See [the `ParallelSlice` trait] for the full list.
45 //! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46 //! See [the `ParallelString` trait] for the full list.
47 //! - Various collections offer [`par_extend`], which grows a
48 //! collection given a parallel iterator. (If you don't have a
49 //! collection to extend, you can use [`collect()`] to create a new
50 //! one from scratch.)
52 //! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
53 //! [the `ParallelString` trait]: ../str/trait.ParallelString.html
54 //! [`par_extend`]: trait.ParallelExtend.html
55 //! [`collect()`]: trait.ParallelIterator.html#method.collect
//! To see the full range of methods available on parallel iterators,
//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
//! traits.
61 //! If you'd like to build a custom parallel iterator, or to write your own
62 //! combinator, then check out the [split] function and the [plumbing] module.
64 //! [regular iterator]: http://doc.rust-lang.org/std/iter/trait.Iterator.html
65 //! [`ParallelIterator`]: trait.ParallelIterator.html
66 //! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
67 //! [split]: fn.split.html
68 //! [plumbing]: plumbing/index.html
70 //! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
71 //! has been deliberately obscured from the public API. This trait is intended
72 //! to mirror the unstable `std::ops::Try` with implementations for `Option` and
73 //! `Result`, where `Some`/`Ok` values will let those iterators continue, but
74 //! `None`/`Err` values will exit early.
76 //! A note about object safety: It is currently _not_ possible to wrap
77 //! a `ParallelIterator` (or any trait that depends on it) using a
78 //! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
79 //! because `ParallelIterator` is **not object-safe**.
80 //! (This keeps the implementation simpler and allows extra optimizations.)
82 use self::plumbing
::*;
83 use self::private
::Try
;
84 pub use either
::Either
;
85 use std
::cmp
::{self, Ordering}
;
86 use std
::iter
::{Product, Sum}
;
94 // There is a method to the madness here:
96 // - These modules are private but expose certain types to the end-user
97 // (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
98 // public API surface of the `ParallelIterator` traits.
99 // - In **this** module, those public types are always used unprefixed, which forces
100 // us to add a `pub use` and helps identify if we missed anything.
101 // - In contrast, items that appear **only** in the body of a method,
102 // e.g. `find::find()`, are always used **prefixed**, so that they
103 // can be readily distinguished.
124 mod interleave_shortest
;
156 empty
::{empty, Empty}
,
157 enumerate
::Enumerate
,
159 filter_map
::FilterMap
,
162 fold
::{Fold, FoldWith}
,
164 interleave
::Interleave
,
165 interleave_shortest
::InterleaveShortest
,
166 intersperse
::Intersperse
,
167 len
::{MaxLen, MinLen}
,
169 map_with
::{MapInit, MapWith}
,
172 panic_fuse
::PanicFuse
,
173 par_bridge
::{IterBridge, ParallelBridge}
,
174 repeat
::{repeat, repeatn, Repeat, RepeatN}
,
177 splitter
::{split, Split}
,
179 try_fold
::{TryFold, TryFoldWith}
,
181 while_some
::WhileSome
,
188 pub use self::step_by
::StepBy
;
190 /// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
192 /// By implementing `IntoParallelIterator` for a type, you define how it will
193 /// transformed into an iterator. This is a parallel version of the standard
194 /// library's [`std::iter::IntoIterator`] trait.
196 /// [`ParallelIterator`]: trait.ParallelIterator.html
197 /// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
198 pub trait IntoParallelIterator
{
199 /// The parallel iterator type that will be created.
200 type Iter
: ParallelIterator
<Item
= Self::Item
>;
202 /// The type of item that the parallel iterator will produce.
205 /// Converts `self` into a parallel iterator.
210 /// use rayon::prelude::*;
212 /// println!("counting in parallel:");
213 /// (0..100).into_par_iter()
214 /// .for_each(|i| println!("{}", i));
217 /// This conversion is often implicit for arguments to methods like [`zip`].
220 /// use rayon::prelude::*;
222 /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
223 /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
226 /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
227 fn into_par_iter(self) -> Self::Iter
;
230 /// `IntoParallelRefIterator` implements the conversion to a
231 /// [`ParallelIterator`], providing shared references to the data.
233 /// This is a parallel version of the `iter()` method
234 /// defined by various collections.
236 /// This trait is automatically implemented
237 /// `for I where &I: IntoParallelIterator`. In most cases, users
238 /// will want to implement [`IntoParallelIterator`] rather than implement
239 /// this trait directly.
241 /// [`ParallelIterator`]: trait.ParallelIterator.html
242 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
243 pub trait IntoParallelRefIterator
<'data
> {
244 /// The type of the parallel iterator that will be returned.
245 type Iter
: ParallelIterator
<Item
= Self::Item
>;
247 /// The type of item that the parallel iterator will produce.
248 /// This will typically be an `&'data T` reference type.
249 type Item
: Send
+ 'data
;
251 /// Converts `self` into a parallel iterator.
256 /// use rayon::prelude::*;
258 /// let v: Vec<_> = (0..100).collect();
259 /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
261 /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
262 /// // producing the exact same references.
263 /// assert!(v.par_iter().zip(&v)
264 /// .all(|(a, b)| std::ptr::eq(a, b)));
266 fn par_iter(&'data
self) -> Self::Iter
;
269 impl<'data
, I
: 'data
+ ?Sized
> IntoParallelRefIterator
<'data
> for I
271 &'data I
: IntoParallelIterator
,
273 type Iter
= <&'data I
as IntoParallelIterator
>::Iter
;
274 type Item
= <&'data I
as IntoParallelIterator
>::Item
;
276 fn par_iter(&'data
self) -> Self::Iter
{
281 /// `IntoParallelRefMutIterator` implements the conversion to a
282 /// [`ParallelIterator`], providing mutable references to the data.
284 /// This is a parallel version of the `iter_mut()` method
285 /// defined by various collections.
287 /// This trait is automatically implemented
288 /// `for I where &mut I: IntoParallelIterator`. In most cases, users
289 /// will want to implement [`IntoParallelIterator`] rather than implement
290 /// this trait directly.
292 /// [`ParallelIterator`]: trait.ParallelIterator.html
293 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
294 pub trait IntoParallelRefMutIterator
<'data
> {
295 /// The type of iterator that will be created.
296 type Iter
: ParallelIterator
<Item
= Self::Item
>;
298 /// The type of item that will be produced; this is typically an
299 /// `&'data mut T` reference.
300 type Item
: Send
+ 'data
;
302 /// Creates the parallel iterator from `self`.
307 /// use rayon::prelude::*;
309 /// let mut v = vec![0usize; 5];
310 /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
311 /// assert_eq!(v, [0, 1, 2, 3, 4]);
313 fn par_iter_mut(&'data
mut self) -> Self::Iter
;
316 impl<'data
, I
: 'data
+ ?Sized
> IntoParallelRefMutIterator
<'data
> for I
318 &'data
mut I
: IntoParallelIterator
,
320 type Iter
= <&'data
mut I
as IntoParallelIterator
>::Iter
;
321 type Item
= <&'data
mut I
as IntoParallelIterator
>::Item
;
323 fn par_iter_mut(&'data
mut self) -> Self::Iter
{
328 /// Parallel version of the standard iterator trait.
330 /// The combinators on this trait are available on **all** parallel
331 /// iterators. Additional methods can be found on the
332 /// [`IndexedParallelIterator`] trait: those methods are only
333 /// available for parallel iterators where the number of items is
334 /// known in advance (so, e.g., after invoking `filter`, those methods
335 /// become unavailable).
337 /// For examples of using parallel iterators, see [the docs on the
338 /// `iter` module][iter].
340 /// [iter]: index.html
341 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
342 pub trait ParallelIterator
: Sized
+ Send
{
343 /// The type of item that this parallel iterator produces.
344 /// For example, if you use the [`for_each`] method, this is the type of
345 /// item that your closure will be invoked with.
347 /// [`for_each`]: #method.for_each
350 /// Executes `OP` on each item produced by the iterator, in parallel.
355 /// use rayon::prelude::*;
357 /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
359 fn for_each
<OP
>(self, op
: OP
)
361 OP
: Fn(Self::Item
) + Sync
+ Send
,
363 for_each
::for_each(self, &op
)
366 /// Executes `OP` on the given `init` value with each item produced by
367 /// the iterator, in parallel.
369 /// The `init` value will be cloned only as needed to be paired with
370 /// the group of items in each rayon job. It does not require the type
376 /// use std::sync::mpsc::channel;
377 /// use rayon::prelude::*;
379 /// let (sender, receiver) = channel();
381 /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
383 /// let mut res: Vec<_> = receiver.iter().collect();
387 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
389 fn for_each_with
<OP
, T
>(self, init
: T
, op
: OP
)
391 OP
: Fn(&mut T
, Self::Item
) + Sync
+ Send
,
394 self.map_with(init
, op
).collect()
397 /// Executes `OP` on a value returned by `init` with each item produced by
398 /// the iterator, in parallel.
400 /// The `init` function will be called only as needed for a value to be
401 /// paired with the group of items in each rayon job. There is no
402 /// constraint on that returned type at all!
408 /// use rayon::prelude::*;
410 /// let mut v = vec![0u8; 1_000_000];
412 /// v.par_chunks_mut(1000)
414 /// || rand::thread_rng(),
415 /// |rng, chunk| rng.fill(chunk),
418 /// // There's a remote chance that this will fail...
419 /// for i in 0u8..=255 {
420 /// assert!(v.contains(&i));
423 fn for_each_init
<OP
, INIT
, T
>(self, init
: INIT
, op
: OP
)
425 OP
: Fn(&mut T
, Self::Item
) + Sync
+ Send
,
426 INIT
: Fn() -> T
+ Sync
+ Send
,
428 self.map_init(init
, op
).collect()
431 /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
433 /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
434 /// stop processing the rest of the items in the iterator as soon as
435 /// possible, and we will return that terminating value. Otherwise, we will
436 /// return an empty `Result::Ok(())` or `Option::Some(())`. If there are
437 /// multiple errors in parallel, it is not specified which will be returned.
442 /// use rayon::prelude::*;
443 /// use std::io::{self, Write};
445 /// // This will stop iteration early if there's any write error, like
446 /// // having piped output get closed on the other end.
447 /// (0..100).into_par_iter()
448 /// .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
449 /// .expect("expected no write errors");
451 fn try_for_each
<OP
, R
>(self, op
: OP
) -> R
453 OP
: Fn(Self::Item
) -> R
+ Sync
+ Send
,
454 R
: Try
<Ok
= ()> + Send
,
456 fn ok
<R
: Try
<Ok
= ()>>(_
: (), _
: ()) -> R
{
460 self.map(op
).try_reduce(<()>::default, ok
)
463 /// Executes a fallible `OP` on the given `init` value with each item
464 /// produced by the iterator, in parallel.
466 /// This combines the `init` semantics of [`for_each_with()`] and the
467 /// failure semantics of [`try_for_each()`].
469 /// [`for_each_with()`]: #method.for_each_with
470 /// [`try_for_each()`]: #method.try_for_each
475 /// use std::sync::mpsc::channel;
476 /// use rayon::prelude::*;
478 /// let (sender, receiver) = channel();
480 /// (0..5).into_par_iter()
481 /// .try_for_each_with(sender, |s, x| s.send(x))
482 /// .expect("expected no send errors");
484 /// let mut res: Vec<_> = receiver.iter().collect();
488 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
490 fn try_for_each_with
<OP
, T
, R
>(self, init
: T
, op
: OP
) -> R
492 OP
: Fn(&mut T
, Self::Item
) -> R
+ Sync
+ Send
,
494 R
: Try
<Ok
= ()> + Send
,
496 fn ok
<R
: Try
<Ok
= ()>>(_
: (), _
: ()) -> R
{
500 self.map_with(init
, op
).try_reduce(<()>::default, ok
)
503 /// Executes a fallible `OP` on a value returned by `init` with each item
504 /// produced by the iterator, in parallel.
506 /// This combines the `init` semantics of [`for_each_init()`] and the
507 /// failure semantics of [`try_for_each()`].
509 /// [`for_each_init()`]: #method.for_each_init
510 /// [`try_for_each()`]: #method.try_for_each
516 /// use rayon::prelude::*;
518 /// let mut v = vec![0u8; 1_000_000];
520 /// v.par_chunks_mut(1000)
521 /// .try_for_each_init(
522 /// || rand::thread_rng(),
523 /// |rng, chunk| rng.try_fill(chunk),
525 /// .expect("expected no rand errors");
527 /// // There's a remote chance that this will fail...
528 /// for i in 0u8..=255 {
529 /// assert!(v.contains(&i));
532 fn try_for_each_init
<OP
, INIT
, T
, R
>(self, init
: INIT
, op
: OP
) -> R
534 OP
: Fn(&mut T
, Self::Item
) -> R
+ Sync
+ Send
,
535 INIT
: Fn() -> T
+ Sync
+ Send
,
536 R
: Try
<Ok
= ()> + Send
,
538 fn ok
<R
: Try
<Ok
= ()>>(_
: (), _
: ()) -> R
{
542 self.map_init(init
, op
).try_reduce(<()>::default, ok
)
545 /// Counts the number of items in this parallel iterator.
550 /// use rayon::prelude::*;
552 /// let count = (0..100).into_par_iter().count();
554 /// assert_eq!(count, 100);
556 fn count(self) -> usize {
557 fn one
<T
>(_
: T
) -> usize {
564 /// Applies `map_op` to each item of this iterator, producing a new
565 /// iterator with the results.
570 /// use rayon::prelude::*;
572 /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
574 /// let doubles: Vec<_> = par_iter.collect();
576 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
578 fn map
<F
, R
>(self, map_op
: F
) -> Map
<Self, F
>
580 F
: Fn(Self::Item
) -> R
+ Sync
+ Send
,
583 Map
::new(self, map_op
)
586 /// Applies `map_op` to the given `init` value with each item of this
587 /// iterator, producing a new iterator with the results.
589 /// The `init` value will be cloned only as needed to be paired with
590 /// the group of items in each rayon job. It does not require the type
596 /// use std::sync::mpsc::channel;
597 /// use rayon::prelude::*;
599 /// let (sender, receiver) = channel();
601 /// let a: Vec<_> = (0..5)
602 /// .into_par_iter() // iterating over i32
603 /// .map_with(sender, |s, x| {
604 /// s.send(x).unwrap(); // sending i32 values through the channel
605 /// x // returning i32
607 /// .collect(); // collecting the returned values into a vector
609 /// let mut b: Vec<_> = receiver.iter() // iterating over the values in the channel
610 /// .collect(); // and collecting them
613 /// assert_eq!(a, b);
615 fn map_with
<F
, T
, R
>(self, init
: T
, map_op
: F
) -> MapWith
<Self, T
, F
>
617 F
: Fn(&mut T
, Self::Item
) -> R
+ Sync
+ Send
,
621 MapWith
::new(self, init
, map_op
)
624 /// Applies `map_op` to a value returned by `init` with each item of this
625 /// iterator, producing a new iterator with the results.
627 /// The `init` function will be called only as needed for a value to be
628 /// paired with the group of items in each rayon job. There is no
629 /// constraint on that returned type at all!
635 /// use rayon::prelude::*;
637 /// let a: Vec<_> = (1i32..1_000_000)
640 /// || rand::thread_rng(), // get the thread-local RNG
641 /// |rng, x| if rng.gen() { // randomly negate items
648 /// // There's a remote chance that this will fail...
649 /// assert!(a.iter().any(|&x| x < 0));
650 /// assert!(a.iter().any(|&x| x > 0));
652 fn map_init
<F
, INIT
, T
, R
>(self, init
: INIT
, map_op
: F
) -> MapInit
<Self, INIT
, F
>
654 F
: Fn(&mut T
, Self::Item
) -> R
+ Sync
+ Send
,
655 INIT
: Fn() -> T
+ Sync
+ Send
,
658 MapInit
::new(self, init
, map_op
)
661 /// Creates an iterator which clones all of its elements. This may be
662 /// useful when you have an iterator over `&T`, but you need `T`, and
663 /// that type implements `Clone`. See also [`copied()`].
665 /// [`copied()`]: #method.copied
670 /// use rayon::prelude::*;
672 /// let a = [1, 2, 3];
674 /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
676 /// // cloned is the same as .map(|&x| x), for integers
677 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
679 /// assert_eq!(v_cloned, vec![1, 2, 3]);
680 /// assert_eq!(v_map, vec![1, 2, 3]);
682 fn cloned
<'a
, T
>(self) -> Cloned
<Self>
684 T
: 'a
+ Clone
+ Send
,
685 Self: ParallelIterator
<Item
= &'a T
>,
690 /// Creates an iterator which copies all of its elements. This may be
691 /// useful when you have an iterator over `&T`, but you need `T`, and
692 /// that type implements `Copy`. See also [`cloned()`].
694 /// [`cloned()`]: #method.cloned
699 /// use rayon::prelude::*;
701 /// let a = [1, 2, 3];
703 /// let v_copied: Vec<_> = a.par_iter().copied().collect();
705 /// // copied is the same as .map(|&x| x), for integers
706 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
708 /// assert_eq!(v_copied, vec![1, 2, 3]);
709 /// assert_eq!(v_map, vec![1, 2, 3]);
711 fn copied
<'a
, T
>(self) -> Copied
<Self>
714 Self: ParallelIterator
<Item
= &'a T
>,
719 /// Applies `inspect_op` to a reference to each item of this iterator,
720 /// producing a new iterator passing through the original items. This is
721 /// often useful for debugging to see what's happening in iterator stages.
726 /// use rayon::prelude::*;
728 /// let a = [1, 4, 2, 3];
730 /// // this iterator sequence is complex.
731 /// let sum = a.par_iter()
733 /// .filter(|&x| x % 2 == 0)
734 /// .reduce(|| 0, |sum, i| sum + i);
736 /// println!("{}", sum);
738 /// // let's add some inspect() calls to investigate what's happening
739 /// let sum = a.par_iter()
741 /// .inspect(|x| println!("about to filter: {}", x))
742 /// .filter(|&x| x % 2 == 0)
743 /// .inspect(|x| println!("made it through filter: {}", x))
744 /// .reduce(|| 0, |sum, i| sum + i);
746 /// println!("{}", sum);
748 fn inspect
<OP
>(self, inspect_op
: OP
) -> Inspect
<Self, OP
>
750 OP
: Fn(&Self::Item
) + Sync
+ Send
,
752 Inspect
::new(self, inspect_op
)
755 /// Mutates each item of this iterator before yielding it.
760 /// use rayon::prelude::*;
762 /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
764 /// let doubles: Vec<_> = par_iter.collect();
766 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
768 fn update
<F
>(self, update_op
: F
) -> Update
<Self, F
>
770 F
: Fn(&mut Self::Item
) + Sync
+ Send
,
772 Update
::new(self, update_op
)
775 /// Applies `filter_op` to each item of this iterator, producing a new
776 /// iterator with only the items that gave `true` results.
781 /// use rayon::prelude::*;
783 /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
785 /// let even_numbers: Vec<_> = par_iter.collect();
787 /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
789 fn filter
<P
>(self, filter_op
: P
) -> Filter
<Self, P
>
791 P
: Fn(&Self::Item
) -> bool
+ Sync
+ Send
,
793 Filter
::new(self, filter_op
)
796 /// Applies `filter_op` to each item of this iterator to get an `Option`,
797 /// producing a new iterator with only the items from `Some` results.
802 /// use rayon::prelude::*;
804 /// let mut par_iter = (0..10).into_par_iter()
805 /// .filter_map(|x| {
806 /// if x % 2 == 0 { Some(x * 3) }
810 /// let even_numbers: Vec<_> = par_iter.collect();
812 /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
814 fn filter_map
<P
, R
>(self, filter_op
: P
) -> FilterMap
<Self, P
>
816 P
: Fn(Self::Item
) -> Option
<R
> + Sync
+ Send
,
819 FilterMap
::new(self, filter_op
)
822 /// Applies `map_op` to each item of this iterator to get nested iterators,
823 /// producing a new iterator that flattens these back into one.
828 /// use rayon::prelude::*;
830 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
832 /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
834 /// let vec: Vec<_> = par_iter.collect();
836 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
838 fn flat_map
<F
, PI
>(self, map_op
: F
) -> FlatMap
<Self, F
>
840 F
: Fn(Self::Item
) -> PI
+ Sync
+ Send
,
841 PI
: IntoParallelIterator
,
843 FlatMap
::new(self, map_op
)
846 /// An adaptor that flattens iterable `Item`s into one large iterator
851 /// use rayon::prelude::*;
853 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
854 /// let y: Vec<_> = x.into_par_iter().flatten().collect();
856 /// assert_eq!(y, vec![1, 2, 3, 4]);
858 fn flatten(self) -> Flatten
<Self>
860 Self::Item
: IntoParallelIterator
,
865 /// Reduces the items in the iterator into one item using `op`.
866 /// The argument `identity` should be a closure that can produce
867 /// "identity" value which may be inserted into the sequence as
868 /// needed to create opportunities for parallel execution. So, for
869 /// example, if you are doing a summation, then `identity()` ought
870 /// to produce something that represents the zero for your type
871 /// (but consider just calling `sum()` in that case).
876 /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
877 /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
878 /// // where the first/second elements are summed separately.
879 /// use rayon::prelude::*;
880 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
881 /// .par_iter() // iterating over &(i32, i32)
882 /// .cloned() // iterating over (i32, i32)
883 /// .reduce(|| (0, 0), // the "identity" is 0 in both columns
884 /// |a, b| (a.0 + b.0, a.1 + b.1));
885 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
888 /// **Note:** unlike a sequential `fold` operation, the order in
889 /// which `op` will be applied to reduce the result is not fully
890 /// specified. So `op` should be [associative] or else the results
891 /// will be non-deterministic. And of course `identity()` should
892 /// produce a true identity.
894 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
895 fn reduce
<OP
, ID
>(self, identity
: ID
, op
: OP
) -> Self::Item
897 OP
: Fn(Self::Item
, Self::Item
) -> Self::Item
+ Sync
+ Send
,
898 ID
: Fn() -> Self::Item
+ Sync
+ Send
,
900 reduce
::reduce(self, identity
, op
)
903 /// Reduces the items in the iterator into one item using `op`.
904 /// If the iterator is empty, `None` is returned; otherwise,
905 /// `Some` is returned.
907 /// This version of `reduce` is simple but somewhat less
908 /// efficient. If possible, it is better to call `reduce()`, which
909 /// requires an identity element.
914 /// use rayon::prelude::*;
915 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
916 /// .par_iter() // iterating over &(i32, i32)
917 /// .cloned() // iterating over (i32, i32)
918 /// .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
920 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
923 /// **Note:** unlike a sequential `fold` operation, the order in
924 /// which `op` will be applied to reduce the result is not fully
925 /// specified. So `op` should be [associative] or else the results
926 /// will be non-deterministic.
928 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
929 fn reduce_with
<OP
>(self, op
: OP
) -> Option
<Self::Item
>
931 OP
: Fn(Self::Item
, Self::Item
) -> Self::Item
+ Sync
+ Send
,
933 fn opt_fold
<T
>(op
: impl Fn(T
, T
) -> T
) -> impl Fn(Option
<T
>, T
) -> Option
<T
> {
934 move |opt_a
, b
| match opt_a
{
935 Some(a
) => Some(op(a
, b
)),
940 fn opt_reduce
<T
>(op
: impl Fn(T
, T
) -> T
) -> impl Fn(Option
<T
>, Option
<T
>) -> Option
<T
> {
941 move |opt_a
, opt_b
| match (opt_a
, opt_b
) {
942 (Some(a
), Some(b
)) => Some(op(a
, b
)),
943 (Some(v
), None
) | (None
, Some(v
)) => Some(v
),
944 (None
, None
) => None
,
948 self.fold(<_
>::default, opt_fold(&op
))
949 .reduce(<_
>::default, opt_reduce(&op
))
952 /// Reduces the items in the iterator into one item using a fallible `op`.
953 /// The `identity` argument is used the same way as in [`reduce()`].
955 /// [`reduce()`]: #method.reduce
957 /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
958 /// to one, we will attempt to stop processing the rest of the items in the
959 /// iterator as soon as possible, and we will return that terminating value.
960 /// Otherwise, we will return the final reduced `Result::Ok(T)` or
961 /// `Option::Some(T)`. If there are multiple errors in parallel, it is not
962 /// specified which will be returned.
967 /// use rayon::prelude::*;
969 /// // Compute the sum of squares, being careful about overflow.
970 /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
971 /// iter.into_par_iter()
972 /// .map(|i| i.checked_mul(i)) // square each item,
973 /// .try_reduce(|| 0, i32::checked_add) // and add them up!
975 /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
977 /// // The sum might overflow
978 /// assert_eq!(sum_squares(0..10_000), None);
980 /// // Or the squares might overflow before it even reaches `try_reduce`
981 /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
983 fn try_reduce
<T
, OP
, ID
>(self, identity
: ID
, op
: OP
) -> Self::Item
985 OP
: Fn(T
, T
) -> Self::Item
+ Sync
+ Send
,
986 ID
: Fn() -> T
+ Sync
+ Send
,
987 Self::Item
: Try
<Ok
= T
>,
989 try_reduce
::try_reduce(self, identity
, op
)
992 /// Reduces the items in the iterator into one item using a fallible `op`.
994 /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
995 /// otherwise, `Some` is returned. Beyond that, it behaves like
996 /// [`try_reduce()`] for handling `Err`/`None`.
998 /// [`reduce_with()`]: #method.reduce_with
999 /// [`try_reduce()`]: #method.try_reduce
1001 /// For instance, with `Option` items, the return value may be:
1002 /// - `None`, the iterator was empty
1003 /// - `Some(None)`, we stopped after encountering `None`.
1004 /// - `Some(Some(x))`, the entire iterator reduced to `x`.
1006 /// With `Result` items, the nesting is more obvious:
1007 /// - `None`, the iterator was empty
1008 /// - `Some(Err(e))`, we stopped after encountering an error `e`.
1009 /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
1014 /// use rayon::prelude::*;
1016 /// let files = ["/dev/null", "/does/not/exist"];
1018 /// // Find the biggest file
1019 /// files.into_par_iter()
1020 /// .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
1021 /// .try_reduce_with(|a, b| {
1022 /// Ok(if a.1 >= b.1 { a } else { b })
1024 /// .expect("Some value, since the iterator is not empty")
1025 /// .expect_err("not found");
1027 fn try_reduce_with
<T
, OP
>(self, op
: OP
) -> Option
<Self::Item
>
1029 OP
: Fn(T
, T
) -> Self::Item
+ Sync
+ Send
,
1030 Self::Item
: Try
<Ok
= T
>,
1032 try_reduce_with
::try_reduce_with(self, op
)
1035 /// Parallel fold is similar to sequential fold except that the
1036 /// sequence of items may be subdivided before it is
1037 /// folded. Consider a list of numbers like `22 3 77 89 46`. If
1038 /// you used sequential fold to add them (`fold(0, |a,b| a+b)`,
1039 /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
1040 /// 77, and so forth. The **parallel fold** works similarly except
1041 /// that it first breaks up your list into sublists, and hence
1042 /// instead of yielding up a single sum at the end, it yields up
1043 /// multiple sums. The number of results is nondeterministic, as
1044 /// is the point where the breaks occur.
1046 /// So if did the same parallel fold (`fold(0, |a,b| a+b)`) on
1047 /// our example list, we might wind up with a sequence of two numbers,
1056 /// Or perhaps these three numbers:
1064 /// In general, Rayon will attempt to find good breaking points
1065 /// that keep all of your cores busy.
1067 /// ### Fold versus reduce
1069 /// The `fold()` and `reduce()` methods each take an identity element
1070 /// and a combining function, but they operate rather differently.
1072 /// `reduce()` requires that the identity function has the same
1073 /// type as the things you are iterating over, and it fully
1074 /// reduces the list of items into a single item. So, for example,
1075 /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1076 /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1077 /// u8| a + b)`, we would get an overflow. This is because `0`,
1078 /// `a`, and `b` here are all bytes, just like the numbers in the
1079 /// list (I wrote the types explicitly above, but those are the
1080 /// only types you can use). To avoid the overflow, we would need
1081 /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1082 /// b| a + b)`, in which case our result would be `256`.
1084 /// In contrast, with `fold()`, the identity function does not
1085 /// have to have the same type as the things you are iterating
1086 /// over, and you potentially get back many results. So, if we
1087 /// continue with the `bytes` example from the previous paragraph,
1088 /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1089 /// convert our bytes into `u32`. And of course we might not get
1090 /// back a single sum.
1092 /// There is a more subtle distinction as well, though it's
1093 /// actually implied by the above points. When you use `reduce()`,
1094 /// your reduction function is sometimes called with values that
1095 /// were never part of your original parallel iterator (for
1096 /// example, both the left and right might be a partial sum). With
1097 /// `fold()`, in contrast, the left value in the fold function is
1098 /// always the accumulator, and the right value is always from
1099 /// your original sequence.
1101 /// ### Fold vs Map/Reduce
1103 /// Fold makes sense if you have some operation where it is
1104 /// cheaper to create groups of elements at a time. For example,
1105 /// imagine collecting characters into a string. If you were going
1106 /// to use map/reduce, you might try this:
1109 /// use rayon::prelude::*;
1112 /// ['a', 'b', 'c', 'd', 'e']
1114 /// .map(|c: &char| format!("{}", c))
1115 /// .reduce(|| String::new(),
1116 /// |mut a: String, b: String| { a.push_str(&b); a });
1118 /// assert_eq!(s, "abcde");
1121 /// Because reduce produces the same type of element as its input,
1122 /// you have to first map each character into a string, and then
1123 /// you can reduce them. This means we create one string per
1124 /// element in our iterator -- not so great. Using `fold`, we can
1125 /// do this instead:
1128 /// use rayon::prelude::*;
1131 /// ['a', 'b', 'c', 'd', 'e']
1133 /// .fold(|| String::new(),
1134 /// |mut s: String, c: &char| { s.push(*c); s })
1135 /// .reduce(|| String::new(),
1136 /// |mut a: String, b: String| { a.push_str(&b); a });
1138 /// assert_eq!(s, "abcde");
1141 /// Now `fold` will process groups of our characters at a time,
1142 /// and we only make one string per group. We should wind up with
1143 /// some small-ish number of strings roughly proportional to the
1144 /// number of CPUs you have (it will ultimately depend on how busy
1145 /// your processors are). Note that we still need to do a reduce
1146 /// afterwards to combine those groups of strings into a single
1149 /// You could use a similar trick to save partial results (e.g., a
1150 /// cache) or something similar.
1152 /// ### Combining fold with other operations
1154 /// You can combine `fold` with `reduce` if you want to produce a
1155 /// single value. This is then roughly equivalent to a map/reduce
1156 /// combination in effect:
1159 /// use rayon::prelude::*;
1161 /// let bytes = 0..22_u8;
1162 /// let sum = bytes.into_par_iter()
1163 /// .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1166 /// assert_eq!(sum, (0..22).sum()); // compare to sequential
    fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
    where
        F: Fn(T, Self::Item) -> T + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        T: Send,
    {
        // Lazy adaptor: `identity` seeds a fresh accumulator for each parallel
        // split and `fold_op` folds that split's items into it, so the result
        // is an iterator of per-group accumulators (see the docs above).
        Fold::new(self, identity, fold_op)
    }
1177 /// Applies `fold_op` to the given `init` value with each item of this
1178 /// iterator, finally producing the value for further use.
1180 /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1181 /// it doesn't require the `init` type to be `Sync`, nor any other form
1182 /// of added synchronization.
1187 /// use rayon::prelude::*;
1189 /// let bytes = 0..22_u8;
1190 /// let sum = bytes.into_par_iter()
1191 /// .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1194 /// assert_eq!(sum, (0..22).sum()); // compare to sequential
    fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
    where
        F: Fn(T, Self::Item) -> T + Sync + Send,
        T: Send + Clone,
    {
        // Like `fold()`, but each split clones `init` instead of calling an
        // identity closure — hence `T: Clone` rather than a `Sync` factory.
        FoldWith::new(self, init, fold_op)
    }
1204 /// Performs a fallible parallel fold.
1206 /// This is a variation of [`fold()`] for operations which can fail with
1207 /// `Option::None` or `Result::Err`. The first such failure stops
1208 /// processing the local set of items, without affecting other folds in the
1209 /// iterator's subdivisions.
1211 /// Often, `try_fold()` will be followed by [`try_reduce()`]
1212 /// for a final reduction and global short-circuiting effect.
1214 /// [`fold()`]: #method.fold
1215 /// [`try_reduce()`]: #method.try_reduce
1220 /// use rayon::prelude::*;
1222 /// let bytes = 0..22_u8;
1223 /// let sum = bytes.into_par_iter()
1224 /// .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1225 /// .try_reduce(|| 0, u32::checked_add);
1227 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
    fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
    where
        F: Fn(T, Self::Item) -> R + Sync + Send,
        ID: Fn() -> T + Sync + Send,
        R: Try<Ok = T> + Send,
    {
        // Fallible variant of `fold()`: `fold_op` returns an `R: Try` (e.g.
        // `Option`/`Result`), and a failure stops only the local split's fold.
        TryFold::new(self, identity, fold_op)
    }
1238 /// Performs a fallible parallel fold with a cloneable `init` value.
1240 /// This combines the `init` semantics of [`fold_with()`] and the failure
1241 /// semantics of [`try_fold()`].
1243 /// [`fold_with()`]: #method.fold_with
1244 /// [`try_fold()`]: #method.try_fold
1247 /// use rayon::prelude::*;
1249 /// let bytes = 0..22_u8;
1250 /// let sum = bytes.into_par_iter()
1251 /// .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1252 /// .try_reduce(|| 0, u32::checked_add);
1254 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
    fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
    where
        F: Fn(T, Self::Item) -> R + Sync + Send,
        R: Try<Ok = T> + Send,
        T: Clone + Send,
    {
        // Combines `fold_with()`'s cloneable-init seeding with `try_fold()`'s
        // short-circuiting failure semantics.
        TryFoldWith::new(self, init, fold_op)
    }
1265 /// Sums up the items in the iterator.
1267 /// Note that the order in which items will be reduced is not specified,
1268 /// so if the `+` operator is not truly [associative] \(as is the
1269 /// case for floating point numbers), then the results are not
1270 /// fully deterministic.
1272 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1274 /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1275 /// except that the type of `0` and the `+` operation may vary
1276 /// depending on the type of value being produced.
1281 /// use rayon::prelude::*;
1283 /// let a = [1, 5, 7];
1285 /// let sum: i32 = a.par_iter().sum();
1287 /// assert_eq!(sum, 13);
    fn sum<S>(self) -> S
    where
        S: Send + Sum<Self::Item> + Sum<S>,
    {
        // `S: Sum<Self::Item>` sums the items; `S: Sum<S>` then combines the
        // partial sums produced by parallel splits.
        sum::sum(self)
    }
1296 /// Multiplies all the items in the iterator.
1299 /// Note that the order in which items will be reduced is not specified,
1299 /// so if the `*` operator is not truly [associative] \(as is the
1300 /// case for floating point numbers), then the results are not
1301 /// fully deterministic.
1303 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1305 /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1306 /// except that the type of `1` and the `*` operation may vary
1307 /// depending on the type of value being produced.
1312 /// use rayon::prelude::*;
1314 /// fn factorial(n: u32) -> u32 {
1315 /// (1..n+1).into_par_iter().product()
1318 /// assert_eq!(factorial(0), 1);
1319 /// assert_eq!(factorial(1), 1);
1320 /// assert_eq!(factorial(5), 120);
    fn product<P>(self) -> P
    where
        P: Send + Product<Self::Item> + Product<P>,
    {
        // Mirrors `sum()`: `Product<Self::Item>` multiplies items,
        // `Product<P>` combines the per-split partial products.
        product::product(self)
    }
1329 /// Computes the minimum of all the items in the iterator. If the
1330 /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1333 /// Note that the order in which the items will be reduced is not
1334 /// specified, so if the `Ord` impl is not truly associative, then
1335 /// the results are not deterministic.
1337 /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`.
1342 /// use rayon::prelude::*;
1344 /// let a = [45, 74, 32];
1346 /// assert_eq!(a.par_iter().min(), Some(&32));
1348 /// let b: [i32; 0] = [];
1350 /// assert_eq!(b.par_iter().min(), None);
    fn min(self) -> Option<Self::Item>
    where
        Self::Item: Ord,
    {
        // Pairwise reduction with `cmp::min`; yields `None` for an empty iterator.
        self.reduce_with(cmp::min)
    }
1359 /// Computes the minimum of all the items in the iterator with respect to
1360 /// the given comparison function. If the iterator is empty, `None` is
1361 /// returned; otherwise, `Some(min)` is returned.
1363 /// Note that the order in which the items will be reduced is not
1364 /// specified, so if the comparison function is not associative, then
1365 /// the results are not deterministic.
1370 /// use rayon::prelude::*;
1372 /// let a = [-3_i32, 77, 53, 240, -1];
1374 /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
    fn min_by<F>(self, f: F) -> Option<Self::Item>
    where
        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
    {
        // Turn the comparator into a binary "keep the smaller" reducer.
        // On `Equal` the left operand wins (`_ => a`), matching `cmp::min`.
        fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
            move |a, b| match f(&a, &b) {
                Ordering::Greater => b,
                _ => a,
            }
        }

        self.reduce_with(min(f))
    }
1390 /// Computes the item that yields the minimum value for the given
1391 /// function. If the iterator is empty, `None` is returned;
1392 /// otherwise, `Some(item)` is returned.
1394 /// Note that the order in which the items will be reduced is not
1395 /// specified, so if the `Ord` impl is not truly associative, then
1396 /// the results are not deterministic.
1401 /// use rayon::prelude::*;
1403 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1405 /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
    fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
    where
        K: Ord + Send,
        F: Sync + Send + Fn(&Self::Item) -> K,
    {
        // Pair each item with its key once, so `f` runs once per item rather
        // than once per comparison.
        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
            move |x| (f(&x), x)
        }

        // Keep the pair with the smaller key; ties keep the left operand.
        fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
            match (a.0).cmp(&b.0) {
                Ordering::Greater => b,
                _ => a,
            }
        }

        // `?` propagates the empty-iterator case as `None`.
        let (_, x) = self.map(key(f)).reduce_with(min_key)?;
        Some(x)
    }
1427 /// Computes the maximum of all the items in the iterator. If the
1428 /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1431 /// Note that the order in which the items will be reduced is not
1432 /// specified, so if the `Ord` impl is not truly associative, then
1433 /// the results are not deterministic.
1435 /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`.
1440 /// use rayon::prelude::*;
1442 /// let a = [45, 74, 32];
1444 /// assert_eq!(a.par_iter().max(), Some(&74));
1446 /// let b: [i32; 0] = [];
1448 /// assert_eq!(b.par_iter().max(), None);
    fn max(self) -> Option<Self::Item>
    where
        Self::Item: Ord,
    {
        // Pairwise reduction with `cmp::max`; yields `None` for an empty iterator.
        self.reduce_with(cmp::max)
    }
1457 /// Computes the maximum of all the items in the iterator with respect to
1458 /// the given comparison function. If the iterator is empty, `None` is
1459 /// returned; otherwise, `Some(min)` is returned.
1461 /// Note that the order in which the items will be reduced is not
1462 /// specified, so if the comparison function is not associative, then
1463 /// the results are not deterministic.
1468 /// use rayon::prelude::*;
1470 /// let a = [-3_i32, 77, 53, 240, -1];
1472 /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
    fn max_by<F>(self, f: F) -> Option<Self::Item>
    where
        F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
    {
        // Turn the comparator into a binary "keep the larger" reducer.
        // On `Equal` the right operand wins (`_ => b`), matching `cmp::max`.
        fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
            move |a, b| match f(&a, &b) {
                Ordering::Greater => a,
                _ => b,
            }
        }

        self.reduce_with(max(f))
    }
1488 /// Computes the item that yields the maximum value for the given
1489 /// function. If the iterator is empty, `None` is returned;
1490 /// otherwise, `Some(item)` is returned.
1492 /// Note that the order in which the items will be reduced is not
1493 /// specified, so if the `Ord` impl is not truly associative, then
1494 /// the results are not deterministic.
1499 /// use rayon::prelude::*;
1501 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1503 /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
    fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
    where
        K: Ord + Send,
        F: Sync + Send + Fn(&Self::Item) -> K,
    {
        // Pair each item with its key once, so `f` runs once per item rather
        // than once per comparison.
        fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
            move |x| (f(&x), x)
        }

        // Keep the pair with the larger key; ties keep the right operand.
        fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
            match (a.0).cmp(&b.0) {
                Ordering::Greater => a,
                _ => b,
            }
        }

        // `?` propagates the empty-iterator case as `None`.
        let (_, x) = self.map(key(f)).reduce_with(max_key)?;
        Some(x)
    }
1525 /// Takes two iterators and creates a new iterator over both.
1530 /// use rayon::prelude::*;
1532 /// let a = [0, 1, 2];
1533 /// let b = [9, 8, 7];
1535 /// let par_iter = a.par_iter().chain(b.par_iter());
1537 /// let chained: Vec<_> = par_iter.cloned().collect();
1539 /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
    fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
    where
        C: IntoParallelIterator<Item = Self::Item>,
    {
        // Lazy adaptor yielding all of `self` followed by all of `chain`.
        Chain::new(self, chain.into_par_iter())
    }
1548 /// Searches for **some** item in the parallel iterator that
1549 /// matches the given predicate and returns it. This operation
1550 /// is similar to [`find` on sequential iterators][find] but
1551 /// the item returned may not be the **first** one in the parallel
1552 /// sequence which matches, since we search the entire sequence in parallel.
1554 /// Once a match is found, we will attempt to stop processing
1555 /// the rest of the items in the iterator as soon as possible
1556 /// (just as `find` stops iterating once a match is found).
1558 /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
1563 /// use rayon::prelude::*;
1565 /// let a = [1, 2, 3, 3];
1567 /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1569 /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
    fn find_any<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Returns *some* match (not necessarily the first) and tries to stop
        // the remaining parallel work once one is found.
        find::find(self, predicate)
    }
1578 /// Searches for the sequentially **first** item in the parallel iterator
1579 /// that matches the given predicate and returns it.
1581 /// Once a match is found, all attempts to the right of the match
1582 /// will be stopped, while attempts to the left must continue in case
1583 /// an earlier match is found.
1585 /// Note that not all parallel iterators have a useful order, much like
1586 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1587 /// just want the first match that is discovered anywhere in the iterator,
1588 /// `find_any` is a better choice.
1593 /// use rayon::prelude::*;
1595 /// let a = [1, 2, 3, 3];
1597 /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1599 /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
    fn find_first<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Sequentially-first match: work to the right of a match is cancelled,
        // work to the left continues in case an earlier match exists.
        find_first_last::find_first(self, predicate)
    }
1608 /// Searches for the sequentially **last** item in the parallel iterator
1609 /// that matches the given predicate and returns it.
1611 /// Once a match is found, all attempts to the left of the match
1612 /// will be stopped, while attempts to the right must continue in case
1613 /// a later match is found.
1615 /// Note that not all parallel iterators have a useful order, much like
1616 /// sequential `HashMap` iteration, so "last" may be nebulous. When the
1617 /// order doesn't actually matter to you, `find_any` is a better choice.
1622 /// use rayon::prelude::*;
1624 /// let a = [1, 2, 3, 3];
1626 /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1628 /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
    fn find_last<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Sequentially-last match: mirror image of `find_first`.
        find_first_last::find_last(self, predicate)
    }
1637 /// Applies the given predicate to the items in the parallel iterator
1638 /// and returns **any** non-None result of the map operation.
1640 /// Once a non-None value is produced from the map operation, we will
1641 /// attempt to stop processing the rest of the items in the iterator
1642 /// as soon as possible.
1644 /// Note that this method only returns **some** item in the parallel
1645 /// iterator that is not None from the map predicate. The item returned
1646 /// may not be the **first** non-None value produced in the parallel
1647 /// sequence, since the entire sequence is mapped over in parallel.
1652 /// use rayon::prelude::*;
1654 /// let c = ["lol", "NaN", "5", "5"];
1656 /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1658 /// assert_eq!(found_number, Some(5));
    fn find_map_any<P, R>(self, predicate: P) -> Option<R>
    where
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
        R: Send,
    {
        // `filter_map` drops the `None`s, so any surviving item is a match —
        // the trivial `yes` predicate lets `find_any` take the first survivor.
        fn yes<T>(_: &T) -> bool {
            true
        }
        self.filter_map(predicate).find_any(yes)
    }
1671 /// Applies the given predicate to the items in the parallel iterator and
1672 /// returns the sequentially **first** non-None result of the map operation.
1674 /// Once a non-None value is produced from the map operation, all attempts
1675 /// to the right of the match will be stopped, while attempts to the left
1676 /// must continue in case an earlier match is found.
1678 /// Note that not all parallel iterators have a useful order, much like
1679 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1680 /// just want the first non-None value discovered anywhere in the iterator,
1681 /// `find_map_any` is a better choice.
1686 /// use rayon::prelude::*;
1688 /// let c = ["lol", "NaN", "2", "5"];
1690 /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1692 /// assert_eq!(first_number, Some(2));
    fn find_map_first<P, R>(self, predicate: P) -> Option<R>
    where
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
        R: Send,
    {
        // Same trick as `find_map_any`, but ordered via `find_first`.
        fn yes<T>(_: &T) -> bool {
            true
        }
        self.filter_map(predicate).find_first(yes)
    }
1705 /// Applies the given predicate to the items in the parallel iterator and
1706 /// returns the sequentially **last** non-None result of the map operation.
1708 /// Once a non-None value is produced from the map operation, all attempts
1709 /// to the left of the match will be stopped, while attempts to the right
1710 /// must continue in case a later match is found.
1712 /// Note that not all parallel iterators have a useful order, much like
1713 /// sequential `HashMap` iteration, so "last" may be nebulous. If you
1714 /// just want any non-None value discovered anywhere in the iterator,
1715 /// `find_map_any` is a better choice.
1720 /// use rayon::prelude::*;
1722 /// let c = ["lol", "NaN", "2", "5"];
1724 /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
1726 /// assert_eq!(last_number, Some(5));
    fn find_map_last<P, R>(self, predicate: P) -> Option<R>
    where
        P: Fn(Self::Item) -> Option<R> + Sync + Send,
        R: Send,
    {
        // Same trick as `find_map_any`, but ordered via `find_last`.
        fn yes<T>(_: &T) -> bool {
            true
        }
        self.filter_map(predicate).find_last(yes)
    }
    /// Deprecated alias for [`find_any()`]: parallel search has no inherent
    /// order, so the name `find` was misleading.
    #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \
                         `find_first`, or `find_last`")]
    fn find<P>(self, predicate: P) -> Option<Self::Item>
    where
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        self.find_any(predicate)
    }
1749 /// Searches for **some** item in the parallel iterator that
1750 /// matches the given predicate, and if so returns true. Once
1751 /// a match is found, we'll attempt to stop process the rest
1752 /// of the items. Proving that there's no match, returning false,
1753 /// does require visiting every item.
1758 /// use rayon::prelude::*;
1760 /// let a = [0, 12, 3, 4, 0, 23, 0];
1762 /// let is_valid = a.par_iter().any(|&x| x > 10);
1764 /// assert!(is_valid);
    fn any<P>(self, predicate: P) -> bool
    where
        P: Fn(Self::Item) -> bool + Sync + Send,
    {
        // Map each item to its predicate result, then short-circuit on the
        // first `true` (`bool::clone` reads the mapped value by reference).
        self.map(predicate).find_any(bool::clone).is_some()
    }
1773 /// Tests that every item in the parallel iterator matches the given
1774 /// predicate, and if so returns true. If a counter-example is found,
1775 /// we'll attempt to stop processing more items, then return false.
1780 /// use rayon::prelude::*;
1782 /// let a = [0, 12, 3, 4, 0, 23, 0];
1784 /// let is_valid = a.par_iter().all(|&x| x > 10);
1786 /// assert!(!is_valid);
    fn all<P>(self, predicate: P) -> bool
    where
        P: Fn(Self::Item) -> bool + Sync + Send,
    {
        // Dual of `any`: short-circuit on the first `false`; all items pass
        // iff no counter-example is found.
        fn is_false(x: &bool) -> bool {
            !x
        }
        self.map(predicate).find_any(is_false).is_none()
    }
1800 /// Creates an iterator over the `Some` items of this iterator, halting
1801 /// as soon as any `None` is found.
1806 /// use rayon::prelude::*;
1807 /// use std::sync::atomic::{AtomicUsize, Ordering};
1809 /// let counter = AtomicUsize::new(0);
1810 /// let value = (0_i32..2048)
1811 /// .into_par_iter()
1813 /// counter.fetch_add(1, Ordering::SeqCst);
1814 /// if x < 1024 { Some(x) } else { None }
1819 /// assert!(value < Some(1024));
1820 /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
    fn while_some<T>(self) -> WhileSome<Self>
    where
        Self: ParallelIterator<Item = Option<T>>,
        T: Send,
    {
        // Lazy adaptor: yields unwrapped `Some` values and halts all splits
        // as soon as any `None` is observed.
        WhileSome::new(self)
    }
1830 /// Wraps an iterator with a fuse in case of panics, to halt all threads
1831 /// as soon as possible.
1833 /// Panics within parallel iterators are always propagated to the caller,
1834 /// but they don't always halt the rest of the iterator right away, due to
1835 /// the internal semantics of [`join`]. This adaptor makes a greater effort
1836 /// to stop processing other items sooner, with the cost of additional
1837 /// synchronization overhead, which may also inhibit some optimizations.
1839 /// [`join`]: ../fn.join.html#panics
1843 /// If this code didn't use `panic_fuse()`, it would continue processing
1844 /// many more items in other threads (with long sleep delays) before the
1845 /// panic is finally propagated.
1848 /// use rayon::prelude::*;
1849 /// use std::{thread, time};
1852 /// .into_par_iter()
1855 /// // simulate some work
1856 /// thread::sleep(time::Duration::from_secs(1));
1857 /// assert!(i > 0); // oops!
    fn panic_fuse(self) -> PanicFuse<Self> {
        // Adds cross-thread cancellation on panic, trading extra
        // synchronization for faster propagation (see docs above).
        PanicFuse::new(self)
    }
1864 /// Creates a fresh collection containing all the elements produced
1865 /// by this parallel iterator.
1867 /// You may prefer to use `collect_into_vec()`, which allocates more
1868 /// efficiently with precise knowledge of how many elements the
1869 /// iterator contains, and even allows you to reuse an existing
1870 /// vector's backing store rather than allocating a fresh vector.
1875 /// use rayon::prelude::*;
1877 /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
1879 /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
1881 /// assert_eq!(sync_vec, async_vec);
    fn collect<C>(self) -> C
    where
        C: FromParallelIterator<Self::Item>,
    {
        // Delegates to the target collection's parallel constructor,
        // mirroring sequential `Iterator::collect`/`FromIterator`.
        C::from_par_iter(self)
    }
1890 /// Unzips the items of a parallel iterator into a pair of arbitrary
1891 /// `ParallelExtend` containers.
1893 /// You may prefer to use `unzip_into_vecs()`, which allocates more
1894 /// efficiently with precise knowledge of how many elements the
1895 /// iterator contains, and even allows you to reuse existing
1896 /// vectors' backing stores rather than allocating fresh vectors.
1901 /// use rayon::prelude::*;
1903 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
1905 /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
1907 /// assert_eq!(left, [0, 1, 2, 3]);
1908 /// assert_eq!(right, [1, 2, 3, 4]);
1911 /// Nested pairs can be unzipped too.
1914 /// use rayon::prelude::*;
1916 /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
1917 /// .map(|i| (i, (i * i, i * i * i)))
1920 /// assert_eq!(values, [0, 1, 2, 3]);
1921 /// assert_eq!(squares, [0, 1, 4, 9]);
1922 /// assert_eq!(cubes, [0, 1, 8, 27]);
    fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
    where
        Self: ParallelIterator<Item = (A, B)>,
        FromA: Default + Send + ParallelExtend<A>,
        FromB: Default + Send + ParallelExtend<B>,
        A: Send,
        B: Send,
    {
        // Splits `(A, B)` pairs into two `Default`-constructed containers,
        // each grown via `ParallelExtend`.
        unzip::unzip(self)
    }
1935 /// Partitions the items of a parallel iterator into a pair of arbitrary
1936 /// `ParallelExtend` containers. Items for which the `predicate` returns
1937 /// true go into the first container, and the rest go into the second.
1939 /// Note: unlike the standard `Iterator::partition`, this allows distinct
1940 /// collection types for the left and right items. This is more flexible,
1941 /// but may require new type annotations when converting sequential code
1942 /// that used type inference assuming the two were the same.
1947 /// use rayon::prelude::*;
1949 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
1951 /// assert_eq!(left, [0, 2, 4, 6]);
1952 /// assert_eq!(right, [1, 3, 5, 7]);
    fn partition<A, B, P>(self, predicate: P) -> (A, B)
    where
        A: Default + Send + ParallelExtend<Self::Item>,
        B: Default + Send + ParallelExtend<Self::Item>,
        P: Fn(&Self::Item) -> bool + Sync + Send,
    {
        // Items matching `predicate` go to `A`, the rest to `B`; unlike
        // `Iterator::partition`, the two containers may differ in type.
        unzip::partition(self, predicate)
    }
1963 /// Partitions and maps the items of a parallel iterator into a pair of
1964 /// arbitrary `ParallelExtend` containers. `Either::Left` items go into
1965 /// the first container, and `Either::Right` items go into the second.
1970 /// use rayon::prelude::*;
1971 /// use rayon::iter::Either;
1973 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
1974 /// .partition_map(|x| {
1976 /// Either::Left(x * 4)
1978 /// Either::Right(x * 3)
1982 /// assert_eq!(left, [0, 8, 16, 24]);
1983 /// assert_eq!(right, [3, 9, 15, 21]);
1986 /// Nested `Either` enums can be split as well.
1989 /// use rayon::prelude::*;
1990 /// use rayon::iter::Either::*;
1992 /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
1993 /// .into_par_iter()
1994 /// .partition_map(|x| match (x % 3, x % 5) {
1995 /// (0, 0) => Left(Left(x)),
1996 /// (0, _) => Left(Right(x)),
1997 /// (_, 0) => Right(Left(x)),
1998 /// (_, _) => Right(Right(x)),
2001 /// assert_eq!(fizzbuzz, [15]);
2002 /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
2003 /// assert_eq!(buzz, [5, 10]);
2004 /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
    fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
    where
        A: Default + Send + ParallelExtend<L>,
        B: Default + Send + ParallelExtend<R>,
        P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
        L: Send,
        R: Send,
    {
        // `Either::Left` payloads extend `A`, `Either::Right` payloads extend
        // `B` — a mapping partition rather than a boolean one.
        unzip::partition_map(self, predicate)
    }
2017 /// Intersperses clones of an element between items of this iterator.
2022 /// use rayon::prelude::*;
2024 /// let x = vec![1, 2, 3];
2025 /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
2027 /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
    fn intersperse(self, element: Self::Item) -> Intersperse<Self>
    where
        Self::Item: Clone,
    {
        // Lazy adaptor that clones `element` between consecutive items.
        Intersperse::new(self, element)
    }
2036 /// Internal method used to define the behavior of this parallel
2037 /// iterator. You should not need to call this directly.
2039 /// This method causes the iterator `self` to start producing
2040 /// items and to feed them to the consumer `consumer` one by one.
2041 /// It may split the consumer before doing so to create the
2042 /// opportunity to produce in parallel.
2044 /// See the [README] for more details on the internals of parallel
2047 /// [README]: README.md
    // Required method (no default body): each concrete iterator implements
    // how it splits itself and feeds items to `consumer` in parallel.
    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>;
2052 /// Internal method used to define the behavior of this parallel
2053 /// iterator. You should not need to call this directly.
2055 /// Returns the number of items produced by this iterator, if known
2056 /// statically. This can be used by consumers to trigger special fast
2057 /// paths. Therefore, if `Some(_)` is returned, this iterator must only
2058 /// use the (indexed) `Consumer` methods when driving a consumer, such
2059 /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
2060 /// other `UnindexedConsumer` methods -- or returning an inaccurate
2061 /// value -- may result in panics.
2063 /// This method is currently used to optimize `collect` for want
2064 /// of true Rust specialization; it may be removed when
2065 /// specialization is stable.
    fn opt_len(&self) -> Option<usize> {
        // Default: length unknown. Indexed iterators override this, and per
        // the contract above must then drive consumers with indexed methods.
        None
    }
/// Blanket impl: every `ParallelIterator` trivially converts into itself,
/// so `into_par_iter()` can be called uniformly on iterators and collections.
impl<T: ParallelIterator> IntoParallelIterator for T {
    type Iter = T;
    type Item = T::Item;

    fn into_par_iter(self) -> T {
        self
    }
}
2080 /// An iterator that supports "random access" to its data, meaning
2081 /// that you can split it at arbitrary indices and draw data from
2084 /// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
2085 pub trait IndexedParallelIterator
: ParallelIterator
{
2086 /// Collects the results of the iterator into the specified
2087 /// vector. The vector is always truncated before execution
2088 /// begins. If possible, reusing the vector across calls can lead
2089 /// to better performance since it reuses the same backing buffer.
2094 /// use rayon::prelude::*;
2096 /// // any prior data will be truncated
2097 /// let mut vec = vec![-1, -2, -3];
2099 /// (0..5).into_par_iter()
2100 /// .collect_into_vec(&mut vec);
2102 /// assert_eq!(vec, [0, 1, 2, 3, 4]);
    fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
        // Truncates `target` and refills it, reusing its backing buffer —
        // cheaper than `collect()` when a vector can be recycled.
        collect::collect_into_vec(self, target);
    }
2108 /// Unzips the results of the iterator into the specified
2109 /// vectors. The vectors are always truncated before execution
2110 /// begins. If possible, reusing the vectors across calls can lead
2111 /// to better performance since they reuse the same backing buffer.
2116 /// use rayon::prelude::*;
2118 /// // any prior data will be truncated
2119 /// let mut left = vec![42; 10];
2120 /// let mut right = vec![-1; 10];
2122 /// (10..15).into_par_iter()
2124 /// .unzip_into_vecs(&mut left, &mut right);
2126 /// assert_eq!(left, [0, 1, 2, 3, 4]);
2127 /// assert_eq!(right, [10, 11, 12, 13, 14]);
    fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
    where
        Self: IndexedParallelIterator<Item = (A, B)>,
        A: Send,
        B: Send,
    {
        // Buffer-reusing counterpart of `unzip()`; both vectors are
        // truncated before being refilled.
        collect::unzip_into_vecs(self, left, right);
    }
2138 /// Iterates over tuples `(A, B)`, where the items `A` are from
2139 /// this iterator and `B` are from the iterator given as argument.
2140 /// Like the `zip` method on ordinary iterators, if the two
2141 /// iterators are of unequal length, you only get the items they
2147 /// use rayon::prelude::*;
2149 /// let result: Vec<_> = (1..4)
2150 /// .into_par_iter()
2151 /// .zip(vec!['a', 'b', 'c'])
2154 /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
    fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
    where
        Z: IntoParallelIterator,
        Z::Iter: IndexedParallelIterator,
    {
        // Pairs items positionally; like sequential `zip`, stops at the
        // shorter of the two iterators.
        Zip::new(self, zip_op.into_par_iter())
    }
2164 /// The same as `Zip`, but requires that both iterators have the same length.
2167 /// Will panic if `self` and `zip_op` are not the same length.
2170 /// use rayon::prelude::*;
2172 /// let one = [1u8];
2173 /// let two = [2u8, 2];
2174 /// let one_iter = one.par_iter();
2175 /// let two_iter = two.par_iter();
2177 /// // this will panic
2178 /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2180 /// // we should never get here
2181 /// assert_eq!(1, zipped.len());
    fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
    where
        Z: IntoParallelIterator,
        Z::Iter: IndexedParallelIterator,
    {
        let zip_op_iter = zip_op.into_par_iter();
        // Strict variant of `zip`: panics up front on a length mismatch
        // instead of silently truncating.
        assert_eq!(self.len(), zip_op_iter.len());
        ZipEq::new(self, zip_op_iter)
    }
2193 /// Interleaves elements of this iterator and the other given
2194 /// iterator. Alternately yields elements from this iterator and
2195 /// the given iterator, until both are exhausted. If one iterator
2196 /// is exhausted before the other, the last elements are provided
2202 /// use rayon::prelude::*;
2203 /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2204 /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2205 /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
    fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
    where
        I: IntoParallelIterator<Item = Self::Item>,
        I::Iter: IndexedParallelIterator<Item = Self::Item>,
    {
        // Alternates items from `self` and `other`; once one side is
        // exhausted the remainder of the other is yielded as-is.
        Interleave::new(self, other.into_par_iter())
    }
2215 /// Interleaves elements of this iterator and the other given
2216 /// iterator, until one is exhausted.
2221 /// use rayon::prelude::*;
2222 /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2223 /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2224 /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
    fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
    where
        I: IntoParallelIterator<Item = Self::Item>,
        I::Iter: IndexedParallelIterator<Item = Self::Item>,
    {
        // Like `interleave`, but stops as soon as either side runs out.
        InterleaveShortest::new(self, other.into_par_iter())
    }
2234 /// Splits an iterator up into fixed-size chunks.
2236 /// Returns an iterator that returns `Vec`s of the given number of elements.
2237 /// If the number of elements in the iterator is not divisible by `chunk_size`,
2238 /// the last chunk may be shorter than `chunk_size`.
2240 /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2241 /// slices, without having to allocate intermediate `Vec`s for the chunks.
2243 /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks
2244 /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut
2249 /// use rayon::prelude::*;
2250 /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2251 /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2252 /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
    fn chunks(self, chunk_size: usize) -> Chunks<Self> {
        // A zero chunk size would never make progress, so reject it eagerly.
        assert!(chunk_size != 0, "chunk_size must not be zero");
        Chunks::new(self, chunk_size)
    }
2259 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2265 /// use rayon::prelude::*;
2266 /// use std::cmp::Ordering::*;
2268 /// let x = vec![1, 2, 3];
2269 /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2270 /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2271 /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2273 fn cmp
<I
>(self, other
: I
) -> Ordering
2275 I
: IntoParallelIterator
<Item
= Self::Item
>,
2276 I
::Iter
: IndexedParallelIterator
,
2280 fn ordering
<T
: Ord
>((x
, y
): (T
, T
)) -> Ordering
{
2285 fn inequal(&ord
: &Ordering
) -> bool
{
2286 ord
!= Ordering
::Equal
2289 let other
= other
.into_par_iter();
2290 let ord_len
= self.len().cmp(&other
.len());
2293 .find_first(inequal
)
2297 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2303 /// use rayon::prelude::*;
2304 /// use std::cmp::Ordering::*;
2305 /// use std::f64::NAN;
2307 /// let x = vec![1.0, 2.0, 3.0];
2308 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2309 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2310 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2311 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
2313 fn partial_cmp
<I
>(self, other
: I
) -> Option
<Ordering
>
2315 I
: IntoParallelIterator
,
2316 I
::Iter
: IndexedParallelIterator
,
2317 Self::Item
: PartialOrd
<I
::Item
>,
2320 fn ordering
<T
: PartialOrd
<U
>, U
>((x
, y
): (T
, U
)) -> Option
<Ordering
> {
2321 PartialOrd
::partial_cmp(&x
, &y
)
2325 fn inequal(&ord
: &Option
<Ordering
>) -> bool
{
2326 ord
!= Some(Ordering
::Equal
)
2329 let other
= other
.into_par_iter();
2330 let ord_len
= self.len().cmp(&other
.len());
2333 .find_first(inequal
)
2334 .unwrap_or(Some(ord_len
))
2337 /// Determines if the elements of this `ParallelIterator`
2338 /// are equal to those of another
2339 fn eq
<I
>(self, other
: I
) -> bool
2341 I
: IntoParallelIterator
,
2342 I
::Iter
: IndexedParallelIterator
,
2343 Self::Item
: PartialEq
<I
::Item
>,
2346 fn eq
<T
: PartialEq
<U
>, U
>((x
, y
): (T
, U
)) -> bool
{
2347 PartialEq
::eq(&x
, &y
)
2350 let other
= other
.into_par_iter();
2351 self.len() == other
.len() && self.zip(other
).all(eq
)
2354 /// Determines if the elements of this `ParallelIterator`
2355 /// are unequal to those of another
2356 fn ne
<I
>(self, other
: I
) -> bool
2358 I
: IntoParallelIterator
,
2359 I
::Iter
: IndexedParallelIterator
,
2360 Self::Item
: PartialEq
<I
::Item
>,
2365 /// Determines if the elements of this `ParallelIterator`
2366 /// are lexicographically less than those of another.
2367 fn lt
<I
>(self, other
: I
) -> bool
2369 I
: IntoParallelIterator
,
2370 I
::Iter
: IndexedParallelIterator
,
2371 Self::Item
: PartialOrd
<I
::Item
>,
2373 self.partial_cmp(other
) == Some(Ordering
::Less
)
2376 /// Determines if the elements of this `ParallelIterator`
2377 /// are less or equal to those of another.
2378 fn le
<I
>(self, other
: I
) -> bool
2380 I
: IntoParallelIterator
,
2381 I
::Iter
: IndexedParallelIterator
,
2382 Self::Item
: PartialOrd
<I
::Item
>,
2384 let ord
= self.partial_cmp(other
);
2385 ord
== Some(Ordering
::Equal
) || ord
== Some(Ordering
::Less
)
2388 /// Determines if the elements of this `ParallelIterator`
2389 /// are lexicographically greater than those of another.
2390 fn gt
<I
>(self, other
: I
) -> bool
2392 I
: IntoParallelIterator
,
2393 I
::Iter
: IndexedParallelIterator
,
2394 Self::Item
: PartialOrd
<I
::Item
>,
2396 self.partial_cmp(other
) == Some(Ordering
::Greater
)
2399 /// Determines if the elements of this `ParallelIterator`
2400 /// are less or equal to those of another.
2401 fn ge
<I
>(self, other
: I
) -> bool
2403 I
: IntoParallelIterator
,
2404 I
::Iter
: IndexedParallelIterator
,
2405 Self::Item
: PartialOrd
<I
::Item
>,
2407 let ord
= self.partial_cmp(other
);
2408 ord
== Some(Ordering
::Equal
) || ord
== Some(Ordering
::Greater
)
2411 /// Yields an index along with each item.
2416 /// use rayon::prelude::*;
2418 /// let chars = vec!['a', 'b', 'c'];
2419 /// let result: Vec<_> = chars
2420 /// .into_par_iter()
2424 /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2426 fn enumerate(self) -> Enumerate
<Self> {
2427 Enumerate
::new(self)
2430 /// Creates an iterator that steps by the given amount
2435 ///use rayon::prelude::*;
2437 /// let range = (3..10);
2438 /// let result: Vec<i32> = range
2439 /// .into_par_iter()
2443 /// assert_eq!(result, [3, 6, 9])
2448 /// This method is only available on Rust 1.38 or greater.
2450 fn step_by(self, step
: usize) -> StepBy
<Self> {
2451 StepBy
::new(self, step
)
2454 /// Creates an iterator that skips the first `n` elements.
2459 /// use rayon::prelude::*;
2461 /// let result: Vec<_> = (0..100)
2462 /// .into_par_iter()
2466 /// assert_eq!(result, [95, 96, 97, 98, 99]);
2468 fn skip(self, n
: usize) -> Skip
<Self> {
2472 /// Creates an iterator that yields the first `n` elements.
2477 /// use rayon::prelude::*;
2479 /// let result: Vec<_> = (0..100)
2480 /// .into_par_iter()
2484 /// assert_eq!(result, [0, 1, 2, 3, 4]);
2486 fn take(self, n
: usize) -> Take
<Self> {
2490 /// Searches for **some** item in the parallel iterator that
2491 /// matches the given predicate, and returns its index. Like
2492 /// `ParallelIterator::find_any`, the parallel search will not
2493 /// necessarily find the **first** match, and once a match is
2494 /// found we'll attempt to stop processing any more.
2499 /// use rayon::prelude::*;
2501 /// let a = [1, 2, 3, 3];
2503 /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
2504 /// assert!(i == 2 || i == 3);
2506 /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
2508 fn position_any
<P
>(self, predicate
: P
) -> Option
<usize>
2510 P
: Fn(Self::Item
) -> bool
+ Sync
+ Send
,
2513 fn check(&(_
, p
): &(usize, bool
)) -> bool
{
2517 let (i
, _
) = self.map(predicate
).enumerate().find_any(check
)?
;
2521 /// Searches for the sequentially **first** item in the parallel iterator
2522 /// that matches the given predicate, and returns its index.
2524 /// Like `ParallelIterator::find_first`, once a match is found,
2525 /// all attempts to the right of the match will be stopped, while
2526 /// attempts to the left must continue in case an earlier match
2529 /// Note that not all parallel iterators have a useful order, much like
2530 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
2531 /// just want the first match that discovered anywhere in the iterator,
2532 /// `position_any` is a better choice.
2537 /// use rayon::prelude::*;
2539 /// let a = [1, 2, 3, 3];
2541 /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
2543 /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
2545 fn position_first
<P
>(self, predicate
: P
) -> Option
<usize>
2547 P
: Fn(Self::Item
) -> bool
+ Sync
+ Send
,
2550 fn check(&(_
, p
): &(usize, bool
)) -> bool
{
2554 let (i
, _
) = self.map(predicate
).enumerate().find_first(check
)?
;
2558 /// Searches for the sequentially **last** item in the parallel iterator
2559 /// that matches the given predicate, and returns its index.
2561 /// Like `ParallelIterator::find_last`, once a match is found,
2562 /// all attempts to the left of the match will be stopped, while
2563 /// attempts to the right must continue in case a later match
2566 /// Note that not all parallel iterators have a useful order, much like
2567 /// sequential `HashMap` iteration, so "last" may be nebulous. When the
2568 /// order doesn't actually matter to you, `position_any` is a better
2574 /// use rayon::prelude::*;
2576 /// let a = [1, 2, 3, 3];
2578 /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
2580 /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
2582 fn position_last
<P
>(self, predicate
: P
) -> Option
<usize>
2584 P
: Fn(Self::Item
) -> bool
+ Sync
+ Send
,
2587 fn check(&(_
, p
): &(usize, bool
)) -> bool
{
2591 let (i
, _
) = self.map(predicate
).enumerate().find_last(check
)?
;
2597 note
= "parallel `position` does not search in order -- use `position_any`, \\
2598 `position_first`, or `position_last`"
2600 fn position
<P
>(self, predicate
: P
) -> Option
<usize>
2602 P
: Fn(Self::Item
) -> bool
+ Sync
+ Send
,
2604 self.position_any(predicate
)
2607 /// Produces a new iterator with the elements of this iterator in
2613 /// use rayon::prelude::*;
2615 /// let result: Vec<_> = (0..5)
2616 /// .into_par_iter()
2620 /// assert_eq!(result, [4, 3, 2, 1, 0]);
2622 fn rev(self) -> Rev
<Self> {
2626 /// Sets the minimum length of iterators desired to process in each
2627 /// thread. Rayon will not split any smaller than this length, but
2628 /// of course an iterator could already be smaller to begin with.
2630 /// Producers like `zip` and `interleave` will use greater of the two
2632 /// Chained iterators and iterators inside `flat_map` may each use
2633 /// their own minimum length.
2638 /// use rayon::prelude::*;
2640 /// let min = (0..1_000_000)
2641 /// .into_par_iter()
2642 /// .with_min_len(1234)
2643 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
2644 /// .min().unwrap();
2646 /// assert!(min >= 1234);
2648 fn with_min_len(self, min
: usize) -> MinLen
<Self> {
2649 MinLen
::new(self, min
)
2652 /// Sets the maximum length of iterators desired to process in each
2653 /// thread. Rayon will try to split at least below this length,
2654 /// unless that would put it below the length from `with_min_len()`.
2655 /// For example, given min=10 and max=15, a length of 16 will not be
2656 /// split any further.
2658 /// Producers like `zip` and `interleave` will use lesser of the two
2660 /// Chained iterators and iterators inside `flat_map` may each use
2661 /// their own maximum length.
2666 /// use rayon::prelude::*;
2668 /// let max = (0..1_000_000)
2669 /// .into_par_iter()
2670 /// .with_max_len(1234)
2671 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
2672 /// .max().unwrap();
2674 /// assert!(max <= 1234);
2676 fn with_max_len(self, max
: usize) -> MaxLen
<Self> {
2677 MaxLen
::new(self, max
)
2680 /// Produces an exact count of how many items this iterator will
2681 /// produce, presuming no panic occurs.
2686 /// use rayon::prelude::*;
2688 /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
2689 /// assert_eq!(par_iter.len(), 10);
2691 /// let vec: Vec<_> = par_iter.collect();
2692 /// assert_eq!(vec.len(), 10);
2694 fn len(&self) -> usize;
2696 /// Internal method used to define the behavior of this parallel
2697 /// iterator. You should not need to call this directly.
2699 /// This method causes the iterator `self` to start producing
2700 /// items and to feed them to the consumer `consumer` one by one.
2701 /// It may split the consumer before doing so to create the
2702 /// opportunity to produce in parallel. If a split does happen, it
2703 /// will inform the consumer of the index where the split should
2704 /// occur (unlike `ParallelIterator::drive_unindexed()`).
2706 /// See the [README] for more details on the internals of parallel
2709 /// [README]: README.md
2710 fn drive
<C
: Consumer
<Self::Item
>>(self, consumer
: C
) -> C
::Result
;
2712 /// Internal method used to define the behavior of this parallel
2713 /// iterator. You should not need to call this directly.
2715 /// This method converts the iterator into a producer P and then
2716 /// invokes `callback.callback()` with P. Note that the type of
2717 /// this producer is not defined as part of the API, since
2718 /// `callback` must be defined generically for all producers. This
2719 /// allows the producer type to contain references; it also means
2720 /// that parallel iterators can adjust that type without causing a
2721 /// breaking change.
2723 /// See the [README] for more details on the internals of parallel
2726 /// [README]: README.md
2727 fn with_producer
<CB
: ProducerCallback
<Self::Item
>>(self, callback
: CB
) -> CB
::Output
;
2730 /// `FromParallelIterator` implements the creation of a collection
2731 /// from a [`ParallelIterator`]. By implementing
2732 /// `FromParallelIterator` for a given type, you define how it will be
2733 /// created from an iterator.
2735 /// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
2737 /// [`ParallelIterator`]: trait.ParallelIterator.html
2738 /// [`collect()`]: trait.ParallelIterator.html#method.collect
2742 /// Implementing `FromParallelIterator` for your type:
2745 /// use rayon::prelude::*;
2748 /// struct BlackHole {
2752 /// impl<T: Send> FromParallelIterator<T> for BlackHole {
2753 /// fn from_par_iter<I>(par_iter: I) -> Self
2754 /// where I: IntoParallelIterator<Item = T>
2756 /// let par_iter = par_iter.into_par_iter();
2758 /// mass: par_iter.count() * mem::size_of::<T>(),
2763 /// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
2764 /// assert_eq!(bh.mass, 4000);
2766 pub trait FromParallelIterator
<T
>
2770 /// Creates an instance of the collection from the parallel iterator `par_iter`.
2772 /// If your collection is not naturally parallel, the easiest (and
2773 /// fastest) way to do this is often to collect `par_iter` into a
2774 /// [`LinkedList`] or other intermediate data structure and then
2775 /// sequentially extend your collection. However, a more 'native'
2776 /// technique is to use the [`par_iter.fold`] or
2777 /// [`par_iter.fold_with`] methods to create the collection.
2778 /// Alternatively, if your collection is 'natively' parallel, you
2779 /// can use `par_iter.for_each` to process each element in turn.
2781 /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html
2782 /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold
2783 /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with
2784 /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each
2785 fn from_par_iter
<I
>(par_iter
: I
) -> Self
2787 I
: IntoParallelIterator
<Item
= T
>;
2790 /// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
2792 /// [`ParallelIterator`]: trait.ParallelIterator.html
2796 /// Implementing `ParallelExtend` for your type:
2799 /// use rayon::prelude::*;
2802 /// struct BlackHole {
2806 /// impl<T: Send> ParallelExtend<T> for BlackHole {
2807 /// fn par_extend<I>(&mut self, par_iter: I)
2808 /// where I: IntoParallelIterator<Item = T>
2810 /// let par_iter = par_iter.into_par_iter();
2811 /// self.mass += par_iter.count() * mem::size_of::<T>();
2815 /// let mut bh = BlackHole { mass: 0 };
2816 /// bh.par_extend(0i32..1000);
2817 /// assert_eq!(bh.mass, 4000);
2818 /// bh.par_extend(0i64..10);
2819 /// assert_eq!(bh.mass, 4080);
2821 pub trait ParallelExtend
<T
>
2825 /// Extends an instance of the collection with the elements drawn
2826 /// from the parallel iterator `par_iter`.
2831 /// use rayon::prelude::*;
2833 /// let mut vec = vec![];
2834 /// vec.par_extend(0..5);
2835 /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
2836 /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
2838 fn par_extend
<I
>(&mut self, par_iter
: I
)
2840 I
: IntoParallelIterator
<Item
= T
>;
2843 /// We hide the `Try` trait in a private module, as it's only meant to be a
2844 /// stable clone of the standard library's `Try` trait, as yet unstable.
2846 /// Clone of `std::ops::Try`.
2848 /// Implementing this trait is not permitted outside of `rayon`.
2854 fn into_result(self) -> Result
<Self::Ok
, Self::Error
>;
2855 fn from_ok(v
: Self::Ok
) -> Self;
2856 fn from_error(v
: Self::Error
) -> Self;
2859 impl<T
> Try
for Option
<T
> {
2865 fn into_result(self) -> Result
<T
, ()> {
2868 fn from_ok(v
: T
) -> Self {
2871 fn from_error(_
: ()) -> Self {
2876 impl<T
, E
> Try
for Result
<T
, E
> {
2882 fn into_result(self) -> Result
<T
, E
> {
2885 fn from_ok(v
: T
) -> Self {
2888 fn from_error(v
: E
) -> Self {