1 //! Traits for writing parallel programs using an iterator-style interface
2 //!
3 //! You will rarely need to interact with this module directly unless you
4 //! need to name one of the iterator types.
5 //!
6 //! Parallel iterators make it easy to write iterator-like chains that
7 //! execute in parallel: typically all you have to do is convert the
8 //! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9 //! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10 //! example, to compute the sum of the squares of a sequence of
11 //! integers, one might write:
12 //!
13 //! ```rust
14 //! use rayon::prelude::*;
15 //! fn sum_of_squares(input: &[i32]) -> i32 {
16 //! input.par_iter()
17 //! .map(|i| i * i)
18 //! .sum()
19 //! }
20 //! ```
21 //!
22 //! Or, to increment all the integers in a slice, you could write:
23 //!
24 //! ```rust
25 //! use rayon::prelude::*;
26 //! fn increment_all(input: &mut [i32]) {
27 //! input.par_iter_mut()
28 //! .for_each(|p| *p += 1);
29 //! }
30 //! ```
31 //!
32 //! To use parallel iterators, first import the traits by adding
33 //! something like `use rayon::prelude::*` to your module. You can
34 //! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35 //! parallel iterator. Like a [regular iterator][], parallel
36 //! iterators work by first constructing a computation and then
37 //! executing it.
38 //!
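//! For instance, a minimal sketch of that point: nothing runs until the
//! chain is consumed by a method like `collect()` or `sum()`:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! let v = vec![1, 2, 3];
//! // Building the chain only describes the computation; no work happens yet.
//! let doubled = v.par_iter().map(|x| x * 2);
//! // The work is executed here, when the result is consumed.
//! let out: Vec<i32> = doubled.collect();
//! assert_eq!(out, [2, 4, 6]);
//! ```
//!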
39 //! In addition to `par_iter()` and friends, some types offer other
40 //! ways to create (or consume) parallel iterators:
41 //!
42 //! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43 //! `par_windows`, as well as various parallel sorting
44 //! operations. See [the `ParallelSlice` trait] for the full list.
45 //! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46 //! See [the `ParallelString` trait] for the full list.
47 //! - Various collections offer [`par_extend`], which grows a
48 //! collection given a parallel iterator. (If you don't have a
49 //! collection to extend, you can use [`collect()`] to create a new
50 //! one from scratch.)
51 //!
52 //! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
53 //! [the `ParallelString` trait]: ../str/trait.ParallelString.html
54 //! [`par_extend`]: trait.ParallelExtend.html
55 //! [`collect()`]: trait.ParallelIterator.html#method.collect
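//!
//! For example, a minimal sketch of the `par_extend` and `collect()` points above:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // Build a new collection from a parallel iterator with `collect()`...
//! let evens: Vec<i32> = (0..10).into_par_iter().filter(|x| x % 2 == 0).collect();
//!
//! // ...or grow an existing collection with `par_extend`.
//! let mut v = vec![-2];
//! v.par_extend(evens.par_iter().cloned());
//! assert_eq!(v, [-2, 0, 2, 4, 6, 8]);
//! ```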
56 //!
57 //! To see the full range of methods available on parallel iterators,
58 //! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
59 //! traits.
60 //!
61 //! If you'd like to build a custom parallel iterator, or to write your own
62 //! combinator, then check out the [split] function and the [plumbing] module.
63 //!
64 //! [regular iterator]: http://doc.rust-lang.org/std/iter/trait.Iterator.html
65 //! [`ParallelIterator`]: trait.ParallelIterator.html
66 //! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
67 //! [split]: fn.split.html
68 //! [plumbing]: plumbing/index.html
69 //!
70 //! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
71 //! has been deliberately obscured from the public API. This trait is intended
72 //! to mirror the unstable `std::ops::Try` with implementations for `Option` and
73 //! `Result`, where `Some`/`Ok` values will let those iterators continue, but
74 //! `None`/`Err` values will exit early.
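//!
//! For instance, a minimal sketch of that early-exit behavior with `Option`:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // Every call returns `Some(())`, so the whole call succeeds.
//! let ok = (0..10).into_par_iter().try_for_each(|_| Some(()));
//! assert_eq!(ok, Some(()));
//!
//! // A single `None` makes the whole call return `None`, and rayon tries to
//! // skip the remaining items.
//! let halted = (0..10).into_par_iter()
//!     .try_for_each(|x| if x < 5 { Some(()) } else { None });
//! assert_eq!(halted, None);
//! ```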
75
76 use self::plumbing::*;
77 use self::private::Try;
78 pub use either::Either;
79 use std::cmp::{self, Ordering};
80 use std::iter::{Product, Sum};
81 use std::ops::Fn;
82
83 // There is a method to the madness here:
84 //
85 // - Most of these modules are private but expose certain types to the end-user
86 // (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
87 // public API surface of the `ParallelIterator` traits.
88 // - In **this** module, those public types are always used unprefixed, which forces
89 // us to add a `pub use` and helps identify if we missed anything.
90 // - In contrast, items that appear **only** in the body of a method,
91 // e.g. `find::find()`, are always used **prefixed**, so that they
92 // can be readily distinguished.
93
94 mod par_bridge;
95 pub use self::par_bridge::{IterBridge, ParallelBridge};
96
97 mod chain;
98 mod find;
99 mod find_first_last;
100 pub use self::chain::Chain;
101 mod chunks;
102 pub use self::chunks::Chunks;
103 mod collect;
104 mod enumerate;
105 pub use self::enumerate::Enumerate;
106 mod filter;
107 pub use self::filter::Filter;
108 mod filter_map;
109 pub use self::filter_map::FilterMap;
110 mod flat_map;
111 pub use self::flat_map::FlatMap;
112 mod flatten;
113 pub use self::flatten::Flatten;
114 mod fold;
115 mod for_each;
116 mod from_par_iter;
117 pub mod plumbing;
118 pub use self::fold::{Fold, FoldWith};
119 mod try_fold;
120 pub use self::try_fold::{TryFold, TryFoldWith};
121 mod reduce;
122 mod skip;
123 mod try_reduce;
124 mod try_reduce_with;
125 pub use self::skip::Skip;
126 mod splitter;
127 pub use self::splitter::{split, Split};
128 mod take;
129 pub use self::take::Take;
130 mod map;
131 pub use self::map::Map;
132 mod map_with;
133 pub use self::map_with::{MapInit, MapWith};
134 mod zip;
135 pub use self::zip::Zip;
136 mod zip_eq;
137 pub use self::zip_eq::ZipEq;
138 mod interleave;
139 pub use self::interleave::Interleave;
140 mod interleave_shortest;
141 pub use self::interleave_shortest::InterleaveShortest;
142 mod intersperse;
143 pub use self::intersperse::Intersperse;
144 mod update;
145 pub use self::update::Update;
146
147 mod noop;
148 mod rev;
149 pub use self::rev::Rev;
150 mod len;
151 pub use self::len::{MaxLen, MinLen};
152
153 mod cloned;
154 pub use self::cloned::Cloned;
155 mod copied;
156 pub use self::copied::Copied;
157
158 mod product;
159 mod sum;
160
161 mod inspect;
162 pub use self::inspect::Inspect;
163 mod panic_fuse;
164 pub use self::panic_fuse::PanicFuse;
165 mod while_some;
166 pub use self::while_some::WhileSome;
167 mod extend;
168 mod repeat;
169 mod unzip;
170 pub use self::repeat::{repeat, Repeat};
171 pub use self::repeat::{repeatn, RepeatN};
172
173 mod empty;
174 pub use self::empty::{empty, Empty};
175 mod once;
176 pub use self::once::{once, Once};
177
178 #[cfg(test)]
179 mod test;
180
181 /// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
182 ///
183 /// By implementing `IntoParallelIterator` for a type, you define how it will be
184 /// converted into a parallel iterator. This is a parallel version of the standard
185 /// library's [`std::iter::IntoIterator`] trait.
186 ///
187 /// [`ParallelIterator`]: trait.ParallelIterator.html
188 /// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
189 pub trait IntoParallelIterator {
190 /// The parallel iterator type that will be created.
191 type Iter: ParallelIterator<Item = Self::Item>;
192
193 /// The type of item that the parallel iterator will produce.
194 type Item: Send;
195
196 /// Converts `self` into a parallel iterator.
197 ///
198 /// # Examples
199 ///
200 /// ```
201 /// use rayon::prelude::*;
202 ///
203 /// println!("counting in parallel:");
204 /// (0..100).into_par_iter()
205 /// .for_each(|i| println!("{}", i));
206 /// ```
207 ///
208 /// This conversion is often implicit for arguments to methods like [`zip`].
209 ///
210 /// ```
211 /// use rayon::prelude::*;
212 ///
213 /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
214 /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
215 /// ```
216 ///
217 /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
218 fn into_par_iter(self) -> Self::Iter;
219 }
220
221 /// `IntoParallelRefIterator` implements the conversion to a
222 /// [`ParallelIterator`], providing shared references to the data.
223 ///
224 /// This is a parallel version of the `iter()` method
225 /// defined by various collections.
226 ///
227 /// This trait is automatically implemented
228 /// `for I where &I: IntoParallelIterator`. In most cases, users
229 /// will want to implement [`IntoParallelIterator`] rather than implement
230 /// this trait directly.
231 ///
232 /// [`ParallelIterator`]: trait.ParallelIterator.html
233 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
234 pub trait IntoParallelRefIterator<'data> {
235 /// The type of the parallel iterator that will be returned.
236 type Iter: ParallelIterator<Item = Self::Item>;
237
238 /// The type of item that the parallel iterator will produce.
239 /// This will typically be an `&'data T` reference type.
240 type Item: Send + 'data;
241
242 /// Converts `self` into a parallel iterator.
243 ///
244 /// # Examples
245 ///
246 /// ```
247 /// use rayon::prelude::*;
248 ///
249 /// let v: Vec<_> = (0..100).collect();
250 /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
251 ///
252 /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
253 /// // producing the exact same references.
254 /// assert!(v.par_iter().zip(&v)
255 /// .all(|(a, b)| std::ptr::eq(a, b)));
256 /// ```
257 fn par_iter(&'data self) -> Self::Iter;
258 }
259
260 impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
261 where
262 &'data I: IntoParallelIterator,
263 {
264 type Iter = <&'data I as IntoParallelIterator>::Iter;
265 type Item = <&'data I as IntoParallelIterator>::Item;
266
267 fn par_iter(&'data self) -> Self::Iter {
268 self.into_par_iter()
269 }
270 }
271
272 /// `IntoParallelRefMutIterator` implements the conversion to a
273 /// [`ParallelIterator`], providing mutable references to the data.
274 ///
275 /// This is a parallel version of the `iter_mut()` method
276 /// defined by various collections.
277 ///
278 /// This trait is automatically implemented
279 /// `for I where &mut I: IntoParallelIterator`. In most cases, users
280 /// will want to implement [`IntoParallelIterator`] rather than implement
281 /// this trait directly.
282 ///
283 /// [`ParallelIterator`]: trait.ParallelIterator.html
284 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
285 pub trait IntoParallelRefMutIterator<'data> {
286 /// The type of iterator that will be created.
287 type Iter: ParallelIterator<Item = Self::Item>;
288
289 /// The type of item that will be produced; this is typically an
290 /// `&'data mut T` reference.
291 type Item: Send + 'data;
292
293 /// Creates the parallel iterator from `self`.
294 ///
295 /// # Examples
296 ///
297 /// ```
298 /// use rayon::prelude::*;
299 ///
300 /// let mut v = vec![0usize; 5];
301 /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
302 /// assert_eq!(v, [0, 1, 2, 3, 4]);
303 /// ```
304 fn par_iter_mut(&'data mut self) -> Self::Iter;
305 }
306
307 impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
308 where
309 &'data mut I: IntoParallelIterator,
310 {
311 type Iter = <&'data mut I as IntoParallelIterator>::Iter;
312 type Item = <&'data mut I as IntoParallelIterator>::Item;
313
314 fn par_iter_mut(&'data mut self) -> Self::Iter {
315 self.into_par_iter()
316 }
317 }
318
319 /// Parallel version of the standard iterator trait.
320 ///
321 /// The combinators on this trait are available on **all** parallel
322 /// iterators. Additional methods can be found on the
323 /// [`IndexedParallelIterator`] trait: those methods are only
324 /// available for parallel iterators where the number of items is
325 /// known in advance (so, e.g., after invoking `filter`, those methods
326 /// become unavailable).
327 ///
328 /// For examples of using parallel iterators, see [the docs on the
329 /// `iter` module][iter].
330 ///
331 /// [iter]: index.html
332 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
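///
/// For instance, a small sketch of that indexed/unindexed distinction:
///
/// ```
/// use rayon::prelude::*;
///
/// // `map` preserves the known length, so indexed methods like `enumerate`
/// // are still available afterwards.
/// let v: Vec<_> = (0..4).into_par_iter().map(|x| x * 10).enumerate().collect();
/// assert_eq!(v, [(0, 0), (1, 10), (2, 20), (3, 30)]);
///
/// // By contrast, `.filter(..).enumerate()` would not compile, because
/// // `filter` discards the length information.
/// ```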
333 pub trait ParallelIterator: Sized + Send {
334 /// The type of item that this parallel iterator produces.
335 /// For example, if you use the [`for_each`] method, this is the type of
336 /// item that your closure will be invoked with.
337 ///
338 /// [`for_each`]: #method.for_each
339 type Item: Send;
340
341 /// Executes `OP` on each item produced by the iterator, in parallel.
342 ///
343 /// # Examples
344 ///
345 /// ```
346 /// use rayon::prelude::*;
347 ///
348 /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
349 /// ```
350 fn for_each<OP>(self, op: OP)
351 where
352 OP: Fn(Self::Item) + Sync + Send,
353 {
354 for_each::for_each(self, &op)
355 }
356
357 /// Executes `OP` on the given `init` value with each item produced by
358 /// the iterator, in parallel.
359 ///
360 /// The `init` value will be cloned only as needed to be paired with
361 /// the group of items in each rayon job. It does not require the type
362 /// to be `Sync`.
363 ///
364 /// # Examples
365 ///
366 /// ```
367 /// use std::sync::mpsc::channel;
368 /// use rayon::prelude::*;
369 ///
370 /// let (sender, receiver) = channel();
371 ///
372 /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
373 ///
374 /// let mut res: Vec<_> = receiver.iter().collect();
375 ///
376 /// res.sort();
377 ///
378 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
379 /// ```
380 fn for_each_with<OP, T>(self, init: T, op: OP)
381 where
382 OP: Fn(&mut T, Self::Item) + Sync + Send,
383 T: Send + Clone,
384 {
385 self.map_with(init, op).collect()
386 }
387
388 /// Executes `OP` on a value returned by `init` with each item produced by
389 /// the iterator, in parallel.
390 ///
391 /// The `init` function will be called only as needed for a value to be
392 /// paired with the group of items in each rayon job. There is no
393 /// constraint on that returned type at all!
394 ///
395 /// # Examples
396 ///
397 /// ```
398 /// extern crate rand;
399 /// extern crate rayon;
400 ///
401 /// use rand::Rng;
402 /// use rayon::prelude::*;
403 ///
404 /// let mut v = vec![0u8; 1_000_000];
405 ///
406 /// v.par_chunks_mut(1000)
407 /// .for_each_init(
408 /// || rand::thread_rng(),
409 /// |rng, chunk| rng.fill(chunk),
410 /// );
411 ///
412 /// // There's a remote chance that this will fail...
413 /// for i in 0u8..=255 {
414 /// assert!(v.contains(&i));
415 /// }
416 /// ```
417 fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
418 where
419 OP: Fn(&mut T, Self::Item) + Sync + Send,
420 INIT: Fn() -> T + Sync + Send,
421 {
422 self.map_init(init, op).collect()
423 }
424
425 /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
426 ///
427 /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
428 /// stop processing the rest of the items in the iterator as soon as
429 /// possible, and we will return that terminating value. Otherwise, we will
430 /// return an empty `Result::Ok(())` or `Option::Some(())`. If there are
431 /// multiple errors in parallel, it is not specified which will be returned.
432 ///
433 /// # Examples
434 ///
435 /// ```
436 /// use rayon::prelude::*;
437 /// use std::io::{self, Write};
438 ///
439 /// // This will stop iteration early if there's any write error, like
440 /// // having piped output get closed on the other end.
441 /// (0..100).into_par_iter()
442 /// .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
443 /// .expect("expected no write errors");
444 /// ```
445 fn try_for_each<OP, R>(self, op: OP) -> R
446 where
447 OP: Fn(Self::Item) -> R + Sync + Send,
448 R: Try<Ok = ()> + Send,
449 {
450 fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R {
451 R::from_ok(())
452 }
453
454 self.map(op).try_reduce(<()>::default, ok)
455 }
456
457 /// Executes a fallible `OP` on the given `init` value with each item
458 /// produced by the iterator, in parallel.
459 ///
460 /// This combines the `init` semantics of [`for_each_with()`] and the
461 /// failure semantics of [`try_for_each()`].
462 ///
463 /// [`for_each_with()`]: #method.for_each_with
464 /// [`try_for_each()`]: #method.try_for_each
465 ///
466 /// # Examples
467 ///
468 /// ```
469 /// use std::sync::mpsc::channel;
470 /// use rayon::prelude::*;
471 ///
472 /// let (sender, receiver) = channel();
473 ///
474 /// (0..5).into_par_iter()
475 /// .try_for_each_with(sender, |s, x| s.send(x))
476 /// .expect("expected no send errors");
477 ///
478 /// let mut res: Vec<_> = receiver.iter().collect();
479 ///
480 /// res.sort();
481 ///
482 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
483 /// ```
484 fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
485 where
486 OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
487 T: Send + Clone,
488 R: Try<Ok = ()> + Send,
489 {
490 fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R {
491 R::from_ok(())
492 }
493
494 self.map_with(init, op).try_reduce(<()>::default, ok)
495 }
496
497 /// Executes a fallible `OP` on a value returned by `init` with each item
498 /// produced by the iterator, in parallel.
499 ///
500 /// This combines the `init` semantics of [`for_each_init()`] and the
501 /// failure semantics of [`try_for_each()`].
502 ///
503 /// [`for_each_init()`]: #method.for_each_init
504 /// [`try_for_each()`]: #method.try_for_each
505 ///
506 /// # Examples
507 ///
508 /// ```
509 /// extern crate rand;
510 /// extern crate rayon;
511 ///
512 /// use rand::Rng;
513 /// use rayon::prelude::*;
514 ///
515 /// let mut v = vec![0u8; 1_000_000];
516 ///
517 /// v.par_chunks_mut(1000)
518 /// .try_for_each_init(
519 /// || rand::thread_rng(),
520 /// |rng, chunk| rng.try_fill(chunk),
521 /// )
522 /// .expect("expected no rand errors");
523 ///
524 /// // There's a remote chance that this will fail...
525 /// for i in 0u8..=255 {
526 /// assert!(v.contains(&i));
527 /// }
528 /// ```
529 fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
530 where
531 OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
532 INIT: Fn() -> T + Sync + Send,
533 R: Try<Ok = ()> + Send,
534 {
535 fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R {
536 R::from_ok(())
537 }
538
539 self.map_init(init, op).try_reduce(<()>::default, ok)
540 }
541
542 /// Counts the number of items in this parallel iterator.
543 ///
544 /// # Examples
545 ///
546 /// ```
547 /// use rayon::prelude::*;
548 ///
549 /// let count = (0..100).into_par_iter().count();
550 ///
551 /// assert_eq!(count, 100);
552 /// ```
553 fn count(self) -> usize {
554 fn one<T>(_: T) -> usize {
555 1
556 }
557
558 self.map(one).sum()
559 }
560
561 /// Applies `map_op` to each item of this iterator, producing a new
562 /// iterator with the results.
563 ///
564 /// # Examples
565 ///
566 /// ```
567 /// use rayon::prelude::*;
568 ///
569 /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
570 ///
571 /// let doubles: Vec<_> = par_iter.collect();
572 ///
573 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
574 /// ```
575 fn map<F, R>(self, map_op: F) -> Map<Self, F>
576 where
577 F: Fn(Self::Item) -> R + Sync + Send,
578 R: Send,
579 {
580 Map::new(self, map_op)
581 }
582
583 /// Applies `map_op` to the given `init` value with each item of this
584 /// iterator, producing a new iterator with the results.
585 ///
586 /// The `init` value will be cloned only as needed to be paired with
587 /// the group of items in each rayon job. It does not require the type
588 /// to be `Sync`.
589 ///
590 /// # Examples
591 ///
592 /// ```
593 /// use std::sync::mpsc::channel;
594 /// use rayon::prelude::*;
595 ///
596 /// let (sender, receiver) = channel();
597 ///
598 /// let a: Vec<_> = (0..5)
599 /// .into_par_iter() // iterating over i32
600 /// .map_with(sender, |s, x| {
601 /// s.send(x).unwrap(); // sending i32 values through the channel
602 /// x // returning i32
603 /// })
604 /// .collect(); // collecting the returned values into a vector
605 ///
606 /// let mut b: Vec<_> = receiver.iter() // iterating over the values in the channel
607 /// .collect(); // and collecting them
608 /// b.sort();
609 ///
610 /// assert_eq!(a, b);
611 /// ```
612 fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
613 where
614 F: Fn(&mut T, Self::Item) -> R + Sync + Send,
615 T: Send + Clone,
616 R: Send,
617 {
618 MapWith::new(self, init, map_op)
619 }
620
621 /// Applies `map_op` to a value returned by `init` with each item of this
622 /// iterator, producing a new iterator with the results.
623 ///
624 /// The `init` function will be called only as needed for a value to be
625 /// paired with the group of items in each rayon job. There is no
626 /// constraint on that returned type at all!
627 ///
628 /// # Examples
629 ///
630 /// ```
631 /// extern crate rand;
632 /// extern crate rayon;
633 ///
634 /// use rand::Rng;
635 /// use rayon::prelude::*;
636 ///
637 /// let a: Vec<_> = (1i32..1_000_000)
638 /// .into_par_iter()
639 /// .map_init(
640 /// || rand::thread_rng(), // get the thread-local RNG
641 /// |rng, x| if rng.gen() { // randomly negate items
642 /// -x
643 /// } else {
644 /// x
645 /// },
646 /// ).collect();
647 ///
648 /// // There's a remote chance that this will fail...
649 /// assert!(a.iter().any(|&x| x < 0));
650 /// assert!(a.iter().any(|&x| x > 0));
651 /// ```
652 fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
653 where
654 F: Fn(&mut T, Self::Item) -> R + Sync + Send,
655 INIT: Fn() -> T + Sync + Send,
656 R: Send,
657 {
658 MapInit::new(self, init, map_op)
659 }
660
661 /// Creates an iterator which clones all of its elements. This may be
662 /// useful when you have an iterator over `&T`, but you need `T`, and
663 /// that type implements `Clone`. See also [`copied()`].
664 ///
665 /// [`copied()`]: #method.copied
666 ///
667 /// # Examples
668 ///
669 /// ```
670 /// use rayon::prelude::*;
671 ///
672 /// let a = [1, 2, 3];
673 ///
674 /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
675 ///
676 /// // cloned is the same as .map(|&x| x), for integers
677 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
678 ///
679 /// assert_eq!(v_cloned, vec![1, 2, 3]);
680 /// assert_eq!(v_map, vec![1, 2, 3]);
681 /// ```
682 fn cloned<'a, T>(self) -> Cloned<Self>
683 where
684 T: 'a + Clone + Send,
685 Self: ParallelIterator<Item = &'a T>,
686 {
687 Cloned::new(self)
688 }
689
690 /// Creates an iterator which copies all of its elements. This may be
691 /// useful when you have an iterator over `&T`, but you need `T`, and
692 /// that type implements `Copy`. See also [`cloned()`].
693 ///
694 /// [`cloned()`]: #method.cloned
695 ///
696 /// # Examples
697 ///
698 /// ```
699 /// use rayon::prelude::*;
700 ///
701 /// let a = [1, 2, 3];
702 ///
703 /// let v_copied: Vec<_> = a.par_iter().copied().collect();
704 ///
705 /// // copied is the same as .map(|&x| x), for integers
706 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
707 ///
708 /// assert_eq!(v_copied, vec![1, 2, 3]);
709 /// assert_eq!(v_map, vec![1, 2, 3]);
710 /// ```
711 fn copied<'a, T>(self) -> Copied<Self>
712 where
713 T: 'a + Copy + Send,
714 Self: ParallelIterator<Item = &'a T>,
715 {
716 Copied::new(self)
717 }
718
719 /// Applies `inspect_op` to a reference to each item of this iterator,
720 /// producing a new iterator passing through the original items. This is
721 /// often useful for debugging to see what's happening in iterator stages.
722 ///
723 /// # Examples
724 ///
725 /// ```
726 /// use rayon::prelude::*;
727 ///
728 /// let a = [1, 4, 2, 3];
729 ///
730 /// // this iterator sequence is complex.
731 /// let sum = a.par_iter()
732 /// .cloned()
733 /// .filter(|&x| x % 2 == 0)
734 /// .reduce(|| 0, |sum, i| sum + i);
735 ///
736 /// println!("{}", sum);
737 ///
738 /// // let's add some inspect() calls to investigate what's happening
739 /// let sum = a.par_iter()
740 /// .cloned()
741 /// .inspect(|x| println!("about to filter: {}", x))
742 /// .filter(|&x| x % 2 == 0)
743 /// .inspect(|x| println!("made it through filter: {}", x))
744 /// .reduce(|| 0, |sum, i| sum + i);
745 ///
746 /// println!("{}", sum);
747 /// ```
748 fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
749 where
750 OP: Fn(&Self::Item) + Sync + Send,
751 {
752 Inspect::new(self, inspect_op)
753 }
754
755 /// Mutates each item of this iterator before yielding it.
756 ///
757 /// # Examples
758 ///
759 /// ```
760 /// use rayon::prelude::*;
761 ///
762 /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
763 ///
764 /// let doubles: Vec<_> = par_iter.collect();
765 ///
766 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
767 /// ```
768 fn update<F>(self, update_op: F) -> Update<Self, F>
769 where
770 F: Fn(&mut Self::Item) + Sync + Send,
771 {
772 Update::new(self, update_op)
773 }
774
775 /// Applies `filter_op` to each item of this iterator, producing a new
776 /// iterator with only the items that gave `true` results.
777 ///
778 /// # Examples
779 ///
780 /// ```
781 /// use rayon::prelude::*;
782 ///
783 /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
784 ///
785 /// let even_numbers: Vec<_> = par_iter.collect();
786 ///
787 /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
788 /// ```
789 fn filter<P>(self, filter_op: P) -> Filter<Self, P>
790 where
791 P: Fn(&Self::Item) -> bool + Sync + Send,
792 {
793 Filter::new(self, filter_op)
794 }
795
796 /// Applies `filter_op` to each item of this iterator to get an `Option`,
797 /// producing a new iterator with only the items from `Some` results.
798 ///
799 /// # Examples
800 ///
801 /// ```
802 /// use rayon::prelude::*;
803 ///
804 /// let mut par_iter = (0..10).into_par_iter()
805 /// .filter_map(|x| {
806 /// if x % 2 == 0 { Some(x * 3) }
807 /// else { None }
808 /// });
809 ///
810 /// let even_numbers: Vec<_> = par_iter.collect();
811 ///
812 /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
813 /// ```
814 fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
815 where
816 P: Fn(Self::Item) -> Option<R> + Sync + Send,
817 R: Send,
818 {
819 FilterMap::new(self, filter_op)
820 }
821
822 /// Applies `map_op` to each item of this iterator to get nested iterators,
823 /// producing a new iterator that flattens these back into one.
824 ///
825 /// # Examples
826 ///
827 /// ```
828 /// use rayon::prelude::*;
829 ///
830 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
831 ///
832 /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
833 ///
834 /// let vec: Vec<_> = par_iter.collect();
835 ///
836 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
837 /// ```
838 fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
839 where
840 F: Fn(Self::Item) -> PI + Sync + Send,
841 PI: IntoParallelIterator,
842 {
843 FlatMap::new(self, map_op)
844 }
845
846 /// An adaptor that flattens iterable `Item`s into one large iterator
847 ///
848 /// # Examples
849 ///
850 /// ```
851 /// use rayon::prelude::*;
852 ///
853 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
854 /// let y: Vec<_> = x.into_par_iter().flatten().collect();
855 ///
856 /// assert_eq!(y, vec![1, 2, 3, 4]);
857 /// ```
858 fn flatten(self) -> Flatten<Self>
859 where
860 Self::Item: IntoParallelIterator,
861 {
862 Flatten::new(self)
863 }
864
865 /// Reduces the items in the iterator into one item using `op`.
866 /// The argument `identity` should be a closure that can produce an
867 /// "identity" value, which may be inserted into the sequence as
868 /// needed to create opportunities for parallel execution. So, for
869 /// example, if you are doing a summation, then `identity()` ought
870 /// to produce something that represents the zero for your type
871 /// (but consider just calling `sum()` in that case).
872 ///
873 /// # Examples
874 ///
875 /// ```
876 /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
877 /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
878 /// // where the first/second elements are summed separately.
879 /// use rayon::prelude::*;
880 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
881 /// .par_iter() // iterating over &(i32, i32)
882 /// .cloned() // iterating over (i32, i32)
883 /// .reduce(|| (0, 0), // the "identity" is 0 in both columns
884 /// |a, b| (a.0 + b.0, a.1 + b.1));
885 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
886 /// ```
887 ///
888 /// **Note:** unlike a sequential `fold` operation, the order in
889 /// which `op` will be applied to reduce the result is not fully
890 /// specified. So `op` should be [associative] or else the results
891 /// will be non-deterministic. And of course `identity()` should
892 /// produce a true identity.
893 ///
894 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
895 fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
896 where
897 OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
898 ID: Fn() -> Self::Item + Sync + Send,
899 {
900 reduce::reduce(self, identity, op)
901 }
902
903 /// Reduces the items in the iterator into one item using `op`.
904 /// If the iterator is empty, `None` is returned; otherwise,
905 /// `Some` is returned.
906 ///
907 /// This version of `reduce` is simple but somewhat less
908 /// efficient. If possible, it is better to call `reduce()`, which
909 /// requires an identity element.
910 ///
911 /// # Examples
912 ///
913 /// ```
914 /// use rayon::prelude::*;
915 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
916 /// .par_iter() // iterating over &(i32, i32)
917 /// .cloned() // iterating over (i32, i32)
918 /// .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
919 /// .unwrap();
920 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
921 /// ```
922 ///
923 /// **Note:** unlike a sequential `fold` operation, the order in
924 /// which `op` will be applied to reduce the result is not fully
925 /// specified. So `op` should be [associative] or else the results
926 /// will be non-deterministic.
927 ///
928 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
929 fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
930 where
931 OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
932 {
933 fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
934 move |opt_a, b| match opt_a {
935 Some(a) => Some(op(a, b)),
936 None => Some(b),
937 }
938 }
939
940 fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
941 move |opt_a, opt_b| match (opt_a, opt_b) {
942 (Some(a), Some(b)) => Some(op(a, b)),
943 (Some(v), None) | (None, Some(v)) => Some(v),
944 (None, None) => None,
945 }
946 }
947
948 self.fold(<_>::default, opt_fold(&op))
949 .reduce(<_>::default, opt_reduce(&op))
950 }
951
952 /// Reduces the items in the iterator into one item using a fallible `op`.
953 /// The `identity` argument is used the same way as in [`reduce()`].
954 ///
955 /// [`reduce()`]: #method.reduce
956 ///
957 /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
958 /// to one, we will attempt to stop processing the rest of the items in the
959 /// iterator as soon as possible, and we will return that terminating value.
960 /// Otherwise, we will return the final reduced `Result::Ok(T)` or
961 /// `Option::Some(T)`. If there are multiple errors in parallel, it is not
962 /// specified which will be returned.
963 ///
964 /// # Examples
965 ///
966 /// ```
967 /// use rayon::prelude::*;
968 ///
969 /// // Compute the sum of squares, being careful about overflow.
970 /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
971 /// iter.into_par_iter()
972 /// .map(|i| i.checked_mul(i)) // square each item,
973 /// .try_reduce(|| 0, i32::checked_add) // and add them up!
974 /// }
975 /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
976 ///
977 /// // The sum might overflow
978 /// assert_eq!(sum_squares(0..10_000), None);
979 ///
980 /// // Or the squares might overflow before it even reaches `try_reduce`
981 /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
982 /// ```
983 fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
984 where
985 OP: Fn(T, T) -> Self::Item + Sync + Send,
986 ID: Fn() -> T + Sync + Send,
987 Self::Item: Try<Ok = T>,
988 {
989 try_reduce::try_reduce(self, identity, op)
990 }
991
992 /// Reduces the items in the iterator into one item using a fallible `op`.
993 ///
994 /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
995 /// otherwise, `Some` is returned. Beyond that, it behaves like
996 /// [`try_reduce()`] for handling `Err`/`None`.
997 ///
998 /// [`reduce_with()`]: #method.reduce_with
999 /// [`try_reduce()`]: #method.try_reduce
1000 ///
1001 /// For instance, with `Option` items, the return value may be:
1002 /// - `None`, the iterator was empty
1003 /// - `Some(None)`, we stopped after encountering `None`.
1004 /// - `Some(Some(x))`, the entire iterator reduced to `x`.
1005 ///
1006 /// With `Result` items, the nesting is more obvious:
1007 /// - `None`, the iterator was empty
1008 /// - `Some(Err(e))`, we stopped after encountering an error `e`.
1009 /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
1010 ///
1011 /// # Examples
1012 ///
1013 /// ```
1014 /// use rayon::prelude::*;
1015 ///
1016 /// let files = ["/dev/null", "/does/not/exist"];
1017 ///
1018 /// // Find the biggest file
1019 /// files.into_par_iter()
1020 /// .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
1021 /// .try_reduce_with(|a, b| {
1022 /// Ok(if a.1 >= b.1 { a } else { b })
1023 /// })
1024 /// .expect("Some value, since the iterator is not empty")
1025 /// .expect_err("not found");
1026 /// ```
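///
/// And a minimal sketch of the `Option` nesting listed above (illustrative,
/// using `max` as the reduction):
///
/// ```
/// use rayon::prelude::*;
///
/// // Non-empty, no `None` items: `Some(Some(max))`.
/// let reduced = (1..10).into_par_iter()
///     .map(Some)
///     .try_reduce_with(|a, b| Some(a.max(b)));
/// assert_eq!(reduced, Some(Some(9)));
///
/// // A `None` item stops the reduction: `Some(None)`.
/// let stopped = (1..10).into_par_iter()
///     .map(|x| if x == 5 { None } else { Some(x) })
///     .try_reduce_with(|a, b| Some(a.max(b)));
/// assert_eq!(stopped, Some(None));
///
/// // Empty iterator: `None`.
/// let empty = rayon::iter::empty::<Option<i32>>()
///     .try_reduce_with(|a, b| Some(a.max(b)));
/// assert_eq!(empty, None);
/// ```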
1027 fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
1028 where
1029 OP: Fn(T, T) -> Self::Item + Sync + Send,
1030 Self::Item: Try<Ok = T>,
1031 {
1032 try_reduce_with::try_reduce_with(self, op)
1033 }
1034
1035 /// Parallel fold is similar to sequential fold except that the
1036 /// sequence of items may be subdivided before it is
1037 /// folded. Consider a list of numbers like `22 3 77 89 46`. If
1038 /// you used sequential fold to add them (`fold(0, |a,b| a+b)`),
1039 /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
1040 /// 77, and so forth. The **parallel fold** works similarly except
1041 /// that it first breaks up your list into sublists, and hence
1042 /// instead of yielding up a single sum at the end, it yields up
1043 /// multiple sums. The number of results is nondeterministic, as
1044 /// is the point where the breaks occur.
1045 ///
1046 /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
1047 /// our example list, we might wind up with a sequence of two numbers,
1048 /// like so:
1049 ///
1050 /// ```notrust
1051 /// 22 3 77 89 46
1052 ///       |     |
1053 ///     102   135
1054 /// ```
1055 ///
1056 /// Or perhaps these three numbers:
1057 ///
1058 /// ```notrust
1059 /// 22 3 77 89 46
1060 ///       |  |  |
1061 ///     102 89 46
1062 /// ```
1063 ///
1064 /// In general, Rayon will attempt to find good breaking points
1065 /// that keep all of your cores busy.
1066 ///
1067 /// ### Fold versus reduce
1068 ///
1069 /// The `fold()` and `reduce()` methods each take an identity element
1070 /// and a combining function, but they operate rather differently.
1071 ///
1072 /// `reduce()` requires that the identity function has the same
1073 /// type as the things you are iterating over, and it fully
1074 /// reduces the list of items into a single item. So, for example,
1075 /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1076 /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1077 /// u8| a + b)`, we would get an overflow. This is because `0`,
1078 /// `a`, and `b` here are all bytes, just like the numbers in the
1079 /// list (I wrote the types explicitly above, but those are the
1080 /// only types you can use). To avoid the overflow, we would need
1081 /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1082 /// b| a + b)`, in which case our result would be `256`.
1083 ///
1084 /// In contrast, with `fold()`, the identity function does not
1085 /// have to have the same type as the things you are iterating
1086 /// over, and you potentially get back many results. So, if we
1087 /// continue with the `bytes` example from the previous paragraph,
1088 /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1089 /// convert our bytes into `u32`. And of course we might not get
1090 /// back a single sum.
1091 ///
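/// As a small illustration of that last point, collecting the fold results
/// directly shows the per-group partial values:
///
/// ```
/// use rayon::prelude::*;
///
/// let partial_sums: Vec<u32> = (0..22_u8).into_par_iter()
///     .fold(|| 0_u32, |a, b| a + b as u32)
///     .collect();
///
/// // How many groups there are is unspecified, but together they always
/// // add up to the full sum.
/// assert!(!partial_sums.is_empty());
/// assert_eq!(partial_sums.iter().sum::<u32>(), (0..22).sum());
/// ```
///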
1092 /// There is a more subtle distinction as well, though it's
1093 /// actually implied by the above points. When you use `reduce()`,
1094 /// your reduction function is sometimes called with values that
1095 /// were never part of your original parallel iterator (for
1096 /// example, both the left and right might be a partial sum). With
1097 /// `fold()`, in contrast, the left value in the fold function is
1098 /// always the accumulator, and the right value is always from
1099 /// your original sequence.
1100 ///
1101 /// ### Fold vs Map/Reduce
1102 ///
1103 /// Fold makes sense if you have some operation where it is
1104 /// cheaper to create groups of elements at a time. For example,
1105 /// imagine collecting characters into a string. If you were going
1106 /// to use map/reduce, you might try this:
1107 ///
1108 /// ```
1109 /// use rayon::prelude::*;
1110 ///
1111 /// let s =
1112 /// ['a', 'b', 'c', 'd', 'e']
1113 /// .par_iter()
1114 /// .map(|c: &char| format!("{}", c))
1115 /// .reduce(|| String::new(),
1116 /// |mut a: String, b: String| { a.push_str(&b); a });
1117 ///
1118 /// assert_eq!(s, "abcde");
1119 /// ```
1120 ///
1121 /// Because reduce produces the same type of element as its input,
1122 /// you have to first map each character into a string, and then
1123 /// you can reduce them. This means we create one string per
1124 /// element in our iterator -- not so great. Using `fold`, we can
1125 /// do this instead:
1126 ///
1127 /// ```
1128 /// use rayon::prelude::*;
1129 ///
1130 /// let s =
1131 /// ['a', 'b', 'c', 'd', 'e']
1132 /// .par_iter()
1133 /// .fold(|| String::new(),
1134 /// |mut s: String, c: &char| { s.push(*c); s })
1135 /// .reduce(|| String::new(),
1136 /// |mut a: String, b: String| { a.push_str(&b); a });
1137 ///
1138 /// assert_eq!(s, "abcde");
1139 /// ```
1140 ///
1141 /// Now `fold` will process groups of our characters at a time,
1142 /// and we only make one string per group. We should wind up with
1143 /// some small-ish number of strings roughly proportional to the
1144 /// number of CPUs you have (it will ultimately depend on how busy
1145 /// your processors are). Note that we still need to do a reduce
1146 /// afterwards to combine those groups of strings into a single
1147 /// string.
1148 ///
1149 /// You could use a similar trick to save partial results (e.g., a
1150 /// cache) or something similar.
1151 ///
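/// For instance, a rough sketch of that caching idea (the `expensive`
/// function here is just a hypothetical stand-in for real work):
///
/// ```
/// use std::collections::HashMap;
/// use rayon::prelude::*;
///
/// // Hypothetical function that is costly to recompute for the same input.
/// fn expensive(x: u32) -> u64 {
///     (0..x).map(u64::from).sum()
/// }
///
/// let total: u64 = (0..1_000u32)
///     .into_par_iter()
///     .map(|x| x % 10) // many repeated inputs
///     .fold(
///         || (HashMap::<u32, u64>::new(), 0u64),
///         |(mut cache, sum), x| {
///             // Each fold group carries its own cache, so `expensive` runs
///             // at most once per distinct input within a group.
///             let v = *cache.entry(x).or_insert_with(|| expensive(x));
///             (cache, sum + v)
///         },
///     )
///     .map(|(_cache, sum)| sum)
///     .sum();
///
/// assert_eq!(total, (0..1_000u32).map(|x| expensive(x % 10)).sum());
/// ```
///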
1152 /// ### Combining fold with other operations
1153 ///
1154 /// You can combine `fold` with `reduce` if you want to produce a
1155 /// single value. This is then roughly equivalent to a map/reduce
1156 /// combination in effect:
1157 ///
1158 /// ```
1159 /// use rayon::prelude::*;
1160 ///
1161 /// let bytes = 0..22_u8;
1162 /// let sum = bytes.into_par_iter()
1163 /// .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1164 /// .sum::<u32>();
1165 ///
1166 /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1167 /// ```
1168 fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
1169 where
1170 F: Fn(T, Self::Item) -> T + Sync + Send,
1171 ID: Fn() -> T + Sync + Send,
1172 T: Send,
1173 {
1174 Fold::new(self, identity, fold_op)
1175 }
1176
1177 /// Applies `fold_op` to the given `init` value with each item of this
1178 /// iterator, finally producing the value for further use.
1179 ///
1180 /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1181 /// it doesn't require the `init` type to be `Sync`, nor any other form
1182 /// of added synchronization.
1183 ///
1184 /// # Examples
1185 ///
1186 /// ```
1187 /// use rayon::prelude::*;
1188 ///
1189 /// let bytes = 0..22_u8;
1190 /// let sum = bytes.into_par_iter()
1191 /// .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1192 /// .sum::<u32>();
1193 ///
1194 /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1195 /// ```
1196 fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
1197 where
1198 F: Fn(T, Self::Item) -> T + Sync + Send,
1199 T: Send + Clone,
1200 {
1201 FoldWith::new(self, init, fold_op)
1202 }
1203
1204 /// Perform a fallible parallel fold.
1205 ///
1206 /// This is a variation of [`fold()`] for operations which can fail with
1207 /// `Option::None` or `Result::Err`. The first such failure stops
1208 /// processing the local set of items, without affecting other folds in the
1209 /// iterator's subdivisions.
1210 ///
1211 /// Often, `try_fold()` will be followed by [`try_reduce()`]
1212 /// for a final reduction and global short-circuiting effect.
1213 ///
1214 /// [`fold()`]: #method.fold
1215 /// [`try_reduce()`]: #method.try_reduce
1216 ///
1217 /// # Examples
1218 ///
1219 /// ```
1220 /// use rayon::prelude::*;
1221 ///
1222 /// let bytes = 0..22_u8;
1223 /// let sum = bytes.into_par_iter()
1224 /// .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1225 /// .try_reduce(|| 0, u32::checked_add);
1226 ///
1227 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1228 /// ```
1229 fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
1230 where
1231 F: Fn(T, Self::Item) -> R + Sync + Send,
1232 ID: Fn() -> T + Sync + Send,
1233 R: Try<Ok = T> + Send,
1234 {
1235 TryFold::new(self, identity, fold_op)
1236 }
1237
1238 /// Perform a fallible parallel fold with a cloneable `init` value.
1239 ///
1240 /// This combines the `init` semantics of [`fold_with()`] and the failure
1241 /// semantics of [`try_fold()`].
1242 ///
1243 /// [`fold_with()`]: #method.fold_with
1244 /// [`try_fold()`]: #method.try_fold
1245 ///
1246 /// ```
1247 /// use rayon::prelude::*;
1248 ///
1249 /// let bytes = 0..22_u8;
1250 /// let sum = bytes.into_par_iter()
1251 /// .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1252 /// .try_reduce(|| 0, u32::checked_add);
1253 ///
1254 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1255 /// ```
1256 fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
1257 where
1258 F: Fn(T, Self::Item) -> R + Sync + Send,
1259 R: Try<Ok = T> + Send,
1260 T: Clone + Send,
1261 {
1262 TryFoldWith::new(self, init, fold_op)
1263 }
1264
1265 /// Sums up the items in the iterator.
1266 ///
1267 /// Note that the order in which items will be reduced is not specified,
1268 /// so if the `+` operator is not truly [associative] \(as is the
1269 /// case for floating point numbers), then the results are not
1270 /// fully deterministic.
1271 ///
1272 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1273 ///
1274 /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1275 /// except that the type of `0` and the `+` operation may vary
1276 /// depending on the type of value being produced.
1277 ///
1278 /// # Examples
1279 ///
1280 /// ```
1281 /// use rayon::prelude::*;
1282 ///
1283 /// let a = [1, 5, 7];
1284 ///
1285 /// let sum: i32 = a.par_iter().sum();
1286 ///
1287 /// assert_eq!(sum, 13);
1288 /// ```
1289 fn sum<S>(self) -> S
1290 where
1291 S: Send + Sum<Self::Item> + Sum<S>,
1292 {
1293 sum::sum(self)
1294 }
1295
1296 /// Multiplies all the items in the iterator.
1297 ///
1298 /// Note that the order in which items will be reduced is not specified,
1299 /// so if the `*` operator is not truly [associative] \(as is the
1300 /// case for floating point numbers), then the results are not
1301 /// fully deterministic.
1302 ///
1303 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1304 ///
1305 /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1306 /// except that the type of `1` and the `*` operation may vary
1307 /// depending on the type of value being produced.
1308 ///
1309 /// # Examples
1310 ///
1311 /// ```
1312 /// use rayon::prelude::*;
1313 ///
1314 /// fn factorial(n: u32) -> u32 {
1315 /// (1..n+1).into_par_iter().product()
1316 /// }
1317 ///
1318 /// assert_eq!(factorial(0), 1);
1319 /// assert_eq!(factorial(1), 1);
1320 /// assert_eq!(factorial(5), 120);
1321 /// ```
1322 fn product<P>(self) -> P
1323 where
1324 P: Send + Product<Self::Item> + Product<P>,
1325 {
1326 product::product(self)
1327 }
1328
1329 /// Computes the minimum of all the items in the iterator. If the
1330 /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1331 /// is returned.
1332 ///
1333 /// Note that the order in which the items will be reduced is not
1334 /// specified, so if the `Ord` impl is not truly associative, then
1335 /// the results are not deterministic.
1336 ///
1337 /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`.
1338 ///
1339 /// # Examples
1340 ///
1341 /// ```
1342 /// use rayon::prelude::*;
1343 ///
1344 /// let a = [45, 74, 32];
1345 ///
1346 /// assert_eq!(a.par_iter().min(), Some(&32));
1347 ///
1348 /// let b: [i32; 0] = [];
1349 ///
1350 /// assert_eq!(b.par_iter().min(), None);
1351 /// ```
1352 fn min(self) -> Option<Self::Item>
1353 where
1354 Self::Item: Ord,
1355 {
1356 self.reduce_with(cmp::min)
1357 }
1358
1359 /// Computes the minimum of all the items in the iterator with respect to
1360 /// the given comparison function. If the iterator is empty, `None` is
1361 /// returned; otherwise, `Some(min)` is returned.
1362 ///
1363 /// Note that the order in which the items will be reduced is not
1364 /// specified, so if the comparison function is not associative, then
1365 /// the results are not deterministic.
1366 ///
1367 /// # Examples
1368 ///
1369 /// ```
1370 /// use rayon::prelude::*;
1371 ///
1372 /// let a = [-3_i32, 77, 53, 240, -1];
1373 ///
1374 /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1375 /// ```
1376 fn min_by<F>(self, f: F) -> Option<Self::Item>
1377 where
1378 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1379 {
1380 fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1381 move |a, b| match f(&a, &b) {
1382 Ordering::Greater => b,
1383 _ => a,
1384 }
1385 }
1386
1387 self.reduce_with(min(f))
1388 }
1389
1390 /// Computes the item that yields the minimum value for the given
1391 /// function. If the iterator is empty, `None` is returned;
1392 /// otherwise, `Some(item)` is returned.
1393 ///
1394 /// Note that the order in which the items will be reduced is not
1395 /// specified, so if the `Ord` impl is not truly associative, then
1396 /// the results are not deterministic.
1397 ///
1398 /// # Examples
1399 ///
1400 /// ```
1401 /// use rayon::prelude::*;
1402 ///
1403 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1404 ///
1405 /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1406 /// ```
1407 fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1408 where
1409 K: Ord + Send,
1410 F: Sync + Send + Fn(&Self::Item) -> K,
1411 {
1412 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1413 move |x| (f(&x), x)
1414 }
1415
1416 fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1417 match (a.0).cmp(&b.0) {
1418 Ordering::Greater => b,
1419 _ => a,
1420 }
1421 }
1422
1423 let (_, x) = self.map(key(f)).reduce_with(min_key)?;
1424 Some(x)
1425 }
1426
1427 /// Computes the maximum of all the items in the iterator. If the
1428 /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1429 /// is returned.
1430 ///
1431 /// Note that the order in which the items will be reduced is not
1432 /// specified, so if the `Ord` impl is not truly associative, then
1433 /// the results are not deterministic.
1434 ///
1435 /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`.
1436 ///
1437 /// # Examples
1438 ///
1439 /// ```
1440 /// use rayon::prelude::*;
1441 ///
1442 /// let a = [45, 74, 32];
1443 ///
1444 /// assert_eq!(a.par_iter().max(), Some(&74));
1445 ///
1446 /// let b: [i32; 0] = [];
1447 ///
1448 /// assert_eq!(b.par_iter().max(), None);
1449 /// ```
1450 fn max(self) -> Option<Self::Item>
1451 where
1452 Self::Item: Ord,
1453 {
1454 self.reduce_with(cmp::max)
1455 }
1456
1457 /// Computes the maximum of all the items in the iterator with respect to
1458 /// the given comparison function. If the iterator is empty, `None` is
1459 /// returned; otherwise, `Some(max)` is returned.
1460 ///
1461 /// Note that the order in which the items will be reduced is not
1462 /// specified, so if the comparison function is not associative, then
1463 /// the results are not deterministic.
1464 ///
1465 /// # Examples
1466 ///
1467 /// ```
1468 /// use rayon::prelude::*;
1469 ///
1470 /// let a = [-3_i32, 77, 53, 240, -1];
1471 ///
1472 /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1473 /// ```
1474 fn max_by<F>(self, f: F) -> Option<Self::Item>
1475 where
1476 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1477 {
1478 fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1479 move |a, b| match f(&a, &b) {
1480 Ordering::Greater => a,
1481 _ => b,
1482 }
1483 }
1484
1485 self.reduce_with(max(f))
1486 }
1487
1488 /// Computes the item that yields the maximum value for the given
1489 /// function. If the iterator is empty, `None` is returned;
1490 /// otherwise, `Some(item)` is returned.
1491 ///
1492 /// Note that the order in which the items will be reduced is not
1493 /// specified, so if the `Ord` impl is not truly associative, then
1494 /// the results are not deterministic.
1495 ///
1496 /// # Examples
1497 ///
1498 /// ```
1499 /// use rayon::prelude::*;
1500 ///
1501 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1502 ///
1503 /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1504 /// ```
1505 fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1506 where
1507 K: Ord + Send,
1508 F: Sync + Send + Fn(&Self::Item) -> K,
1509 {
1510 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1511 move |x| (f(&x), x)
1512 }
1513
1514 fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1515 match (a.0).cmp(&b.0) {
1516 Ordering::Greater => a,
1517 _ => b,
1518 }
1519 }
1520
1521 let (_, x) = self.map(key(f)).reduce_with(max_key)?;
1522 Some(x)
1523 }
1524
1525 /// Takes two iterators and creates a new iterator over both.
1526 ///
1527 /// # Examples
1528 ///
1529 /// ```
1530 /// use rayon::prelude::*;
1531 ///
1532 /// let a = [0, 1, 2];
1533 /// let b = [9, 8, 7];
1534 ///
1535 /// let par_iter = a.par_iter().chain(b.par_iter());
1536 ///
1537 /// let chained: Vec<_> = par_iter.cloned().collect();
1538 ///
1539 /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
1540 /// ```
1541 fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
1542 where
1543 C: IntoParallelIterator<Item = Self::Item>,
1544 {
1545 Chain::new(self, chain.into_par_iter())
1546 }
1547
1548 /// Searches for **some** item in the parallel iterator that
1549 /// matches the given predicate and returns it. This operation
1550 /// is similar to [`find` on sequential iterators][find] but
1551 /// the item returned may not be the **first** one in the parallel
1552 /// sequence which matches, since we search the entire sequence in parallel.
1553 ///
1554 /// Once a match is found, we will attempt to stop processing
1555 /// the rest of the items in the iterator as soon as possible
1556 /// (just as `find` stops iterating once a match is found).
1557 ///
1558 /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
1559 ///
1560 /// # Examples
1561 ///
1562 /// ```
1563 /// use rayon::prelude::*;
1564 ///
1565 /// let a = [1, 2, 3, 3];
1566 ///
1567 /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1568 ///
1569 /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
1570 /// ```
1571 fn find_any<P>(self, predicate: P) -> Option<Self::Item>
1572 where
1573 P: Fn(&Self::Item) -> bool + Sync + Send,
1574 {
1575 find::find(self, predicate)
1576 }
1577
1578 /// Searches for the sequentially **first** item in the parallel iterator
1579 /// that matches the given predicate and returns it.
1580 ///
1581 /// Once a match is found, all attempts to the right of the match
1582 /// will be stopped, while attempts to the left must continue in case
1583 /// an earlier match is found.
1584 ///
1585 /// Note that not all parallel iterators have a useful order, much like
1586 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1587 /// just want the first match discovered anywhere in the iterator,
1588 /// `find_any` is a better choice.
1589 ///
1590 /// # Examples
1591 ///
1592 /// ```
1593 /// use rayon::prelude::*;
1594 ///
1595 /// let a = [1, 2, 3, 3];
1596 ///
1597 /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1598 ///
1599 /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
1600 /// ```
1601 fn find_first<P>(self, predicate: P) -> Option<Self::Item>
1602 where
1603 P: Fn(&Self::Item) -> bool + Sync + Send,
1604 {
1605 find_first_last::find_first(self, predicate)
1606 }
1607
1608 /// Searches for the sequentially **last** item in the parallel iterator
1609 /// that matches the given predicate and returns it.
1610 ///
1611 /// Once a match is found, all attempts to the left of the match
1612 /// will be stopped, while attempts to the right must continue in case
1613 /// a later match is found.
1614 ///
1615 /// Note that not all parallel iterators have a useful order, much like
1616 /// sequential `HashMap` iteration, so "last" may be nebulous. When the
1617 /// order doesn't actually matter to you, `find_any` is a better choice.
1618 ///
1619 /// # Examples
1620 ///
1621 /// ```
1622 /// use rayon::prelude::*;
1623 ///
1624 /// let a = [1, 2, 3, 3];
1625 ///
1626 /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1627 ///
1628 /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
1629 /// ```
1630 fn find_last<P>(self, predicate: P) -> Option<Self::Item>
1631 where
1632 P: Fn(&Self::Item) -> bool + Sync + Send,
1633 {
1634 find_first_last::find_last(self, predicate)
1635 }
1636
1637 /// Applies the given predicate to the items in the parallel iterator
1638 /// and returns **any** non-None result of the map operation.
1639 ///
1640 /// Once a non-None value is produced from the map operation, we will
1641 /// attempt to stop processing the rest of the items in the iterator
1642 /// as soon as possible.
1643 ///
1644 /// Note that this method only returns **some** item in the parallel
1645 /// iterator that is not None from the map predicate. The item returned
1646 /// may not be the **first** non-None value produced in the parallel
1647 /// sequence, since the entire sequence is mapped over in parallel.
1648 ///
1649 /// # Examples
1650 ///
1651 /// ```
1652 /// use rayon::prelude::*;
1653 ///
1654 /// let c = ["lol", "NaN", "5", "5"];
1655 ///
1656 /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1657 ///
1658 /// assert_eq!(found_number, Some(5));
1659 /// ```
1660 fn find_map_any<P, R>(self, predicate: P) -> Option<R>
1661 where
1662 P: Fn(Self::Item) -> Option<R> + Sync + Send,
1663 R: Send,
1664 {
1665 fn yes<T>(_: &T) -> bool {
1666 true
1667 }
1668 self.filter_map(predicate).find_any(yes)
1669 }
1670
1671 /// Applies the given predicate to the items in the parallel iterator and
1672 /// returns the sequentially **first** non-None result of the map operation.
1673 ///
1674 /// Once a non-None value is produced from the map operation, all attempts
1675 /// to the right of the match will be stopped, while attempts to the left
1676 /// must continue in case an earlier match is found.
1677 ///
1678 /// Note that not all parallel iterators have a useful order, much like
1679 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1680 /// just want the first non-None value discovered anywhere in the iterator,
1681 /// `find_map_any` is a better choice.
1682 ///
1683 /// # Examples
1684 ///
1685 /// ```
1686 /// use rayon::prelude::*;
1687 ///
1688 /// let c = ["lol", "NaN", "2", "5"];
1689 ///
1690 /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1691 ///
1692 /// assert_eq!(first_number, Some(2));
1693 /// ```
1694 fn find_map_first<P, R>(self, predicate: P) -> Option<R>
1695 where
1696 P: Fn(Self::Item) -> Option<R> + Sync + Send,
1697 R: Send,
1698 {
1699 fn yes<T>(_: &T) -> bool {
1700 true
1701 }
1702 self.filter_map(predicate).find_first(yes)
1703 }
1704
1705 /// Applies the given predicate to the items in the parallel iterator and
1706 /// returns the sequentially **last** non-None result of the map operation.
1707 ///
1708 /// Once a non-None value is produced from the map operation, all attempts
1709 /// to the left of the match will be stopped, while attempts to the right
1710 /// must continue in case a later match is found.
1711 ///
1712 /// Note that not all parallel iterators have a useful order, much like
1713 /// sequential `HashMap` iteration, so "last" may be nebulous. If you
1714 /// just want any non-None value discovered anywhere in the iterator,
1715 /// `find_map_any` is a better choice.
1716 ///
1717 /// # Examples
1718 ///
1719 /// ```
1720 /// use rayon::prelude::*;
1721 ///
1722 /// let c = ["lol", "NaN", "2", "5"];
1723 ///
1724 /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
1725 ///
1726 /// assert_eq!(last_number, Some(5));
1727 /// ```
1728 fn find_map_last<P, R>(self, predicate: P) -> Option<R>
1729 where
1730 P: Fn(Self::Item) -> Option<R> + Sync + Send,
1731 R: Send,
1732 {
1733 fn yes<T>(_: &T) -> bool {
1734 true
1735 }
1736 self.filter_map(predicate).find_last(yes)
1737 }
1738
1739 #[doc(hidden)]
1740 #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \
1741 `find_first`, or `find_last`")]
1742 fn find<P>(self, predicate: P) -> Option<Self::Item>
1743 where
1744 P: Fn(&Self::Item) -> bool + Sync + Send,
1745 {
1746 self.find_any(predicate)
1747 }
1748
1749 /// Searches for **some** item in the parallel iterator that
1750 /// matches the given predicate, and if so returns true. Once
1751 /// a match is found, we'll attempt to stop processing the rest
1752 /// of the items. Proving that there is no match (and returning false)
1753 /// does require visiting every item.
1754 ///
1755 /// # Examples
1756 ///
1757 /// ```
1758 /// use rayon::prelude::*;
1759 ///
1760 /// let a = [0, 12, 3, 4, 0, 23, 0];
1761 ///
1762 /// let is_valid = a.par_iter().any(|&x| x > 10);
1763 ///
1764 /// assert!(is_valid);
1765 /// ```
1766 fn any<P>(self, predicate: P) -> bool
1767 where
1768 P: Fn(Self::Item) -> bool + Sync + Send,
1769 {
1770 self.map(predicate).find_any(bool::clone).is_some()
1771 }
1772
1773 /// Tests that every item in the parallel iterator matches the given
1774 /// predicate, and if so returns true. If a counter-example is found,
1775 /// we'll attempt to stop processing more items, then return false.
1776 ///
1777 /// # Examples
1778 ///
1779 /// ```
1780 /// use rayon::prelude::*;
1781 ///
1782 /// let a = [0, 12, 3, 4, 0, 23, 0];
1783 ///
1784 /// let is_valid = a.par_iter().all(|&x| x > 10);
1785 ///
1786 /// assert!(!is_valid);
1787 /// ```
1788 fn all<P>(self, predicate: P) -> bool
1789 where
1790 P: Fn(Self::Item) -> bool + Sync + Send,
1791 {
1792 #[inline]
1793 fn is_false(x: &bool) -> bool {
1794 !x
1795 }
1796
1797 self.map(predicate).find_any(is_false).is_none()
1798 }
1799
1800 /// Creates an iterator over the `Some` items of this iterator, halting
1801 /// as soon as any `None` is found.
1802 ///
1803 /// # Examples
1804 ///
1805 /// ```
1806 /// use rayon::prelude::*;
1807 /// use std::sync::atomic::{AtomicUsize, Ordering};
1808 ///
1809 /// let counter = AtomicUsize::new(0);
1810 /// let value = (0_i32..2048)
1811 /// .into_par_iter()
1812 /// .map(|x| {
1813 /// counter.fetch_add(1, Ordering::SeqCst);
1814 /// if x < 1024 { Some(x) } else { None }
1815 /// })
1816 /// .while_some()
1817 /// .max();
1818 ///
1819 /// assert!(value < Some(1024));
1820 /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
1821 /// ```
1822 fn while_some<T>(self) -> WhileSome<Self>
1823 where
1824 Self: ParallelIterator<Item = Option<T>>,
1825 T: Send,
1826 {
1827 WhileSome::new(self)
1828 }
1829
1830 /// Wraps an iterator with a fuse in case of panics, to halt all threads
1831 /// as soon as possible.
1832 ///
1833 /// Panics within parallel iterators are always propagated to the caller,
1834 /// but they don't always halt the rest of the iterator right away, due to
1835 /// the internal semantics of [`join`]. This adaptor makes a greater effort
1836 /// to stop processing other items sooner, at the cost of additional
1837 /// synchronization overhead, which may also inhibit some optimizations.
1838 ///
1839 /// [`join`]: ../fn.join.html#panics
1840 ///
1841 /// # Examples
1842 ///
1843 /// If this code didn't use `panic_fuse()`, it would continue processing
1844 /// many more items in other threads (with long sleep delays) before the
1845 /// panic is finally propagated.
1846 ///
1847 /// ```should_panic
1848 /// use rayon::prelude::*;
1849 /// use std::{thread, time};
1850 ///
1851 /// (0..1_000_000)
1852 /// .into_par_iter()
1853 /// .panic_fuse()
1854 /// .for_each(|i| {
1855 /// // simulate some work
1856 /// thread::sleep(time::Duration::from_secs(1));
1857 /// assert!(i > 0); // oops!
1858 /// });
1859 /// ```
1860 fn panic_fuse(self) -> PanicFuse<Self> {
1861 PanicFuse::new(self)
1862 }
1863
1864 /// Creates a fresh collection containing all the elements produced
1865 /// by this parallel iterator.
1866 ///
1867 /// You may prefer to use `collect_into_vec()`, which allocates more
1868 /// efficiently with precise knowledge of how many elements the
1869 /// iterator contains, and even allows you to reuse an existing
1870 /// vector's backing store rather than allocating a fresh vector.
1871 ///
1872 /// # Examples
1873 ///
1874 /// ```
1875 /// use rayon::prelude::*;
1876 ///
1877 /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
1878 ///
1879 /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
1880 ///
1881 /// assert_eq!(sync_vec, async_vec);
1882 /// ```
1883 fn collect<C>(self) -> C
1884 where
1885 C: FromParallelIterator<Self::Item>,
1886 {
1887 C::from_par_iter(self)
1888 }
1889
1890 /// Unzips the items of a parallel iterator into a pair of arbitrary
1891 /// `ParallelExtend` containers.
1892 ///
1893 /// You may prefer to use `unzip_into_vecs()`, which allocates more
1894 /// efficiently with precise knowledge of how many elements the
1895 /// iterator contains, and even allows you to reuse existing
1896 /// vectors' backing stores rather than allocating fresh vectors.
1897 ///
1898 /// # Examples
1899 ///
1900 /// ```
1901 /// use rayon::prelude::*;
1902 ///
1903 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
1904 ///
1905 /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
1906 ///
1907 /// assert_eq!(left, [0, 1, 2, 3]);
1908 /// assert_eq!(right, [1, 2, 3, 4]);
1909 /// ```
1910 ///
1911 /// Nested pairs can be unzipped too.
1912 ///
1913 /// ```
1914 /// use rayon::prelude::*;
1915 ///
1916 /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
1917 /// .map(|i| (i, (i * i, i * i * i)))
1918 /// .unzip();
1919 ///
1920 /// assert_eq!(values, [0, 1, 2, 3]);
1921 /// assert_eq!(squares, [0, 1, 4, 9]);
1922 /// assert_eq!(cubes, [0, 1, 8, 27]);
1923 /// ```
1924 fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
1925 where
1926 Self: ParallelIterator<Item = (A, B)>,
1927 FromA: Default + Send + ParallelExtend<A>,
1928 FromB: Default + Send + ParallelExtend<B>,
1929 A: Send,
1930 B: Send,
1931 {
1932 unzip::unzip(self)
1933 }
1934
1935 /// Partitions the items of a parallel iterator into a pair of arbitrary
1936 /// `ParallelExtend` containers. Items for which the `predicate` returns
1937 /// true go into the first container, and the rest go into the second.
1938 ///
1939 /// Note: unlike the standard `Iterator::partition`, this allows distinct
1940 /// collection types for the left and right items. This is more flexible,
1941 /// but may require new type annotations when converting sequential code
1942 /// that used type inference assuming the two were the same.
1943 ///
1944 /// # Examples
1945 ///
1946 /// ```
1947 /// use rayon::prelude::*;
1948 ///
1949 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
1950 ///
1951 /// assert_eq!(left, [0, 2, 4, 6]);
1952 /// assert_eq!(right, [1, 3, 5, 7]);
1953 /// ```
1954 fn partition<A, B, P>(self, predicate: P) -> (A, B)
1955 where
1956 A: Default + Send + ParallelExtend<Self::Item>,
1957 B: Default + Send + ParallelExtend<Self::Item>,
1958 P: Fn(&Self::Item) -> bool + Sync + Send,
1959 {
1960 unzip::partition(self, predicate)
1961 }
1962
1963 /// Partitions and maps the items of a parallel iterator into a pair of
1964 /// arbitrary `ParallelExtend` containers. `Either::Left` items go into
1965 /// the first container, and `Either::Right` items go into the second.
1966 ///
1967 /// # Examples
1968 ///
1969 /// ```
1970 /// use rayon::prelude::*;
1971 /// use rayon::iter::Either;
1972 ///
1973 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
1974 /// .partition_map(|x| {
1975 /// if x % 2 == 0 {
1976 /// Either::Left(x * 4)
1977 /// } else {
1978 /// Either::Right(x * 3)
1979 /// }
1980 /// });
1981 ///
1982 /// assert_eq!(left, [0, 8, 16, 24]);
1983 /// assert_eq!(right, [3, 9, 15, 21]);
1984 /// ```
1985 ///
1986 /// Nested `Either` enums can be split as well.
1987 ///
1988 /// ```
1989 /// use rayon::prelude::*;
1990 /// use rayon::iter::Either::*;
1991 ///
1992 /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
1993 /// .into_par_iter()
1994 /// .partition_map(|x| match (x % 3, x % 5) {
1995 /// (0, 0) => Left(Left(x)),
1996 /// (0, _) => Left(Right(x)),
1997 /// (_, 0) => Right(Left(x)),
1998 /// (_, _) => Right(Right(x)),
1999 /// });
2000 ///
2001 /// assert_eq!(fizzbuzz, [15]);
2002 /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
2003 /// assert_eq!(buzz, [5, 10]);
2004 /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
2005 /// ```
2006 fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
2007 where
2008 A: Default + Send + ParallelExtend<L>,
2009 B: Default + Send + ParallelExtend<R>,
2010 P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
2011 L: Send,
2012 R: Send,
2013 {
2014 unzip::partition_map(self, predicate)
2015 }
2016
2017 /// Intersperses clones of an element between items of this iterator.
2018 ///
2019 /// # Examples
2020 ///
2021 /// ```
2022 /// use rayon::prelude::*;
2023 ///
2024 /// let x = vec![1, 2, 3];
2025 /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
2026 ///
2027 /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
2028 /// ```
2029 fn intersperse(self, element: Self::Item) -> Intersperse<Self>
2030 where
2031 Self::Item: Clone,
2032 {
2033 Intersperse::new(self, element)
2034 }
2035
2036 /// Internal method used to define the behavior of this parallel
2037 /// iterator. You should not need to call this directly.
2038 ///
2039 /// This method causes the iterator `self` to start producing
2040 /// items and to feed them to the consumer `consumer` one by one.
2041 /// It may split the consumer before doing so to create the
2042 /// opportunity to produce in parallel.
2043 ///
2044 /// See the [README] for more details on the internals of parallel
2045 /// iterators.
2046 ///
2047 /// [README]: README.md
2048 fn drive_unindexed<C>(self, consumer: C) -> C::Result
2049 where
2050 C: UnindexedConsumer<Self::Item>;
2051
2052 /// Internal method used to define the behavior of this parallel
2053 /// iterator. You should not need to call this directly.
2054 ///
2055 /// Returns the number of items produced by this iterator, if known
2056 /// statically. This can be used by consumers to trigger special fast
2057 /// paths. Therefore, if `Some(_)` is returned, this iterator must only
2058 /// use the (indexed) `Consumer` methods when driving a consumer, such
2059 /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
2060 /// other `UnindexedConsumer` methods -- or returning an inaccurate
2061 /// value -- may result in panics.
2062 ///
2063 /// This method is currently used to optimize `collect` for want
2064 /// of true Rust specialization; it may be removed when
2065 /// specialization is stable.
2066 fn opt_len(&self) -> Option<usize> {
2067 None
2068 }
2069 }
2070
2071 impl<T: ParallelIterator> IntoParallelIterator for T {
2072 type Iter = T;
2073 type Item = T::Item;
2074
2075 fn into_par_iter(self) -> T {
2076 self
2077 }
2078 }
2079
2080 /// An iterator that supports "random access" to its data, meaning
2081 /// that you can split it at arbitrary indices and draw data from
2082 /// those points.
2083 ///
2084 /// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
2085 pub trait IndexedParallelIterator: ParallelIterator {
2086 /// Collects the results of the iterator into the specified
2087 /// vector. The vector is always truncated before execution
2088 /// begins. If possible, reusing the vector across calls can lead
2089 /// to better performance since it reuses the same backing buffer.
2090 ///
2091 /// # Examples
2092 ///
2093 /// ```
2094 /// use rayon::prelude::*;
2095 ///
2096 /// // any prior data will be truncated
2097 /// let mut vec = vec![-1, -2, -3];
2098 ///
2099 /// (0..5).into_par_iter()
2100 /// .collect_into_vec(&mut vec);
2101 ///
2102 /// assert_eq!(vec, [0, 1, 2, 3, 4]);
2103 /// ```
2104 fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
2105 collect::collect_into_vec(self, target);
2106 }
2107
2108 /// Unzips the results of the iterator into the specified
2109 /// vectors. The vectors are always truncated before execution
2110 /// begins. If possible, reusing the vectors across calls can lead
2111 /// to better performance since they reuse the same backing buffer.
2112 ///
2113 /// # Examples
2114 ///
2115 /// ```
2116 /// use rayon::prelude::*;
2117 ///
2118 /// // any prior data will be truncated
2119 /// let mut left = vec![42; 10];
2120 /// let mut right = vec![-1; 10];
2121 ///
2122 /// (10..15).into_par_iter()
2123 /// .enumerate()
2124 /// .unzip_into_vecs(&mut left, &mut right);
2125 ///
2126 /// assert_eq!(left, [0, 1, 2, 3, 4]);
2127 /// assert_eq!(right, [10, 11, 12, 13, 14]);
2128 /// ```
2129 fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
2130 where
2131 Self: IndexedParallelIterator<Item = (A, B)>,
2132 A: Send,
2133 B: Send,
2134 {
2135 collect::unzip_into_vecs(self, left, right);
2136 }
2137
2138 /// Iterate over tuples `(A, B)`, where the items `A` are from
2139 /// this iterator and `B` are from the iterator given as an argument.
2140 /// Like the `zip` method on ordinary iterators, if the two
2141 /// iterators are of unequal length, you only get the items they
2142 /// have in common.
2143 ///
2144 /// # Examples
2145 ///
2146 /// ```
2147 /// use rayon::prelude::*;
2148 ///
2149 /// let result: Vec<_> = (1..4)
2150 /// .into_par_iter()
2151 /// .zip(vec!['a', 'b', 'c'])
2152 /// .collect();
2153 ///
2154 /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
2155 /// ```
2156 fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
2157 where
2158 Z: IntoParallelIterator,
2159 Z::Iter: IndexedParallelIterator,
2160 {
2161 Zip::new(self, zip_op.into_par_iter())
2162 }
2163
2164 /// The same as `Zip`, but requires that both iterators have the same length.
2165 ///
2166 /// # Panics
2167 /// Will panic if `self` and `zip_op` are not the same length.
2168 ///
2169 /// ```should_panic
2170 /// use rayon::prelude::*;
2171 ///
2172 /// let one = [1u8];
2173 /// let two = [2u8, 2];
2174 /// let one_iter = one.par_iter();
2175 /// let two_iter = two.par_iter();
2176 ///
2177 /// // this will panic
2178 /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2179 ///
2180 /// // we should never get here
2181 /// assert_eq!(1, zipped.len());
2182 /// ```
2183 fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
2184 where
2185 Z: IntoParallelIterator,
2186 Z::Iter: IndexedParallelIterator,
2187 {
2188 let zip_op_iter = zip_op.into_par_iter();
2189 assert_eq!(self.len(), zip_op_iter.len());
2190 ZipEq::new(self, zip_op_iter)
2191 }
2192
2193 /// Interleave elements of this iterator and the other given
2194 /// iterator. Alternately yields elements from this iterator and
2195 /// the given iterator, until both are exhausted. If one iterator
2196 /// is exhausted before the other, the remaining elements are taken
2197 /// from the other.
2198 ///
2199 /// # Examples
2200 ///
2201 /// ```
2202 /// use rayon::prelude::*;
2203 /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2204 /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2205 /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
2206 /// ```
2207 fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
2208 where
2209 I: IntoParallelIterator<Item = Self::Item>,
2210 I::Iter: IndexedParallelIterator<Item = Self::Item>,
2211 {
2212 Interleave::new(self, other.into_par_iter())
2213 }
2214
2215 /// Interleave elements of this iterator and the other given
2216 /// iterator, until one is exhausted.
2217 ///
2218 /// # Examples
2219 ///
2220 /// ```
2221 /// use rayon::prelude::*;
2222 /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2223 /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2224 /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2225 /// ```
2226 fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
2227 where
2228 I: IntoParallelIterator<Item = Self::Item>,
2229 I::Iter: IndexedParallelIterator<Item = Self::Item>,
2230 {
2231 InterleaveShortest::new(self, other.into_par_iter())
2232 }
2233
2234 /// Split an iterator up into fixed-size chunks.
2235 ///
2236 /// Returns an iterator that produces `Vec`s of the given number of elements.
2237 /// If the number of elements in the iterator is not divisible by `chunk_size`,
2238 /// the last chunk may be shorter than `chunk_size`.
2239 ///
2240 /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2241 /// slices, without having to allocate intermediate `Vec`s for the chunks.
2242 ///
2243 /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks
2244 /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut
2245 ///
2246 /// # Examples
2247 ///
2248 /// ```
2249 /// use rayon::prelude::*;
2250 /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2251 /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2252 /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2253 /// ```
2254 fn chunks(self, chunk_size: usize) -> Chunks<Self> {
2255 assert!(chunk_size != 0, "chunk_size must not be zero");
2256 Chunks::new(self, chunk_size)
2257 }
2258
2259 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2260 /// another.
2261 ///
2262 /// # Examples
2263 ///
2264 /// ```
2265 /// use rayon::prelude::*;
2266 /// use std::cmp::Ordering::*;
2267 ///
2268 /// let x = vec![1, 2, 3];
2269 /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2270 /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2271 /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2272 /// ```
2273 fn cmp<I>(self, other: I) -> Ordering
2274 where
2275 I: IntoParallelIterator<Item = Self::Item>,
2276 I::Iter: IndexedParallelIterator,
2277 Self::Item: Ord,
2278 {
2279 #[inline]
2280 fn ordering<T: Ord>((x, y): (T, T)) -> Ordering {
2281 Ord::cmp(&x, &y)
2282 }
2283
2284 #[inline]
2285 fn inequal(&ord: &Ordering) -> bool {
2286 ord != Ordering::Equal
2287 }
2288
2289 let other = other.into_par_iter();
2290 let ord_len = self.len().cmp(&other.len());
2291 self.zip(other)
2292 .map(ordering)
2293 .find_first(inequal)
2294 .unwrap_or(ord_len)
2295 }
2296
2297 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2298 /// another.
2299 ///
2300 /// # Examples
2301 ///
2302 /// ```
2303 /// use rayon::prelude::*;
2304 /// use std::cmp::Ordering::*;
2305 /// use std::f64::NAN;
2306 ///
2307 /// let x = vec![1.0, 2.0, 3.0];
2308 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2309 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2310 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2311 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
2312 /// ```
2313 fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2314 where
2315 I: IntoParallelIterator,
2316 I::Iter: IndexedParallelIterator,
2317 Self::Item: PartialOrd<I::Item>,
2318 {
2319 #[inline]
2320 fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> {
2321 PartialOrd::partial_cmp(&x, &y)
2322 }
2323
2324 #[inline]
2325 fn inequal(&ord: &Option<Ordering>) -> bool {
2326 ord != Some(Ordering::Equal)
2327 }
2328
2329 let other = other.into_par_iter();
2330 let ord_len = self.len().cmp(&other.len());
2331 self.zip(other)
2332 .map(ordering)
2333 .find_first(inequal)
2334 .unwrap_or(Some(ord_len))
2335 }
2336
2337 /// Determines if the elements of this `ParallelIterator`
2338 /// are equal to those of another.
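///
/// # Examples
///
/// A minimal sketch, in the same style as the `cmp` example above:
///
/// ```
/// use rayon::prelude::*;
///
/// let x = vec![1, 2, 3];
/// assert!(x.par_iter().eq(&vec![1, 2, 3]));
/// assert!(!x.par_iter().eq(&vec![1, 2, 4]));
/// ```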
2339 fn eq<I>(self, other: I) -> bool
2340 where
2341 I: IntoParallelIterator,
2342 I::Iter: IndexedParallelIterator,
2343 Self::Item: PartialEq<I::Item>,
2344 {
2345 #[inline]
2346 fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool {
2347 PartialEq::eq(&x, &y)
2348 }
2349
2350 let other = other.into_par_iter();
2351 self.len() == other.len() && self.zip(other).all(eq)
2352 }
2353
2354 /// Determines if the elements of this `ParallelIterator`
2355 /// are unequal to those of another.
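///
/// # Examples
///
/// A short illustrative sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let x = vec![1, 2, 3];
/// assert!(x.par_iter().ne(&vec![1, 2, 4]));
/// assert!(!x.par_iter().ne(&vec![1, 2, 3]));
/// ```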
2356 fn ne<I>(self, other: I) -> bool
2357 where
2358 I: IntoParallelIterator,
2359 I::Iter: IndexedParallelIterator,
2360 Self::Item: PartialEq<I::Item>,
2361 {
2362 !self.eq(other)
2363 }
2364
2365 /// Determines if the elements of this `ParallelIterator`
2366 /// are lexicographically less than those of another.
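///
/// # Examples
///
/// A small usage sketch (lexicographic, like the `cmp` example above):
///
/// ```
/// use rayon::prelude::*;
///
/// let x = vec![1, 2, 3];
/// assert!(x.par_iter().lt(&vec![1, 3, 0]));
/// assert!(!x.par_iter().lt(&vec![1, 2, 3]));
/// ```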
2367 fn lt<I>(self, other: I) -> bool
2368 where
2369 I: IntoParallelIterator,
2370 I::Iter: IndexedParallelIterator,
2371 Self::Item: PartialOrd<I::Item>,
2372 {
2373 self.partial_cmp(other) == Some(Ordering::Less)
2374 }
2375
2376 /// Determines if the elements of this `ParallelIterator`
2377 /// are less than or equal to those of another.
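///
/// # Examples
///
/// For illustration:
///
/// ```
/// use rayon::prelude::*;
///
/// let x = vec![1, 2, 3];
/// assert!(x.par_iter().le(&vec![1, 2, 3]));
/// assert!(x.par_iter().le(&vec![1, 2, 4]));
/// assert!(!x.par_iter().le(&vec![1, 2]));
/// ```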
2378 fn le<I>(self, other: I) -> bool
2379 where
2380 I: IntoParallelIterator,
2381 I::Iter: IndexedParallelIterator,
2382 Self::Item: PartialOrd<I::Item>,
2383 {
2384 let ord = self.partial_cmp(other);
2385 ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2386 }
2387
2388 /// Determines if the elements of this `ParallelIterator`
2389 /// are lexicographically greater than those of another.
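///
/// # Examples
///
/// A minimal sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let x = vec![1, 2, 3];
/// assert!(x.par_iter().gt(&vec![1, 2]));
/// assert!(!x.par_iter().gt(&vec![1, 2, 3]));
/// ```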
2390 fn gt<I>(self, other: I) -> bool
2391 where
2392 I: IntoParallelIterator,
2393 I::Iter: IndexedParallelIterator,
2394 Self::Item: PartialOrd<I::Item>,
2395 {
2396 self.partial_cmp(other) == Some(Ordering::Greater)
2397 }
2398
2399 /// Determines if the elements of this `ParallelIterator`
2400 /// are greater than or equal to those of another.
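///
/// # Examples
///
/// For illustration:
///
/// ```
/// use rayon::prelude::*;
///
/// let x = vec![1, 2, 3];
/// assert!(x.par_iter().ge(&vec![1, 2, 3]));
/// assert!(x.par_iter().ge(&vec![1, 2]));
/// assert!(!x.par_iter().ge(&vec![1, 3, 0]));
/// ```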
2401 fn ge<I>(self, other: I) -> bool
2402 where
2403 I: IntoParallelIterator,
2404 I::Iter: IndexedParallelIterator,
2405 Self::Item: PartialOrd<I::Item>,
2406 {
2407 let ord = self.partial_cmp(other);
2408 ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2409 }
2410
2411 /// Yields an index along with each item.
2412 ///
2413 /// # Examples
2414 ///
2415 /// ```
2416 /// use rayon::prelude::*;
2417 ///
2418 /// let chars = vec!['a', 'b', 'c'];
2419 /// let result: Vec<_> = chars
2420 /// .into_par_iter()
2421 /// .enumerate()
2422 /// .collect();
2423 ///
2424 /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2425 /// ```
2426 fn enumerate(self) -> Enumerate<Self> {
2427 Enumerate::new(self)
2428 }
2429
2430 /// Creates an iterator that skips the first `n` elements.
2431 ///
2432 /// # Examples
2433 ///
2434 /// ```
2435 /// use rayon::prelude::*;
2436 ///
2437 /// let result: Vec<_> = (0..100)
2438 /// .into_par_iter()
2439 /// .skip(95)
2440 /// .collect();
2441 ///
2442 /// assert_eq!(result, [95, 96, 97, 98, 99]);
2443 /// ```
2444 fn skip(self, n: usize) -> Skip<Self> {
2445 Skip::new(self, n)
2446 }
2447
2448 /// Creates an iterator that yields the first `n` elements.
2449 ///
2450 /// # Examples
2451 ///
2452 /// ```
2453 /// use rayon::prelude::*;
2454 ///
2455 /// let result: Vec<_> = (0..100)
2456 /// .into_par_iter()
2457 /// .take(5)
2458 /// .collect();
2459 ///
2460 /// assert_eq!(result, [0, 1, 2, 3, 4]);
2461 /// ```
2462 fn take(self, n: usize) -> Take<Self> {
2463 Take::new(self, n)
2464 }
2465
2466 /// Searches for **some** item in the parallel iterator that
2467 /// matches the given predicate, and returns its index. Like
2468 /// `ParallelIterator::find_any`, the parallel search will not
2469 /// necessarily find the **first** match, and once a match is
2470 /// found we'll attempt to stop processing any more.
2471 ///
2472 /// # Examples
2473 ///
2474 /// ```
2475 /// use rayon::prelude::*;
2476 ///
2477 /// let a = [1, 2, 3, 3];
2478 ///
2479 /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
2480 /// assert!(i == 2 || i == 3);
2481 ///
2482 /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
2483 /// ```
2484 fn position_any<P>(self, predicate: P) -> Option<usize>
2485 where
2486 P: Fn(Self::Item) -> bool + Sync + Send,
2487 {
2488 #[inline]
2489 fn check(&(_, p): &(usize, bool)) -> bool {
2490 p
2491 }
2492
2493 let (i, _) = self.map(predicate).enumerate().find_any(check)?;
2494 Some(i)
2495 }
2496
2497 /// Searches for the sequentially **first** item in the parallel iterator
2498 /// that matches the given predicate, and returns its index.
2499 ///
2500 /// Like `ParallelIterator::find_first`, once a match is found,
2501 /// all attempts to the right of the match will be stopped, while
2502 /// attempts to the left must continue in case an earlier match
2503 /// is found.
2504 ///
2505 /// Note that not all parallel iterators have a useful order, much like
2506 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
2507 /// just want the first match discovered anywhere in the iterator,
2508 /// `position_any` is a better choice.
2509 ///
2510 /// # Examples
2511 ///
2512 /// ```
2513 /// use rayon::prelude::*;
2514 ///
2515 /// let a = [1, 2, 3, 3];
2516 ///
2517 /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
2518 ///
2519 /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
2520 /// ```
2521 fn position_first<P>(self, predicate: P) -> Option<usize>
2522 where
2523 P: Fn(Self::Item) -> bool + Sync + Send,
2524 {
2525 #[inline]
2526 fn check(&(_, p): &(usize, bool)) -> bool {
2527 p
2528 }
2529
2530 let (i, _) = self.map(predicate).enumerate().find_first(check)?;
2531 Some(i)
2532 }
2533
2534 /// Searches for the sequentially **last** item in the parallel iterator
2535 /// that matches the given predicate, and returns its index.
2536 ///
2537 /// Like `ParallelIterator::find_last`, once a match is found,
2538 /// all attempts to the left of the match will be stopped, while
2539 /// attempts to the right must continue in case a later match
2540 /// is found.
2541 ///
2542 /// Note that not all parallel iterators have a useful order, much like
2543 /// sequential `HashMap` iteration, so "last" may be nebulous. When the
2544 /// order doesn't actually matter to you, `position_any` is a better
2545 /// choice.
2546 ///
2547 /// # Examples
2548 ///
2549 /// ```
2550 /// use rayon::prelude::*;
2551 ///
2552 /// let a = [1, 2, 3, 3];
2553 ///
2554 /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
2555 ///
2556 /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
2557 /// ```
2558 fn position_last<P>(self, predicate: P) -> Option<usize>
2559 where
2560 P: Fn(Self::Item) -> bool + Sync + Send,
2561 {
2562 #[inline]
2563 fn check(&(_, p): &(usize, bool)) -> bool {
2564 p
2565 }
2566
2567 let (i, _) = self.map(predicate).enumerate().find_last(check)?;
2568 Some(i)
2569 }
2570
2571 #[doc(hidden)]
2572 #[deprecated(
2573 note = "parallel `position` does not search in order -- use `position_any`, \\
2574 `position_first`, or `position_last`"
2575 )]
2576 fn position<P>(self, predicate: P) -> Option<usize>
2577 where
2578 P: Fn(Self::Item) -> bool + Sync + Send,
2579 {
2580 self.position_any(predicate)
2581 }
2582
2583 /// Produces a new iterator with the elements of this iterator in
2584 /// reverse order.
2585 ///
2586 /// # Examples
2587 ///
2588 /// ```
2589 /// use rayon::prelude::*;
2590 ///
2591 /// let result: Vec<_> = (0..5)
2592 /// .into_par_iter()
2593 /// .rev()
2594 /// .collect();
2595 ///
2596 /// assert_eq!(result, [4, 3, 2, 1, 0]);
2597 /// ```
2598 fn rev(self) -> Rev<Self> {
2599 Rev::new(self)
2600 }
2601
2602 /// Sets the minimum length of iterators desired to process in each
2603 /// thread. Rayon will not split any smaller than this length, but
2604 /// of course an iterator could already be smaller to begin with.
2605 ///
2606 /// Producers like `zip` and `interleave` will use the greater of the two
2607 /// minimums.
2608 /// Chained iterators and iterators inside `flat_map` may each use
2609 /// their own minimum length.
2610 ///
2611 /// # Examples
2612 ///
2613 /// ```
2614 /// use rayon::prelude::*;
2615 ///
2616 /// let min = (0..1_000_000)
2617 /// .into_par_iter()
2618 /// .with_min_len(1234)
2619 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
2620 /// .min().unwrap();
2621 ///
2622 /// assert!(min >= 1234);
2623 /// ```
2624 fn with_min_len(self, min: usize) -> MinLen<Self> {
2625 MinLen::new(self, min)
2626 }
2627
2628 /// Sets the maximum length of iterators desired to process in each
2629 /// thread. Rayon will try to split at least below this length,
2630 /// unless that would put it below the length from `with_min_len()`.
2631 /// For example, given min=10 and max=15, a length of 16 will not be
2632 /// split any further.
2633 ///
2634 /// Producers like `zip` and `interleave` will use the lesser of the two
2635 /// maximums.
2636 /// Chained iterators and iterators inside `flat_map` may each use
2637 /// their own maximum length.
2638 ///
2639 /// # Examples
2640 ///
2641 /// ```
2642 /// use rayon::prelude::*;
2643 ///
2644 /// let max = (0..1_000_000)
2645 /// .into_par_iter()
2646 /// .with_max_len(1234)
2647 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
2648 /// .max().unwrap();
2649 ///
2650 /// assert!(max <= 1234);
2651 /// ```
2652 fn with_max_len(self, max: usize) -> MaxLen<Self> {
2653 MaxLen::new(self, max)
2654 }
2655
2656 /// Produces an exact count of how many items this iterator will
2657 /// produce, presuming no panic occurs.
2658 ///
2659 /// # Examples
2660 ///
2661 /// ```
2662 /// use rayon::prelude::*;
2663 ///
2664 /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
2665 /// assert_eq!(par_iter.len(), 10);
2666 ///
2667 /// let vec: Vec<_> = par_iter.collect();
2668 /// assert_eq!(vec.len(), 10);
2669 /// ```
2670 fn len(&self) -> usize;
2671
2672 /// Internal method used to define the behavior of this parallel
2673 /// iterator. You should not need to call this directly.
2674 ///
2675 /// This method causes the iterator `self` to start producing
2676 /// items and to feed them to the consumer `consumer` one by one.
2677 /// It may split the consumer before doing so to create the
2678 /// opportunity to produce in parallel. If a split does happen, it
2679 /// will inform the consumer of the index where the split should
2680 /// occur (unlike `ParallelIterator::drive_unindexed()`).
2681 ///
2682 /// See the [README] for more details on the internals of parallel
2683 /// iterators.
2684 ///
2685 /// [README]: README.md
2686 fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
2687
2688 /// Internal method used to define the behavior of this parallel
2689 /// iterator. You should not need to call this directly.
2690 ///
2691 /// This method converts the iterator into a producer P and then
2692 /// invokes `callback.callback()` with P. Note that the type of
2693 /// this producer is not defined as part of the API, since
2694 /// `callback` must be defined generically for all producers. This
2695 /// allows the producer type to contain references; it also means
2696 /// that parallel iterators can adjust that type without causing a
2697 /// breaking change.
2698 ///
2699 /// See the [README] for more details on the internals of parallel
2700 /// iterators.
2701 ///
2702 /// [README]: README.md
2703 fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
2704 }
2705
2706 /// `FromParallelIterator` implements the creation of a collection
2707 /// from a [`ParallelIterator`]. By implementing
2708 /// `FromParallelIterator` for a given type, you define how it will be
2709 /// created from an iterator.
2710 ///
2711 /// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
2712 ///
2713 /// [`ParallelIterator`]: trait.ParallelIterator.html
2714 /// [`collect()`]: trait.ParallelIterator.html#method.collect
2715 ///
2716 /// # Examples
2717 ///
2718 /// Implementing `FromParallelIterator` for your type:
2719 ///
2720 /// ```
2721 /// use rayon::prelude::*;
2722 /// use std::mem;
2723 ///
2724 /// struct BlackHole {
2725 /// mass: usize,
2726 /// }
2727 ///
2728 /// impl<T: Send> FromParallelIterator<T> for BlackHole {
2729 /// fn from_par_iter<I>(par_iter: I) -> Self
2730 /// where I: IntoParallelIterator<Item = T>
2731 /// {
2732 /// let par_iter = par_iter.into_par_iter();
2733 /// BlackHole {
2734 /// mass: par_iter.count() * mem::size_of::<T>(),
2735 /// }
2736 /// }
2737 /// }
2738 ///
2739 /// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
2740 /// assert_eq!(bh.mass, 4000);
2741 /// ```
2742 pub trait FromParallelIterator<T>
2743 where
2744 T: Send,
2745 {
2746 /// Creates an instance of the collection from the parallel iterator `par_iter`.
2747 ///
2748 /// If your collection is not naturally parallel, the easiest (and
2749 /// fastest) way to do this is often to collect `par_iter` into a
2750 /// [`LinkedList`] or other intermediate data structure and then
2751 /// sequentially extend your collection. However, a more 'native'
2752 /// technique is to use the [`par_iter.fold`] or
2753 /// [`par_iter.fold_with`] methods to create the collection.
2754 /// Alternatively, if your collection is 'natively' parallel, you
2755 /// can use [`par_iter.for_each`] to process each element in turn.
2756 ///
2757 /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html
2758 /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold
2759 /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with
2760 /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each
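///
/// # Examples
///
/// A minimal sketch of the intermediate-`LinkedList` approach described
/// above; the `MyVec` wrapper type here is purely illustrative:
///
/// ```
/// use rayon::prelude::*;
/// use std::collections::LinkedList;
///
/// struct MyVec<T>(Vec<T>);
///
/// impl<T: Send> FromParallelIterator<T> for MyVec<T> {
///     fn from_par_iter<I>(par_iter: I) -> Self
///     where I: IntoParallelIterator<Item = T>
///     {
///         // Collect in parallel into an intermediate linked list,
///         // then extend a plain `Vec` from it sequentially.
///         let list: LinkedList<T> = par_iter.into_par_iter().collect();
///         let mut vec = Vec::with_capacity(list.len());
///         vec.extend(list);
///         MyVec(vec)
///     }
/// }
///
/// let v: MyVec<i32> = (0..100).into_par_iter().collect();
/// assert_eq!(v.0.len(), 100);
/// ```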
2761 fn from_par_iter<I>(par_iter: I) -> Self
2762 where
2763 I: IntoParallelIterator<Item = T>;
2764 }
2765
2766 /// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
2767 ///
2768 /// [`ParallelIterator`]: trait.ParallelIterator.html
2769 ///
2770 /// # Examples
2771 ///
2772 /// Implementing `ParallelExtend` for your type:
2773 ///
2774 /// ```
2775 /// use rayon::prelude::*;
2776 /// use std::mem;
2777 ///
2778 /// struct BlackHole {
2779 /// mass: usize,
2780 /// }
2781 ///
2782 /// impl<T: Send> ParallelExtend<T> for BlackHole {
2783 /// fn par_extend<I>(&mut self, par_iter: I)
2784 /// where I: IntoParallelIterator<Item = T>
2785 /// {
2786 /// let par_iter = par_iter.into_par_iter();
2787 /// self.mass += par_iter.count() * mem::size_of::<T>();
2788 /// }
2789 /// }
2790 ///
2791 /// let mut bh = BlackHole { mass: 0 };
2792 /// bh.par_extend(0i32..1000);
2793 /// assert_eq!(bh.mass, 4000);
2794 /// bh.par_extend(0i64..10);
2795 /// assert_eq!(bh.mass, 4080);
2796 /// ```
2797 pub trait ParallelExtend<T>
2798 where
2799 T: Send,
2800 {
2801 /// Extends an instance of the collection with the elements drawn
2802 /// from the parallel iterator `par_iter`.
2803 ///
2804 /// # Examples
2805 ///
2806 /// ```
2807 /// use rayon::prelude::*;
2808 ///
2809 /// let mut vec = vec![];
2810 /// vec.par_extend(0..5);
2811 /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
2812 /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
2813 /// ```
2814 fn par_extend<I>(&mut self, par_iter: I)
2815 where
2816 I: IntoParallelIterator<Item = T>;
2817 }
2818
2819 /// We hide the `Try` trait in a private module, as it's only meant to be a
2820 /// stable clone of the standard library's `Try` trait, as yet unstable.
2821 mod private {
2822 /// Clone of `std::ops::Try`.
2823 ///
2824 /// Implementing this trait is not permitted outside of `rayon`.
2825 pub trait Try {
2826 private_decl! {}
2827
2828 type Ok;
2829 type Error;
2830 fn into_result(self) -> Result<Self::Ok, Self::Error>;
2831 fn from_ok(v: Self::Ok) -> Self;
2832 fn from_error(v: Self::Error) -> Self;
2833 }
2834
2835 impl<T> Try for Option<T> {
2836 private_impl! {}
2837
2838 type Ok = T;
2839 type Error = ();
2840
2841 fn into_result(self) -> Result<T, ()> {
2842 self.ok_or(())
2843 }
2844 fn from_ok(v: T) -> Self {
2845 Some(v)
2846 }
2847 fn from_error(_: ()) -> Self {
2848 None
2849 }
2850 }
2851
2852 impl<T, E> Try for Result<T, E> {
2853 private_impl! {}
2854
2855 type Ok = T;
2856 type Error = E;
2857
2858 fn into_result(self) -> Result<T, E> {
2859 self
2860 }
2861 fn from_ok(v: T) -> Self {
2862 Ok(v)
2863 }
2864 fn from_error(v: E) -> Self {
2865 Err(v)
2866 }
2867 }
2868 }