1 //! Traits for writing parallel programs using an iterator-style interface
2 //!
3 //! You will rarely need to interact with this module directly unless you have
4 //! need to name one of the iterator types.
5 //!
6 //! Parallel iterators make it easy to write iterator-like chains that
7 //! execute in parallel: typically all you have to do is convert the
8 //! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9 //! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10 //! example, to compute the sum of the squares of a sequence of
11 //! integers, one might write:
12 //!
13 //! ```rust
14 //! use rayon::prelude::*;
15 //! fn sum_of_squares(input: &[i32]) -> i32 {
16 //! input.par_iter()
17 //! .map(|i| i * i)
18 //! .sum()
19 //! }
20 //! ```
21 //!
22 //! Or, to increment all the integers in a slice, you could write:
23 //!
24 //! ```rust
25 //! use rayon::prelude::*;
26 //! fn increment_all(input: &mut [i32]) {
27 //! input.par_iter_mut()
28 //! .for_each(|p| *p += 1);
29 //! }
30 //! ```
31 //!
32 //! To use parallel iterators, first import the traits by adding
33 //! something like `use rayon::prelude::*` to your module. You can
34 //! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35 //! parallel iterator. Like a [regular iterator][], parallel
36 //! iterators work by first constructing a computation and then
37 //! executing it.
38 //!
39 //! In addition to `par_iter()` and friends, some types offer other
40 //! ways to create (or consume) parallel iterators:
41 //!
42 //! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43 //! `par_windows`, as well as various parallel sorting
44 //! operations. See [the `ParallelSlice` trait] for the full list.
45 //! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46 //! See [the `ParallelString` trait] for the full list.
47 //! - Various collections offer [`par_extend`], which grows a
48 //! collection given a parallel iterator. (If you don't have a
49 //! collection to extend, you can use [`collect()`] to create a new
50 //!   one from scratch.) See the sketch below.
51 //!
52 //! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
53 //! [the `ParallelString` trait]: ../str/trait.ParallelString.html
54 //! [`par_extend`]: trait.ParallelExtend.html
55 //! [`collect()`]: trait.ParallelIterator.html#method.collect
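//!
//! For example, a minimal sketch of growing a `Vec` with [`par_extend`]:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! let mut evens: Vec<i32> = Vec::new();
//! evens.par_extend((0..10).into_par_iter().filter(|&x| x % 2 == 0));
//! assert_eq!(evens, [0, 2, 4, 6, 8]);
//! ```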
56 //!
57 //! To see the full range of methods available on parallel iterators,
58 //! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
59 //! traits.
60 //!
61 //! If you'd like to build a custom parallel iterator, or to write your own
62 //! combinator, then check out the [split] function and the [plumbing] module.
63 //!
64 //! [regular iterator]: http://doc.rust-lang.org/std/iter/trait.Iterator.html
65 //! [`ParallelIterator`]: trait.ParallelIterator.html
66 //! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
67 //! [split]: fn.split.html
68 //! [plumbing]: plumbing/index.html
69 //!
70 //! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
71 //! has been deliberately obscured from the public API. This trait is intended
72 //! to mirror the unstable `std::ops::Try` with implementations for `Option` and
73 //! `Result`, where `Some`/`Ok` values will let those iterators continue, but
74 //! `None`/`Err` values will exit early.
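//!
//! For example, a minimal sketch of the `Option` flavor of `try_for_each()`
//! (the `Result` flavor behaves analogously):
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // Every item returns `Some`, so the whole call returns `Some(())`.
//! let ok = (0..100).into_par_iter().try_for_each(|_| Some(()));
//! assert_eq!(ok, Some(()));
//!
//! // A `None` result stops the iteration early and is returned as-is.
//! let stopped = (0..100).into_par_iter()
//!     .try_for_each(|x| if x < 50 { Some(()) } else { None });
//! assert_eq!(stopped, None);
//! ```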
75
76 use self::plumbing::*;
77 use self::private::Try;
78 pub use either::Either;
79 use std::cmp::{self, Ordering};
80 use std::iter::{Product, Sum};
81 use std::ops::Fn;
82
83 // There is a method to the madness here:
84 //
85 // - Most of these modules are private but expose certain types to the end-user
86 // (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
87 // public API surface of the `ParallelIterator` traits.
88 // - In **this** module, those public types are always used unprefixed, which forces
89 // us to add a `pub use` and helps identify if we missed anything.
90 // - In contrast, items that appear **only** in the body of a method,
91 // e.g. `find::find()`, are always used **prefixed**, so that they
92 // can be readily distinguished.
93
94 mod par_bridge;
95 pub use self::par_bridge::{IterBridge, ParallelBridge};
96
97 mod chain;
98 mod find;
99 mod find_first_last;
100 pub use self::chain::Chain;
101 mod chunks;
102 pub use self::chunks::Chunks;
103 mod collect;
104 mod enumerate;
105 pub use self::enumerate::Enumerate;
106 mod filter;
107 pub use self::filter::Filter;
108 mod filter_map;
109 pub use self::filter_map::FilterMap;
110 mod flat_map;
111 pub use self::flat_map::FlatMap;
112 mod flatten;
113 pub use self::flatten::Flatten;
114 mod fold;
115 mod for_each;
116 mod from_par_iter;
117 pub mod plumbing;
118 pub use self::fold::{Fold, FoldWith};
119 mod try_fold;
120 pub use self::try_fold::{TryFold, TryFoldWith};
121 mod reduce;
122 mod skip;
123 mod try_reduce;
124 mod try_reduce_with;
125 pub use self::skip::Skip;
126 mod splitter;
127 pub use self::splitter::{split, Split};
128 mod take;
129 pub use self::take::Take;
130 mod map;
131 pub use self::map::Map;
132 mod map_with;
133 pub use self::map_with::{MapInit, MapWith};
134 mod zip;
135 pub use self::zip::Zip;
136 mod zip_eq;
137 pub use self::zip_eq::ZipEq;
138 mod interleave;
139 pub use self::interleave::Interleave;
140 mod interleave_shortest;
141 pub use self::interleave_shortest::InterleaveShortest;
142 mod intersperse;
143 pub use self::intersperse::Intersperse;
144 mod update;
145 pub use self::update::Update;
146
147 mod noop;
148 mod rev;
149 pub use self::rev::Rev;
150 mod len;
151 pub use self::len::{MaxLen, MinLen};
152 mod cloned;
153 mod product;
154 mod sum;
155 pub use self::cloned::Cloned;
156 mod inspect;
157 pub use self::inspect::Inspect;
158 mod while_some;
159 pub use self::while_some::WhileSome;
160 mod extend;
161 mod repeat;
162 mod unzip;
163 pub use self::repeat::{repeat, Repeat};
164 pub use self::repeat::{repeatn, RepeatN};
165
166 mod empty;
167 pub use self::empty::{empty, Empty};
168 mod once;
169 pub use self::once::{once, Once};
170
171 #[cfg(test)]
172 mod test;
173
174 /// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
175 ///
176 /// By implementing `IntoParallelIterator` for a type, you define how it will
177 /// be transformed into an iterator. This is a parallel version of the standard
178 /// library's [`std::iter::IntoIterator`] trait.
179 ///
180 /// [`ParallelIterator`]: trait.ParallelIterator.html
181 /// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
182 pub trait IntoParallelIterator {
183 /// The parallel iterator type that will be created.
184 type Iter: ParallelIterator<Item = Self::Item>;
185
186 /// The type of item that the parallel iterator will produce.
187 type Item: Send;
188
189 /// Converts `self` into a parallel iterator.
190 ///
191 /// # Examples
192 ///
193 /// ```
194 /// use rayon::prelude::*;
195 ///
196 /// println!("counting in parallel:");
197 /// (0..100).into_par_iter()
198 /// .for_each(|i| println!("{}", i));
199 /// ```
200 ///
201 /// This conversion is often implicit for arguments to methods like [`zip`].
202 ///
203 /// ```
204 /// use rayon::prelude::*;
205 ///
206 /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
207 /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
208 /// ```
209 ///
210 /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
211 fn into_par_iter(self) -> Self::Iter;
212 }
213
214 /// `IntoParallelRefIterator` implements the conversion to a
215 /// [`ParallelIterator`], providing shared references to the data.
216 ///
217 /// This is a parallel version of the `iter()` method
218 /// defined by various collections.
219 ///
220 /// This trait is automatically implemented
221 /// `for I where &I: IntoParallelIterator`. In most cases, users
222 /// will want to implement [`IntoParallelIterator`] rather than implement
223 /// this trait directly.
224 ///
225 /// [`ParallelIterator`]: trait.ParallelIterator.html
226 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
227 pub trait IntoParallelRefIterator<'data> {
228 /// The type of the parallel iterator that will be returned.
229 type Iter: ParallelIterator<Item = Self::Item>;
230
231 /// The type of item that the parallel iterator will produce.
232 /// This will typically be an `&'data T` reference type.
233 type Item: Send + 'data;
234
235 /// Converts `self` into a parallel iterator.
236 ///
237 /// # Examples
238 ///
239 /// ```
240 /// use rayon::prelude::*;
241 ///
242 /// let v: Vec<_> = (0..100).collect();
243 /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
244 ///
245 /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
246 /// // producing the exact same references.
247 /// assert!(v.par_iter().zip(&v)
248 /// .all(|(a, b)| std::ptr::eq(a, b)));
249 /// ```
250 fn par_iter(&'data self) -> Self::Iter;
251 }
252
253 impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
254 where
255 &'data I: IntoParallelIterator,
256 {
257 type Iter = <&'data I as IntoParallelIterator>::Iter;
258 type Item = <&'data I as IntoParallelIterator>::Item;
259
260 fn par_iter(&'data self) -> Self::Iter {
261 self.into_par_iter()
262 }
263 }
264
265 /// `IntoParallelRefMutIterator` implements the conversion to a
266 /// [`ParallelIterator`], providing mutable references to the data.
267 ///
268 /// This is a parallel version of the `iter_mut()` method
269 /// defined by various collections.
270 ///
271 /// This trait is automatically implemented
272 /// `for I where &mut I: IntoParallelIterator`. In most cases, users
273 /// will want to implement [`IntoParallelIterator`] rather than implement
274 /// this trait directly.
275 ///
276 /// [`ParallelIterator`]: trait.ParallelIterator.html
277 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
278 pub trait IntoParallelRefMutIterator<'data> {
279 /// The type of iterator that will be created.
280 type Iter: ParallelIterator<Item = Self::Item>;
281
282 /// The type of item that will be produced; this is typically an
283 /// `&'data mut T` reference.
284 type Item: Send + 'data;
285
286 /// Creates the parallel iterator from `self`.
287 ///
288 /// # Examples
289 ///
290 /// ```
291 /// use rayon::prelude::*;
292 ///
293 /// let mut v = vec![0usize; 5];
294 /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
295 /// assert_eq!(v, [0, 1, 2, 3, 4]);
296 /// ```
297 fn par_iter_mut(&'data mut self) -> Self::Iter;
298 }
299
300 impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
301 where
302 &'data mut I: IntoParallelIterator,
303 {
304 type Iter = <&'data mut I as IntoParallelIterator>::Iter;
305 type Item = <&'data mut I as IntoParallelIterator>::Item;
306
307 fn par_iter_mut(&'data mut self) -> Self::Iter {
308 self.into_par_iter()
309 }
310 }
311
312 /// Parallel version of the standard iterator trait.
313 ///
314 /// The combinators on this trait are available on **all** parallel
315 /// iterators. Additional methods can be found on the
316 /// [`IndexedParallelIterator`] trait: those methods are only
317 /// available for parallel iterators where the number of items is
318 /// known in advance (so, e.g., after invoking `filter`, those methods
319 /// become unavailable).
320 ///
321 /// For examples of using parallel iterators, see [the docs on the
322 /// `iter` module][iter].
323 ///
324 /// [iter]: index.html
325 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
326 pub trait ParallelIterator: Sized + Send {
327 /// The type of item that this parallel iterator produces.
328 /// For example, if you use the [`for_each`] method, this is the type of
329 /// item that your closure will be invoked with.
330 ///
331 /// [`for_each`]: #method.for_each
332 type Item: Send;
333
334 /// Executes `OP` on each item produced by the iterator, in parallel.
335 ///
336 /// # Examples
337 ///
338 /// ```
339 /// use rayon::prelude::*;
340 ///
341 /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
342 /// ```
343 fn for_each<OP>(self, op: OP)
344 where
345 OP: Fn(Self::Item) + Sync + Send,
346 {
347 for_each::for_each(self, &op)
348 }
349
350 /// Executes `OP` on the given `init` value with each item produced by
351 /// the iterator, in parallel.
352 ///
353 /// The `init` value will be cloned only as needed to be paired with
354 /// the group of items in each rayon job. It does not require the type
355 /// to be `Sync`.
356 ///
357 /// # Examples
358 ///
359 /// ```
360 /// use std::sync::mpsc::channel;
361 /// use rayon::prelude::*;
362 ///
363 /// let (sender, receiver) = channel();
364 ///
365 /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
366 ///
367 /// let mut res: Vec<_> = receiver.iter().collect();
368 ///
369 /// res.sort();
370 ///
371 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
372 /// ```
373 fn for_each_with<OP, T>(self, init: T, op: OP)
374 where
375 OP: Fn(&mut T, Self::Item) + Sync + Send,
376 T: Send + Clone,
377 {
378 self.map_with(init, op).for_each(|()| ())
379 }
380
381 /// Executes `OP` on a value returned by `init` with each item produced by
382 /// the iterator, in parallel.
383 ///
384 /// The `init` function will be called only as needed for a value to be
385 /// paired with the group of items in each rayon job. There is no
386 /// constraint on that returned type at all!
387 ///
388 /// # Examples
389 ///
390 /// ```
391 /// extern crate rand;
392 /// extern crate rayon;
393 ///
394 /// use rand::Rng;
395 /// use rayon::prelude::*;
396 ///
397 /// let mut v = vec![0u8; 1_000_000];
398 ///
399 /// v.par_chunks_mut(1000)
400 /// .for_each_init(
401 /// || rand::thread_rng(),
402 /// |rng, chunk| rng.fill(chunk),
403 /// );
404 ///
405 /// // There's a remote chance that this will fail...
406 /// for i in 0u8..=255 {
407 /// assert!(v.contains(&i));
408 /// }
409 /// ```
410 fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
411 where
412 OP: Fn(&mut T, Self::Item) + Sync + Send,
413 INIT: Fn() -> T + Sync + Send,
414 {
415 self.map_init(init, op).for_each(|()| ())
416 }
417
418 /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
419 ///
420 /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
421 /// stop processing the rest of the items in the iterator as soon as
422 /// possible, and we will return that terminating value. Otherwise, we will
423 /// return an empty `Result::Ok(())` or `Option::Some(())`. If there are
424 /// multiple errors in parallel, it is not specified which will be returned.
425 ///
426 /// # Examples
427 ///
428 /// ```
429 /// use rayon::prelude::*;
430 /// use std::io::{self, Write};
431 ///
432 /// // This will stop iteration early if there's any write error, like
433 /// // having piped output get closed on the other end.
434 /// (0..100).into_par_iter()
435 /// .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
436 /// .expect("expected no write errors");
437 /// ```
438 fn try_for_each<OP, R>(self, op: OP) -> R
439 where
440 OP: Fn(Self::Item) -> R + Sync + Send,
441 R: Try<Ok = ()> + Send,
442 {
443 self.map(op).try_reduce(|| (), |(), ()| R::from_ok(()))
444 }
445
446 /// Executes a fallible `OP` on the given `init` value with each item
447 /// produced by the iterator, in parallel.
448 ///
449 /// This combines the `init` semantics of [`for_each_with()`] and the
450 /// failure semantics of [`try_for_each()`].
451 ///
452 /// [`for_each_with()`]: #method.for_each_with
453 /// [`try_for_each()`]: #method.try_for_each
454 ///
455 /// # Examples
456 ///
457 /// ```
458 /// use std::sync::mpsc::channel;
459 /// use rayon::prelude::*;
460 ///
461 /// let (sender, receiver) = channel();
462 ///
463 /// (0..5).into_par_iter()
464 /// .try_for_each_with(sender, |s, x| s.send(x))
465 /// .expect("expected no send errors");
466 ///
467 /// let mut res: Vec<_> = receiver.iter().collect();
468 ///
469 /// res.sort();
470 ///
471 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
472 /// ```
473 fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
474 where
475 OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
476 T: Send + Clone,
477 R: Try<Ok = ()> + Send,
478 {
479 self.map_with(init, op)
480 .try_reduce(|| (), |(), ()| R::from_ok(()))
481 }
482
483 /// Executes a fallible `OP` on a value returned by `init` with each item
484 /// produced by the iterator, in parallel.
485 ///
486 /// This combines the `init` semantics of [`for_each_init()`] and the
487 /// failure semantics of [`try_for_each()`].
488 ///
489 /// [`for_each_init()`]: #method.for_each_init
490 /// [`try_for_each()`]: #method.try_for_each
491 ///
492 /// # Examples
493 ///
494 /// ```
495 /// extern crate rand;
496 /// extern crate rayon;
497 ///
498 /// use rand::Rng;
499 /// use rayon::prelude::*;
500 ///
501 /// let mut v = vec![0u8; 1_000_000];
502 ///
503 /// v.par_chunks_mut(1000)
504 /// .try_for_each_init(
505 /// || rand::thread_rng(),
506 /// |rng, chunk| rng.try_fill(chunk),
507 /// )
508 /// .expect("expected no rand errors");
509 ///
510 /// // There's a remote chance that this will fail...
511 /// for i in 0u8..=255 {
512 /// assert!(v.contains(&i));
513 /// }
514 /// ```
515 fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
516 where
517 OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
518 INIT: Fn() -> T + Sync + Send,
519 R: Try<Ok = ()> + Send,
520 {
521 self.map_init(init, op)
522 .try_reduce(|| (), |(), ()| R::from_ok(()))
523 }
524
525 /// Counts the number of items in this parallel iterator.
526 ///
527 /// # Examples
528 ///
529 /// ```
530 /// use rayon::prelude::*;
531 ///
532 /// let count = (0..100).into_par_iter().count();
533 ///
534 /// assert_eq!(count, 100);
535 /// ```
536 fn count(self) -> usize {
537 self.map(|_| 1).sum()
538 }
539
540 /// Applies `map_op` to each item of this iterator, producing a new
541 /// iterator with the results.
542 ///
543 /// # Examples
544 ///
545 /// ```
546 /// use rayon::prelude::*;
547 ///
548 /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
549 ///
550 /// let doubles: Vec<_> = par_iter.collect();
551 ///
552 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
553 /// ```
554 fn map<F, R>(self, map_op: F) -> Map<Self, F>
555 where
556 F: Fn(Self::Item) -> R + Sync + Send,
557 R: Send,
558 {
559 map::new(self, map_op)
560 }
561
562 /// Applies `map_op` to the given `init` value with each item of this
563 /// iterator, producing a new iterator with the results.
564 ///
565 /// The `init` value will be cloned only as needed to be paired with
566 /// the group of items in each rayon job. It does not require the type
567 /// to be `Sync`.
568 ///
569 /// # Examples
570 ///
571 /// ```
572 /// use std::sync::mpsc::channel;
573 /// use rayon::prelude::*;
574 ///
575 /// let (sender, receiver) = channel();
576 ///
577 /// let a: Vec<_> = (0..5)
578 /// .into_par_iter() // iterating over i32
579 /// .map_with(sender, |s, x| {
580 /// s.send(x).unwrap(); // sending i32 values through the channel
581 /// x // returning i32
582 /// })
583 /// .collect(); // collecting the returned values into a vector
584 ///
585 /// let mut b: Vec<_> = receiver.iter() // iterating over the values in the channel
586 /// .collect(); // and collecting them
587 /// b.sort();
588 ///
589 /// assert_eq!(a, b);
590 /// ```
591 fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
592 where
593 F: Fn(&mut T, Self::Item) -> R + Sync + Send,
594 T: Send + Clone,
595 R: Send,
596 {
597 map_with::new(self, init, map_op)
598 }
599
600 /// Applies `map_op` to a value returned by `init` with each item of this
601 /// iterator, producing a new iterator with the results.
602 ///
603 /// The `init` function will be called only as needed for a value to be
604 /// paired with the group of items in each rayon job. There is no
605 /// constraint on that returned type at all!
606 ///
607 /// # Examples
608 ///
609 /// ```
610 /// extern crate rand;
611 /// extern crate rayon;
612 ///
613 /// use rand::Rng;
614 /// use rayon::prelude::*;
615 ///
616 /// let a: Vec<_> = (1i32..1_000_000)
617 /// .into_par_iter()
618 /// .map_init(
619 /// || rand::thread_rng(), // get the thread-local RNG
620 /// |rng, x| if rng.gen() { // randomly negate items
621 /// -x
622 /// } else {
623 /// x
624 /// },
625 /// ).collect();
626 ///
627 /// // There's a remote chance that this will fail...
628 /// assert!(a.iter().any(|&x| x < 0));
629 /// assert!(a.iter().any(|&x| x > 0));
630 /// ```
631 fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
632 where
633 F: Fn(&mut T, Self::Item) -> R + Sync + Send,
634 INIT: Fn() -> T + Sync + Send,
635 R: Send,
636 {
637 map_with::new_init(self, init, map_op)
638 }
639
640 /// Creates an iterator which clones all of its elements. This may be
641 /// useful when you have an iterator over `&T`, but you need `T`.
642 ///
643 /// # Examples
644 ///
645 /// ```
646 /// use rayon::prelude::*;
647 ///
648 /// let a = [1, 2, 3];
649 ///
650 /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
651 ///
652 /// // cloned is the same as .map(|&x| x), for integers
653 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
654 ///
655 /// assert_eq!(v_cloned, vec![1, 2, 3]);
656 /// assert_eq!(v_map, vec![1, 2, 3]);
657 /// ```
658 fn cloned<'a, T>(self) -> Cloned<Self>
659 where
660 T: 'a + Clone + Send,
661 Self: ParallelIterator<Item = &'a T>,
662 {
663 cloned::new(self)
664 }
665
666 /// Applies `inspect_op` to a reference to each item of this iterator,
667 /// producing a new iterator passing through the original items. This is
668 /// often useful for debugging to see what's happening in iterator stages.
669 ///
670 /// # Examples
671 ///
672 /// ```
673 /// use rayon::prelude::*;
674 ///
675 /// let a = [1, 4, 2, 3];
676 ///
677 /// // this iterator sequence is complex.
678 /// let sum = a.par_iter()
679 /// .cloned()
680 /// .filter(|&x| x % 2 == 0)
681 /// .reduce(|| 0, |sum, i| sum + i);
682 ///
683 /// println!("{}", sum);
684 ///
685 /// // let's add some inspect() calls to investigate what's happening
686 /// let sum = a.par_iter()
687 /// .cloned()
688 /// .inspect(|x| println!("about to filter: {}", x))
689 /// .filter(|&x| x % 2 == 0)
690 /// .inspect(|x| println!("made it through filter: {}", x))
691 /// .reduce(|| 0, |sum, i| sum + i);
692 ///
693 /// println!("{}", sum);
694 /// ```
695 fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
696 where
697 OP: Fn(&Self::Item) + Sync + Send,
698 {
699 inspect::new(self, inspect_op)
700 }
701
702 /// Mutates each item of this iterator before yielding it.
703 ///
704 /// # Examples
705 ///
706 /// ```
707 /// use rayon::prelude::*;
708 ///
709 /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
710 ///
711 /// let doubles: Vec<_> = par_iter.collect();
712 ///
713 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
714 /// ```
715 fn update<F>(self, update_op: F) -> Update<Self, F>
716 where
717 F: Fn(&mut Self::Item) + Sync + Send,
718 {
719 update::new(self, update_op)
720 }
721
722 /// Applies `filter_op` to each item of this iterator, producing a new
723 /// iterator with only the items that gave `true` results.
724 ///
725 /// # Examples
726 ///
727 /// ```
728 /// use rayon::prelude::*;
729 ///
730 /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
731 ///
732 /// let even_numbers: Vec<_> = par_iter.collect();
733 ///
734 /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
735 /// ```
736 fn filter<P>(self, filter_op: P) -> Filter<Self, P>
737 where
738 P: Fn(&Self::Item) -> bool + Sync + Send,
739 {
740 filter::new(self, filter_op)
741 }
742
743 /// Applies `filter_op` to each item of this iterator to get an `Option`,
744 /// producing a new iterator with only the items from `Some` results.
745 ///
746 /// # Examples
747 ///
748 /// ```
749 /// use rayon::prelude::*;
750 ///
751 /// let mut par_iter = (0..10).into_par_iter()
752 /// .filter_map(|x| {
753 /// if x % 2 == 0 { Some(x * 3) }
754 /// else { None }
755 /// });
756 ///
757 /// let even_numbers: Vec<_> = par_iter.collect();
758 ///
759 /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
760 /// ```
761 fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
762 where
763 P: Fn(Self::Item) -> Option<R> + Sync + Send,
764 R: Send,
765 {
766 filter_map::new(self, filter_op)
767 }
768
769 /// Applies `map_op` to each item of this iterator to get nested iterators,
770 /// producing a new iterator that flattens these back into one.
771 ///
772 /// # Examples
773 ///
774 /// ```
775 /// use rayon::prelude::*;
776 ///
777 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
778 ///
779 /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
780 ///
781 /// let vec: Vec<_> = par_iter.collect();
782 ///
783 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
784 /// ```
785 fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
786 where
787 F: Fn(Self::Item) -> PI + Sync + Send,
788 PI: IntoParallelIterator,
789 {
790 flat_map::new(self, map_op)
791 }
792
793     /// An adaptor that flattens iterable `Item`s into one large iterator.
794 ///
795 /// # Examples
796 ///
797 /// ```
798 /// use rayon::prelude::*;
799 ///
800 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
801 /// let y: Vec<_> = x.into_par_iter().flatten().collect();
802 ///
803 /// assert_eq!(y, vec![1, 2, 3, 4]);
804 /// ```
805 fn flatten(self) -> Flatten<Self>
806 where
807 Self::Item: IntoParallelIterator,
808 {
809 flatten::new(self)
810 }
811
812 /// Reduces the items in the iterator into one item using `op`.
813 /// The argument `identity` should be a closure that can produce
814 /// "identity" value which may be inserted into the sequence as
815 /// needed to create opportunities for parallel execution. So, for
816 /// example, if you are doing a summation, then `identity()` ought
817 /// to produce something that represents the zero for your type
818 /// (but consider just calling `sum()` in that case).
819 ///
820 /// # Examples
821 ///
822 /// ```
823 /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
824 /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
825 /// // where the first/second elements are summed separately.
826 /// use rayon::prelude::*;
827 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
828 /// .par_iter() // iterating over &(i32, i32)
829 /// .cloned() // iterating over (i32, i32)
830 /// .reduce(|| (0, 0), // the "identity" is 0 in both columns
831 /// |a, b| (a.0 + b.0, a.1 + b.1));
832 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
833 /// ```
834 ///
835 /// **Note:** unlike a sequential `fold` operation, the order in
836 /// which `op` will be applied to reduce the result is not fully
837 /// specified. So `op` should be [associative] or else the results
838 /// will be non-deterministic. And of course `identity()` should
839 /// produce a true identity.
840 ///
841 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
842 fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
843 where
844 OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
845 ID: Fn() -> Self::Item + Sync + Send,
846 {
847 reduce::reduce(self, identity, op)
848 }
849
850 /// Reduces the items in the iterator into one item using `op`.
851 /// If the iterator is empty, `None` is returned; otherwise,
852 /// `Some` is returned.
853 ///
854 /// This version of `reduce` is simple but somewhat less
855 /// efficient. If possible, it is better to call `reduce()`, which
856 /// requires an identity element.
857 ///
858 /// # Examples
859 ///
860 /// ```
861 /// use rayon::prelude::*;
862 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
863 /// .par_iter() // iterating over &(i32, i32)
864 /// .cloned() // iterating over (i32, i32)
865 /// .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
866 /// .unwrap();
867 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
868 /// ```
869 ///
870 /// **Note:** unlike a sequential `fold` operation, the order in
871 /// which `op` will be applied to reduce the result is not fully
872 /// specified. So `op` should be [associative] or else the results
873 /// will be non-deterministic.
874 ///
875 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
876 fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
877 where
878 OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
879 {
880 self.fold(
881 || None,
882 |opt_a, b| match opt_a {
883 Some(a) => Some(op(a, b)),
884 None => Some(b),
885 },
886 )
887 .reduce(
888 || None,
889 |opt_a, opt_b| match (opt_a, opt_b) {
890 (Some(a), Some(b)) => Some(op(a, b)),
891 (Some(v), None) | (None, Some(v)) => Some(v),
892 (None, None) => None,
893 },
894 )
895 }
896
897 /// Reduces the items in the iterator into one item using a fallible `op`.
898 /// The `identity` argument is used the same way as in [`reduce()`].
899 ///
900 /// [`reduce()`]: #method.reduce
901 ///
902 /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
903 /// to one, we will attempt to stop processing the rest of the items in the
904 /// iterator as soon as possible, and we will return that terminating value.
905 /// Otherwise, we will return the final reduced `Result::Ok(T)` or
906 /// `Option::Some(T)`. If there are multiple errors in parallel, it is not
907 /// specified which will be returned.
908 ///
909 /// # Examples
910 ///
911 /// ```
912 /// use rayon::prelude::*;
913 ///
914 /// // Compute the sum of squares, being careful about overflow.
915 /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
916 /// iter.into_par_iter()
917 /// .map(|i| i.checked_mul(i)) // square each item,
918 /// .try_reduce(|| 0, i32::checked_add) // and add them up!
919 /// }
920 /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
921 ///
922 /// // The sum might overflow
923 /// assert_eq!(sum_squares(0..10_000), None);
924 ///
925 /// // Or the squares might overflow before it even reaches `try_reduce`
926 /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
927 /// ```
928 fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
929 where
930 OP: Fn(T, T) -> Self::Item + Sync + Send,
931 ID: Fn() -> T + Sync + Send,
932 Self::Item: Try<Ok = T>,
933 {
934 try_reduce::try_reduce(self, identity, op)
935 }
936
937 /// Reduces the items in the iterator into one item using a fallible `op`.
938 ///
939 /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
940 /// otherwise, `Some` is returned. Beyond that, it behaves like
941 /// [`try_reduce()`] for handling `Err`/`None`.
942 ///
943 /// [`reduce_with()`]: #method.reduce_with
944 /// [`try_reduce()`]: #method.try_reduce
945 ///
946 /// For instance, with `Option` items, the return value may be:
947 /// - `None`, the iterator was empty
948 /// - `Some(None)`, we stopped after encountering `None`.
949 /// - `Some(Some(x))`, the entire iterator reduced to `x`.
950 ///
951 /// With `Result` items, the nesting is more obvious:
952 /// - `None`, the iterator was empty
953 /// - `Some(Err(e))`, we stopped after encountering an error `e`.
954 /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
955 ///
956 /// # Examples
957 ///
958 /// ```
959 /// use rayon::prelude::*;
960 ///
961 /// let files = ["/dev/null", "/does/not/exist"];
962 ///
963 /// // Find the biggest file
964 /// files.into_par_iter()
965 /// .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
966 /// .try_reduce_with(|a, b| {
967 /// Ok(if a.1 >= b.1 { a } else { b })
968 /// })
969 /// .expect("Some value, since the iterator is not empty")
970 /// .expect_err("not found");
971 /// ```
972 fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
973 where
974 OP: Fn(T, T) -> Self::Item + Sync + Send,
975 Self::Item: Try<Ok = T>,
976 {
977 try_reduce_with::try_reduce_with(self, op)
978 }
979
980 /// Parallel fold is similar to sequential fold except that the
981 /// sequence of items may be subdivided before it is
982 /// folded. Consider a list of numbers like `22 3 77 89 46`. If
983 /// you used sequential fold to add them (`fold(0, |a,b| a+b)`,
984 /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
985 /// 77, and so forth. The **parallel fold** works similarly except
986 /// that it first breaks up your list into sublists, and hence
987 /// instead of yielding up a single sum at the end, it yields up
988 /// multiple sums. The number of results is nondeterministic, as
989 /// is the point where the breaks occur.
990 ///
991     /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
992 /// our example list, we might wind up with a sequence of two numbers,
993 /// like so:
994 ///
995 /// ```notrust
996 /// 22 3 77 89 46
997 /// | |
998 /// 102 135
999 /// ```
1000 ///
1001 /// Or perhaps these three numbers:
1002 ///
1003 /// ```notrust
1004 /// 22 3 77 89 46
1005 /// | | |
1006 /// 102 89 46
1007 /// ```
1008 ///
1009 /// In general, Rayon will attempt to find good breaking points
1010 /// that keep all of your cores busy.
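    /// For instance, a sketch (the exact number of partial sums is not
    /// specified, since it depends on how the input happens to be split):
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let partial_sums: Vec<u32> = (0..100u32).into_par_iter()
    ///     .fold(|| 0_u32, |a, b| a + b) // one partial sum per group
    ///     .collect();
    ///
    /// // However many groups there were, together they cover the whole range.
    /// assert_eq!(partial_sums.iter().sum::<u32>(), (0..100).sum());
    /// ```
    ///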
1011 ///
1012 /// ### Fold versus reduce
1013 ///
1014 /// The `fold()` and `reduce()` methods each take an identity element
1015 /// and a combining function, but they operate rather differently.
1016 ///
1017 /// `reduce()` requires that the identity function has the same
1018 /// type as the things you are iterating over, and it fully
1019 /// reduces the list of items into a single item. So, for example,
1020 /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1021 /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1022 /// u8| a + b)`, we would get an overflow. This is because `0`,
1023 /// `a`, and `b` here are all bytes, just like the numbers in the
1024 /// list (I wrote the types explicitly above, but those are the
1025 /// only types you can use). To avoid the overflow, we would need
1026 /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1027 /// b| a + b)`, in which case our result would be `256`.
1028 ///
1029 /// In contrast, with `fold()`, the identity function does not
1030 /// have to have the same type as the things you are iterating
1031 /// over, and you potentially get back many results. So, if we
1032 /// continue with the `bytes` example from the previous paragraph,
1033 /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1034 /// convert our bytes into `u32`. And of course we might not get
1035 /// back a single sum.
1036 ///
1037 /// There is a more subtle distinction as well, though it's
1038 /// actually implied by the above points. When you use `reduce()`,
1039 /// your reduction function is sometimes called with values that
1040 /// were never part of your original parallel iterator (for
1041 /// example, both the left and right might be a partial sum). With
1042 /// `fold()`, in contrast, the left value in the fold function is
1043 /// always the accumulator, and the right value is always from
1044 /// your original sequence.
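    ///
    /// A sketch of those two approaches side by side:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let bytes = [128_u8, 64_u8, 64_u8];
    ///
    /// // Widen each byte first, then reduce...
    /// let total = bytes.par_iter().map(|&b| b as u32).reduce(|| 0, |a, b| a + b);
    /// assert_eq!(total, 256);
    ///
    /// // ...or fold directly into the wider type and sum the partial results.
    /// let total: u32 = bytes.par_iter().fold(|| 0_u32, |a, &b| a + (b as u32)).sum();
    /// assert_eq!(total, 256);
    /// ```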
1045 ///
1046 /// ### Fold vs Map/Reduce
1047 ///
1048 /// Fold makes sense if you have some operation where it is
1049 /// cheaper to create groups of elements at a time. For example,
1050 /// imagine collecting characters into a string. If you were going
1051 /// to use map/reduce, you might try this:
1052 ///
1053 /// ```
1054 /// use rayon::prelude::*;
1055 ///
1056 /// let s =
1057 /// ['a', 'b', 'c', 'd', 'e']
1058 /// .par_iter()
1059 /// .map(|c: &char| format!("{}", c))
1060 /// .reduce(|| String::new(),
1061 /// |mut a: String, b: String| { a.push_str(&b); a });
1062 ///
1063 /// assert_eq!(s, "abcde");
1064 /// ```
1065 ///
1066 /// Because reduce produces the same type of element as its input,
1067 /// you have to first map each character into a string, and then
1068 /// you can reduce them. This means we create one string per
1069 /// element in our iterator -- not so great. Using `fold`, we can
1070 /// do this instead:
1071 ///
1072 /// ```
1073 /// use rayon::prelude::*;
1074 ///
1075 /// let s =
1076 /// ['a', 'b', 'c', 'd', 'e']
1077 /// .par_iter()
1078 /// .fold(|| String::new(),
1079 /// |mut s: String, c: &char| { s.push(*c); s })
1080 /// .reduce(|| String::new(),
1081 /// |mut a: String, b: String| { a.push_str(&b); a });
1082 ///
1083 /// assert_eq!(s, "abcde");
1084 /// ```
1085 ///
1086 /// Now `fold` will process groups of our characters at a time,
1087 /// and we only make one string per group. We should wind up with
1088 /// some small-ish number of strings roughly proportional to the
1089 /// number of CPUs you have (it will ultimately depend on how busy
1090 /// your processors are). Note that we still need to do a reduce
1091 /// afterwards to combine those groups of strings into a single
1092 /// string.
1093 ///
1094 /// You could use a similar trick to save partial results (e.g., a
1095 /// cache) or something similar.
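    ///
    /// For example, a sketch (the names are illustrative) where each fold
    /// group carries a reusable scratch buffer in its accumulator, so the
    /// buffer is allocated once per group rather than once per item:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let words = ["lorem", "ipsum", "dolor", "sit", "amet"];
    ///
    /// let total_len: usize = words.par_iter()
    ///     .fold(|| (Vec::<u8>::new(), 0_usize), |(mut scratch, n), word| {
    ///         scratch.clear();              // reuse this group's buffer
    ///         scratch.extend(word.bytes());
    ///         let len = scratch.len();
    ///         (scratch, n + len)
    ///     })
    ///     .map(|(_scratch, n)| n)           // keep only the per-group totals
    ///     .sum();
    ///
    /// assert_eq!(total_len, "loremipsumdolorsitamet".len());
    /// ```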
1096 ///
1097 /// ### Combining fold with other operations
1098 ///
1099 /// You can combine `fold` with `reduce` if you want to produce a
1100 /// single value. This is then roughly equivalent to a map/reduce
1101 /// combination in effect:
1102 ///
1103 /// ```
1104 /// use rayon::prelude::*;
1105 ///
1106 /// let bytes = 0..22_u8;
1107 /// let sum = bytes.into_par_iter()
1108 /// .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1109 /// .sum::<u32>();
1110 ///
1111 /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1112 /// ```
1113 fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
1114 where
1115 F: Fn(T, Self::Item) -> T + Sync + Send,
1116 ID: Fn() -> T + Sync + Send,
1117 T: Send,
1118 {
1119 fold::fold(self, identity, fold_op)
1120 }
1121
1122 /// Applies `fold_op` to the given `init` value with each item of this
1123 /// iterator, finally producing the value for further use.
1124 ///
1125 /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1126 /// it doesn't require the `init` type to be `Sync`, nor any other form
1127 /// of added synchronization.
1128 ///
1129 /// # Examples
1130 ///
1131 /// ```
1132 /// use rayon::prelude::*;
1133 ///
1134 /// let bytes = 0..22_u8;
1135 /// let sum = bytes.into_par_iter()
1136 /// .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1137 /// .sum::<u32>();
1138 ///
1139 /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1140 /// ```
1141 fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
1142 where
1143 F: Fn(T, Self::Item) -> T + Sync + Send,
1144 T: Send + Clone,
1145 {
1146 fold::fold_with(self, init, fold_op)
1147 }
1148
1149 /// Perform a fallible parallel fold.
1150 ///
1151 /// This is a variation of [`fold()`] for operations which can fail with
1152 /// `Option::None` or `Result::Err`. The first such failure stops
1153 /// processing the local set of items, without affecting other folds in the
1154 /// iterator's subdivisions.
1155 ///
1156 /// Often, `try_fold()` will be followed by [`try_reduce()`]
1157 /// for a final reduction and global short-circuiting effect.
1158 ///
1159 /// [`fold()`]: #method.fold
1160 /// [`try_reduce()`]: #method.try_reduce
1161 ///
1162 /// # Examples
1163 ///
1164 /// ```
1165 /// use rayon::prelude::*;
1166 ///
1167 /// let bytes = 0..22_u8;
1168 /// let sum = bytes.into_par_iter()
1169 /// .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1170 /// .try_reduce(|| 0, u32::checked_add);
1171 ///
1172 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1173 /// ```
1174 fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
1175 where
1176 F: Fn(T, Self::Item) -> R + Sync + Send,
1177 ID: Fn() -> T + Sync + Send,
1178 R: Try<Ok = T> + Send,
1179 {
1180 try_fold::try_fold(self, identity, fold_op)
1181 }
1182
1183 /// Perform a fallible parallel fold with a cloneable `init` value.
1184 ///
1185 /// This combines the `init` semantics of [`fold_with()`] and the failure
1186 /// semantics of [`try_fold()`].
1187 ///
1188 /// [`fold_with()`]: #method.fold_with
1189 /// [`try_fold()`]: #method.try_fold
1190 ///
1191 /// ```
1192 /// use rayon::prelude::*;
1193 ///
1194 /// let bytes = 0..22_u8;
1195 /// let sum = bytes.into_par_iter()
1196 /// .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1197 /// .try_reduce(|| 0, u32::checked_add);
1198 ///
1199 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1200 /// ```
1201 fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
1202 where
1203 F: Fn(T, Self::Item) -> R + Sync + Send,
1204 R: Try<Ok = T> + Send,
1205 T: Clone + Send,
1206 {
1207 try_fold::try_fold_with(self, init, fold_op)
1208 }
1209
1210 /// Sums up the items in the iterator.
1211 ///
1212     /// Note that the order in which items will be reduced is not specified,
1213 /// so if the `+` operator is not truly [associative] \(as is the
1214 /// case for floating point numbers), then the results are not
1215 /// fully deterministic.
1216 ///
1217 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1218 ///
1219 /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1220 /// except that the type of `0` and the `+` operation may vary
1221 /// depending on the type of value being produced.
1222 ///
1223 /// # Examples
1224 ///
1225 /// ```
1226 /// use rayon::prelude::*;
1227 ///
1228 /// let a = [1, 5, 7];
1229 ///
1230 /// let sum: i32 = a.par_iter().sum();
1231 ///
1232 /// assert_eq!(sum, 13);
1233 /// ```
1234 fn sum<S>(self) -> S
1235 where
1236 S: Send + Sum<Self::Item> + Sum<S>,
1237 {
1238 sum::sum(self)
1239 }
1240
1241 /// Multiplies all the items in the iterator.
1242 ///
1243     /// Note that the order in which items will be reduced is not specified,
1244 /// so if the `*` operator is not truly [associative] \(as is the
1245 /// case for floating point numbers), then the results are not
1246 /// fully deterministic.
1247 ///
1248 /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1249 ///
1250 /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1251 /// except that the type of `1` and the `*` operation may vary
1252 /// depending on the type of value being produced.
1253 ///
1254 /// # Examples
1255 ///
1256 /// ```
1257 /// use rayon::prelude::*;
1258 ///
1259 /// fn factorial(n: u32) -> u32 {
1260 /// (1..n+1).into_par_iter().product()
1261 /// }
1262 ///
1263 /// assert_eq!(factorial(0), 1);
1264 /// assert_eq!(factorial(1), 1);
1265 /// assert_eq!(factorial(5), 120);
1266 /// ```
1267 fn product<P>(self) -> P
1268 where
1269 P: Send + Product<Self::Item> + Product<P>,
1270 {
1271 product::product(self)
1272 }
1273
1274 /// Computes the minimum of all the items in the iterator. If the
1275 /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1276 /// is returned.
1277 ///
1278 /// Note that the order in which the items will be reduced is not
1279 /// specified, so if the `Ord` impl is not truly associative, then
1280 /// the results are not deterministic.
1281 ///
1282 /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`.
1283 ///
1284 /// # Examples
1285 ///
1286 /// ```
1287 /// use rayon::prelude::*;
1288 ///
1289 /// let a = [45, 74, 32];
1290 ///
1291 /// assert_eq!(a.par_iter().min(), Some(&32));
1292 ///
1293 /// let b: [i32; 0] = [];
1294 ///
1295 /// assert_eq!(b.par_iter().min(), None);
1296 /// ```
1297 fn min(self) -> Option<Self::Item>
1298 where
1299 Self::Item: Ord,
1300 {
1301 self.reduce_with(cmp::min)
1302 }
1303
1304 /// Computes the minimum of all the items in the iterator with respect to
1305 /// the given comparison function. If the iterator is empty, `None` is
1306 /// returned; otherwise, `Some(min)` is returned.
1307 ///
1308 /// Note that the order in which the items will be reduced is not
1309 /// specified, so if the comparison function is not associative, then
1310 /// the results are not deterministic.
1311 ///
1312 /// # Examples
1313 ///
1314 /// ```
1315 /// use rayon::prelude::*;
1316 ///
1317 /// let a = [-3_i32, 77, 53, 240, -1];
1318 ///
1319 /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1320 /// ```
1321 fn min_by<F>(self, f: F) -> Option<Self::Item>
1322 where
1323 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1324 {
1325 self.reduce_with(|a, b| match f(&a, &b) {
1326 Ordering::Greater => b,
1327 _ => a,
1328 })
1329 }
1330
1331 /// Computes the item that yields the minimum value for the given
1332 /// function. If the iterator is empty, `None` is returned;
1333 /// otherwise, `Some(item)` is returned.
1334 ///
1335 /// Note that the order in which the items will be reduced is not
1336 /// specified, so if the `Ord` impl is not truly associative, then
1337 /// the results are not deterministic.
1338 ///
1339 /// # Examples
1340 ///
1341 /// ```
1342 /// use rayon::prelude::*;
1343 ///
1344 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1345 ///
1346 /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1347 /// ```
1348 fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1349 where
1350 K: Ord + Send,
1351 F: Sync + Send + Fn(&Self::Item) -> K,
1352 {
1353 self.map(|x| (f(&x), x))
1354 .min_by(|a, b| (a.0).cmp(&b.0))
1355 .map(|(_, x)| x)
1356 }
1357
1358 /// Computes the maximum of all the items in the iterator. If the
1359 /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1360 /// is returned.
1361 ///
1362 /// Note that the order in which the items will be reduced is not
1363 /// specified, so if the `Ord` impl is not truly associative, then
1364 /// the results are not deterministic.
1365 ///
1366 /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`.
1367 ///
1368 /// # Examples
1369 ///
1370 /// ```
1371 /// use rayon::prelude::*;
1372 ///
1373 /// let a = [45, 74, 32];
1374 ///
1375 /// assert_eq!(a.par_iter().max(), Some(&74));
1376 ///
1377 /// let b: [i32; 0] = [];
1378 ///
1379 /// assert_eq!(b.par_iter().max(), None);
1380 /// ```
1381 fn max(self) -> Option<Self::Item>
1382 where
1383 Self::Item: Ord,
1384 {
1385 self.reduce_with(cmp::max)
1386 }
1387
1388 /// Computes the maximum of all the items in the iterator with respect to
1389 /// the given comparison function. If the iterator is empty, `None` is
1390     /// returned; otherwise, `Some(max)` is returned.
1391 ///
1392 /// Note that the order in which the items will be reduced is not
1393 /// specified, so if the comparison function is not associative, then
1394 /// the results are not deterministic.
1395 ///
1396 /// # Examples
1397 ///
1398 /// ```
1399 /// use rayon::prelude::*;
1400 ///
1401 /// let a = [-3_i32, 77, 53, 240, -1];
1402 ///
1403 /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1404 /// ```
1405 fn max_by<F>(self, f: F) -> Option<Self::Item>
1406 where
1407 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1408 {
1409 self.reduce_with(|a, b| match f(&a, &b) {
1410 Ordering::Greater => a,
1411 _ => b,
1412 })
1413 }
1414
1415 /// Computes the item that yields the maximum value for the given
1416 /// function. If the iterator is empty, `None` is returned;
1417 /// otherwise, `Some(item)` is returned.
1418 ///
1419 /// Note that the order in which the items will be reduced is not
1420 /// specified, so if the `Ord` impl is not truly associative, then
1421 /// the results are not deterministic.
1422 ///
1423 /// # Examples
1424 ///
1425 /// ```
1426 /// use rayon::prelude::*;
1427 ///
1428 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1429 ///
1430 /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1431 /// ```
1432 fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1433 where
1434 K: Ord + Send,
1435 F: Sync + Send + Fn(&Self::Item) -> K,
1436 {
1437 self.map(|x| (f(&x), x))
1438 .max_by(|a, b| (a.0).cmp(&b.0))
1439 .map(|(_, x)| x)
1440 }
1441
1442 /// Takes two iterators and creates a new iterator over both.
1443 ///
1444 /// # Examples
1445 ///
1446 /// ```
1447 /// use rayon::prelude::*;
1448 ///
1449 /// let a = [0, 1, 2];
1450 /// let b = [9, 8, 7];
1451 ///
1452 /// let par_iter = a.par_iter().chain(b.par_iter());
1453 ///
1454 /// let chained: Vec<_> = par_iter.cloned().collect();
1455 ///
1456 /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
1457 /// ```
1458 fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
1459 where
1460 C: IntoParallelIterator<Item = Self::Item>,
1461 {
1462 chain::new(self, chain.into_par_iter())
1463 }
1464
1465 /// Searches for **some** item in the parallel iterator that
1466 /// matches the given predicate and returns it. This operation
1467 /// is similar to [`find` on sequential iterators][find] but
1468 /// the item returned may not be the **first** one in the parallel
1469 /// sequence which matches, since we search the entire sequence in parallel.
1470 ///
1471 /// Once a match is found, we will attempt to stop processing
1472 /// the rest of the items in the iterator as soon as possible
1473 /// (just as `find` stops iterating once a match is found).
1474 ///
1475 /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
1476 ///
1477 /// # Examples
1478 ///
1479 /// ```
1480 /// use rayon::prelude::*;
1481 ///
1482 /// let a = [1, 2, 3, 3];
1483 ///
1484 /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1485 ///
1486 /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
1487 /// ```
1488 fn find_any<P>(self, predicate: P) -> Option<Self::Item>
1489 where
1490 P: Fn(&Self::Item) -> bool + Sync + Send,
1491 {
1492 find::find(self, predicate)
1493 }
1494
1495 /// Searches for the sequentially **first** item in the parallel iterator
1496 /// that matches the given predicate and returns it.
1497 ///
1498 /// Once a match is found, all attempts to the right of the match
1499 /// will be stopped, while attempts to the left must continue in case
1500 /// an earlier match is found.
1501 ///
1502 /// Note that not all parallel iterators have a useful order, much like
1503 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1504     /// just want the first match discovered anywhere in the iterator,
1505 /// `find_any` is a better choice.
1506 ///
1507     /// # Examples
1508 ///
1509 /// ```
1510 /// use rayon::prelude::*;
1511 ///
1512 /// let a = [1, 2, 3, 3];
1513 ///
1514 /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1515 ///
1516 /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
1517 /// ```
1518 fn find_first<P>(self, predicate: P) -> Option<Self::Item>
1519 where
1520 P: Fn(&Self::Item) -> bool + Sync + Send,
1521 {
1522 find_first_last::find_first(self, predicate)
1523 }
1524
1525 /// Searches for the sequentially **last** item in the parallel iterator
1526 /// that matches the given predicate and returns it.
1527 ///
1528 /// Once a match is found, all attempts to the left of the match
1529 /// will be stopped, while attempts to the right must continue in case
1530 /// a later match is found.
1531 ///
1532 /// Note that not all parallel iterators have a useful order, much like
1533 /// sequential `HashMap` iteration, so "last" may be nebulous. When the
1534 /// order doesn't actually matter to you, `find_any` is a better choice.
1535 ///
1536 /// # Examples
1537 ///
1538 /// ```
1539 /// use rayon::prelude::*;
1540 ///
1541 /// let a = [1, 2, 3, 3];
1542 ///
1543 /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1544 ///
1545 /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
1546 /// ```
1547 fn find_last<P>(self, predicate: P) -> Option<Self::Item>
1548 where
1549 P: Fn(&Self::Item) -> bool + Sync + Send,
1550 {
1551 find_first_last::find_last(self, predicate)
1552 }
1553
1554 #[doc(hidden)]
1555     #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \
1556 `find_first`, or `find_last`")]
1557 fn find<P>(self, predicate: P) -> Option<Self::Item>
1558 where
1559 P: Fn(&Self::Item) -> bool + Sync + Send,
1560 {
1561 self.find_any(predicate)
1562 }
1563
1564 /// Searches for **some** item in the parallel iterator that
1565 /// matches the given predicate, and if so returns true. Once
1566     /// a match is found, we'll attempt to stop processing the rest
1567 /// of the items. Proving that there's no match, returning false,
1568 /// does require visiting every item.
1569 ///
1570 /// # Examples
1571 ///
1572 /// ```
1573 /// use rayon::prelude::*;
1574 ///
1575 /// let a = [0, 12, 3, 4, 0, 23, 0];
1576 ///
1577 /// let is_valid = a.par_iter().any(|&x| x > 10);
1578 ///
1579 /// assert!(is_valid);
1580 /// ```
1581 fn any<P>(self, predicate: P) -> bool
1582 where
1583 P: Fn(Self::Item) -> bool + Sync + Send,
1584 {
1585 self.map(predicate).find_any(|&p| p).is_some()
1586 }
1587
1588 /// Tests that every item in the parallel iterator matches the given
1589 /// predicate, and if so returns true. If a counter-example is found,
1590 /// we'll attempt to stop processing more items, then return false.
1591 ///
1592 /// # Examples
1593 ///
1594 /// ```
1595 /// use rayon::prelude::*;
1596 ///
1597 /// let a = [0, 12, 3, 4, 0, 23, 0];
1598 ///
1599 /// let is_valid = a.par_iter().all(|&x| x > 10);
1600 ///
1601 /// assert!(!is_valid);
1602 /// ```
1603 fn all<P>(self, predicate: P) -> bool
1604 where
1605 P: Fn(Self::Item) -> bool + Sync + Send,
1606 {
1607 self.map(predicate).find_any(|&p| !p).is_none()
1608 }
1609
1610 /// Creates an iterator over the `Some` items of this iterator, halting
1611 /// as soon as any `None` is found.
1612 ///
1613 /// # Examples
1614 ///
1615 /// ```
1616 /// use rayon::prelude::*;
1617 /// use std::sync::atomic::{AtomicUsize, Ordering};
1618 ///
1619 /// let counter = AtomicUsize::new(0);
1620 /// let value = (0_i32..2048)
1621 /// .into_par_iter()
1622 /// .map(|x| {
1623 /// counter.fetch_add(1, Ordering::SeqCst);
1624 /// if x < 1024 { Some(x) } else { None }
1625 /// })
1626 /// .while_some()
1627 /// .max();
1628 ///
1629 /// assert!(value < Some(1024));
1630 /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
1631 /// ```
1632 fn while_some<T>(self) -> WhileSome<Self>
1633 where
1634 Self: ParallelIterator<Item = Option<T>>,
1635 T: Send,
1636 {
1637 while_some::new(self)
1638 }
1639
1640     /// Creates a fresh collection containing all the elements produced
1641 /// by this parallel iterator.
1642 ///
1643 /// You may prefer to use `collect_into_vec()`, which allocates more
1644 /// efficiently with precise knowledge of how many elements the
1645 /// iterator contains, and even allows you to reuse an existing
1646 /// vector's backing store rather than allocating a fresh vector.
1647 ///
1648 /// # Examples
1649 ///
1650 /// ```
1651 /// use rayon::prelude::*;
1652 ///
1653 /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
1654 ///
1655 /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
1656 ///
1657 /// assert_eq!(sync_vec, async_vec);
1658 /// ```
1659 fn collect<C>(self) -> C
1660 where
1661 C: FromParallelIterator<Self::Item>,
1662 {
1663 C::from_par_iter(self)
1664 }
1665
1666 /// Unzips the items of a parallel iterator into a pair of arbitrary
1667 /// `ParallelExtend` containers.
1668 ///
1669 /// You may prefer to use `unzip_into_vecs()`, which allocates more
1670 /// efficiently with precise knowledge of how many elements the
1671 /// iterator contains, and even allows you to reuse existing
1672 /// vectors' backing stores rather than allocating fresh vectors.
1673 ///
1674 /// # Examples
1675 ///
1676 /// ```
1677 /// use rayon::prelude::*;
1678 ///
1679 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
1680 ///
1681 /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
1682 ///
1683 /// assert_eq!(left, [0, 1, 2, 3]);
1684 /// assert_eq!(right, [1, 2, 3, 4]);
1685 /// ```
1686 ///
1687 /// Nested pairs can be unzipped too.
1688 ///
1689 /// ```
1690 /// use rayon::prelude::*;
1691 ///
1692 /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
1693 /// .map(|i| (i, (i * i, i * i * i)))
1694 /// .unzip();
1695 ///
1696 /// assert_eq!(values, [0, 1, 2, 3]);
1697 /// assert_eq!(squares, [0, 1, 4, 9]);
1698 /// assert_eq!(cubes, [0, 1, 8, 27]);
1699 /// ```
1700 fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
1701 where
1702 Self: ParallelIterator<Item = (A, B)>,
1703 FromA: Default + Send + ParallelExtend<A>,
1704 FromB: Default + Send + ParallelExtend<B>,
1705 A: Send,
1706 B: Send,
1707 {
1708 unzip::unzip(self)
1709 }
1710
1711 /// Partitions the items of a parallel iterator into a pair of arbitrary
1712 /// `ParallelExtend` containers. Items for which the `predicate` returns
1713 /// true go into the first container, and the rest go into the second.
1714 ///
1715 /// Note: unlike the standard `Iterator::partition`, this allows distinct
1716 /// collection types for the left and right items. This is more flexible,
1717 /// but may require new type annotations when converting sequential code
1718 /// that relied on type inference assuming the two were the same.
1719 ///
1720 /// # Examples
1721 ///
1722 /// ```
1723 /// use rayon::prelude::*;
1724 ///
1725 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
1726 ///
1727 /// assert_eq!(left, [0, 2, 4, 6]);
1728 /// assert_eq!(right, [1, 3, 5, 7]);
1729 /// ```
1730 fn partition<A, B, P>(self, predicate: P) -> (A, B)
1731 where
1732 A: Default + Send + ParallelExtend<Self::Item>,
1733 B: Default + Send + ParallelExtend<Self::Item>,
1734 P: Fn(&Self::Item) -> bool + Sync + Send,
1735 {
1736 unzip::partition(self, predicate)
1737 }
1738
1739 /// Partitions and maps the items of a parallel iterator into a pair of
1740 /// arbitrary `ParallelExtend` containers. `Either::Left` items go into
1741 /// the first container, and `Either::Right` items go into the second.
1742 ///
1743 /// # Examples
1744 ///
1745 /// ```
1746 /// use rayon::prelude::*;
1747 /// use rayon::iter::Either;
1748 ///
1749 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
1750 /// .partition_map(|x| {
1751 /// if x % 2 == 0 {
1752 /// Either::Left(x * 4)
1753 /// } else {
1754 /// Either::Right(x * 3)
1755 /// }
1756 /// });
1757 ///
1758 /// assert_eq!(left, [0, 8, 16, 24]);
1759 /// assert_eq!(right, [3, 9, 15, 21]);
1760 /// ```
1761 ///
1762 /// Nested `Either` enums can be split as well.
1763 ///
1764 /// ```
1765 /// use rayon::prelude::*;
1766 /// use rayon::iter::Either::*;
1767 ///
1768 /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
1769 /// .into_par_iter()
1770 /// .partition_map(|x| match (x % 3, x % 5) {
1771 /// (0, 0) => Left(Left(x)),
1772 /// (0, _) => Left(Right(x)),
1773 /// (_, 0) => Right(Left(x)),
1774 /// (_, _) => Right(Right(x)),
1775 /// });
1776 ///
1777 /// assert_eq!(fizzbuzz, [15]);
1778 /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
1779 /// assert_eq!(buzz, [5, 10]);
1780 /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
1781 /// ```
1782 fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
1783 where
1784 A: Default + Send + ParallelExtend<L>,
1785 B: Default + Send + ParallelExtend<R>,
1786 P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
1787 L: Send,
1788 R: Send,
1789 {
1790 unzip::partition_map(self, predicate)
1791 }
1792
1793 /// Intersperses clones of an element between items of this iterator.
1794 ///
1795 /// # Examples
1796 ///
1797 /// ```
1798 /// use rayon::prelude::*;
1799 ///
1800 /// let x = vec![1, 2, 3];
1801 /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
1802 ///
1803 /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
1804 /// ```
1805 fn intersperse(self, element: Self::Item) -> Intersperse<Self>
1806 where
1807 Self::Item: Clone,
1808 {
1809 intersperse::new(self, element)
1810 }
1811
1812 /// Internal method used to define the behavior of this parallel
1813 /// iterator. You should not need to call this directly.
1814 ///
1815 /// This method causes the iterator `self` to start producing
1816 /// items and to feed them to the consumer `consumer` one by one.
1817 /// It may split the consumer before doing so to create the
1818 /// opportunity to produce in parallel.
1819 ///
1820 /// See the [README] for more details on the internals of parallel
1821 /// iterators.
1822 ///
1823 /// [README]: README.md
1824 fn drive_unindexed<C>(self, consumer: C) -> C::Result
1825 where
1826 C: UnindexedConsumer<Self::Item>;
1827
1828 /// Internal method used to define the behavior of this parallel
1829 /// iterator. You should not need to call this directly.
1830 ///
1831 /// Returns the number of items produced by this iterator, if known
1832 /// statically. This can be used by consumers to trigger special fast
1833 /// paths. Therefore, if `Some(_)` is returned, this iterator must only
1834 /// use the (indexed) `Consumer` methods when driving a consumer, such
1835 /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
1836 /// other `UnindexedConsumer` methods -- or returning an inaccurate
1837 /// value -- may result in panics.
1838 ///
1839 /// This method is currently used to optimize `collect` for want
1840 /// of true Rust specialization; it may be removed when
1841 /// specialization is stable.
1842 fn opt_len(&self) -> Option<usize> {
1843 None
1844 }
1845 }
1846
1847 impl<T: ParallelIterator> IntoParallelIterator for T {
1848 type Iter = T;
1849 type Item = T::Item;
1850
1851 fn into_par_iter(self) -> T {
1852 self
1853 }
1854 }
1855
1856 /// An iterator that supports "random access" to its data, meaning
1857 /// that you can split it at arbitrary indices and draw data from
1858 /// those points.
1859 ///
1860 /// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
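///
/// # Examples
///
/// For example, one might combine the indexed-only `rev` and `enumerate`
/// adaptors on a range (a brief, illustrative sketch):
///
/// ```
/// use rayon::prelude::*;
///
/// let r: Vec<_> = (0..5).into_par_iter().rev().enumerate().collect();
/// assert_eq!(r, [(0, 4), (1, 3), (2, 2), (3, 1), (4, 0)]);
/// ```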
1861 pub trait IndexedParallelIterator: ParallelIterator {
1862 /// Collects the results of the iterator into the specified
1863 /// vector. The vector is always truncated before execution
1864 /// begins. If possible, reusing the vector across calls can lead
1865 /// to better performance since it reuses the same backing buffer.
1866 ///
1867 /// # Examples
1868 ///
1869 /// ```
1870 /// use rayon::prelude::*;
1871 ///
1872 /// // any prior data will be truncated
1873 /// let mut vec = vec![-1, -2, -3];
1874 ///
1875 /// (0..5).into_par_iter()
1876 /// .collect_into_vec(&mut vec);
1877 ///
1878 /// assert_eq!(vec, [0, 1, 2, 3, 4]);
1879 /// ```
1880 fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
1881 collect::collect_into_vec(self, target);
1882 }
1883
1884 /// Unzips the results of the iterator into the specified
1885 /// vectors. The vectors are always truncated before execution
1886 /// begins. If possible, reusing the vectors across calls can lead
1887 /// to better performance since they reuse the same backing buffer.
1888 ///
1889 /// # Examples
1890 ///
1891 /// ```
1892 /// use rayon::prelude::*;
1893 ///
1894 /// // any prior data will be truncated
1895 /// let mut left = vec![42; 10];
1896 /// let mut right = vec![-1; 10];
1897 ///
1898 /// (10..15).into_par_iter()
1899 /// .enumerate()
1900 /// .unzip_into_vecs(&mut left, &mut right);
1901 ///
1902 /// assert_eq!(left, [0, 1, 2, 3, 4]);
1903 /// assert_eq!(right, [10, 11, 12, 13, 14]);
1904 /// ```
1905 fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
1906 where
1907 Self: IndexedParallelIterator<Item = (A, B)>,
1908 A: Send,
1909 B: Send,
1910 {
1911 collect::unzip_into_vecs(self, left, right);
1912 }
1913
1914 /// Iterate over tuples `(A, B)`, where the items `A` are from
1915 /// this iterator and `B` are from the iterator given as argument.
1916 /// Like the `zip` method on ordinary iterators, if the two
1917 /// iterators are of unequal length, you only get the items they
1918 /// have in common.
1919 ///
1920 /// # Examples
1921 ///
1922 /// ```
1923 /// use rayon::prelude::*;
1924 ///
1925 /// let result: Vec<_> = (1..4)
1926 /// .into_par_iter()
1927 /// .zip(vec!['a', 'b', 'c'])
1928 /// .collect();
1929 ///
1930 /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
1931 /// ```
1932 fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
1933 where
1934 Z: IntoParallelIterator,
1935 Z::Iter: IndexedParallelIterator,
1936 {
1937 zip::new(self, zip_op.into_par_iter())
1938 }
1939
1940 /// The same as `Zip`, but requires that both iterators have the same length.
1941 ///
1942 /// # Panics
1943 /// Will panic if `self` and `zip_op` are not the same length.
1944 ///
1945 /// ```should_panic
1946 /// use rayon::prelude::*;
1947 ///
1948 /// let one = [1u8];
1949 /// let two = [2u8, 2];
1950 /// let one_iter = one.par_iter();
1951 /// let two_iter = two.par_iter();
1952 ///
1953 /// // this will panic
1954 /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
1955 ///
1956 /// // we should never get here
1957 /// assert_eq!(1, zipped.len());
1958 /// ```
1959 fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
1960 where
1961 Z: IntoParallelIterator,
1962 Z::Iter: IndexedParallelIterator,
1963 {
1964 let zip_op_iter = zip_op.into_par_iter();
1965 assert_eq!(self.len(), zip_op_iter.len());
1966 zip_eq::new(self, zip_op_iter)
1967 }
1968
1969 /// Interleave elements of this iterator and the other given
1970 /// iterator. Alternately yields elements from this iterator and
1971 /// the given iterator, until both are exhausted. If one iterator
1972 /// is exhausted before the other, the remaining elements are taken
1973 /// from the other.
1974 ///
1975 /// # Examples
1976 ///
1977 /// ```
1978 /// use rayon::prelude::*;
1979 /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
1980 /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
1981 /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
1982 /// ```
1983 fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
1984 where
1985 I: IntoParallelIterator<Item = Self::Item>,
1986 I::Iter: IndexedParallelIterator<Item = Self::Item>,
1987 {
1988 interleave::new(self, other.into_par_iter())
1989 }
1990
1991 /// Interleave elements of this iterator and the other given
1992 /// iterator, until one is exhausted.
1993 ///
1994 /// # Examples
1995 ///
1996 /// ```
1997 /// use rayon::prelude::*;
1998 /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
1999 /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2000 /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2001 /// ```
2002 fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
2003 where
2004 I: IntoParallelIterator<Item = Self::Item>,
2005 I::Iter: IndexedParallelIterator<Item = Self::Item>,
2006 {
2007 interleave_shortest::new(self, other.into_par_iter())
2008 }
2009
2010 /// Split an iterator up into fixed-size chunks.
2011 ///
2012 /// Returns an iterator that returns `Vec`s of the given number of elements.
2013 /// If the number of elements in the iterator is not divisible by `chunk_size`,
2014 /// the last chunk may be shorter than `chunk_size`.
2015 ///
2016 /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2017 /// slices, without having to allocate intermediate `Vec`s for the chunks.
2018 ///
2019 /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks
2020 /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut
2021 ///
2022 /// # Examples
2023 ///
2024 /// ```
2025 /// use rayon::prelude::*;
2026 /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2027 /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2028 /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2029 /// ```
2030 fn chunks(self, chunk_size: usize) -> Chunks<Self> {
2031 assert!(chunk_size != 0, "chunk_size must not be zero");
2032 chunks::new(self, chunk_size)
2033 }
2034
2035 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2036 /// another.
2037 ///
2038 /// # Examples
2039 ///
2040 /// ```
2041 /// use rayon::prelude::*;
2042 /// use std::cmp::Ordering::*;
2043 ///
2044 /// let x = vec![1, 2, 3];
2045 /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2046 /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2047 /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2048 /// ```
2049 fn cmp<I>(self, other: I) -> Ordering
2050 where
2051 I: IntoParallelIterator<Item = Self::Item>,
2052 I::Iter: IndexedParallelIterator,
2053 Self::Item: Ord,
2054 {
2055 let other = other.into_par_iter();
2056 let ord_len = self.len().cmp(&other.len());
2057 self.zip(other)
2058 .map(|(x, y)| Ord::cmp(&x, &y))
2059 .find_first(|&ord| ord != Ordering::Equal)
2060 .unwrap_or(ord_len)
2061 }
2062
2063 /// Lexicographically compares the elements of this `ParallelIterator` with those of
2064 /// another.
2065 ///
2066 /// # Examples
2067 ///
2068 /// ```
2069 /// use rayon::prelude::*;
2070 /// use std::cmp::Ordering::*;
2071 /// use std::f64::NAN;
2072 ///
2073 /// let x = vec![1.0, 2.0, 3.0];
2074 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2075 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2076 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2077 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
2078 /// ```
2079 fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2080 where
2081 I: IntoParallelIterator,
2082 I::Iter: IndexedParallelIterator,
2083 Self::Item: PartialOrd<I::Item>,
2084 {
2085 let other = other.into_par_iter();
2086 let ord_len = self.len().cmp(&other.len());
2087 self.zip(other)
2088 .map(|(x, y)| PartialOrd::partial_cmp(&x, &y))
2089 .find_first(|&ord| ord != Some(Ordering::Equal))
2090 .unwrap_or(Some(ord_len))
2091 }
2092
2093 /// Determines if the elements of this `ParallelIterator`
2094 /// are equal to those of another.
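///
/// # Examples
///
/// For example, one might compare two integer ranges (a brief,
/// illustrative sketch):
///
/// ```
/// use rayon::prelude::*;
///
/// assert!((0..3).into_par_iter().eq(0..3));
/// assert!(!(0..3).into_par_iter().eq(0..4));
/// ```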
2095 fn eq<I>(self, other: I) -> bool
2096 where
2097 I: IntoParallelIterator,
2098 I::Iter: IndexedParallelIterator,
2099 Self::Item: PartialEq<I::Item>,
2100 {
2101 let other = other.into_par_iter();
2102 self.len() == other.len() && self.zip(other).all(|(x, y)| x.eq(&y))
2103 }
2104
2105 /// Determines if the elements of this `ParallelIterator`
2106 /// are unequal to those of another.
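///
/// # Examples
///
/// A brief, illustrative sketch; `ne` is simply the negation of `eq`:
///
/// ```
/// use rayon::prelude::*;
///
/// assert!((0..3).into_par_iter().ne(1..4));
/// assert!(!(0..3).into_par_iter().ne(0..3));
/// ```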
2107 fn ne<I>(self, other: I) -> bool
2108 where
2109 I: IntoParallelIterator,
2110 I::Iter: IndexedParallelIterator,
2111 Self::Item: PartialEq<I::Item>,
2112 {
2113 !self.eq(other)
2114 }
2115
2116 /// Determines if the elements of this `ParallelIterator`
2117 /// are lexicographically less than those of another.
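///
/// # Examples
///
/// A brief, illustrative sketch using integer ranges:
///
/// ```
/// use rayon::prelude::*;
///
/// assert!((0..3).into_par_iter().lt(1..4));
/// assert!(!(1..4).into_par_iter().lt(0..3));
/// ```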
2118 fn lt<I>(self, other: I) -> bool
2119 where
2120 I: IntoParallelIterator,
2121 I::Iter: IndexedParallelIterator,
2122 Self::Item: PartialOrd<I::Item>,
2123 {
2124 self.partial_cmp(other) == Some(Ordering::Less)
2125 }
2126
2127 /// Determines if the elements of this `ParallelIterator`
2128 /// are lexicographically less than or equal to those of another.
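///
/// # Examples
///
/// A brief, illustrative sketch using integer ranges:
///
/// ```
/// use rayon::prelude::*;
///
/// assert!((0..3).into_par_iter().le(0..3));
/// assert!((0..3).into_par_iter().le(1..4));
/// ```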
2129 fn le<I>(self, other: I) -> bool
2130 where
2131 I: IntoParallelIterator,
2132 I::Iter: IndexedParallelIterator,
2133 Self::Item: PartialOrd<I::Item>,
2134 {
2135 let ord = self.partial_cmp(other);
2136 ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2137 }
2138
2139 /// Determines if the elements of this `ParallelIterator`
2140 /// are lexicographically greater than those of another.
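///
/// # Examples
///
/// A brief, illustrative sketch using integer ranges:
///
/// ```
/// use rayon::prelude::*;
///
/// assert!((1..4).into_par_iter().gt(0..3));
/// assert!(!(0..3).into_par_iter().gt(0..3));
/// ```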
2141 fn gt<I>(self, other: I) -> bool
2142 where
2143 I: IntoParallelIterator,
2144 I::Iter: IndexedParallelIterator,
2145 Self::Item: PartialOrd<I::Item>,
2146 {
2147 self.partial_cmp(other) == Some(Ordering::Greater)
2148 }
2149
2150 /// Determines if the elements of this `ParallelIterator`
2151 /// are lexicographically greater than or equal to those of another.
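///
/// # Examples
///
/// A brief, illustrative sketch using integer ranges:
///
/// ```
/// use rayon::prelude::*;
///
/// assert!((0..3).into_par_iter().ge(0..3));
/// assert!((1..4).into_par_iter().ge(0..3));
/// ```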
2152 fn ge<I>(self, other: I) -> bool
2153 where
2154 I: IntoParallelIterator,
2155 I::Iter: IndexedParallelIterator,
2156 Self::Item: PartialOrd<I::Item>,
2157 {
2158 let ord = self.partial_cmp(other);
2159 ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2160 }
2161
2162 /// Yields an index along with each item.
2163 ///
2164 /// # Examples
2165 ///
2166 /// ```
2167 /// use rayon::prelude::*;
2168 ///
2169 /// let chars = vec!['a', 'b', 'c'];
2170 /// let result: Vec<_> = chars
2171 /// .into_par_iter()
2172 /// .enumerate()
2173 /// .collect();
2174 ///
2175 /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2176 /// ```
2177 fn enumerate(self) -> Enumerate<Self> {
2178 enumerate::new(self)
2179 }
2180
2181 /// Creates an iterator that skips the first `n` elements.
2182 ///
2183 /// # Examples
2184 ///
2185 /// ```
2186 /// use rayon::prelude::*;
2187 ///
2188 /// let result: Vec<_> = (0..100)
2189 /// .into_par_iter()
2190 /// .skip(95)
2191 /// .collect();
2192 ///
2193 /// assert_eq!(result, [95, 96, 97, 98, 99]);
2194 /// ```
2195 fn skip(self, n: usize) -> Skip<Self> {
2196 skip::new(self, n)
2197 }
2198
2199 /// Creates an iterator that yields the first `n` elements.
2200 ///
2201 /// # Examples
2202 ///
2203 /// ```
2204 /// use rayon::prelude::*;
2205 ///
2206 /// let result: Vec<_> = (0..100)
2207 /// .into_par_iter()
2208 /// .take(5)
2209 /// .collect();
2210 ///
2211 /// assert_eq!(result, [0, 1, 2, 3, 4]);
2212 /// ```
2213 fn take(self, n: usize) -> Take<Self> {
2214 take::new(self, n)
2215 }
2216
2217 /// Searches for **some** item in the parallel iterator that
2218 /// matches the given predicate, and returns its index. Like
2219 /// `ParallelIterator::find_any`, the parallel search will not
2220 /// necessarily find the **first** match, and once a match is
2221 /// found we'll attempt to stop processing any more.
2222 ///
2223 /// # Examples
2224 ///
2225 /// ```
2226 /// use rayon::prelude::*;
2227 ///
2228 /// let a = [1, 2, 3, 3];
2229 ///
2230 /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
2231 /// assert!(i == 2 || i == 3);
2232 ///
2233 /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
2234 /// ```
2235 fn position_any<P>(self, predicate: P) -> Option<usize>
2236 where
2237 P: Fn(Self::Item) -> bool + Sync + Send,
2238 {
2239 self.map(predicate)
2240 .enumerate()
2241 .find_any(|&(_, p)| p)
2242 .map(|(i, _)| i)
2243 }
2244
2245 /// Searches for the sequentially **first** item in the parallel iterator
2246 /// that matches the given predicate, and returns its index.
2247 ///
2248 /// Like `ParallelIterator::find_first`, once a match is found,
2249 /// all attempts to the right of the match will be stopped, while
2250 /// attempts to the left must continue in case an earlier match
2251 /// is found.
2252 ///
2253 /// Note that not all parallel iterators have a useful order, much like
2254 /// sequential `HashMap` iteration, so "first" may be nebulous. If you
2255 /// just want the first match discovered anywhere in the iterator,
2256 /// `position_any` is a better choice.
2257 ///
2258 /// # Examples
2259 ///
2260 /// ```
2261 /// use rayon::prelude::*;
2262 ///
2263 /// let a = [1, 2, 3, 3];
2264 ///
2265 /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
2266 ///
2267 /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
2268 /// ```
2269 fn position_first<P>(self, predicate: P) -> Option<usize>
2270 where
2271 P: Fn(Self::Item) -> bool + Sync + Send,
2272 {
2273 self.map(predicate)
2274 .enumerate()
2275 .find_first(|&(_, p)| p)
2276 .map(|(i, _)| i)
2277 }
2278
2279 /// Searches for the sequentially **last** item in the parallel iterator
2280 /// that matches the given predicate, and returns its index.
2281 ///
2282 /// Like `ParallelIterator::find_last`, once a match is found,
2283 /// all attempts to the left of the match will be stopped, while
2284 /// attempts to the right must continue in case a later match
2285 /// is found.
2286 ///
2287 /// Note that not all parallel iterators have a useful order, much like
2288 /// sequential `HashMap` iteration, so "last" may be nebulous. When the
2289 /// order doesn't actually matter to you, `position_any` is a better
2290 /// choice.
2291 ///
2292 /// # Examples
2293 ///
2294 /// ```
2295 /// use rayon::prelude::*;
2296 ///
2297 /// let a = [1, 2, 3, 3];
2298 ///
2299 /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
2300 ///
2301 /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
2302 /// ```
2303 fn position_last<P>(self, predicate: P) -> Option<usize>
2304 where
2305 P: Fn(Self::Item) -> bool + Sync + Send,
2306 {
2307 self.map(predicate)
2308 .enumerate()
2309 .find_last(|&(_, p)| p)
2310 .map(|(i, _)| i)
2311 }
2312
2313 #[doc(hidden)]
2314 #[deprecated(
2315 note = "parallel `position` does not search in order -- use `position_any`, \\
2316 `position_first`, or `position_last`"
2317 )]
2318 fn position<P>(self, predicate: P) -> Option<usize>
2319 where
2320 P: Fn(Self::Item) -> bool + Sync + Send,
2321 {
2322 self.position_any(predicate)
2323 }
2324
2325 /// Produces a new iterator with the elements of this iterator in
2326 /// reverse order.
2327 ///
2328 /// # Examples
2329 ///
2330 /// ```
2331 /// use rayon::prelude::*;
2332 ///
2333 /// let result: Vec<_> = (0..5)
2334 /// .into_par_iter()
2335 /// .rev()
2336 /// .collect();
2337 ///
2338 /// assert_eq!(result, [4, 3, 2, 1, 0]);
2339 /// ```
2340 fn rev(self) -> Rev<Self> {
2341 rev::new(self)
2342 }
2343
2344 /// Sets the minimum length of iterators desired to process in each
2345 /// thread. Rayon will not split any smaller than this length, but
2346 /// of course an iterator could already be smaller to begin with.
2347 ///
2348 /// Producers like `zip` and `interleave` will use the greater of the two
2349 /// minimums.
2350 /// Chained iterators and iterators inside `flat_map` may each use
2351 /// their own minimum length.
2352 ///
2353 /// # Examples
2354 ///
2355 /// ```
2356 /// use rayon::prelude::*;
2357 ///
2358 /// let min = (0..1_000_000)
2359 /// .into_par_iter()
2360 /// .with_min_len(1234)
2361 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
2362 /// .min().unwrap();
2363 ///
2364 /// assert!(min >= 1234);
2365 /// ```
2366 fn with_min_len(self, min: usize) -> MinLen<Self> {
2367 len::new_min_len(self, min)
2368 }
2369
2370 /// Sets the maximum length of iterators desired to process in each
2371 /// thread. Rayon will try to split at least below this length,
2372 /// unless that would put it below the length from `with_min_len()`.
2373 /// For example, given min=10 and max=15, a length of 16 will not be
2374 /// split any further.
2375 ///
2376 /// Producers like `zip` and `interleave` will use the lesser of the two
2377 /// maximums.
2378 /// Chained iterators and iterators inside `flat_map` may each use
2379 /// their own maximum length.
2380 ///
2381 /// # Examples
2382 ///
2383 /// ```
2384 /// use rayon::prelude::*;
2385 ///
2386 /// let max = (0..1_000_000)
2387 /// .into_par_iter()
2388 /// .with_max_len(1234)
2389 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
2390 /// .max().unwrap();
2391 ///
2392 /// assert!(max <= 1234);
2393 /// ```
2394 fn with_max_len(self, max: usize) -> MaxLen<Self> {
2395 len::new_max_len(self, max)
2396 }
2397
2398 /// Produces an exact count of how many items this iterator will
2399 /// produce, presuming no panic occurs.
2400 ///
2401 /// # Examples
2402 ///
2403 /// ```
2404 /// use rayon::prelude::*;
2405 ///
2406 /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
2407 /// assert_eq!(par_iter.len(), 10);
2408 ///
2409 /// let vec: Vec<_> = par_iter.collect();
2410 /// assert_eq!(vec.len(), 10);
2411 /// ```
2412 fn len(&self) -> usize;
2413
2414 /// Internal method used to define the behavior of this parallel
2415 /// iterator. You should not need to call this directly.
2416 ///
2417 /// This method causes the iterator `self` to start producing
2418 /// items and to feed them to the consumer `consumer` one by one.
2419 /// It may split the consumer before doing so to create the
2420 /// opportunity to produce in parallel. If a split does happen, it
2421 /// will inform the consumer of the index where the split should
2422 /// occur (unlike `ParallelIterator::drive_unindexed()`).
2423 ///
2424 /// See the [README] for more details on the internals of parallel
2425 /// iterators.
2426 ///
2427 /// [README]: README.md
2428 fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
2429
2430 /// Internal method used to define the behavior of this parallel
2431 /// iterator. You should not need to call this directly.
2432 ///
2433 /// This method converts the iterator into a producer P and then
2434 /// invokes `callback.callback()` with P. Note that the type of
2435 /// this producer is not defined as part of the API, since
2436 /// `callback` must be defined generically for all producers. This
2437 /// allows the producer type to contain references; it also means
2438 /// that parallel iterators can adjust that type without causing a
2439 /// breaking change.
2440 ///
2441 /// See the [README] for more details on the internals of parallel
2442 /// iterators.
2443 ///
2444 /// [README]: README.md
2445 fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
2446 }
2447
2448 /// `FromParallelIterator` implements the creation of a collection
2449 /// from a [`ParallelIterator`]. By implementing
2450 /// `FromParallelIterator` for a given type, you define how it will be
2451 /// created from an iterator.
2452 ///
2453 /// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
2454 ///
2455 /// [`ParallelIterator`]: trait.ParallelIterator.html
2456 /// [`collect()`]: trait.ParallelIterator.html#method.collect
2457 ///
2458 /// # Examples
2459 ///
2460 /// Implementing `FromParallelIterator` for your type:
2461 ///
2462 /// ```
2463 /// use rayon::prelude::*;
2464 /// use std::mem;
2465 ///
2466 /// struct BlackHole {
2467 /// mass: usize,
2468 /// }
2469 ///
2470 /// impl<T: Send> FromParallelIterator<T> for BlackHole {
2471 /// fn from_par_iter<I>(par_iter: I) -> Self
2472 /// where I: IntoParallelIterator<Item = T>
2473 /// {
2474 /// let par_iter = par_iter.into_par_iter();
2475 /// BlackHole {
2476 /// mass: par_iter.count() * mem::size_of::<T>(),
2477 /// }
2478 /// }
2479 /// }
2480 ///
2481 /// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
2482 /// assert_eq!(bh.mass, 4000);
2483 /// ```
2484 pub trait FromParallelIterator<T>
2485 where
2486 T: Send,
2487 {
2488 /// Creates an instance of the collection from the parallel iterator `par_iter`.
2489 ///
2490 /// If your collection is not naturally parallel, the easiest (and
2491 /// fastest) way to do this is often to collect `par_iter` into a
2492 /// [`LinkedList`] or other intermediate data structure and then
2493 /// sequentially extend your collection. However, a more 'native'
2494 /// technique is to use the [`par_iter.fold`] or
2495 /// [`par_iter.fold_with`] methods to create the collection.
2496 /// Alternatively, if your collection is 'natively' parallel, you
2497 /// can use [`par_iter.for_each`] to process each element in turn.
2498 ///
2499 /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html
2500 /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold
2501 /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with
2502 /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each
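///
/// # Examples
///
/// A rough sketch of the `LinkedList` technique described above, for a
/// hypothetical `MyVec` wrapper type (the type and its field are purely
/// illustrative):
///
/// ```
/// use rayon::prelude::*;
/// use std::collections::LinkedList;
///
/// // Hypothetical collection: a thin wrapper around `Vec<T>`.
/// struct MyVec<T>(Vec<T>);
///
/// impl<T: Send> FromParallelIterator<T> for MyVec<T> {
///     fn from_par_iter<I>(par_iter: I) -> Self
///         where I: IntoParallelIterator<Item = T>
///     {
///         // Fold items into per-segment `Vec`s in parallel, gather the
///         // segments into a `LinkedList`, then extend sequentially.
///         let list: LinkedList<Vec<T>> = par_iter
///             .into_par_iter()
///             .fold(Vec::new, |mut vec, item| { vec.push(item); vec })
///             .collect();
///
///         let mut result = Vec::new();
///         for segment in list {
///             result.extend(segment);
///         }
///         MyVec(result)
///     }
/// }
///
/// let v: MyVec<i32> = (0..100).into_par_iter().collect();
/// assert_eq!(v.0.len(), 100);
/// ```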
2503 fn from_par_iter<I>(par_iter: I) -> Self
2504 where
2505 I: IntoParallelIterator<Item = T>;
2506 }
2507
2508 /// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
2509 ///
2510 /// [`ParallelIterator`]: trait.ParallelIterator.html
2511 ///
2512 /// # Examples
2513 ///
2514 /// Implementing `ParallelExtend` for your type:
2515 ///
2516 /// ```
2517 /// use rayon::prelude::*;
2518 /// use std::mem;
2519 ///
2520 /// struct BlackHole {
2521 /// mass: usize,
2522 /// }
2523 ///
2524 /// impl<T: Send> ParallelExtend<T> for BlackHole {
2525 /// fn par_extend<I>(&mut self, par_iter: I)
2526 /// where I: IntoParallelIterator<Item = T>
2527 /// {
2528 /// let par_iter = par_iter.into_par_iter();
2529 /// self.mass += par_iter.count() * mem::size_of::<T>();
2530 /// }
2531 /// }
2532 ///
2533 /// let mut bh = BlackHole { mass: 0 };
2534 /// bh.par_extend(0i32..1000);
2535 /// assert_eq!(bh.mass, 4000);
2536 /// bh.par_extend(0i64..10);
2537 /// assert_eq!(bh.mass, 4080);
2538 /// ```
2539 pub trait ParallelExtend<T>
2540 where
2541 T: Send,
2542 {
2543 /// Extends an instance of the collection with the elements drawn
2544 /// from the parallel iterator `par_iter`.
2545 ///
2546 /// # Examples
2547 ///
2548 /// ```
2549 /// use rayon::prelude::*;
2550 ///
2551 /// let mut vec = vec![];
2552 /// vec.par_extend(0..5);
2553 /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
2554 /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
2555 /// ```
2556 fn par_extend<I>(&mut self, par_iter: I)
2557 where
2558 I: IntoParallelIterator<Item = T>;
2559 }
2560
2561 /// We hide the `Try` trait in a private module, as it's only meant to be a
2562 /// stable stand-in for the standard library's `Try` trait, which is still unstable.
2563 mod private {
2564 /// Clone of `std::ops::Try`.
2565 ///
2566 /// Implementing this trait is not permitted outside of `rayon`.
2567 pub trait Try {
2568 private_decl! {}
2569
2570 type Ok;
2571 type Error;
2572 fn into_result(self) -> Result<Self::Ok, Self::Error>;
2573 fn from_ok(v: Self::Ok) -> Self;
2574 fn from_error(v: Self::Error) -> Self;
2575 }
2576
2577 impl<T> Try for Option<T> {
2578 private_impl! {}
2579
2580 type Ok = T;
2581 type Error = ();
2582
2583 fn into_result(self) -> Result<T, ()> {
2584 self.ok_or(())
2585 }
2586 fn from_ok(v: T) -> Self {
2587 Some(v)
2588 }
2589 fn from_error(_: ()) -> Self {
2590 None
2591 }
2592 }
2593
2594 impl<T, E> Try for Result<T, E> {
2595 private_impl! {}
2596
2597 type Ok = T;
2598 type Error = E;
2599
2600 fn into_result(self) -> Result<T, E> {
2601 self
2602 }
2603 fn from_ok(v: T) -> Self {
2604 Ok(v)
2605 }
2606 fn from_error(v: E) -> Self {
2607 Err(v)
2608 }
2609 }
2610 }