//! Parallel iterator types for [inclusive ranges][std::range],
//! the type for values created by `a..=b` expressions
//!
//! You will rarely need to interact with this module directly unless you have
//! need to name one of the iterator types.
//!
//! ```
//! use rayon::prelude::*;
//!
//! let r = (0..=100u64).into_par_iter()
//!     .sum();
//!
//! // compare result with sequential calculation
//! assert_eq!((0..=100).sum::<u64>(), r);
//! ```
//!
//! [std::range]: https://doc.rust-lang.org/core/ops/struct.RangeInclusive.html

use crate::iter::plumbing::*;
use crate::iter::*;
use std::ops::RangeInclusive;

/// Parallel iterator over an inclusive range, implemented for all integer types.
///
/// **Note:** The `zip` operation requires `IndexedParallelIterator`
/// which is only implemented for `u8`, `i8`, `u16`, and `i16`.
///
/// ```
/// use rayon::prelude::*;
///
/// let p = (0..=25u16).into_par_iter()
///     .zip(0..=25u16)
///     .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
///     .map(|(x, y)| x * y)
///     .sum::<u16>();
///
/// let s = (0..=25u16).zip(0..=25u16)
///     .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
///     .map(|(x, y)| x * y)
///     .sum();
///
/// assert_eq!(p, s);
/// ```
#[derive(Debug, Clone)]
pub struct Iter<T> {
    range: RangeInclusive<T>,
}

impl<T> Iter<T>
where
    RangeInclusive<T>: Clone + Iterator<Item = T> + DoubleEndedIterator,
{
    /// Returns `Some((start, end))` for `start..=end`, or `None` if it is exhausted.
    ///
    /// Note that `RangeInclusive` does not specify the bounds of an exhausted iterator,
    /// so this is a way for us to figure out what we've got. Thankfully, all of the
    /// integer types we care about can be trivially cloned.
    fn bounds(&self) -> Option<(T, T)> {
        Some((self.range.clone().next()?, self.range.clone().next_back()?))
    }
}

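// Illustrative sketch, not part of the upstream source: `bounds()` reports the
// endpoints of a fresh range, and `None` once there is nothing left to yield.
#[test]
fn test_bounds_sketch() {
    let iter = (3..=7u32).into_par_iter();
    assert_eq!(iter.bounds(), Some((3, 7)));

    // An inclusive range whose start is already past its end yields no items.
    let exhausted = Iter { range: 5..=4u32 };
    assert_eq!(exhausted.bounds(), None);
}
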
impl<T> IntoParallelIterator for RangeInclusive<T>
where
    Iter<T>: ParallelIterator,
{
    type Item = <Iter<T> as ParallelIterator>::Item;
    type Iter = Iter<T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter { range: self }
    }
}

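// Converts the inclusive range into an exclusive one so the existing `Range<T>`
// parallel iterator can do the work. When `end + 1` would overflow the integer
// type, `start..end` is used and the final `end` value is chained on with
// `once(end)`; an exhausted range is handled with `empty()`.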
macro_rules! convert {
    ( $self:ident . $method:ident ( $( $arg:expr ),* ) ) => {
        if let Some((start, end)) = $self.bounds() {
            if let Some(end) = end.checked_add(1) {
                (start..end).into_par_iter().$method($( $arg ),*)
            } else {
                (start..end).into_par_iter().chain(once(end)).$method($( $arg ),*)
            }
        } else {
            empty::<Self::Item>().$method($( $arg ),*)
        }
    };
}

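// Illustrative sketch, not an upstream rayon test: exercises the overflow branch
// of `convert!`, where `end.checked_add(1)` fails and the last value has to be
// chained on via `once(end)`.
#[test]
fn test_convert_overflow_sketch() {
    use std::u8;
    let v: Vec<u8> = (u8::MAX - 1..=u8::MAX).into_par_iter().collect();
    assert_eq!(v, vec![u8::MAX - 1, u8::MAX]);
}
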
macro_rules! parallel_range_impl {
    ( $t:ty ) => {
        impl ParallelIterator for Iter<$t> {
            type Item = $t;

            fn drive_unindexed<C>(self, consumer: C) -> C::Result
            where
                C: UnindexedConsumer<Self::Item>,
            {
                convert!(self.drive_unindexed(consumer))
            }

            fn opt_len(&self) -> Option<usize> {
                convert!(self.opt_len())
            }
        }
    };
}

macro_rules! indexed_range_impl {
    ( $t:ty ) => {
        parallel_range_impl! { $t }

        impl IndexedParallelIterator for Iter<$t> {
            fn drive<C>(self, consumer: C) -> C::Result
            where
                C: Consumer<Self::Item>,
            {
                convert!(self.drive(consumer))
            }

            fn len(&self) -> usize {
                self.range.len()
            }

            fn with_producer<CB>(self, callback: CB) -> CB::Output
            where
                CB: ProducerCallback<Self::Item>,
            {
                convert!(self.with_producer(callback))
            }
        }
    };
}

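// Only the 8- and 16-bit integer types have `ExactSizeIterator` for their
// inclusive ranges in std; a wider range like `0..=u32::MAX` can hold more than
// `usize::MAX` elements on some targets, so those types only get the unindexed
// `ParallelIterator` impl below.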
// all RangeInclusive<T> with ExactSizeIterator
indexed_range_impl! {u8}
indexed_range_impl! {u16}
indexed_range_impl! {i8}
indexed_range_impl! {i16}

// other RangeInclusive<T> with just Iterator
parallel_range_impl! {usize}
parallel_range_impl! {isize}
parallel_range_impl! {u32}
parallel_range_impl! {i32}
parallel_range_impl! {u64}
parallel_range_impl! {i64}
parallel_range_impl! {u128}
parallel_range_impl! {i128}

#[test]
#[cfg(target_pointer_width = "64")]
fn test_u32_opt_len() {
    use std::u32;
    assert_eq!(Some(101), (0..=100u32).into_par_iter().opt_len());
    assert_eq!(
        Some(u32::MAX as usize),
        (0..=u32::MAX - 1).into_par_iter().opt_len()
    );
    assert_eq!(
        Some(u32::MAX as usize + 1),
        (0..=u32::MAX).into_par_iter().opt_len()
    );
}

#[test]
fn test_u64_opt_len() {
    use std::{u64, usize};
    assert_eq!(Some(101), (0..=100u64).into_par_iter().opt_len());
    assert_eq!(
        Some(usize::MAX),
        (0..=usize::MAX as u64 - 1).into_par_iter().opt_len()
    );
    assert_eq!(None, (0..=usize::MAX as u64).into_par_iter().opt_len());
    assert_eq!(None, (0..=u64::MAX).into_par_iter().opt_len());
}

#[test]
fn test_u128_opt_len() {
    use std::{u128, usize};
    assert_eq!(Some(101), (0..=100u128).into_par_iter().opt_len());
    assert_eq!(
        Some(usize::MAX),
        (0..=usize::MAX as u128 - 1).into_par_iter().opt_len()
    );
    assert_eq!(None, (0..=usize::MAX as u128).into_par_iter().opt_len());
    assert_eq!(None, (0..=u128::MAX).into_par_iter().opt_len());
}

// `usize as i64` can overflow, so make sure to wrap it appropriately
// when using the `opt_len` "indexed" mode.
#[test]
#[cfg(target_pointer_width = "64")]
fn test_usize_i64_overflow() {
    use crate::ThreadPoolBuilder;
    use std::i64;

    let iter = (-2..=i64::MAX).into_par_iter();
    assert_eq!(iter.opt_len(), Some(i64::MAX as usize + 3));

    // always run with multiple threads to split into, or this will take forever...
    let pool = ThreadPoolBuilder::new().num_threads(8).build().unwrap();
    pool.install(|| assert_eq!(iter.find_last(|_| true), Some(i64::MAX)));
}
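
// Illustrative sketch, not an upstream rayon test: the wider integer types only
// get the unindexed `ParallelIterator` impl, but reductions such as `sum` still
// go through `drive_unindexed` and match the sequential result.
#[test]
fn test_u128_sum_sketch() {
    let par: u128 = (1..=100u128).into_par_iter().sum();
    assert_eq!(par, (1..=100u128).sum::<u128>());
}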