]> git.proxmox.com Git - rustc.git/blob - vendor/rayon/src/range.rs
New upstream version 1.48.0~beta.8+dfsg1
[rustc.git] / vendor / rayon / src / range.rs
//! Parallel iterator types for [ranges][std::range],
//! the type for values created by `a..b` expressions.
//!
//! You will rarely need to interact with this module directly unless you
//! need to name one of the iterator types.
//!
//! ```
//! use rayon::prelude::*;
//!
//! let r = (0..100u64).into_par_iter()
//!                    .sum();
//!
//! // compare result with sequential calculation
//! assert_eq!((0..100).sum::<u64>(), r);
//! ```
//!
//! [std::range]: https://doc.rust-lang.org/core/ops/struct.Range.html
18
19 use crate::iter::plumbing::*;
20 use crate::iter::*;
21 use std::char;
22 use std::ops::Range;
23 use std::usize;
24
/// Parallel iterator over a range, implemented for all integer types.
///
/// **Note:** The `zip` operation requires `IndexedParallelIterator`
/// which is not implemented for `u64`, `i64`, `u128`, or `i128`.
///
/// ```
/// use rayon::prelude::*;
///
/// let p = (0..25usize).into_par_iter()
///                     .zip(0..25usize)
///                     .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
///                     .map(|(x, y)| x * y)
///                     .sum::<usize>();
///
/// let s = (0..25usize).zip(0..25)
///                     .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
///                     .map(|(x, y)| x * y)
///                     .sum();
///
/// assert_eq!(p, s);
/// ```
#[derive(Debug, Clone)]
pub struct Iter<T> {
    // The underlying sequential range whose bounds are divided up for
    // parallel iteration.
    range: Range<T>,
}
50
// Blanket conversion: any `Range<T>` for which this module provides a
// `ParallelIterator` impl (all primitive integers and `char`, below) can be
// turned into a parallel iterator by simply wrapping the range.
impl<T> IntoParallelIterator for Range<T>
where
    Iter<T>: ParallelIterator,
{
    type Item = <Iter<T> as ParallelIterator>::Item;
    type Iter = Iter<T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter { range: self }
    }
}
62
// The producer that actually splits the work: a plain `Range` that gets
// divided at midpoints (see `split_at` / `split` in the macros below).
struct IterProducer<T> {
    range: Range<T>,
}
66
// Sequential fallback: once splitting stops, a producer is consumed by
// iterating its inner range directly.
impl<T> IntoIterator for IterProducer<T>
where
    Range<T>: Iterator,
{
    type Item = <Range<T> as Iterator>::Item;
    type IntoIter = Range<T>;

    fn into_iter(self) -> Self::IntoIter {
        self.range
    }
}
78
// Implements `ParallelIterator`, `IndexedParallelIterator`, and `Producer`
// for `Range<$t>` where the range is an `ExactSizeIterator`, i.e. its
// length always fits in `usize`.
macro_rules! indexed_range_impl {
    ( $t:ty ) => {
        impl ParallelIterator for Iter<$t> {
            type Item = $t;

            fn drive_unindexed<C>(self, consumer: C) -> C::Result
            where
                C: UnindexedConsumer<Self::Item>,
            {
                bridge(self, consumer)
            }

            // The exact length is always known for these types.
            fn opt_len(&self) -> Option<usize> {
                Some(self.len())
            }
        }

        impl IndexedParallelIterator for Iter<$t> {
            fn drive<C>(self, consumer: C) -> C::Result
            where
                C: Consumer<Self::Item>,
            {
                bridge(self, consumer)
            }

            fn len(&self) -> usize {
                self.range.len()
            }

            fn with_producer<CB>(self, callback: CB) -> CB::Output
            where
                CB: ProducerCallback<Self::Item>,
            {
                callback.callback(IterProducer { range: self.range })
            }
        }

        impl Producer for IterProducer<$t> {
            type Item = <Range<$t> as Iterator>::Item;
            type IntoIter = Range<$t>;
            fn into_iter(self) -> Self::IntoIter {
                self.range
            }

            fn split_at(self, index: usize) -> (Self, Self) {
                assert!(index <= self.range.len());
                // For signed $t, the length and requested index could be greater than $t::MAX, and
                // then `index as $t` could wrap to negative, so wrapping_add is necessary.
                let mid = self.range.start.wrapping_add(index as $t);
                let left = self.range.start..mid;
                let right = mid..self.range.end;
                (IterProducer { range: left }, IterProducer { range: right })
            }
        }
    };
}
135
// Local stand-in for `ExactSizeIterator::len`, for ranges whose length may
// not fit in `usize` (e.g. `Range<u128>` needs a `u128` length).
trait UnindexedRangeLen<L> {
    fn len(&self) -> L;
}
139
// Implements `ParallelIterator` (but not `IndexedParallelIterator`) for
// `Range<$t>`, where the range's length can exceed `usize` and must be
// tracked in the wider unsigned type `$len_t`.
macro_rules! unindexed_range_impl {
    ( $t:ty, $len_t:ty ) => {
        impl UnindexedRangeLen<$len_t> for Range<$t> {
            fn len(&self) -> $len_t {
                let &Range { start, end } = self;
                if end > start {
                    // For signed $t, e.g. i64::MIN..i64::MAX, the difference
                    // overflows $t but fits in $len_t after the unsigned cast,
                    // so wrapping_sub is required.
                    end.wrapping_sub(start) as $len_t
                } else {
                    0
                }
            }
        }

        impl ParallelIterator for Iter<$t> {
            type Item = $t;

            fn drive_unindexed<C>(self, consumer: C) -> C::Result
            where
                C: UnindexedConsumer<Self::Item>,
            {
                // Maps a `usize` offset back into the range's domain;
                // wrapping_add handles signed $t starts (see above).
                #[inline]
                fn offset(start: $t) -> impl Fn(usize) -> $t {
                    move |i| start.wrapping_add(i as $t)
                }

                if let Some(len) = self.opt_len() {
                    // Drive this in indexed mode for better `collect`.
                    (0..len)
                        .into_par_iter()
                        .map(offset(self.range.start))
                        .drive(consumer)
                } else {
                    bridge_unindexed(IterProducer { range: self.range }, consumer)
                }
            }

            // Only claim an exact length when it actually fits in `usize`,
            // as indexed iteration requires.
            fn opt_len(&self) -> Option<usize> {
                let len = self.range.len();
                if len <= usize::MAX as $len_t {
                    Some(len as usize)
                } else {
                    None
                }
            }
        }

        impl UnindexedProducer for IterProducer<$t> {
            type Item = $t;

            fn split(mut self) -> (Self, Option<Self>) {
                // Split at the midpoint; index 0 means at most one element
                // remains and the producer cannot be split further.
                let index = self.range.len() / 2;
                if index > 0 {
                    // wrapping_add for the same signed-overflow reason as in
                    // `Producer::split_at` above.
                    let mid = self.range.start.wrapping_add(index as $t);
                    let right = mid..self.range.end;
                    self.range.end = mid;
                    (self, Some(IterProducer { range: right }))
                } else {
                    (self, None)
                }
            }

            fn fold_with<F>(self, folder: F) -> F
            where
                F: Folder<Self::Item>,
            {
                folder.consume_iter(self)
            }
        }
    };
}
210
// all Range<T> with ExactSizeIterator — their length always fits in `usize`,
// so they get the fully indexed implementation.
indexed_range_impl! {u8}
indexed_range_impl! {u16}
indexed_range_impl! {u32}
indexed_range_impl! {usize}
indexed_range_impl! {i8}
indexed_range_impl! {i16}
indexed_range_impl! {i32}
indexed_range_impl! {isize}

// other Range<T> with just Iterator — lengths may exceed `usize`, so these
// are unindexed, with the length tracked in the given unsigned type.
unindexed_range_impl! {u64, u64}
unindexed_range_impl! {i64, u64}
unindexed_range_impl! {u128, u128}
unindexed_range_impl! {i128, u128}
226
// char is special because of the surrogate range hole
//
// Rewrites a `Range<char>` operation as the equivalent operation over `u32`
// code points mapped back to `char`. The surrogate range U+D800..U+E000
// contains no valid `char`s, so when the range reaches into it we chain the
// two valid fragments on either side; `from_u32_unchecked` is then only ever
// called on non-surrogate code points.
macro_rules! convert_char {
    ( $self:ident . $method:ident ( $( $arg:expr ),* ) ) => {{
        let start = $self.range.start as u32;
        let end = $self.range.end as u32;
        // NOTE: `0xE000 <= end` (not `<`), matching `len()` below. With an
        // exclusive end of exactly U+E000 the raw range `start..end` would
        // still cover the surrogate hole, so it must take the chained path;
        // otherwise we'd call `from_u32_unchecked` on surrogates (UB) and
        // produce more items than `len()` reports.
        if start < 0xD800 && 0xE000 <= end {
            // chain the before and after surrogate range fragments
            (start..0xD800)
                .into_par_iter()
                .chain(0xE000..end)
                // SAFETY: both fragments exclude U+D800..U+E000, so every
                // code point here is a valid `char`.
                .map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
                .$method($( $arg ),*)
        } else {
            // no surrogate range to worry about
            // SAFETY: the endpoints come from valid `char`s and the range
            // does not cross the surrogate hole, so every code point is a
            // valid `char`.
            (start..end)
                .into_par_iter()
                .map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
                .$method($( $arg ),*)
        }
    }};
}
248
impl ParallelIterator for Iter<char> {
    type Item = char;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        // `char` ranges always have a `usize`-sized length, so we can
        // drive through the indexed path.
        convert_char!(self.drive(consumer))
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}
263
impl IndexedParallelIterator for Iter<char> {
    // Split at the surrogate range first if we're allowed to
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        convert_char!(self.drive(consumer))
    }

    fn len(&self) -> usize {
        // Taken from <char as Step>::steps_between
        let start = self.range.start as u32;
        let end = self.range.end as u32;
        if start < end {
            let mut count = end - start;
            // The 0x800 code points in U+D800..U+E000 are surrogates, not
            // `char`s, so they don't count when the range spans that hole.
            if start < 0xD800 && 0xE000 <= end {
                count -= 0x800
            }
            count as usize
        } else {
            0
        }
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        convert_char!(self.with_producer(callback))
    }
}
295
#[test]
fn check_range_split_at_overflow() {
    // The requested split index (150) doesn't fit in i8, so `split_at`
    // must rely on wrapping arithmetic to locate the midpoint.
    let producer = IterProducer { range: -100i8..100 };
    let (low, high) = producer.split_at(150);
    let total: i32 = low
        .range
        .map(i32::from)
        .chain(high.range.map(i32::from))
        .sum();
    assert_eq!(total, -100);
}
305
#[test]
fn test_i128_len_doesnt_overflow() {
    use std::{i128, u128};

    // Using parse because some versions of rust don't allow long literals
    let big: i128 = "1000000000000000000000000000".parse().unwrap();
    let prod = IterProducer { range: 0..big };

    // The u128 length must survive intact even though it exceeds usize.
    assert_eq!(big as u128, prod.range.len());
    assert_eq!(big as u128, (0..big).len());
    assert_eq!(2 * big as u128, (-big..big).len());

    // The full signed domain has exactly u128::MAX elements.
    assert_eq!(u128::MAX, (i128::MIN..i128::MAX).len());
}
322
#[test]
fn test_u64_opt_len() {
    use std::{u64, usize};

    // `opt_len` reports an exact length only while it fits in usize.
    let max = usize::MAX as u64;
    assert_eq!(Some(100), (0..100u64).into_par_iter().opt_len());
    assert_eq!(Some(usize::MAX), (0..max).into_par_iter().opt_len());
    if max < u64::MAX {
        // On 32-bit targets (and anywhere usize is narrower than u64),
        // lengths past usize::MAX must report `None`.
        assert_eq!(None, (0..max.wrapping_add(1)).into_par_iter().opt_len());
        assert_eq!(None, (0..u64::MAX).into_par_iter().opt_len());
    }
}
341
#[test]
fn test_u128_opt_len() {
    use std::{u128, usize};

    // u128 is always wider than usize, so the overflowing cases are
    // reachable on every target.
    let max = usize::MAX as u128;
    assert_eq!(Some(100), (0..100u128).into_par_iter().opt_len());
    assert_eq!(Some(usize::MAX), (0..max).into_par_iter().opt_len());
    assert_eq!(None, (0..max + 1).into_par_iter().opt_len());
    assert_eq!(None, (0..u128::MAX).into_par_iter().opt_len());
}
353
// `usize as i64` can overflow, so make sure to wrap it appropriately
// when using the `opt_len` "indexed" mode.
#[test]
#[cfg(target_pointer_width = "64")]
fn test_usize_i64_overflow() {
    use crate::ThreadPoolBuilder;
    use std::i64;

    let par_iter = (-2..i64::MAX).into_par_iter();
    assert_eq!(par_iter.opt_len(), Some(i64::MAX as usize + 2));

    // always run with multiple threads to split into, or this will take forever...
    let threads = ThreadPoolBuilder::new().num_threads(8).build().unwrap();
    threads.install(|| assert_eq!(par_iter.find_last(|_| true), Some(i64::MAX - 1)));
}