//! Parallel iterator types for [slices][std::slice]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [std::slice]: https://doc.rust-lang.org/stable/std/slice/

mod mergesort;
mod quicksort;

mod test;

use self::mergesort::par_mergesort;
use self::quicksort::par_quicksort;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::split_producer::*;
use std::cmp;
use std::cmp::Ordering;
use std::fmt::{self, Debug};

use super::math::div_round_up;

/// Parallel extensions for slices.
pub trait ParallelSlice<T: Sync> {
    /// Returns a plain slice, which is used to implement the rest of the
    /// parallel methods.
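    ///
    /// # Examples
    ///
    /// For `[T]` itself this is simply the identity; a minimal sketch:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// assert_eq!([1, 2, 3].as_parallel_slice(), [1, 2, 3]);
    /// ```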
    fn as_parallel_slice(&self) -> &[T];

    /// Returns a parallel iterator over subslices separated by elements that
    /// match the separator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let smallest = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9]
    ///     .par_split(|i| *i == 0)
    ///     .map(|numbers| numbers.iter().min().unwrap())
    ///     .min();
    /// assert_eq!(Some(&1), smallest);
    /// ```
    fn par_split<P>(&self, separator: P) -> Split<'_, T, P>
    where
        P: Fn(&T) -> bool + Sync + Send,
    {
        Split {
            slice: self.as_parallel_slice(),
            separator,
        }
    }

    /// Returns a parallel iterator over all contiguous windows of length
    /// `window_size`. The windows overlap.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let windows: Vec<_> = [1, 2, 3].par_windows(2).collect();
    /// assert_eq!(vec![[1, 2], [2, 3]], windows);
    /// ```
    fn par_windows(&self, window_size: usize) -> Windows<'_, T> {
        Windows {
            window_size,
            slice: self.as_parallel_slice(),
        }
    }

    /// Returns a parallel iterator over at most `chunk_size` elements of
    /// `self` at a time. The chunks do not overlap.
    ///
    /// If the number of elements in the iterator is not divisible by
    /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
    /// other chunks will have that exact length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks(2).collect();
    /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
    /// ```
    fn par_chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        Chunks {
            chunk_size,
            slice: self.as_parallel_slice(),
        }
    }

    /// Returns a parallel iterator over `chunk_size` elements of
    /// `self` at a time. The chunks do not overlap.
    ///
    /// If `chunk_size` does not divide the length of the slice, then the
    /// last up to `chunk_size-1` elements will be omitted and can be
    /// retrieved from the `remainder` function of the iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks_exact(2).collect();
    /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4]]);
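    /// // The tail that does not fill a whole chunk is still reachable; a
    /// // minimal sketch using `remainder()`:
    /// assert_eq!([1, 2, 3, 4, 5].par_chunks_exact(2).remainder(), &[5]);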
    /// ```
    fn par_chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
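        // Split off the tail that cannot fill a whole chunk; it is exposed
        // separately via `remainder()`.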
        let slice = self.as_parallel_slice();
        let rem = slice.len() % chunk_size;
        let len = slice.len() - rem;
        let (fst, snd) = slice.split_at(len);
        ChunksExact {
            chunk_size,
            slice: fst,
            rem: snd,
        }
    }
}

impl<T: Sync> ParallelSlice<T> for [T] {
    #[inline]
    fn as_parallel_slice(&self) -> &[T] {
        self
    }
}

/// Parallel extensions for mutable slices.
pub trait ParallelSliceMut<T: Send> {
    /// Returns a plain mutable slice, which is used to implement the rest of
    /// the parallel methods.
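    ///
    /// # Examples
    ///
    /// For `[T]` itself this is again the identity; a minimal sketch:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut v = vec![1, 2, 3];
    /// v.as_parallel_slice_mut()[0] = 7;
    /// assert_eq!(v, [7, 2, 3]);
    /// ```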
    fn as_parallel_slice_mut(&mut self) -> &mut [T];

    /// Returns a parallel iterator over mutable subslices separated by
    /// elements that match the separator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9];
    /// array.par_split_mut(|i| *i == 0)
    ///     .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [3, 2, 1, 0, 8, 4, 2, 0, 9, 6, 3]);
    /// ```
    fn par_split_mut<P>(&mut self, separator: P) -> SplitMut<'_, T, P>
    where
        P: Fn(&T) -> bool + Sync + Send,
    {
        SplitMut {
            slice: self.as_parallel_slice_mut(),
            separator,
        }
    }

    /// Returns a parallel iterator over at most `chunk_size` elements of
    /// `self` at a time. The chunks are mutable and do not overlap.
    ///
    /// If the number of elements in the iterator is not divisible by
    /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
    /// other chunks will have that exact length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 4, 5];
    /// array.par_chunks_mut(2)
    ///     .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [2, 1, 4, 3, 5]);
    /// ```
    fn par_chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        ChunksMut {
            chunk_size,
            slice: self.as_parallel_slice_mut(),
        }
    }

    /// Returns a parallel iterator over `chunk_size` elements of
    /// `self` at a time. The chunks are mutable and do not overlap.
    ///
    /// If `chunk_size` does not divide the length of the slice, then the
    /// last up to `chunk_size-1` elements will be omitted and can be
    /// retrieved from the `remainder` function of the iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 4, 5];
    /// array.par_chunks_exact_mut(3)
    ///     .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [3, 2, 1, 4, 5]);
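    /// // A hedged extra step: the leftover tail is reachable through
    /// // `into_remainder()`.
    /// let mut array = [1, 2, 3, 4, 5];
    /// array.par_chunks_exact_mut(2).into_remainder()[0] = 9;
    /// assert_eq!(array, [1, 2, 3, 4, 9]);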
    /// ```
    fn par_chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        let slice = self.as_parallel_slice_mut();
        let rem = slice.len() % chunk_size;
        let len = slice.len() - rem;
        let (fst, snd) = slice.split_at_mut(len);
        ChunksExactMut {
            chunk_size,
            slice: fst,
            rem: snd,
        }
    }

    /// Sorts the slice in parallel.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable`](#method.par_sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and a parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.par_sort();
    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
    /// ```
    fn par_sort(&mut self)
    where
        T: Ord,
    {
        par_mergesort(self.as_parallel_slice_mut(), T::lt);
    }

    /// Sorts the slice in parallel with a comparator function.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable_by`](#method.par_sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and a parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.par_sort_by(|a, b| a.cmp(b));
    /// assert_eq!(v, [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.par_sort_by(|a, b| b.cmp(a));
    /// assert_eq!(v, [5, 4, 3, 2, 1]);
    /// ```
    fn par_sort_by<F>(&mut self, compare: F)
    where
        F: Fn(&T, &T) -> Ordering + Sync,
    {
        par_mergesort(self.as_parallel_slice_mut(), |a, b| {
            compare(a, b) == Ordering::Less
        });
    }

    /// Sorts the slice in parallel with a key extraction function.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable_by_key`](#method.par_sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and a parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.par_sort_by_key(|k| k.abs());
    /// assert_eq!(v, [1, 2, -3, 4, -5]);
    /// ```
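    ///
    /// Because the sort is stable, equal keys keep their original relative
    /// order; a minimal sketch:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [(1, 'b'), (0, 'a'), (1, 'a')];
    /// v.par_sort_by_key(|&(k, _)| k);
    /// assert_eq!(v, [(0, 'a'), (1, 'b'), (1, 'a')]);
    /// ```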
    fn par_sort_by_key<B, F>(&mut self, f: F)
    where
        B: Ord,
        F: Fn(&T) -> B + Sync,
    {
        par_mergesort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice in parallel, but may not preserve the order of equal elements.
    ///
    /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
    /// and `O(n log n)` worst-case.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
    /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
    /// sometimes achieving linear time. Its randomization is derived deterministically, so
    /// runs are reproducible, and it falls back to heapsort on degenerate inputs.
    ///
    /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
    /// slice consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.par_sort_unstable();
    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
    /// ```
    fn par_sort_unstable(&mut self)
    where
        T: Ord,
    {
        par_quicksort(self.as_parallel_slice_mut(), T::lt);
    }

    /// Sorts the slice in parallel with a comparator function, but may not preserve the order of
    /// equal elements.
    ///
    /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
    /// and `O(n log n)` worst-case.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
    /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
    /// sometimes achieving linear time. Its randomization is derived deterministically, so
    /// runs are reproducible, and it falls back to heapsort on degenerate inputs.
    ///
    /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
    /// slice consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.par_sort_unstable_by(|a, b| a.cmp(b));
    /// assert_eq!(v, [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.par_sort_unstable_by(|a, b| b.cmp(a));
    /// assert_eq!(v, [5, 4, 3, 2, 1]);
    /// ```
    fn par_sort_unstable_by<F>(&mut self, compare: F)
    where
        F: Fn(&T, &T) -> Ordering + Sync,
    {
        par_quicksort(self.as_parallel_slice_mut(), |a, b| {
            compare(a, b) == Ordering::Less
        });
    }

    /// Sorts the slice in parallel with a key extraction function, but may not preserve the order
    /// of equal elements.
    ///
    /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
    /// and `O(n log n)` worst-case.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
    /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
    /// sometimes achieving linear time. Its randomization is derived deterministically, so
    /// runs are reproducible, and it falls back to heapsort on degenerate inputs.
    ///
    /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
    /// slice consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.par_sort_unstable_by_key(|k| k.abs());
    /// assert_eq!(v, [1, 2, -3, 4, -5]);
    /// ```
    fn par_sort_unstable_by_key<B, F>(&mut self, f: F)
    where
        B: Ord,
        F: Fn(&T) -> B + Sync,
    {
        par_quicksort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
    }
}

impl<T: Send> ParallelSliceMut<T> for [T] {
    #[inline]
    fn as_parallel_slice_mut(&mut self) -> &mut [T] {
        self
    }
}

impl<'data, T: Sync + 'data> IntoParallelIterator for &'data [T] {
    type Item = &'data T;
    type Iter = Iter<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter { slice: self }
    }
}

impl<'data, T: Sync + 'data> IntoParallelIterator for &'data Vec<T> {
    type Item = &'data T;
    type Iter = Iter<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter { slice: self }
    }
}

impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut [T] {
    type Item = &'data mut T;
    type Iter = IterMut<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        IterMut { slice: self }
    }
}

impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut Vec<T> {
    type Item = &'data mut T;
    type Iter = IterMut<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        IterMut { slice: self }
    }
}

/// Parallel iterator over immutable items in a slice
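///
/// Per the module docs, naming this type is the main reason to reach into this
/// module; a minimal sketch of storing the result of `par_iter()`:
///
/// ```
/// use rayon::prelude::*;
/// use rayon::slice::Iter;
///
/// let v = vec![1, 2, 3];
/// let iter: Iter<'_, i32> = v.par_iter();
/// assert_eq!(iter.sum::<i32>(), 6);
/// ```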
#[derive(Debug)]
pub struct Iter<'data, T: Sync> {
    slice: &'data [T],
}

impl<'data, T: Sync> Clone for Iter<'data, T> {
    fn clone(&self) -> Self {
        Iter { ..*self }
    }
}

impl<'data, T: Sync + 'data> ParallelIterator for Iter<'data, T> {
    type Item = &'data T;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Sync + 'data> IndexedParallelIterator for Iter<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        self.slice.len()
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(IterProducer { slice: self.slice })
    }
}

struct IterProducer<'data, T: Sync> {
    slice: &'data [T],
}

impl<'data, T: 'data + Sync> Producer for IterProducer<'data, T> {
    type Item = &'data T;
    type IntoIter = ::std::slice::Iter<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.iter()
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        let (left, right) = self.slice.split_at(index);
        (IterProducer { slice: left }, IterProducer { slice: right })
    }
}

/// Parallel iterator over immutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct Chunks<'data, T: Sync> {
    chunk_size: usize,
    slice: &'data [T],
}

impl<'data, T: Sync> Clone for Chunks<'data, T> {
    fn clone(&self) -> Self {
        Chunks { ..*self }
    }
}

impl<'data, T: Sync + 'data> ParallelIterator for Chunks<'data, T> {
    type Item = &'data [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Sync + 'data> IndexedParallelIterator for Chunks<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        div_round_up(self.slice.len(), self.chunk_size)
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(ChunksProducer {
            chunk_size: self.chunk_size,
            slice: self.slice,
        })
    }
}

struct ChunksProducer<'data, T: Sync> {
    chunk_size: usize,
    slice: &'data [T],
}

impl<'data, T: 'data + Sync> Producer for ChunksProducer<'data, T> {
    type Item = &'data [T];
    type IntoIter = ::std::slice::Chunks<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.chunks(self.chunk_size)
    }

    fn split_at(self, index: usize) -> (Self, Self) {
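        // `index` counts chunks, so convert it to an element offset; clamp to
        // the slice length because the final chunk may be shorter than
        // `chunk_size`.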
        let elem_index = cmp::min(index * self.chunk_size, self.slice.len());
        let (left, right) = self.slice.split_at(elem_index);
        (
            ChunksProducer {
                chunk_size: self.chunk_size,
                slice: left,
            },
            ChunksProducer {
                chunk_size: self.chunk_size,
                slice: right,
            },
        )
    }
}

/// Parallel iterator over immutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct ChunksExact<'data, T: Sync> {
    chunk_size: usize,
    slice: &'data [T],
    rem: &'data [T],
}

impl<'data, T: Sync> ChunksExact<'data, T> {
    /// Return the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `chunk_size-1`
    /// elements.
    pub fn remainder(&self) -> &'data [T] {
        self.rem
    }
}

impl<'data, T: Sync> Clone for ChunksExact<'data, T> {
    fn clone(&self) -> Self {
        ChunksExact { ..*self }
    }
}

impl<'data, T: Sync + 'data> ParallelIterator for ChunksExact<'data, T> {
    type Item = &'data [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Sync + 'data> IndexedParallelIterator for ChunksExact<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        self.slice.len() / self.chunk_size
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(ChunksExactProducer {
            chunk_size: self.chunk_size,
            slice: self.slice,
        })
    }
}

struct ChunksExactProducer<'data, T: Sync> {
    chunk_size: usize,
    slice: &'data [T],
}

impl<'data, T: 'data + Sync> Producer for ChunksExactProducer<'data, T> {
    type Item = &'data [T];
    type IntoIter = ::std::slice::ChunksExact<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.chunks_exact(self.chunk_size)
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        let elem_index = index * self.chunk_size;
        let (left, right) = self.slice.split_at(elem_index);
        (
            ChunksExactProducer {
                chunk_size: self.chunk_size,
                slice: left,
            },
            ChunksExactProducer {
                chunk_size: self.chunk_size,
                slice: right,
            },
        )
    }
}

/// Parallel iterator over immutable overlapping windows of a slice
#[derive(Debug)]
pub struct Windows<'data, T: Sync> {
    window_size: usize,
    slice: &'data [T],
}

impl<'data, T: Sync> Clone for Windows<'data, T> {
    fn clone(&self) -> Self {
        Windows { ..*self }
    }
}

impl<'data, T: Sync + 'data> ParallelIterator for Windows<'data, T> {
    type Item = &'data [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Sync + 'data> IndexedParallelIterator for Windows<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        assert!(self.window_size >= 1);
        self.slice.len().saturating_sub(self.window_size - 1)
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(WindowsProducer {
            window_size: self.window_size,
            slice: self.slice,
        })
    }
}

struct WindowsProducer<'data, T: Sync> {
    window_size: usize,
    slice: &'data [T],
}

impl<'data, T: 'data + Sync> Producer for WindowsProducer<'data, T> {
    type Item = &'data [T];
    type IntoIter = ::std::slice::Windows<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.windows(self.window_size)
    }

    fn split_at(self, index: usize) -> (Self, Self) {
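        // The halves must share `window_size - 1` elements so that every
        // window crossing the split point is still produced exactly once:
        // `left` yields windows 0..index and `right` yields the rest.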
        let left_index = cmp::min(self.slice.len(), index + (self.window_size - 1));
        let left = &self.slice[..left_index];
        let right = &self.slice[index..];
        (
            WindowsProducer {
                window_size: self.window_size,
                slice: left,
            },
            WindowsProducer {
                window_size: self.window_size,
                slice: right,
            },
        )
    }
}

/// Parallel iterator over mutable items in a slice
#[derive(Debug)]
pub struct IterMut<'data, T: Send> {
    slice: &'data mut [T],
}

impl<'data, T: Send + 'data> ParallelIterator for IterMut<'data, T> {
    type Item = &'data mut T;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Send + 'data> IndexedParallelIterator for IterMut<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        self.slice.len()
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(IterMutProducer { slice: self.slice })
    }
}

struct IterMutProducer<'data, T: Send> {
    slice: &'data mut [T],
}

impl<'data, T: 'data + Send> Producer for IterMutProducer<'data, T> {
    type Item = &'data mut T;
    type IntoIter = ::std::slice::IterMut<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.iter_mut()
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        let (left, right) = self.slice.split_at_mut(index);
        (
            IterMutProducer { slice: left },
            IterMutProducer { slice: right },
        )
    }
}

/// Parallel iterator over mutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct ChunksMut<'data, T: Send> {
    chunk_size: usize,
    slice: &'data mut [T],
}

impl<'data, T: Send + 'data> ParallelIterator for ChunksMut<'data, T> {
    type Item = &'data mut [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksMut<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        div_round_up(self.slice.len(), self.chunk_size)
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(ChunksMutProducer {
            chunk_size: self.chunk_size,
            slice: self.slice,
        })
    }
}

struct ChunksMutProducer<'data, T: Send> {
    chunk_size: usize,
    slice: &'data mut [T],
}

impl<'data, T: 'data + Send> Producer for ChunksMutProducer<'data, T> {
    type Item = &'data mut [T];
    type IntoIter = ::std::slice::ChunksMut<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.chunks_mut(self.chunk_size)
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        let elem_index = cmp::min(index * self.chunk_size, self.slice.len());
        let (left, right) = self.slice.split_at_mut(elem_index);
        (
            ChunksMutProducer {
                chunk_size: self.chunk_size,
                slice: left,
            },
            ChunksMutProducer {
                chunk_size: self.chunk_size,
                slice: right,
            },
        )
    }
}

/// Parallel iterator over mutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct ChunksExactMut<'data, T: Send> {
    chunk_size: usize,
    slice: &'data mut [T],
    rem: &'data mut [T],
}

impl<'data, T: Send> ChunksExactMut<'data, T> {
    /// Return the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `chunk_size-1`
    /// elements.
    ///
    /// Note that this has to consume `self` to return the original lifetime of
    /// the data, which prevents this from actually being used as a parallel
    /// iterator since that also consumes. This method is provided for parity
    /// with `std::slice::ChunksExactMut`, but consider calling `remainder()` or
    /// `take_remainder()` as alternatives.
    pub fn into_remainder(self) -> &'data mut [T] {
        self.rem
    }

    /// Return the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `chunk_size-1`
    /// elements.
    ///
    /// Consider `take_remainder()` if you need access to the data with its
    /// original lifetime, rather than borrowing through `&mut self` here.
    pub fn remainder(&mut self) -> &mut [T] {
        self.rem
    }

    /// Return the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `chunk_size-1`
    /// elements. Subsequent calls will return an empty slice.
    pub fn take_remainder(&mut self) -> &'data mut [T] {
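        // Swap an empty slice into `self.rem` so that later calls (and
        // `into_remainder`) see nothing left to take.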
        std::mem::replace(&mut self.rem, &mut [])
    }
}

impl<'data, T: Send + 'data> ParallelIterator for ChunksExactMut<'data, T> {
    type Item = &'data mut [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksExactMut<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        self.slice.len() / self.chunk_size
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(ChunksExactMutProducer {
            chunk_size: self.chunk_size,
            slice: self.slice,
        })
    }
}

struct ChunksExactMutProducer<'data, T: Send> {
    chunk_size: usize,
    slice: &'data mut [T],
}

impl<'data, T: 'data + Send> Producer for ChunksExactMutProducer<'data, T> {
    type Item = &'data mut [T];
    type IntoIter = ::std::slice::ChunksExactMut<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.chunks_exact_mut(self.chunk_size)
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        let elem_index = index * self.chunk_size;
        let (left, right) = self.slice.split_at_mut(elem_index);
        (
            ChunksExactMutProducer {
                chunk_size: self.chunk_size,
                slice: left,
            },
            ChunksExactMutProducer {
                chunk_size: self.chunk_size,
                slice: right,
            },
        )
    }
}

/// Parallel iterator over slices separated by a predicate
pub struct Split<'data, T, P> {
    slice: &'data [T],
    separator: P,
}

impl<'data, T, P: Clone> Clone for Split<'data, T, P> {
    fn clone(&self) -> Self {
        Split {
            separator: self.separator.clone(),
            ..*self
        }
    }
}

impl<'data, T: Debug, P> Debug for Split<'data, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Split").field("slice", &self.slice).finish()
    }
}

impl<'data, T, P> ParallelIterator for Split<'data, T, P>
where
    P: Fn(&T) -> bool + Sync + Send,
    T: Sync,
{
    type Item = &'data [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        let producer = SplitProducer::new(self.slice, &self.separator);
        bridge_unindexed(producer, consumer)
    }
}

/// Implement support for `SplitProducer`.
impl<'data, T, P> Fissile<P> for &'data [T]
where
    P: Fn(&T) -> bool,
{
    fn length(&self) -> usize {
        self.len()
    }

    fn midpoint(&self, end: usize) -> usize {
        end / 2
    }

    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
        self[start..end].iter().position(separator)
    }

    fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
        self[..end].iter().rposition(separator)
    }

    fn split_once(self, index: usize) -> (Self, Self) {
        let (left, right) = self.split_at(index);
        (left, &right[1..]) // skip the separator
    }

    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
    where
        F: Folder<Self>,
        Self: Send,
    {
        let mut split = self.split(separator);
        if skip_last {
            split.next_back();
        }
        folder.consume_iter(split)
    }
}

/// Parallel iterator over mutable slices separated by a predicate
pub struct SplitMut<'data, T, P> {
    slice: &'data mut [T],
    separator: P,
}

impl<'data, T: Debug, P> Debug for SplitMut<'data, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitMut")
            .field("slice", &self.slice)
            .finish()
    }
}

impl<'data, T, P> ParallelIterator for SplitMut<'data, T, P>
where
    P: Fn(&T) -> bool + Sync + Send,
    T: Send,
{
    type Item = &'data mut [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        let producer = SplitProducer::new(self.slice, &self.separator);
        bridge_unindexed(producer, consumer)
    }
}

/// Implement support for `SplitProducer`.
impl<'data, T, P> Fissile<P> for &'data mut [T]
where
    P: Fn(&T) -> bool,
{
    fn length(&self) -> usize {
        self.len()
    }

    fn midpoint(&self, end: usize) -> usize {
        end / 2
    }

    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
        self[start..end].iter().position(separator)
    }

    fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
        self[..end].iter().rposition(separator)
    }

    fn split_once(self, index: usize) -> (Self, Self) {
        let (left, right) = self.split_at_mut(index);
        (left, &mut right[1..]) // skip the separator
    }

    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
    where
        F: Folder<Self>,
        Self: Send,
    {
        let mut split = self.split_mut(separator);
        if skip_last {
            split.next_back();
        }
        folder.consume_iter(split)
    }
}