//! Parallel iterator types for [slices][std::slice]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [std::slice]: https://doc.rust-lang.org/stable/std/slice/
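//!
//! For instance, a helper that accepts this module's immutable slice iterator
//! by name might look like the following sketch (illustrative only; `sum_par`
//! is a made-up function, not part of this API):
//!
//! ```
//! use rayon::prelude::*;
//!
//! fn sum_par(iter: rayon::slice::Iter<'_, i32>) -> i32 {
//!     iter.sum()
//! }
//!
//! assert_eq!(sum_par([1, 2, 3].par_iter()), 6);
//! ```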

mod chunks;
mod mergesort;
mod quicksort;
mod rchunks;

use self::mergesort::par_mergesort;
use self::quicksort::par_quicksort;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::split_producer::*;
use std::cmp;
use std::cmp::Ordering;
use std::fmt::{self, Debug};
use std::mem;

pub use self::chunks::{Chunks, ChunksExact, ChunksExactMut, ChunksMut};
pub use self::rchunks::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};

/// Parallel extensions for slices.
pub trait ParallelSlice<T: Sync> {
    /// Returns a plain slice, which is used to implement the rest of the
    /// parallel methods.
    fn as_parallel_slice(&self) -> &[T];

    /// Returns a parallel iterator over subslices separated by elements that
    /// match the separator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let smallest = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9]
    ///     .par_split(|i| *i == 0)
    ///     .map(|numbers| numbers.iter().min().unwrap())
    ///     .min();
    /// assert_eq!(Some(&1), smallest);
    /// ```
    fn par_split<P>(&self, separator: P) -> Split<'_, T, P>
    where
        P: Fn(&T) -> bool + Sync + Send,
    {
        Split {
            slice: self.as_parallel_slice(),
            separator,
        }
    }

    /// Returns a parallel iterator over all contiguous windows of length
    /// `window_size`. The windows overlap.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let windows: Vec<_> = [1, 2, 3].par_windows(2).collect();
    /// assert_eq!(vec![[1, 2], [2, 3]], windows);
    /// ```
    fn par_windows(&self, window_size: usize) -> Windows<'_, T> {
        Windows {
            window_size,
            slice: self.as_parallel_slice(),
        }
    }

    /// Returns a parallel iterator over at most `chunk_size` elements of
    /// `self` at a time. The chunks do not overlap.
    ///
    /// If the number of elements in the iterator is not divisible by
    /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
    /// other chunks will have that exact length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks(2).collect();
    /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
    /// ```
    fn par_chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        Chunks::new(chunk_size, self.as_parallel_slice())
    }

    /// Returns a parallel iterator over `chunk_size` elements of
    /// `self` at a time. The chunks do not overlap.
    ///
    /// If `chunk_size` does not divide the length of the slice, then the
    /// last up to `chunk_size-1` elements will be omitted and can be
    /// retrieved from the remainder function of the iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks_exact(2).collect();
    /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4]]);
    /// ```
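    ///
    /// The omitted tail can be read back from the iterator; a minimal sketch,
    /// assuming the `remainder` accessor mirrors the one on
    /// `std::slice::ChunksExact`:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let iter = [1, 2, 3, 4, 5].par_chunks_exact(2);
    /// assert_eq!(iter.remainder(), &[5]);
    /// ```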
    fn par_chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        ChunksExact::new(chunk_size, self.as_parallel_slice())
    }

    /// Returns a parallel iterator over at most `chunk_size` elements of `self` at a time,
    /// starting at the end. The chunks do not overlap.
    ///
    /// If the number of elements in the iterator is not divisible by
    /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
    /// other chunks will have that exact length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_rchunks(2).collect();
    /// assert_eq!(chunks, vec![&[4, 5][..], &[2, 3], &[1]]);
    /// ```
    fn par_rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        RChunks::new(chunk_size, self.as_parallel_slice())
    }

    /// Returns a parallel iterator over `chunk_size` elements of `self` at a time,
    /// starting at the end. The chunks do not overlap.
    ///
    /// If `chunk_size` does not divide the length of the slice, then the
    /// last up to `chunk_size-1` elements will be omitted and can be
    /// retrieved from the remainder function of the iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_rchunks_exact(2).collect();
    /// assert_eq!(chunks, vec![&[4, 5][..], &[2, 3]]);
    /// ```
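    ///
    /// Here the omitted elements sit at the front of the slice; a minimal
    /// sketch, again assuming a `remainder` accessor like the one on
    /// `std::slice::RChunksExact`:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let iter = [1, 2, 3, 4, 5].par_rchunks_exact(2);
    /// assert_eq!(iter.remainder(), &[1]);
    /// ```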
    fn par_rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        RChunksExact::new(chunk_size, self.as_parallel_slice())
    }
}

impl<T: Sync> ParallelSlice<T> for [T] {
    fn as_parallel_slice(&self) -> &[T] {
        self
    }
}

/// Parallel extensions for mutable slices.
pub trait ParallelSliceMut<T: Send> {
    /// Returns a plain mutable slice, which is used to implement the rest of
    /// the parallel methods.
    fn as_parallel_slice_mut(&mut self) -> &mut [T];

    /// Returns a parallel iterator over mutable subslices separated by
    /// elements that match the separator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9];
    /// array.par_split_mut(|i| *i == 0)
    ///      .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [3, 2, 1, 0, 8, 4, 2, 0, 9, 6, 3]);
    /// ```
    fn par_split_mut<P>(&mut self, separator: P) -> SplitMut<'_, T, P>
    where
        P: Fn(&T) -> bool + Sync + Send,
    {
        SplitMut {
            slice: self.as_parallel_slice_mut(),
            separator,
        }
    }

    /// Returns a parallel iterator over at most `chunk_size` elements of
    /// `self` at a time. The chunks are mutable and do not overlap.
    ///
    /// If the number of elements in the iterator is not divisible by
    /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
    /// other chunks will have that exact length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 4, 5];
    /// array.par_chunks_mut(2)
    ///      .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [2, 1, 4, 3, 5]);
    /// ```
    fn par_chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        ChunksMut::new(chunk_size, self.as_parallel_slice_mut())
    }

    /// Returns a parallel iterator over `chunk_size` elements of
    /// `self` at a time. The chunks are mutable and do not overlap.
    ///
    /// If `chunk_size` does not divide the length of the slice, then the
    /// last up to `chunk_size-1` elements will be omitted and can be
    /// retrieved from the remainder function of the iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 4, 5];
    /// array.par_chunks_exact_mut(3)
    ///      .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [3, 2, 1, 4, 5]);
    /// ```
    fn par_chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        ChunksExactMut::new(chunk_size, self.as_parallel_slice_mut())
    }

    /// Returns a parallel iterator over at most `chunk_size` elements of `self` at a time,
    /// starting at the end. The chunks are mutable and do not overlap.
    ///
    /// If the number of elements in the iterator is not divisible by
    /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
    /// other chunks will have that exact length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 4, 5];
    /// array.par_rchunks_mut(2)
    ///      .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [1, 3, 2, 5, 4]);
    /// ```
    fn par_rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        RChunksMut::new(chunk_size, self.as_parallel_slice_mut())
    }

    /// Returns a parallel iterator over `chunk_size` elements of `self` at a time,
    /// starting at the end. The chunks are mutable and do not overlap.
    ///
    /// If `chunk_size` does not divide the length of the slice, then the
    /// last up to `chunk_size-1` elements will be omitted and can be
    /// retrieved from the remainder function of the iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 4, 5];
    /// array.par_rchunks_exact_mut(3)
    ///      .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [1, 2, 5, 4, 3]);
    /// ```
    fn par_rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        RChunksExactMut::new(chunk_size, self.as_parallel_slice_mut())
    }

    /// Sorts the slice in parallel.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable`](#method.par_sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and a parallel merge operation.
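    ///
    /// The divide-and-merge structure can be pictured with a tiny recursive
    /// sketch built only from `rayon::join` (an illustration, not the actual
    /// algorithm; the real `par_mergesort` is adaptive and far more
    /// sophisticated):
    ///
    /// ```
    /// fn sketch_merge_sort<T: Ord + Copy + Send>(v: &mut [T]) {
    ///     if v.len() <= 1 {
    ///         return;
    ///     }
    ///     let mid = v.len() / 2;
    ///     {
    ///         // Sort the two halves in parallel.
    ///         let (lo, hi) = v.split_at_mut(mid);
    ///         rayon::join(|| sketch_merge_sort(lo), || sketch_merge_sort(hi));
    ///     }
    ///     // Sequentially merge the sorted halves through a temporary buffer;
    ///     // `<=` keeps equal elements in their original order (stable).
    ///     let mut merged = Vec::with_capacity(v.len());
    ///     let (lo, hi) = v.split_at(mid);
    ///     let (mut i, mut j) = (0, 0);
    ///     while i < lo.len() && j < hi.len() {
    ///         if lo[i] <= hi[j] {
    ///             merged.push(lo[i]);
    ///             i += 1;
    ///         } else {
    ///             merged.push(hi[j]);
    ///             j += 1;
    ///         }
    ///     }
    ///     merged.extend_from_slice(&lo[i..]);
    ///     merged.extend_from_slice(&hi[j..]);
    ///     v.copy_from_slice(&merged);
    /// }
    ///
    /// let mut v = [-5, 4, 1, -3, 2];
    /// sketch_merge_sort(&mut v);
    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
    /// ```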
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.par_sort();
    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
    /// ```
    fn par_sort(&mut self)
    where
        T: Ord,
    {
        par_mergesort(self.as_parallel_slice_mut(), T::lt);
    }

    /// Sorts the slice in parallel with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the slice. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.par_sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable_by`](#method.par_sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and a parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.par_sort_by(|a, b| a.cmp(b));
    /// assert_eq!(v, [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.par_sort_by(|a, b| b.cmp(a));
    /// assert_eq!(v, [5, 4, 3, 2, 1]);
    /// ```
    fn par_sort_by<F>(&mut self, compare: F)
    where
        F: Fn(&T, &T) -> Ordering + Sync,
    {
        par_mergesort(self.as_parallel_slice_mut(), |a, b| {
            compare(a, b) == Ordering::Less
        });
    }

    /// Sorts the slice in parallel with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For expensive key functions (e.g. functions that are not simple property accesses or
    /// basic operations), [`par_sort_by_cached_key`](#method.par_sort_by_cached_key) is likely to
    /// be significantly faster, as it does not recompute element keys.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable_by_key`](#method.par_sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
    /// parallel subdivision of chunks and a parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.par_sort_by_key(|k| k.abs());
    /// assert_eq!(v, [1, 2, -3, 4, -5]);
    /// ```
    fn par_sort_by_key<K, F>(&mut self, f: F)
    where
        K: Ord,
        F: Fn(&T) -> K + Sync,
    {
        par_mergesort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice in parallel with a key extraction function.
    ///
    /// During sorting, the key function is called at most once per element, by using
    /// temporary storage to remember the results of key evaluation.
    /// The key function is called in parallel, so the order of calls is completely unspecified.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`par_sort_by_key`](#method.par_sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the slice.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel. Finally, after sorting the cached keys, the item positions are updated sequentially.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5i32, 4, 32, -3, 2];
    ///
    /// v.par_sort_by_cached_key(|k| k.to_string());
    /// assert!(v == [-3, -5, 2, 32, 4]);
    /// ```
    fn par_sort_by_cached_key<K, F>(&mut self, f: F)
    where
        F: Fn(&T) -> K + Sync,
        K: Ord + Send,
    {
        let slice = self.as_parallel_slice_mut();
        let len = slice.len();
        if len < 2 {
            return;
        }

        // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
        macro_rules! sort_by_key {
            ($t:ty) => {{
                let mut indices: Vec<_> = slice
                    .par_iter_mut()
                    .enumerate()
                    .map(|(i, x)| (f(&*x), i as $t))
                    .collect();
                // The elements of `indices` are unique, as they are indexed, so any sort will be
                // stable with respect to the original slice. We use `sort_unstable` here because
                // it requires less memory allocation.
                indices.par_sort_unstable();
                for i in 0..len {
                    let mut index = indices[i].1;
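                    // After sorting, `indices[i].1` names the original
                    // position of the element that belongs at `i`. Earlier
                    // iterations may have already swapped that element away;
                    // each processed slot `j < i` records in `indices[j].1`
                    // where its previous occupant went, so follow that chain
                    // to the element's current location.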
                    while (index as usize) < i {
                        index = indices[index as usize].1;
                    }
                    indices[i].1 = index;
                    slice.swap(i, index as usize);
                }
            }};
        }

        let sz_u8 = mem::size_of::<(K, u8)>();
        let sz_u16 = mem::size_of::<(K, u16)>();
        let sz_u32 = mem::size_of::<(K, u32)>();
        let sz_usize = mem::size_of::<(K, usize)>();
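
        // A smaller index type only helps when it shrinks the whole `(K, index)`
        // pair; e.g. with `K = u64`, alignment padding makes `(K, u8)` and
        // `(K, u16)` both 16 bytes, so the `sz_u8 < sz_u16` test fails and the
        // dispatch falls through to a wider index type.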
        if sz_u8 < sz_u16 && len <= (std::u8::MAX as usize) {
            return sort_by_key!(u8);
        }
        if sz_u16 < sz_u32 && len <= (std::u16::MAX as usize) {
            return sort_by_key!(u16);
        }
        if sz_u32 < sz_usize && len <= (std::u32::MAX as usize) {
            return sort_by_key!(u32);
        }
        sort_by_key!(usize)
    }

    /// Sorts the slice in parallel, but might not preserve the order of equal elements.
    ///
    /// This sort is unstable (i.e., may reorder equal elements), in-place
    /// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
    /// slice consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
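    ///
    /// That two-stage shape can be sketched with plain `rayon::join` (an
    /// illustration only; the real `par_quicksort` is far more elaborate
    /// about pivot selection and pattern detection):
    ///
    /// ```
    /// fn sketch_quicksort<T: Ord + Send>(v: &mut [T]) {
    ///     if v.len() <= 1 {
    ///         return;
    ///     }
    ///     // Sequential partition around the last element as the pivot.
    ///     let pivot = v.len() - 1;
    ///     let mut store = 0;
    ///     for i in 0..pivot {
    ///         if v[i] <= v[pivot] {
    ///             v.swap(i, store);
    ///             store += 1;
    ///         }
    ///     }
    ///     v.swap(store, pivot);
    ///     // The two recursive calls run in parallel.
    ///     let (lo, hi) = v.split_at_mut(store);
    ///     rayon::join(|| sketch_quicksort(lo), || sketch_quicksort(&mut hi[1..]));
    /// }
    ///
    /// let mut v = [3, 1, 4, 1, 5, 9, 2, 6];
    /// sketch_quicksort(&mut v);
    /// assert_eq!(v, [1, 1, 2, 3, 4, 5, 6, 9]);
    /// ```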
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.par_sort_unstable();
    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
    /// ```
    fn par_sort_unstable(&mut self)
    where
        T: Ord,
    {
        par_quicksort(self.as_parallel_slice_mut(), T::lt);
    }

    /// Sorts the slice in parallel with a comparator function, but might not preserve the order of
    /// equal elements.
    ///
    /// This sort is unstable (i.e., may reorder equal elements), in-place
    /// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the slice. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.par_sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
    /// slice consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.par_sort_unstable_by(|a, b| a.cmp(b));
    /// assert_eq!(v, [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.par_sort_unstable_by(|a, b| b.cmp(a));
    /// assert_eq!(v, [5, 4, 3, 2, 1]);
    /// ```
    fn par_sort_unstable_by<F>(&mut self, compare: F)
    where
        F: Fn(&T, &T) -> Ordering + Sync,
    {
        par_quicksort(self.as_parallel_slice_mut(), |a, b| {
            compare(a, b) == Ordering::Less
        });
    }

    /// Sorts the slice in parallel with a key extraction function, but might not preserve the order
    /// of equal elements.
    ///
    /// This sort is unstable (i.e., may reorder equal elements), in-place
    /// (i.e., does not allocate), and *O*(*m* \* *n* \* log(*n*)) worst-case,
    /// where the key function is *O*(*m*).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// Due to its key calling strategy, `par_sort_unstable_by_key` is likely to be slower than
    /// [`par_sort_by_cached_key`](#method.par_sort_by_cached_key) in cases where the key function
    /// is expensive.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.par_sort_unstable_by_key(|k| k.abs());
    /// assert_eq!(v, [1, 2, -3, 4, -5]);
    /// ```
    fn par_sort_unstable_by_key<K, F>(&mut self, f: F)
    where
        K: Ord,
        F: Fn(&T) -> K + Sync,
    {
        par_quicksort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
    }
}

impl<T: Send> ParallelSliceMut<T> for [T] {
    fn as_parallel_slice_mut(&mut self) -> &mut [T] {
        self
    }
}

impl<'data, T: Sync + 'data> IntoParallelIterator for &'data [T] {
    type Item = &'data T;
    type Iter = Iter<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter { slice: self }
    }
}

impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut [T] {
    type Item = &'data mut T;
    type Iter = IterMut<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        IterMut { slice: self }
    }
}

/// Parallel iterator over immutable items in a slice
#[derive(Debug)]
pub struct Iter<'data, T: Sync> {
    slice: &'data [T],
}

impl<'data, T: Sync> Clone for Iter<'data, T> {
    fn clone(&self) -> Self {
        Iter { ..*self }
    }
}

impl<'data, T: Sync + 'data> ParallelIterator for Iter<'data, T> {
    type Item = &'data T;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Sync + 'data> IndexedParallelIterator for Iter<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        self.slice.len()
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(IterProducer { slice: self.slice })
    }
}

struct IterProducer<'data, T: Sync> {
    slice: &'data [T],
}

impl<'data, T: 'data + Sync> Producer for IterProducer<'data, T> {
    type Item = &'data T;
    type IntoIter = ::std::slice::Iter<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.iter()
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        let (left, right) = self.slice.split_at(index);
        (IterProducer { slice: left }, IterProducer { slice: right })
    }
}

/// Parallel iterator over immutable overlapping windows of a slice
#[derive(Debug)]
pub struct Windows<'data, T: Sync> {
    window_size: usize,
    slice: &'data [T],
}

impl<'data, T: Sync> Clone for Windows<'data, T> {
    fn clone(&self) -> Self {
        Windows { ..*self }
    }
}

impl<'data, T: Sync + 'data> ParallelIterator for Windows<'data, T> {
    type Item = &'data [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Sync + 'data> IndexedParallelIterator for Windows<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        assert!(self.window_size >= 1);
        self.slice.len().saturating_sub(self.window_size - 1)
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(WindowsProducer {
            window_size: self.window_size,
            slice: self.slice,
        })
    }
}

struct WindowsProducer<'data, T: Sync> {
    window_size: usize,
    slice: &'data [T],
}

impl<'data, T: 'data + Sync> Producer for WindowsProducer<'data, T> {
    type Item = &'data [T];
    type IntoIter = ::std::slice::Windows<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.windows(self.window_size)
    }

    fn split_at(self, index: usize) -> (Self, Self) {
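        // Windows overlap, so the producer for the left range `0..index`
        // still needs elements past `index`: its last window starts at
        // `index - 1` and extends `window_size - 1` elements further.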
        let left_index = cmp::min(self.slice.len(), index + (self.window_size - 1));
        let left = &self.slice[..left_index];
        let right = &self.slice[index..];
        (
            WindowsProducer {
                window_size: self.window_size,
                slice: left,
            },
            WindowsProducer {
                window_size: self.window_size,
                slice: right,
            },
        )
    }
}

/// Parallel iterator over mutable items in a slice
#[derive(Debug)]
pub struct IterMut<'data, T: Send> {
    slice: &'data mut [T],
}

impl<'data, T: Send + 'data> ParallelIterator for IterMut<'data, T> {
    type Item = &'data mut T;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Send + 'data> IndexedParallelIterator for IterMut<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        self.slice.len()
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(IterMutProducer { slice: self.slice })
    }
}

struct IterMutProducer<'data, T: Send> {
    slice: &'data mut [T],
}

impl<'data, T: 'data + Send> Producer for IterMutProducer<'data, T> {
    type Item = &'data mut T;
    type IntoIter = ::std::slice::IterMut<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.iter_mut()
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        let (left, right) = self.slice.split_at_mut(index);
        (
            IterMutProducer { slice: left },
            IterMutProducer { slice: right },
        )
    }
}

/// Parallel iterator over slices separated by a predicate
pub struct Split<'data, T, P> {
    slice: &'data [T],
    separator: P,
}

impl<'data, T, P: Clone> Clone for Split<'data, T, P> {
    fn clone(&self) -> Self {
        Split {
            separator: self.separator.clone(),
            ..*self
        }
    }
}

impl<'data, T: Debug, P> Debug for Split<'data, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Split").field("slice", &self.slice).finish()
    }
}

impl<'data, T, P> ParallelIterator for Split<'data, T, P>
where
    P: Fn(&T) -> bool + Sync + Send,
    T: Sync,
{
    type Item = &'data [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        let producer = SplitProducer::new(self.slice, &self.separator);
        bridge_unindexed(producer, consumer)
    }
}

/// Implement support for `SplitProducer`.
impl<'data, T, P> Fissile<P> for &'data [T]
where
    P: Fn(&T) -> bool,
{
    fn length(&self) -> usize {
        self.len()
    }

    fn midpoint(&self, end: usize) -> usize {
        end / 2
    }

    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
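        // Search only within `start..end`; the returned offset is relative
        // to `start`, not to the beginning of the slice.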
        self[start..end].iter().position(separator)
    }

    fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
        self[..end].iter().rposition(separator)
    }

    fn split_once(self, index: usize) -> (Self, Self) {
        let (left, right) = self.split_at(index);
        (left, &right[1..]) // skip the separator
    }

    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
    where
        F: Folder<Self>,
        Self: Send,
    {
        let mut split = self.split(separator);
        if skip_last {
            split.next_back();
        }
        folder.consume_iter(split)
    }
}

/// Parallel iterator over mutable slices separated by a predicate
pub struct SplitMut<'data, T, P> {
    slice: &'data mut [T],
    separator: P,
}

impl<'data, T: Debug, P> Debug for SplitMut<'data, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitMut")
            .field("slice", &self.slice)
            .finish()
    }
}

impl<'data, T, P> ParallelIterator for SplitMut<'data, T, P>
where
    P: Fn(&T) -> bool + Sync + Send,
    T: Send,
{
    type Item = &'data mut [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        let producer = SplitProducer::new(self.slice, &self.separator);
        bridge_unindexed(producer, consumer)
    }
}

/// Implement support for `SplitProducer`.
impl<'data, T, P> Fissile<P> for &'data mut [T]
where
    P: Fn(&T) -> bool,
{
    fn length(&self) -> usize {
        self.len()
    }

    fn midpoint(&self, end: usize) -> usize {
        end / 2
    }

    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
        self[start..end].iter().position(separator)
    }

    fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
        self[..end].iter().rposition(separator)
    }

    fn split_once(self, index: usize) -> (Self, Self) {
        let (left, right) = self.split_at_mut(index);
        (left, &mut right[1..]) // skip the separator
    }

    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
    where
        F: Folder<Self>,
        Self: Send,
    {
        let mut split = self.split_mut(separator);
        if skip_last {
            split.next_back();
        }
        folder.consume_iter(split)
    }
}