1 //! Parallel iterator types for [slices][std::slice]
3 //! You will rarely need to interact with this module directly unless you need
4 //! to name one of the iterator types.
6 //! [std::slice]: https://doc.rust-lang.org/stable/std/slice/
use self::mergesort::par_mergesort;
use self::quicksort::par_quicksort;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::split_producer::*;
use std::cmp;
use std::cmp::Ordering;
use std::fmt::{self, Debug};

use super::math::div_round_up;
24 /// Parallel extensions for slices.
25 pub trait ParallelSlice
<T
: Sync
> {
26 /// Returns a plain slice, which is used to implement the rest of the
28 fn as_parallel_slice(&self) -> &[T
];
30 /// Returns a parallel iterator over subslices separated by elements that
31 /// match the separator.
36 /// use rayon::prelude::*;
37 /// let smallest = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9]
38 /// .par_split(|i| *i == 0)
39 /// .map(|numbers| numbers.iter().min().unwrap())
41 /// assert_eq!(Some(&1), smallest);
43 fn par_split
<P
>(&self, separator
: P
) -> Split
<'_
, T
, P
>
45 P
: Fn(&T
) -> bool
+ Sync
+ Send
,
48 slice
: self.as_parallel_slice(),
53 /// Returns a parallel iterator over all contiguous windows of length
54 /// `window_size`. The windows overlap.
59 /// use rayon::prelude::*;
60 /// let windows: Vec<_> = [1, 2, 3].par_windows(2).collect();
61 /// assert_eq!(vec![[1, 2], [2, 3]], windows);
63 fn par_windows(&self, window_size
: usize) -> Windows
<'_
, T
> {
66 slice
: self.as_parallel_slice(),
70 /// Returns a parallel iterator over at most `chunk_size` elements of
71 /// `self` at a time. The chunks do not overlap.
73 /// If the number of elements in the iterator is not divisible by
74 /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
75 /// other chunks will have that exact length.
80 /// use rayon::prelude::*;
81 /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks(2).collect();
82 /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
84 fn par_chunks(&self, chunk_size
: usize) -> Chunks
<'_
, T
> {
85 assert
!(chunk_size
!= 0, "chunk_size must not be zero");
88 slice
: self.as_parallel_slice(),
93 impl<T
: Sync
> ParallelSlice
<T
> for [T
] {
95 fn as_parallel_slice(&self) -> &[T
] {
100 /// Parallel extensions for mutable slices.
101 pub trait ParallelSliceMut
<T
: Send
> {
102 /// Returns a plain mutable slice, which is used to implement the rest of
103 /// the parallel methods.
104 fn as_parallel_slice_mut(&mut self) -> &mut [T
];
106 /// Returns a parallel iterator over mutable subslices separated by
107 /// elements that match the separator.
112 /// use rayon::prelude::*;
113 /// let mut array = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9];
114 /// array.par_split_mut(|i| *i == 0)
115 /// .for_each(|slice| slice.reverse());
116 /// assert_eq!(array, [3, 2, 1, 0, 8, 4, 2, 0, 9, 6, 3]);
118 fn par_split_mut
<P
>(&mut self, separator
: P
) -> SplitMut
<'_
, T
, P
>
120 P
: Fn(&T
) -> bool
+ Sync
+ Send
,
123 slice
: self.as_parallel_slice_mut(),
128 /// Returns a parallel iterator over at most `chunk_size` elements of
129 /// `self` at a time. The chunks are mutable and do not overlap.
131 /// If the number of elements in the iterator is not divisible by
132 /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
133 /// other chunks will have that exact length.
138 /// use rayon::prelude::*;
139 /// let mut array = [1, 2, 3, 4, 5];
140 /// array.par_chunks_mut(2)
141 /// .for_each(|slice| slice.reverse());
142 /// assert_eq!(array, [2, 1, 4, 3, 5]);
144 fn par_chunks_mut(&mut self, chunk_size
: usize) -> ChunksMut
<'_
, T
> {
145 assert
!(chunk_size
!= 0, "chunk_size must not be zero");
148 slice
: self.as_parallel_slice_mut(),
152 /// Sorts the slice in parallel.
154 /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
156 /// When applicable, unstable sorting is preferred because it is generally faster than stable
157 /// sorting and it doesn't allocate auxiliary memory.
158 /// See [`par_sort_unstable`](#method.par_sort_unstable).
160 /// # Current implementation
162 /// The current algorithm is an adaptive merge sort inspired by
163 /// [timsort](https://en.wikipedia.org/wiki/Timsort).
164 /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
165 /// two or more sorted sequences concatenated one after another.
167 /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
168 /// non-allocating insertion sort is used instead.
170 /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
171 /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
172 /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
173 /// parallel subdivision of chunks and parallel merge operation.
178 /// use rayon::prelude::*;
180 /// let mut v = [-5, 4, 1, -3, 2];
183 /// assert_eq!(v, [-5, -3, 1, 2, 4]);
185 fn par_sort(&mut self)
189 par_mergesort(self.as_parallel_slice_mut(), T
::lt
);
192 /// Sorts the slice in parallel with a comparator function.
194 /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
196 /// When applicable, unstable sorting is preferred because it is generally faster than stable
197 /// sorting and it doesn't allocate auxiliary memory.
198 /// See [`par_sort_unstable_by`](#method.par_sort_unstable_by).
200 /// # Current implementation
202 /// The current algorithm is an adaptive merge sort inspired by
203 /// [timsort](https://en.wikipedia.org/wiki/Timsort).
204 /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
205 /// two or more sorted sequences concatenated one after another.
207 /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
208 /// non-allocating insertion sort is used instead.
210 /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
211 /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
212 /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
213 /// parallel subdivision of chunks and parallel merge operation.
218 /// use rayon::prelude::*;
220 /// let mut v = [5, 4, 1, 3, 2];
221 /// v.par_sort_by(|a, b| a.cmp(b));
222 /// assert_eq!(v, [1, 2, 3, 4, 5]);
224 /// // reverse sorting
225 /// v.par_sort_by(|a, b| b.cmp(a));
226 /// assert_eq!(v, [5, 4, 3, 2, 1]);
228 fn par_sort_by
<F
>(&mut self, compare
: F
)
230 F
: Fn(&T
, &T
) -> Ordering
+ Sync
,
232 par_mergesort(self.as_parallel_slice_mut(), |a
, b
| {
233 compare(a
, b
) == Ordering
::Less
237 /// Sorts the slice in parallel with a key extraction function.
239 /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
241 /// When applicable, unstable sorting is preferred because it is generally faster than stable
242 /// sorting and it doesn't allocate auxiliary memory.
243 /// See [`par_sort_unstable_by_key`](#method.par_sort_unstable_by_key).
245 /// # Current implementation
247 /// The current algorithm is an adaptive merge sort inspired by
248 /// [timsort](https://en.wikipedia.org/wiki/Timsort).
249 /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
250 /// two or more sorted sequences concatenated one after another.
252 /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
253 /// non-allocating insertion sort is used instead.
255 /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
256 /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
257 /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
258 /// parallel subdivision of chunks and parallel merge operation.
263 /// use rayon::prelude::*;
265 /// let mut v = [-5i32, 4, 1, -3, 2];
267 /// v.par_sort_by_key(|k| k.abs());
268 /// assert_eq!(v, [1, 2, -3, 4, -5]);
270 fn par_sort_by_key
<B
, F
>(&mut self, f
: F
)
273 F
: Fn(&T
) -> B
+ Sync
,
275 par_mergesort(self.as_parallel_slice_mut(), |a
, b
| f(a
).lt(&f(b
)));
278 /// Sorts the slice in parallel, but may not preserve the order of equal elements.
280 /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
281 /// and `O(n log n)` worst-case.
283 /// # Current implementation
285 /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
286 /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
287 /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
288 /// heapsort on degenerate inputs.
290 /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
291 /// slice consists of several concatenated sorted sequences.
293 /// All quicksorts work in two stages: partitioning into two halves followed by recursive
294 /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
297 /// [pdqsort]: https://github.com/orlp/pdqsort
302 /// use rayon::prelude::*;
304 /// let mut v = [-5, 4, 1, -3, 2];
306 /// v.par_sort_unstable();
307 /// assert_eq!(v, [-5, -3, 1, 2, 4]);
309 fn par_sort_unstable(&mut self)
313 par_quicksort(self.as_parallel_slice_mut(), T
::lt
);
316 /// Sorts the slice in parallel with a comparator function, but may not preserve the order of
319 /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
320 /// and `O(n log n)` worst-case.
322 /// # Current implementation
324 /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
325 /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
326 /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
327 /// heapsort on degenerate inputs.
329 /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
330 /// slice consists of several concatenated sorted sequences.
332 /// All quicksorts work in two stages: partitioning into two halves followed by recursive
333 /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
336 /// [pdqsort]: https://github.com/orlp/pdqsort
341 /// use rayon::prelude::*;
343 /// let mut v = [5, 4, 1, 3, 2];
344 /// v.par_sort_unstable_by(|a, b| a.cmp(b));
345 /// assert_eq!(v, [1, 2, 3, 4, 5]);
347 /// // reverse sorting
348 /// v.par_sort_unstable_by(|a, b| b.cmp(a));
349 /// assert_eq!(v, [5, 4, 3, 2, 1]);
351 fn par_sort_unstable_by
<F
>(&mut self, compare
: F
)
353 F
: Fn(&T
, &T
) -> Ordering
+ Sync
,
355 par_quicksort(self.as_parallel_slice_mut(), |a
, b
| {
356 compare(a
, b
) == Ordering
::Less
360 /// Sorts the slice in parallel with a key extraction function, but may not preserve the order
361 /// of equal elements.
363 /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
364 /// and `O(n log n)` worst-case.
366 /// # Current implementation
368 /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
369 /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
370 /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
371 /// heapsort on degenerate inputs.
373 /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
374 /// slice consists of several concatenated sorted sequences.
376 /// All quicksorts work in two stages: partitioning into two halves followed by recursive
377 /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
380 /// [pdqsort]: https://github.com/orlp/pdqsort
385 /// use rayon::prelude::*;
387 /// let mut v = [-5i32, 4, 1, -3, 2];
389 /// v.par_sort_unstable_by_key(|k| k.abs());
390 /// assert_eq!(v, [1, 2, -3, 4, -5]);
392 fn par_sort_unstable_by_key
<B
, F
>(&mut self, f
: F
)
395 F
: Fn(&T
) -> B
+ Sync
,
397 par_quicksort(self.as_parallel_slice_mut(), |a
, b
| f(a
).lt(&f(b
)));
401 impl<T
: Send
> ParallelSliceMut
<T
> for [T
] {
403 fn as_parallel_slice_mut(&mut self) -> &mut [T
] {
408 impl<'data
, T
: Sync
+ 'data
> IntoParallelIterator
for &'data
[T
] {
409 type Item
= &'data T
;
410 type Iter
= Iter
<'data
, T
>;
412 fn into_par_iter(self) -> Self::Iter
{
417 impl<'data
, T
: Sync
+ 'data
> IntoParallelIterator
for &'data Vec
<T
> {
418 type Item
= &'data T
;
419 type Iter
= Iter
<'data
, T
>;
421 fn into_par_iter(self) -> Self::Iter
{
426 impl<'data
, T
: Send
+ 'data
> IntoParallelIterator
for &'data
mut [T
] {
427 type Item
= &'data
mut T
;
428 type Iter
= IterMut
<'data
, T
>;
430 fn into_par_iter(self) -> Self::Iter
{
431 IterMut { slice: self }
435 impl<'data
, T
: Send
+ 'data
> IntoParallelIterator
for &'data
mut Vec
<T
> {
436 type Item
= &'data
mut T
;
437 type Iter
= IterMut
<'data
, T
>;
439 fn into_par_iter(self) -> Self::Iter
{
440 IterMut { slice: self }
444 /// Parallel iterator over immutable items in a slice
446 pub struct Iter
<'data
, T
: Sync
> {
450 impl<'data
, T
: Sync
> Clone
for Iter
<'data
, T
> {
451 fn clone(&self) -> Self {
456 impl<'data
, T
: Sync
+ 'data
> ParallelIterator
for Iter
<'data
, T
> {
457 type Item
= &'data T
;
459 fn drive_unindexed
<C
>(self, consumer
: C
) -> C
::Result
461 C
: UnindexedConsumer
<Self::Item
>,
463 bridge(self, consumer
)
466 fn opt_len(&self) -> Option
<usize> {
471 impl<'data
, T
: Sync
+ 'data
> IndexedParallelIterator
for Iter
<'data
, T
> {
472 fn drive
<C
>(self, consumer
: C
) -> C
::Result
474 C
: Consumer
<Self::Item
>,
476 bridge(self, consumer
)
479 fn len(&self) -> usize {
483 fn with_producer
<CB
>(self, callback
: CB
) -> CB
::Output
485 CB
: ProducerCallback
<Self::Item
>,
487 callback
.callback(IterProducer { slice: self.slice }
)
491 struct IterProducer
<'data
, T
: Sync
> {
495 impl<'data
, T
: 'data
+ Sync
> Producer
for IterProducer
<'data
, T
> {
496 type Item
= &'data T
;
497 type IntoIter
= ::std
::slice
::Iter
<'data
, T
>;
499 fn into_iter(self) -> Self::IntoIter
{
503 fn split_at(self, index
: usize) -> (Self, Self) {
504 let (left
, right
) = self.slice
.split_at(index
);
505 (IterProducer { slice: left }
, IterProducer { slice: right }
)
509 /// Parallel iterator over immutable non-overlapping chunks of a slice
511 pub struct Chunks
<'data
, T
: Sync
> {
516 impl<'data
, T
: Sync
> Clone
for Chunks
<'data
, T
> {
517 fn clone(&self) -> Self {
522 impl<'data
, T
: Sync
+ 'data
> ParallelIterator
for Chunks
<'data
, T
> {
523 type Item
= &'data
[T
];
525 fn drive_unindexed
<C
>(self, consumer
: C
) -> C
::Result
527 C
: UnindexedConsumer
<Self::Item
>,
529 bridge(self, consumer
)
532 fn opt_len(&self) -> Option
<usize> {
537 impl<'data
, T
: Sync
+ 'data
> IndexedParallelIterator
for Chunks
<'data
, T
> {
538 fn drive
<C
>(self, consumer
: C
) -> C
::Result
540 C
: Consumer
<Self::Item
>,
542 bridge(self, consumer
)
545 fn len(&self) -> usize {
546 div_round_up(self.slice
.len(), self.chunk_size
)
549 fn with_producer
<CB
>(self, callback
: CB
) -> CB
::Output
551 CB
: ProducerCallback
<Self::Item
>,
553 callback
.callback(ChunksProducer
{
554 chunk_size
: self.chunk_size
,
560 struct ChunksProducer
<'data
, T
: Sync
> {
565 impl<'data
, T
: 'data
+ Sync
> Producer
for ChunksProducer
<'data
, T
> {
566 type Item
= &'data
[T
];
567 type IntoIter
= ::std
::slice
::Chunks
<'data
, T
>;
569 fn into_iter(self) -> Self::IntoIter
{
570 self.slice
.chunks(self.chunk_size
)
573 fn split_at(self, index
: usize) -> (Self, Self) {
574 let elem_index
= cmp
::min(index
* self.chunk_size
, self.slice
.len());
575 let (left
, right
) = self.slice
.split_at(elem_index
);
578 chunk_size
: self.chunk_size
,
582 chunk_size
: self.chunk_size
,
589 /// Parallel iterator over immutable overlapping windows of a slice
591 pub struct Windows
<'data
, T
: Sync
> {
596 impl<'data
, T
: Sync
> Clone
for Windows
<'data
, T
> {
597 fn clone(&self) -> Self {
602 impl<'data
, T
: Sync
+ 'data
> ParallelIterator
for Windows
<'data
, T
> {
603 type Item
= &'data
[T
];
605 fn drive_unindexed
<C
>(self, consumer
: C
) -> C
::Result
607 C
: UnindexedConsumer
<Self::Item
>,
609 bridge(self, consumer
)
612 fn opt_len(&self) -> Option
<usize> {
617 impl<'data
, T
: Sync
+ 'data
> IndexedParallelIterator
for Windows
<'data
, T
> {
618 fn drive
<C
>(self, consumer
: C
) -> C
::Result
620 C
: Consumer
<Self::Item
>,
622 bridge(self, consumer
)
625 fn len(&self) -> usize {
626 assert
!(self.window_size
>= 1);
627 self.slice
.len().saturating_sub(self.window_size
- 1)
630 fn with_producer
<CB
>(self, callback
: CB
) -> CB
::Output
632 CB
: ProducerCallback
<Self::Item
>,
634 callback
.callback(WindowsProducer
{
635 window_size
: self.window_size
,
641 struct WindowsProducer
<'data
, T
: Sync
> {
646 impl<'data
, T
: 'data
+ Sync
> Producer
for WindowsProducer
<'data
, T
> {
647 type Item
= &'data
[T
];
648 type IntoIter
= ::std
::slice
::Windows
<'data
, T
>;
650 fn into_iter(self) -> Self::IntoIter
{
651 self.slice
.windows(self.window_size
)
654 fn split_at(self, index
: usize) -> (Self, Self) {
655 let left_index
= cmp
::min(self.slice
.len(), index
+ (self.window_size
- 1));
656 let left
= &self.slice
[..left_index
];
657 let right
= &self.slice
[index
..];
660 window_size
: self.window_size
,
664 window_size
: self.window_size
,
671 /// Parallel iterator over mutable items in a slice
673 pub struct IterMut
<'data
, T
: Send
> {
674 slice
: &'data
mut [T
],
677 impl<'data
, T
: Send
+ 'data
> ParallelIterator
for IterMut
<'data
, T
> {
678 type Item
= &'data
mut T
;
680 fn drive_unindexed
<C
>(self, consumer
: C
) -> C
::Result
682 C
: UnindexedConsumer
<Self::Item
>,
684 bridge(self, consumer
)
687 fn opt_len(&self) -> Option
<usize> {
692 impl<'data
, T
: Send
+ 'data
> IndexedParallelIterator
for IterMut
<'data
, T
> {
693 fn drive
<C
>(self, consumer
: C
) -> C
::Result
695 C
: Consumer
<Self::Item
>,
697 bridge(self, consumer
)
700 fn len(&self) -> usize {
704 fn with_producer
<CB
>(self, callback
: CB
) -> CB
::Output
706 CB
: ProducerCallback
<Self::Item
>,
708 callback
.callback(IterMutProducer { slice: self.slice }
)
712 struct IterMutProducer
<'data
, T
: Send
> {
713 slice
: &'data
mut [T
],
716 impl<'data
, T
: 'data
+ Send
> Producer
for IterMutProducer
<'data
, T
> {
717 type Item
= &'data
mut T
;
718 type IntoIter
= ::std
::slice
::IterMut
<'data
, T
>;
720 fn into_iter(self) -> Self::IntoIter
{
721 self.slice
.iter_mut()
724 fn split_at(self, index
: usize) -> (Self, Self) {
725 let (left
, right
) = self.slice
.split_at_mut(index
);
727 IterMutProducer { slice: left }
,
728 IterMutProducer { slice: right }
,
733 /// Parallel iterator over mutable non-overlapping chunks of a slice
735 pub struct ChunksMut
<'data
, T
: Send
> {
737 slice
: &'data
mut [T
],
740 impl<'data
, T
: Send
+ 'data
> ParallelIterator
for ChunksMut
<'data
, T
> {
741 type Item
= &'data
mut [T
];
743 fn drive_unindexed
<C
>(self, consumer
: C
) -> C
::Result
745 C
: UnindexedConsumer
<Self::Item
>,
747 bridge(self, consumer
)
750 fn opt_len(&self) -> Option
<usize> {
755 impl<'data
, T
: Send
+ 'data
> IndexedParallelIterator
for ChunksMut
<'data
, T
> {
756 fn drive
<C
>(self, consumer
: C
) -> C
::Result
758 C
: Consumer
<Self::Item
>,
760 bridge(self, consumer
)
763 fn len(&self) -> usize {
764 div_round_up(self.slice
.len(), self.chunk_size
)
767 fn with_producer
<CB
>(self, callback
: CB
) -> CB
::Output
769 CB
: ProducerCallback
<Self::Item
>,
771 callback
.callback(ChunksMutProducer
{
772 chunk_size
: self.chunk_size
,
778 struct ChunksMutProducer
<'data
, T
: Send
> {
780 slice
: &'data
mut [T
],
783 impl<'data
, T
: 'data
+ Send
> Producer
for ChunksMutProducer
<'data
, T
> {
784 type Item
= &'data
mut [T
];
785 type IntoIter
= ::std
::slice
::ChunksMut
<'data
, T
>;
787 fn into_iter(self) -> Self::IntoIter
{
788 self.slice
.chunks_mut(self.chunk_size
)
791 fn split_at(self, index
: usize) -> (Self, Self) {
792 let elem_index
= cmp
::min(index
* self.chunk_size
, self.slice
.len());
793 let (left
, right
) = self.slice
.split_at_mut(elem_index
);
796 chunk_size
: self.chunk_size
,
800 chunk_size
: self.chunk_size
,
807 /// Parallel iterator over slices separated by a predicate
808 pub struct Split
<'data
, T
, P
> {
813 impl<'data
, T
, P
: Clone
> Clone
for Split
<'data
, T
, P
> {
814 fn clone(&self) -> Self {
816 separator
: self.separator
.clone(),
822 impl<'data
, T
: Debug
, P
> Debug
for Split
<'data
, T
, P
> {
823 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
824 f
.debug_struct("Split").field("slice", &self.slice
).finish()
828 impl<'data
, T
, P
> ParallelIterator
for Split
<'data
, T
, P
>
830 P
: Fn(&T
) -> bool
+ Sync
+ Send
,
833 type Item
= &'data
[T
];
835 fn drive_unindexed
<C
>(self, consumer
: C
) -> C
::Result
837 C
: UnindexedConsumer
<Self::Item
>,
839 let producer
= SplitProducer
::new(self.slice
, &self.separator
);
840 bridge_unindexed(producer
, consumer
)
844 /// Implement support for `SplitProducer`.
845 impl<'data
, T
, P
> Fissile
<P
> for &'data
[T
]
849 fn length(&self) -> usize {
853 fn midpoint(&self, end
: usize) -> usize {
857 fn find(&self, separator
: &P
, start
: usize, end
: usize) -> Option
<usize> {
858 self[start
..end
].iter().position(separator
)
861 fn rfind(&self, separator
: &P
, end
: usize) -> Option
<usize> {
862 self[..end
].iter().rposition(separator
)
865 fn split_once(self, index
: usize) -> (Self, Self) {
866 let (left
, right
) = self.split_at(index
);
867 (left
, &right
[1..]) // skip the separator
870 fn fold_splits
<F
>(self, separator
: &P
, folder
: F
, skip_last
: bool
) -> F
875 let mut split
= self.split(separator
);
879 folder
.consume_iter(split
)
883 /// Parallel iterator over mutable slices separated by a predicate
884 pub struct SplitMut
<'data
, T
, P
> {
885 slice
: &'data
mut [T
],
889 impl<'data
, T
: Debug
, P
> Debug
for SplitMut
<'data
, T
, P
> {
890 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
891 f
.debug_struct("SplitMut")
892 .field("slice", &self.slice
)
897 impl<'data
, T
, P
> ParallelIterator
for SplitMut
<'data
, T
, P
>
899 P
: Fn(&T
) -> bool
+ Sync
+ Send
,
902 type Item
= &'data
mut [T
];
904 fn drive_unindexed
<C
>(self, consumer
: C
) -> C
::Result
906 C
: UnindexedConsumer
<Self::Item
>,
908 let producer
= SplitProducer
::new(self.slice
, &self.separator
);
909 bridge_unindexed(producer
, consumer
)
913 /// Implement support for `SplitProducer`.
914 impl<'data
, T
, P
> Fissile
<P
> for &'data
mut [T
]
918 fn length(&self) -> usize {
922 fn midpoint(&self, end
: usize) -> usize {
926 fn find(&self, separator
: &P
, start
: usize, end
: usize) -> Option
<usize> {
927 self[start
..end
].iter().position(separator
)
930 fn rfind(&self, separator
: &P
, end
: usize) -> Option
<usize> {
931 self[..end
].iter().rposition(separator
)
934 fn split_once(self, index
: usize) -> (Self, Self) {
935 let (left
, right
) = self.split_at_mut(index
);
936 (left
, &mut right
[1..]) // skip the separator
939 fn fold_splits
<F
>(self, separator
: &P
, folder
: F
, skip_last
: bool
) -> F
944 let mut split
= self.split_mut(separator
);
948 folder
.consume_iter(split
)