//! Parallel iterator types for [slices][std::slice]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [std::slice]: https://doc.rust-lang.org/stable/std/slice/

mod mergesort;
mod quicksort;

mod test;

use self::mergesort::par_mergesort;
use self::quicksort::par_quicksort;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::split_producer::*;
use std::cmp;
use std::cmp::Ordering;
use std::fmt::{self, Debug};

use super::math::div_round_up;

/// Parallel extensions for slices.
pub trait ParallelSlice<T: Sync> {
    /// Returns a plain slice, which is used to implement the rest of the
    /// parallel methods.
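    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let slice = [1, 2, 3].as_parallel_slice();
    /// assert_eq!(slice, [1, 2, 3]);
    /// ```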
    fn as_parallel_slice(&self) -> &[T];

    /// Returns a parallel iterator over subslices separated by elements that
    /// match the separator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let smallest = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9]
    ///     .par_split(|i| *i == 0)
    ///     .map(|numbers| numbers.iter().min().unwrap())
    ///     .min();
    /// assert_eq!(Some(&1), smallest);
    /// ```
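    ///
    /// As with [`split`](https://doc.rust-lang.org/std/primitive.slice.html#method.split)
    /// on ordinary slices, leading, trailing, and adjacent separators produce
    /// empty subslices:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let parts: Vec<_> = [0, 1, 0].par_split(|i| *i == 0).collect();
    /// assert_eq!(parts, vec![&[][..], &[1], &[]]);
    /// ```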
    fn par_split<P>(&self, separator: P) -> Split<'_, T, P>
    where
        P: Fn(&T) -> bool + Sync + Send,
    {
        Split {
            slice: self.as_parallel_slice(),
            separator,
        }
    }

    /// Returns a parallel iterator over all contiguous windows of length
    /// `window_size`. The windows overlap.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let windows: Vec<_> = [1, 2, 3].par_windows(2).collect();
    /// assert_eq!(vec![[1, 2], [2, 3]], windows);
    /// ```
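    ///
    /// A slice shorter than `window_size` yields no windows at all:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let windows: Vec<&[i32]> = [1, 2].par_windows(3).collect();
    /// assert!(windows.is_empty());
    /// ```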
    fn par_windows(&self, window_size: usize) -> Windows<'_, T> {
        Windows {
            window_size,
            slice: self.as_parallel_slice(),
        }
    }

    /// Returns a parallel iterator over at most `chunk_size` elements of
    /// `self` at a time. The chunks do not overlap.
    ///
    /// If the number of elements in the iterator is not divisible by
    /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
    /// other chunks will have that exact length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks(2).collect();
    /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
    /// ```
    fn par_chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        Chunks {
            chunk_size,
            slice: self.as_parallel_slice(),
        }
    }
}

impl<T: Sync> ParallelSlice<T> for [T] {
    #[inline]
    fn as_parallel_slice(&self) -> &[T] {
        self
    }
}

/// Parallel extensions for mutable slices.
pub trait ParallelSliceMut<T: Send> {
    /// Returns a plain mutable slice, which is used to implement the rest of
    /// the parallel methods.
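    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3];
    /// array.as_parallel_slice_mut().reverse();
    /// assert_eq!(array, [3, 2, 1]);
    /// ```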
    fn as_parallel_slice_mut(&mut self) -> &mut [T];

    /// Returns a parallel iterator over mutable subslices separated by
    /// elements that match the separator.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9];
    /// array.par_split_mut(|i| *i == 0)
    ///      .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [3, 2, 1, 0, 8, 4, 2, 0, 9, 6, 3]);
    /// ```
    fn par_split_mut<P>(&mut self, separator: P) -> SplitMut<'_, T, P>
    where
        P: Fn(&T) -> bool + Sync + Send,
    {
        SplitMut {
            slice: self.as_parallel_slice_mut(),
            separator,
        }
    }

    /// Returns a parallel iterator over at most `chunk_size` elements of
    /// `self` at a time. The chunks are mutable and do not overlap.
    ///
    /// If the number of elements in the iterator is not divisible by
    /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
    /// other chunks will have that exact length.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    /// let mut array = [1, 2, 3, 4, 5];
    /// array.par_chunks_mut(2)
    ///      .for_each(|slice| slice.reverse());
    /// assert_eq!(array, [2, 1, 4, 3, 5]);
    /// ```
    fn par_chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
        assert!(chunk_size != 0, "chunk_size must not be zero");
        ChunksMut {
            chunk_size,
            slice: self.as_parallel_slice_mut(),
        }
    }

    /// Sorts the slice in parallel.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable`](#method.par_sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together
    /// using parallel subdivision of chunks and a parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.par_sort();
    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
    /// ```
    fn par_sort(&mut self)
    where
        T: Ord,
    {
        par_mergesort(self.as_parallel_slice_mut(), T::lt);
    }

    /// Sorts the slice in parallel with a comparator function.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable_by`](#method.par_sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together
    /// using parallel subdivision of chunks and a parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.par_sort_by(|a, b| a.cmp(b));
    /// assert_eq!(v, [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.par_sort_by(|a, b| b.cmp(a));
    /// assert_eq!(v, [5, 4, 3, 2, 1]);
    /// ```
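    ///
    /// Types that are `PartialOrd` but not `Ord`, such as floating-point
    /// numbers, can also be sorted this way, provided the comparator is a
    /// total order over the values actually present:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.par_sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```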
    fn par_sort_by<F>(&mut self, compare: F)
    where
        F: Fn(&T, &T) -> Ordering + Sync,
    {
        par_mergesort(self.as_parallel_slice_mut(), |a, b| {
            compare(a, b) == Ordering::Less
        });
    }

    /// Sorts the slice in parallel with a key extraction function.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`par_sort_unstable_by_key`](#method.par_sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
    /// or descending runs are concatenated. Finally, the remaining chunks are merged together
    /// using parallel subdivision of chunks and a parallel merge operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.par_sort_by_key(|k| k.abs());
    /// assert_eq!(v, [1, 2, -3, 4, -5]);
    /// ```
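    ///
    /// Because the sort is stable, elements with equal keys keep their
    /// relative order:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [(1, 'b'), (0, 'a'), (1, 'a')];
    /// v.par_sort_by_key(|&(key, _)| key);
    /// assert_eq!(v, [(0, 'a'), (1, 'b'), (1, 'a')]);
    /// ```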
    fn par_sort_by_key<B, F>(&mut self, f: F)
    where
        B: Ord,
        F: Fn(&T) -> B + Sync,
    {
        par_mergesort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice in parallel, but may not preserve the order of equal elements.
    ///
    /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
    /// and `O(n log n)` worst-case.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
    /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
    /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
    /// heapsort on degenerate inputs.
    ///
    /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
    /// slice consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.par_sort_unstable();
    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
    /// ```
    fn par_sort_unstable(&mut self)
    where
        T: Ord,
    {
        par_quicksort(self.as_parallel_slice_mut(), T::lt);
    }

    /// Sorts the slice in parallel with a comparator function, but may not preserve the order of
    /// equal elements.
    ///
    /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
    /// and `O(n log n)` worst-case.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
    /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
    /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
    /// heapsort on degenerate inputs.
    ///
    /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
    /// slice consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.par_sort_unstable_by(|a, b| a.cmp(b));
    /// assert_eq!(v, [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.par_sort_unstable_by(|a, b| b.cmp(a));
    /// assert_eq!(v, [5, 4, 3, 2, 1]);
    /// ```
    fn par_sort_unstable_by<F>(&mut self, compare: F)
    where
        F: Fn(&T, &T) -> Ordering + Sync,
    {
        par_quicksort(self.as_parallel_slice_mut(), |a, b| {
            compare(a, b) == Ordering::Less
        });
    }

    /// Sorts the slice in parallel with a key extraction function, but may not preserve the order
    /// of equal elements.
    ///
    /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
    /// and `O(n log n)` worst-case.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
    /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
    /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
    /// heapsort on degenerate inputs.
    ///
    /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
    /// slice consists of several concatenated sorted sequences.
    ///
    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
    /// parallel.
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    ///
    /// # Examples
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.par_sort_unstable_by_key(|k| k.abs());
    /// assert_eq!(v, [1, 2, -3, 4, -5]);
    /// ```
    fn par_sort_unstable_by_key<B, F>(&mut self, f: F)
    where
        B: Ord,
        F: Fn(&T) -> B + Sync,
    {
        par_quicksort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
    }
}

impl<T: Send> ParallelSliceMut<T> for [T] {
    #[inline]
    fn as_parallel_slice_mut(&mut self) -> &mut [T] {
        self
    }
}

impl<'data, T: Sync + 'data> IntoParallelIterator for &'data [T] {
    type Item = &'data T;
    type Iter = Iter<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter { slice: self }
    }
}

impl<'data, T: Sync + 'data> IntoParallelIterator for &'data Vec<T> {
    type Item = &'data T;
    type Iter = Iter<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter { slice: self }
    }
}

impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut [T] {
    type Item = &'data mut T;
    type Iter = IterMut<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        IterMut { slice: self }
    }
}

impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut Vec<T> {
    type Item = &'data mut T;
    type Iter = IterMut<'data, T>;

    fn into_par_iter(self) -> Self::Iter {
        IterMut { slice: self }
    }
}
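
// The four impls above are what allow the prelude's `par_iter()` and
// `par_iter_mut()` to be called directly on slices and vectors, via the
// blanket `IntoParallelRefIterator`/`IntoParallelRefMutIterator` traits.
// A small illustrative test of that behavior (a sketch added for clarity;
// not part of the upstream source):
#[cfg(test)]
mod into_par_iter_demo {
    use crate::prelude::*;

    #[test]
    fn slices_and_vecs() {
        let v = vec![1, 2, 3, 4];
        // `&Vec<T>` and `&[T]` both yield `Iter<'_, T>` over `&T` items.
        assert_eq!(v.par_iter().sum::<i32>(), 10);
        assert_eq!(v[..].par_iter().max(), Some(&4));

        // `&mut Vec<T>` yields `IterMut<'_, T>` over `&mut T` items.
        let mut v = v;
        v.par_iter_mut().for_each(|x| *x *= 2);
        assert_eq!(v, [2, 4, 6, 8]);
    }
}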

/// Parallel iterator over immutable items in a slice
#[derive(Debug)]
pub struct Iter<'data, T: Sync> {
    slice: &'data [T],
}

impl<'data, T: Sync> Clone for Iter<'data, T> {
    fn clone(&self) -> Self {
        Iter { ..*self }
    }
}

impl<'data, T: Sync + 'data> ParallelIterator for Iter<'data, T> {
    type Item = &'data T;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Sync + 'data> IndexedParallelIterator for Iter<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        self.slice.len()
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(IterProducer { slice: self.slice })
    }
}

struct IterProducer<'data, T: Sync> {
    slice: &'data [T],
}

impl<'data, T: 'data + Sync> Producer for IterProducer<'data, T> {
    type Item = &'data T;
    type IntoIter = ::std::slice::Iter<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.iter()
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        let (left, right) = self.slice.split_at(index);
        (IterProducer { slice: left }, IterProducer { slice: right })
    }
}

/// Parallel iterator over immutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct Chunks<'data, T: Sync> {
    chunk_size: usize,
    slice: &'data [T],
}

impl<'data, T: Sync> Clone for Chunks<'data, T> {
    fn clone(&self) -> Self {
        Chunks { ..*self }
    }
}

impl<'data, T: Sync + 'data> ParallelIterator for Chunks<'data, T> {
    type Item = &'data [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Sync + 'data> IndexedParallelIterator for Chunks<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        div_round_up(self.slice.len(), self.chunk_size)
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(ChunksProducer {
            chunk_size: self.chunk_size,
            slice: self.slice,
        })
    }
}

struct ChunksProducer<'data, T: Sync> {
    chunk_size: usize,
    slice: &'data [T],
}

impl<'data, T: 'data + Sync> Producer for ChunksProducer<'data, T> {
    type Item = &'data [T];
    type IntoIter = ::std::slice::Chunks<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.chunks(self.chunk_size)
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        // `index` counts chunks, so scale it to an element index; the `min`
        // accounts for the final chunk, which may be shorter than `chunk_size`.
        let elem_index = cmp::min(index * self.chunk_size, self.slice.len());
        let (left, right) = self.slice.split_at(elem_index);
        (
            ChunksProducer {
                chunk_size: self.chunk_size,
                slice: left,
            },
            ChunksProducer {
                chunk_size: self.chunk_size,
                slice: right,
            },
        )
    }
}

/// Parallel iterator over immutable overlapping windows of a slice
#[derive(Debug)]
pub struct Windows<'data, T: Sync> {
    window_size: usize,
    slice: &'data [T],
}

impl<'data, T: Sync> Clone for Windows<'data, T> {
    fn clone(&self) -> Self {
        Windows { ..*self }
    }
}

impl<'data, T: Sync + 'data> ParallelIterator for Windows<'data, T> {
    type Item = &'data [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Sync + 'data> IndexedParallelIterator for Windows<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        // There are `len - window_size + 1` windows, or zero when the slice
        // is shorter than `window_size`.
        assert!(self.window_size >= 1);
        self.slice.len().saturating_sub(self.window_size - 1)
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(WindowsProducer {
            window_size: self.window_size,
            slice: self.slice,
        })
    }
}

struct WindowsProducer<'data, T: Sync> {
    window_size: usize,
    slice: &'data [T],
}

impl<'data, T: 'data + Sync> Producer for WindowsProducer<'data, T> {
    type Item = &'data [T];
    type IntoIter = ::std::slice::Windows<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.windows(self.window_size)
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        // The left side keeps the first `index` windows, which span
        // `index + window_size - 1` elements; the right side starts at element
        // `index`. The two sides share `window_size - 1` elements, so no
        // window is lost or duplicated.
        let left_index = cmp::min(self.slice.len(), index + (self.window_size - 1));
        let left = &self.slice[..left_index];
        let right = &self.slice[index..];
        (
            WindowsProducer {
                window_size: self.window_size,
                slice: left,
            },
            WindowsProducer {
                window_size: self.window_size,
                slice: right,
            },
        )
    }
}

/// Parallel iterator over mutable items in a slice
#[derive(Debug)]
pub struct IterMut<'data, T: Send> {
    slice: &'data mut [T],
}

impl<'data, T: Send + 'data> ParallelIterator for IterMut<'data, T> {
    type Item = &'data mut T;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Send + 'data> IndexedParallelIterator for IterMut<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        self.slice.len()
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(IterMutProducer { slice: self.slice })
    }
}

struct IterMutProducer<'data, T: Send> {
    slice: &'data mut [T],
}

impl<'data, T: 'data + Send> Producer for IterMutProducer<'data, T> {
    type Item = &'data mut T;
    type IntoIter = ::std::slice::IterMut<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.iter_mut()
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        let (left, right) = self.slice.split_at_mut(index);
        (
            IterMutProducer { slice: left },
            IterMutProducer { slice: right },
        )
    }
}

/// Parallel iterator over mutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct ChunksMut<'data, T: Send> {
    chunk_size: usize,
    slice: &'data mut [T],
}

impl<'data, T: Send + 'data> ParallelIterator for ChunksMut<'data, T> {
    type Item = &'data mut [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksMut<'data, T> {
    fn drive<C>(self, consumer: C) -> C::Result
    where
        C: Consumer<Self::Item>,
    {
        bridge(self, consumer)
    }

    fn len(&self) -> usize {
        div_round_up(self.slice.len(), self.chunk_size)
    }

    fn with_producer<CB>(self, callback: CB) -> CB::Output
    where
        CB: ProducerCallback<Self::Item>,
    {
        callback.callback(ChunksMutProducer {
            chunk_size: self.chunk_size,
            slice: self.slice,
        })
    }
}

struct ChunksMutProducer<'data, T: Send> {
    chunk_size: usize,
    slice: &'data mut [T],
}

impl<'data, T: 'data + Send> Producer for ChunksMutProducer<'data, T> {
    type Item = &'data mut [T];
    type IntoIter = ::std::slice::ChunksMut<'data, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.slice.chunks_mut(self.chunk_size)
    }

    fn split_at(self, index: usize) -> (Self, Self) {
        // As for `ChunksProducer`: `index` counts chunks, so scale it to an
        // element index, clamped for the final, possibly shorter, chunk.
        let elem_index = cmp::min(index * self.chunk_size, self.slice.len());
        let (left, right) = self.slice.split_at_mut(elem_index);
        (
            ChunksMutProducer {
                chunk_size: self.chunk_size,
                slice: left,
            },
            ChunksMutProducer {
                chunk_size: self.chunk_size,
                slice: right,
            },
        )
    }
}

/// Parallel iterator over slices separated by a predicate,
/// returned by the `par_split` method on `ParallelSlice`
pub struct Split<'data, T, P> {
    slice: &'data [T],
    separator: P,
}

impl<'data, T, P: Clone> Clone for Split<'data, T, P> {
    fn clone(&self) -> Self {
        Split {
            separator: self.separator.clone(),
            ..*self
        }
    }
}

impl<'data, T: Debug, P> Debug for Split<'data, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Split").field("slice", &self.slice).finish()
    }
}

impl<'data, T, P> ParallelIterator for Split<'data, T, P>
where
    P: Fn(&T) -> bool + Sync + Send,
    T: Sync,
{
    type Item = &'data [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        let producer = SplitProducer::new(self.slice, &self.separator);
        bridge_unindexed(producer, consumer)
    }
}

/// Implement support for `SplitProducer`.
impl<'data, T, P> Fissile<P> for &'data [T]
where
    P: Fn(&T) -> bool,
{
    fn length(&self) -> usize {
        self.len()
    }

    fn midpoint(&self, end: usize) -> usize {
        end / 2
    }

    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
        self[start..end].iter().position(separator)
    }

    fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
        self[..end].iter().rposition(separator)
    }

    fn split_once(self, index: usize) -> (Self, Self) {
        let (left, right) = self.split_at(index);
        (left, &right[1..]) // skip the separator
    }

    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
    where
        F: Folder<Self>,
        Self: Send,
    {
        let mut split = self.split(separator);
        if skip_last {
            split.next_back();
        }
        folder.consume_iter(split)
    }
}

/// Parallel iterator over mutable slices separated by a predicate,
/// returned by the `par_split_mut` method on `ParallelSliceMut`
pub struct SplitMut<'data, T, P> {
    slice: &'data mut [T],
    separator: P,
}

impl<'data, T: Debug, P> Debug for SplitMut<'data, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitMut")
            .field("slice", &self.slice)
            .finish()
    }
}

impl<'data, T, P> ParallelIterator for SplitMut<'data, T, P>
where
    P: Fn(&T) -> bool + Sync + Send,
    T: Send,
{
    type Item = &'data mut [T];

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        let producer = SplitProducer::new(self.slice, &self.separator);
        bridge_unindexed(producer, consumer)
    }
}

/// Implement support for `SplitProducer`.
impl<'data, T, P> Fissile<P> for &'data mut [T]
where
    P: Fn(&T) -> bool,
{
    fn length(&self) -> usize {
        self.len()
    }

    fn midpoint(&self, end: usize) -> usize {
        end / 2
    }

    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
        self[start..end].iter().position(separator)
    }

    fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
        self[..end].iter().rposition(separator)
    }

    fn split_once(self, index: usize) -> (Self, Self) {
        let (left, right) = self.split_at_mut(index);
        (left, &mut right[1..]) // skip the separator
    }

    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
    where
        F: Folder<Self>,
        Self: Send,
    {
        let mut split = self.split_mut(separator);
        if skip_last {
            split.next_back();
        }
        folder.consume_iter(split)
    }
}