//! This module contains a sorting algorithm based on Orson Peters' pattern-defeating quicksort,
//! published at: <https://github.com/orlp/pdqsort>
//!
//! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
//! stable sorting implementation.

// ignore-tidy-undocumented-unsafe
use crate::cmp;
use crate::mem::{self, MaybeUninit};
use crate::ptr;
/// When dropped, copies from `src` into `dest`.
struct CopyOnDrop<T> {
    src: *mut T,
    dest: *mut T,
}
impl<T> Drop for CopyOnDrop<T> {
    fn drop(&mut self) {
        // SAFETY: This is a helper struct.
        //         Please refer to its usage for correctness.
        //         Namely, one must be sure that `src` and `dest` do not overlap, as required by
        //         `ptr::copy_nonoverlapping`.
        unsafe {
            ptr::copy_nonoverlapping(self.src, self.dest, 1);
        }
    }
}
/// Shifts the first element to the right until it encounters a greater or equal element.
fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    // SAFETY: The unsafe operations below involve indexing without a bounds check (`get_unchecked`
    // and `get_unchecked_mut`) and copying memory (`ptr::copy_nonoverlapping`).
    //
    // Indexing:
    //  1. We checked that the length of the array is >= 2.
    //  2. All the indexing that we do is always within the range `0 <= index < len`.
    //
    // Memory copying:
    //  1. We are obtaining pointers to references which are guaranteed to be valid.
    //  2. They cannot overlap because we obtain pointers to different indices of the slice,
    //     namely `i` and `i - 1`.
    //  3. If the slice is properly aligned, the elements are properly aligned.
    //     It is the caller's responsibility to make sure the slice is properly aligned.
    //
    // See comments below for further detail.
    unsafe {
        // If the first two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
            // Read the first element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice.
            let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
            let mut hole = CopyOnDrop { src: &mut *tmp, dest: v.get_unchecked_mut(1) };
            ptr::copy_nonoverlapping(v.get_unchecked(1), v.get_unchecked_mut(0), 1);

            for i in 2..len {
                if !is_less(v.get_unchecked(i), &*tmp) {
                    break;
                }

                // Move `i`-th element one place to the left, thus shifting the hole to the right.
                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i - 1), 1);
                hole.dest = v.get_unchecked_mut(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}
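// A safe, illustrative equivalent of what `shift_head` does (hypothetical helper, sketch only;
// the real implementation above avoids bounds checks and moves the element through a "hole"
// instead of swapping at every step):
//
//     fn shift_head_naive<T: Ord>(v: &mut [T]) {
//         let mut i = 0;
//         while i + 1 < v.len() && v[i + 1] < v[i] {
//             v.swap(i, i + 1);
//             i += 1;
//         }
//     }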
/// Shifts the last element to the left until it encounters a smaller or equal element.
fn shift_tail<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    // SAFETY: The unsafe operations below involve indexing without a bounds check (`get_unchecked`
    // and `get_unchecked_mut`) and copying memory (`ptr::copy_nonoverlapping`).
    //
    // Indexing:
    //  1. We checked that the length of the array is >= 2.
    //  2. All the indexing that we do is always within the range `0 <= index < len - 1`.
    //
    // Memory copying:
    //  1. We are obtaining pointers to references which are guaranteed to be valid.
    //  2. They cannot overlap because we obtain pointers to different indices of the slice,
    //     namely `i` and `i + 1`.
    //  3. If the slice is properly aligned, the elements are properly aligned.
    //     It is the caller's responsibility to make sure the slice is properly aligned.
    //
    // See comments below for further detail.
    unsafe {
        // If the last two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
            // Read the last element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice.
            let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
            let mut hole = CopyOnDrop { src: &mut *tmp, dest: v.get_unchecked_mut(len - 2) };
            ptr::copy_nonoverlapping(v.get_unchecked(len - 2), v.get_unchecked_mut(len - 1), 1);

            for i in (0..len - 2).rev() {
                if !is_less(&*tmp, v.get_unchecked(i)) {
                    break;
                }

                // Move `i`-th element one place to the right, thus shifting the hole to the left.
                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i + 1), 1);
                hole.dest = v.get_unchecked_mut(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}
/// Partially sorts a slice by shifting several out-of-order elements around.
///
/// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case.
fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -> bool
where
    F: FnMut(&T, &T) -> bool,
{
    // Maximum number of adjacent out-of-order pairs that will get shifted.
    const MAX_STEPS: usize = 5;
    // If the slice is shorter than this, don't shift any elements.
    const SHORTEST_SHIFTING: usize = 50;

    let len = v.len();
    let mut i = 1;

    for _ in 0..MAX_STEPS {
        // SAFETY: We already explicitly did the bounds checking with `i < len`.
        // All our subsequent indexing is only in the range `0 <= index < len`.
        unsafe {
            // Find the next pair of adjacent out-of-order elements.
            while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
                i += 1;
            }
        }

        // Are we done?
        if i == len {
            return true;
        }

        // Don't shift elements on short arrays; that has a performance cost.
        if len < SHORTEST_SHIFTING {
            return false;
        }
        // Swap the found pair of elements. This puts them in correct order.
        v.swap(i - 1, i);

        // Shift the smaller element to the left.
        shift_tail(&mut v[..i], is_less);
        // Shift the greater element to the right.
        shift_head(&mut v[i..], is_less);
    }

    // Didn't manage to sort the slice in the limited number of steps.
    false
}
/// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case.
fn insertion_sort<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    for i in 1..v.len() {
        shift_tail(&mut v[..i + 1], is_less);
    }
}
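// Illustrative trace of `insertion_sort` (not part of the algorithm): for `v = [3, 1, 2]` it
// calls `shift_tail` on `v[..2]`, giving `[1, 3, 2]`, and then on `v[..3]`, giving `[1, 2, 3]`;
// after iteration `i`, the prefix `v[..i + 1]` is sorted.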
/// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case.
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
pub fn heapsort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    // This binary heap respects the invariant `parent >= child`.
    let mut sift_down = |v: &mut [T], mut node| {
        loop {
            // Children of `node`:
            let left = 2 * node + 1;
            let right = 2 * node + 2;
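            // (For example, node 0 has children 1 and 2, and node 3 has children 7 and 8; this is
            // the standard implicit binary heap layout over the slice indices.)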
            // Choose the greater child.
            let greater =
                if right < v.len() && is_less(&v[left], &v[right]) { right } else { left };

            // Stop if the invariant holds at `node`.
            if greater >= v.len() || !is_less(&v[node], &v[greater]) {
                break;
            }

            // Swap `node` with the greater child, move one step down, and continue sifting.
            v.swap(node, greater);
            node = greater;
        }
    };
    // Build the heap in linear time.
    for i in (0..v.len() / 2).rev() {
        sift_down(v, i);
    }
    // Pop maximal elements from the heap.
    for i in (1..v.len()).rev() {
        v.swap(0, i);
        sift_down(&mut v[..i], 0);
    }
}
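// Illustrative usage of `heapsort` (hypothetical caller shown as a sketch; the function is
// internal to this module and gated behind the unstable `sort_internals` feature):
//
//     let mut data = [5_i32, 1, 4, 2, 3];
//     heapsort(&mut data, |a, b| a < b);
//     assert_eq!(data, [1, 2, 3, 4, 5]);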
/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
/// to `pivot`.
///
/// Returns the number of elements smaller than `pivot`.
///
/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
/// This idea is presented in the [BlockQuicksort][pdf] paper.
///
/// [pdf]: https://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
where
    F: FnMut(&T, &T) -> bool,
{
    // Number of elements in a typical block.
    const BLOCK: usize = 128;

    // The partitioning algorithm repeats the following steps until completion (a simplified
    // sketch follows below):
    //
    // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
    // 2. Trace a block from the right side to identify elements smaller than the pivot.
    // 3. Exchange the identified elements between the left and right side.
    //
    // We keep the following variables for a block of elements:
    //
    // 1. `block` - Number of elements in the block.
    // 2. `start` - Start pointer into the `offsets` array.
    // 3. `end` - End pointer into the `offsets` array.
    // 4. `offsets` - Indices of out-of-order elements within the block.
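    // A minimal, safe-index sketch of one round of the scheme above (illustrative only: the
    // hypothetical `block_round` below uses plain indexing and `Vec`, and it assumes the two
    // blocks do not overlap, i.e. `v.len() >= 2 * block`, which the real code guarantees by
    // shrinking the blocks near the end; the actual implementation works with raw pointers and
    // the `MaybeUninit` offset buffers declared next):
    //
    //     fn block_round(v: &mut [i32], pivot: i32, block: usize) {
    //         let len = v.len();
    //         // 1. Offsets (from the left block) of elements that are >= pivot.
    //         let left: Vec<usize> = (0..block).filter(|&i| v[i] >= pivot).collect();
    //         // 2. Offsets (from the right block) of elements that are < pivot, rightmost first.
    //         let right: Vec<usize> = (len - block..len).rev().filter(|&i| v[i] < pivot).collect();
    //         // 3. Exchange the identified out-of-order pairs.
    //         for (&l, &r) in left.iter().zip(right.iter()) {
    //             v.swap(l, r);
    //         }
    //     }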
    // The current block on the left side (from `l` to `l.add(block_l)`).
    let mut l = v.as_mut_ptr();
    let mut block_l = BLOCK;
    let mut start_l = ptr::null_mut();
    let mut end_l = ptr::null_mut();
    let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK];
    // The current block on the right side (from `r.sub(block_r)` to `r`).
    // SAFETY: The documentation for `.add()` specifically mentions that
    // `vec.as_ptr().add(vec.len())` is always safe.
    let mut r = unsafe { l.add(v.len()) };
    let mut block_r = BLOCK;
    let mut start_r = ptr::null_mut();
    let mut end_r = ptr::null_mut();
    let mut offsets_r = [MaybeUninit::<u8>::uninit(); BLOCK];
    // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
    // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
    // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
    fn width<T>(l: *mut T, r: *mut T) -> usize {
        assert!(mem::size_of::<T>() > 0);
        (r as usize - l as usize) / mem::size_of::<T>()
    }
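    // (Worked example, purely illustrative: for `T = u32` and pointers `0x1000` and `0x1010`,
    // `width` returns `(0x1010 - 0x1000) / 4 == 4` elements.)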
    loop {
        // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
        // some patch-up work in order to partition the remaining elements in between.
        let is_done = width(l, r) <= 2 * BLOCK;
        if is_done {
            // Number of remaining elements (still not compared to the pivot).
            let mut rem = width(l, r);
            if start_l < end_l || start_r < end_r {
                rem -= BLOCK;
            }
            // Adjust block sizes so that the left and right block don't overlap, but get perfectly
            // aligned to cover the whole remaining gap.
            if start_l < end_l {
                block_r = rem;
            } else if start_r < end_r {
                block_l = rem;
            } else {
                block_l = rem / 2;
                block_r = rem - block_l;
            }

            debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
            debug_assert!(width(l, r) == block_l + block_r);
        }
        if start_l == end_l {
            // Trace `block_l` elements from the left side.
            start_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l);
            end_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l);
            let mut elem = l;

            for i in 0..block_l {
                // SAFETY: The unsafe operations below involve the usage of `offset`.
                // According to the conditions required by that function, we satisfy them because:
                // 1. `offsets_l` is stack-allocated, and thus considered a separate allocated object.
                // 2. The function `is_less` returns a `bool`.
                //    Casting a `bool` will never overflow `isize`.
                // 3. We have guaranteed that `block_l` will be `<= BLOCK`.
                //    Plus, `end_l` was initially set to the begin pointer of `offsets_l`, which was declared on the stack.
                //    Thus, we know that even in the worst case (all invocations of `is_less` return false) we will only be at most 1 byte past the end.
                // Another unsafe operation here is dereferencing `elem`.
                // However, `elem` was initially the begin pointer of the slice, which is always valid.
                unsafe {
                    // Branchless comparison.
                    *end_l = i as u8;
                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
                    elem = elem.offset(1);
                }
            }
        }
        if start_r == end_r {
            // Trace `block_r` elements from the right side.
            start_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r);
            end_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r);
            let mut elem = r;

            for i in 0..block_r {
                // SAFETY: The unsafe operations below involve the usage of `offset`.
                // According to the conditions required by that function, we satisfy them because:
                // 1. `offsets_r` is stack-allocated, and thus considered a separate allocated object.
                // 2. The function `is_less` returns a `bool`.
                //    Casting a `bool` will never overflow `isize`.
                // 3. We have guaranteed that `block_r` will be `<= BLOCK`.
                //    Plus, `end_r` was initially set to the begin pointer of `offsets_r`, which was declared on the stack.
                //    Thus, we know that even in the worst case (all invocations of `is_less` return true) we will only be at most 1 byte past the end.
                // Another unsafe operation here is dereferencing `elem`.
                // However, `elem` was initially `1 * sizeof(T)` past the end and we decrement it by `1 * sizeof(T)` before accessing it.
                // Plus, `block_r` was asserted to be less than `BLOCK`, and `elem` will therefore at most be pointing to the beginning of the slice.
                unsafe {
                    // Branchless comparison.
                    elem = elem.offset(-1);
                    *end_r = i as u8;
                    end_r = end_r.offset(is_less(&*elem, pivot) as isize);
                }
            }
        }
        // Number of out-of-order elements to swap between the left and right side.
        let count = cmp::min(width(start_l, end_l), width(start_r, end_r));
        if count > 0 {
            macro_rules! left {
                () => {
                    l.offset(*start_l as isize)
                };
            }
            macro_rules! right {
                () => {
                    r.offset(-(*start_r as isize) - 1)
                };
            }
            // Instead of swapping one pair at the time, it is more efficient to perform a cyclic
            // permutation. This is not strictly equivalent to swapping, but produces a similar
            // result using fewer memory operations.
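            // (Illustrative count, not from the original: for `count` swapped pairs this chain
            // performs one read into `tmp`, `2 * count - 1` copies between slice elements, and one
            // final write from `tmp`, i.e. about `2 * count + 1` moves, versus `3 * count` moves
            // for pairwise swaps through a temporary.)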
            let tmp = ptr::read(left!());
            ptr::copy_nonoverlapping(right!(), left!(), 1);

            for _ in 1..count {
                start_l = start_l.offset(1);
                ptr::copy_nonoverlapping(left!(), right!(), 1);
                start_r = start_r.offset(1);
                ptr::copy_nonoverlapping(right!(), left!(), 1);
            }

            ptr::copy_nonoverlapping(&tmp, right!(), 1);
            mem::forget(tmp);
            start_l = start_l.offset(1);
            start_r = start_r.offset(1);
        }
        if start_l == end_l {
            // All out-of-order elements in the left block were moved. Move to the next block.
            l = unsafe { l.offset(block_l as isize) };
        }
        if start_r == end_r {
            // All out-of-order elements in the right block were moved. Move to the previous block.
            r = unsafe { r.offset(-(block_r as isize)) };
        }

        if is_done {
            break;
        }
    }
    // All that remains now is at most one block (either the left or the right) with out-of-order
    // elements that need to be moved. Such remaining elements can be simply shifted to the end
    // within their block.

    if start_l < end_l {
        // The left block remains.
        // Move its remaining out-of-order elements to the far right.
        debug_assert_eq!(width(l, r), block_l);
        while start_l < end_l {
            end_l = end_l.offset(-1);
            ptr::swap(l.offset(*end_l as isize), r.offset(-1));
            r = r.offset(-1);
        }
        width(v.as_mut_ptr(), r)
    } else if start_r < end_r {
        // The right block remains.
        // Move its remaining out-of-order elements to the far left.
        debug_assert_eq!(width(l, r), block_r);
        while start_r < end_r {
            end_r = end_r.offset(-1);
            ptr::swap(l, r.offset(-(*end_r as isize) - 1));
            l = l.offset(1);
        }
        width(v.as_mut_ptr(), l)
    } else {
        // Nothing else to do, we're done.
        width(v.as_mut_ptr(), l)
    }
}
/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
/// equal to `v[pivot]`.
///
/// Returns a tuple of:
///
/// 1. Number of elements smaller than `v[pivot]`.
/// 2. True if `v` was already partitioned.
fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
where
    F: FnMut(&T, &T) -> bool,
{
    let (mid, was_partitioned) = {
        // Place the pivot at the beginning of slice.
        v.swap(0, pivot);
        let (pivot, v) = v.split_at_mut(1);
        let pivot = &mut pivot[0];

        // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
        // operation panics, the pivot will be automatically written back into the slice.
        let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
        let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot };
        // Find the first pair of out-of-order elements.
        let mut l = 0;
        let mut r = v.len();

        // SAFETY: The unsafe operations below involve indexing the slice without bounds checks.
        // For the first one: we already do the bounds checking here with `l < r`.
        // For the second one: we initially have `l == 0` and `r == v.len()`, and since we check
        // `l < r` before every indexing operation, `r - 1` is always a valid index.
        unsafe {
            // Find the first element greater than or equal to the pivot.
            while l < r && is_less(v.get_unchecked(l), pivot) {
                l += 1;
            }

            // Find the last element smaller than the pivot.
            while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
                r -= 1;
            }
            (l + partition_in_blocks(&mut v[l..r], pivot, is_less), l >= r)
        }

        // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated
        // variable) back into the slice where it originally was. This step is critical in ensuring
        // safety!
    };

    // Place the pivot between the two partitions.
    v.swap(0, mid);

    (mid, was_partitioned)
}
/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
///
/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
/// elements smaller than the pivot.
fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
where
    F: FnMut(&T, &T) -> bool,
{
    // Place the pivot at the beginning of slice.
    v.swap(0, pivot);
    let (pivot, v) = v.split_at_mut(1);
    let pivot = &mut pivot[0];

    // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
    // operation panics, the pivot will be automatically written back into the slice.
    // SAFETY: The pointer here is valid because it is obtained from a reference to a slice.
    let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
    let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot };
    // Now partition the slice.
    let mut l = 0;
    let mut r = v.len();
    loop {
        // SAFETY: The unsafe operations below involve indexing the slice without bounds checks.
        // For the first one: we already do the bounds checking here with `l < r`.
        // For the second one: we initially have `l == 0` and `r == v.len()`, and since we check
        // `l < r` before every indexing operation, `r - 1` is always a valid index.
        unsafe {
            // Find the first element greater than the pivot.
            while l < r && !is_less(pivot, v.get_unchecked(l)) {
                l += 1;
            }

            // Find the last element equal to the pivot.
            while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
                r -= 1;
            }

            // Are we done?
            if l >= r {
                break;
            }
            // Swap the found pair of out-of-order elements.
            r -= 1;
            ptr::swap(v.get_unchecked_mut(l), v.get_unchecked_mut(r));
            l += 1;
        }
    }

    // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
    l + 1
    // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable)
    // back into the slice where it originally was. This step is critical in ensuring safety!
}
/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
/// partitions in quicksort.
fn break_patterns<T>(v: &mut [T]) {
    let len = v.len();
    if len >= 8 {
        // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
        let mut random = len as u32;
        let mut gen_u32 = || {
            random ^= random << 13;
            random ^= random >> 17;
            random ^= random << 5;
            random
        };
        let mut gen_usize = || {
            if usize::BITS <= 32 {
                gen_u32() as usize
            } else {
                (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
            }
        };
        // Take random numbers modulo this number.
        // The number fits into `usize` because `len` is not greater than `isize::MAX`.
        let modulus = len.next_power_of_two();

        // Some pivot candidates will be near this index. Let's randomize them.
        let pos = len / 4 * 2;

        for i in 0..3 {
            // Generate a random number modulo `len`. However, in order to avoid costly operations
            // we first take it modulo a power of two, and then decrease by `len` until it fits
            // into the range `[0, len - 1]`.
            let mut other = gen_usize() & (modulus - 1);

            // `other` is guaranteed to be less than `2 * len`.
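            // (Worked example, illustrative: for `len == 1000`, `modulus == 1024`, so `other`
            // starts in `0..1024`; a single conditional subtraction of `len` then maps it into
            // `0..1000`.)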
            if other >= len {
                other -= len;
            }

            v.swap(pos - 1 + i, other);
        }
    }
}
/// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
///
/// Elements in `v` might be reordered in the process.
fn choose_pivot<T, F>(v: &mut [T], is_less: &mut F) -> (usize, bool)
where
    F: FnMut(&T, &T) -> bool,
{
    // Minimum length to choose the median-of-medians method.
    // Shorter slices use the simple median-of-three method.
    const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
    // Maximum number of swaps that can be performed in this function.
    const MAX_SWAPS: usize = 4 * 3;

    let len = v.len();

    // Three indices near which we are going to choose a pivot.
    let mut a = len / 4 * 1;
    let mut b = len / 4 * 2;
    let mut c = len / 4 * 3;
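    // (For example, for `len == 100` the initial candidates are the elements around indices
    // 25, 50, and 75.)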
    // Counts the total number of swaps we are about to perform while sorting indices.
    let mut swaps = 0;

    if len >= 8 {
        // Swaps indices so that `v[a] <= v[b]`.
        let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
            if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
                ptr::swap(a, b);
                swaps += 1;
            }
        };
        // Swaps indices so that `v[a] <= v[b] <= v[c]`.
        let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
            sort2(a, b);
            sort2(b, c);
            sort2(a, b);
        };
        if len >= SHORTEST_MEDIAN_OF_MEDIANS {
            // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
            let mut sort_adjacent = |a: &mut usize| {
                let tmp = *a;
                sort3(&mut (tmp - 1), a, &mut (tmp + 1));
            };
            // Find medians in the neighborhoods of `a`, `b`, and `c`.
            sort_adjacent(&mut a);
            sort_adjacent(&mut b);
            sort_adjacent(&mut c);
        }
        // Find the median among `a`, `b`, and `c`.
        sort3(&mut a, &mut b, &mut c);
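        // (Taken together, the three `sort_adjacent` calls and the final `sort3` above compute
        // the median of three medians of three, commonly called the "ninther" pseudo-median of
        // nine.)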
    }

    if swaps < MAX_SWAPS {
        (b, swaps == 0)
    } else {
        // The maximum number of swaps was performed. Chances are the slice is descending or mostly
        // descending, so reversing will probably help sort it faster.
        v.reverse();
        (len - b - 1, true)
    }
}
/// Sorts `v` recursively.
///
/// If the slice had a predecessor in the original array, it is specified as `pred`.
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: u32)
where
    F: FnMut(&T, &T) -> bool,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;

    // True if the last partitioning was reasonably balanced.
    let mut was_balanced = true;
    // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
    let mut was_partitioned = true;

    loop {
        let len = v.len();
        // Very short slices get sorted using insertion sort.
        if len <= MAX_INSERTION {
            insertion_sort(v, is_less);
            return;
        }
        // If too many bad pivot choices were made, simply fall back to heapsort in order to
        // guarantee `O(n * log(n))` worst-case.
        if limit == 0 {
            heapsort(v, is_less);
            return;
        }
        // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
        // some elements around. Hopefully we'll choose a better pivot this time.
        if !was_balanced {
            break_patterns(v);
            limit -= 1;
        }
        // Choose a pivot and try guessing whether the slice is already sorted.
        let (pivot, likely_sorted) = choose_pivot(v, is_less);
        // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
        // selection predicts the slice is likely already sorted...
        if was_balanced && was_partitioned && likely_sorted {
            // Try identifying several out-of-order elements and shifting them to correct
            // positions. If the slice ends up being completely sorted, we're done.
            if partial_insertion_sort(v, is_less) {
                return;
            }
        }
        // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
        // slice. Partition the slice into elements equal to and elements greater than the pivot.
        // This case is usually hit when the slice contains many duplicate elements.
        if let Some(p) = pred {
            if !is_less(p, &v[pivot]) {
                let mid = partition_equal(v, pivot, is_less);

                // Continue sorting elements greater than the pivot.
                v = &mut { v }[mid..];
                continue;
            }
        }
        // Partition the slice.
        let (mid, was_p) = partition(v, pivot, is_less);
        was_balanced = cmp::min(mid, len - mid) >= len / 8;
        was_partitioned = was_p;
        // Split the slice into `left`, `pivot`, and `right`.
        let (left, right) = { v }.split_at_mut(mid);
        let (pivot, right) = right.split_at_mut(1);
        let pivot = &pivot[0];
        // Recurse into the shorter side only in order to minimize the total number of recursive
        // calls and consume less stack space. Then just continue with the longer side (this is
        // akin to tail recursion).
        if left.len() < right.len() {
            recurse(left, is_less, pred, limit);
            v = right;
            pred = Some(pivot);
        } else {
            recurse(right, is_less, Some(pivot), limit);
            v = left;
        }
    }
}
/// Sorts `v` using pattern-defeating quicksort, which is *O*(*n* \* log(*n*)) worst-case.
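///
/// # Example
///
/// Illustrative only: this function is internal to the sort module, so the call below is a
/// hypothetical sketch of the comparator shape rather than a public API.
///
/// ```ignore
/// let mut v = [4_i32, 1, 3, 2];
/// quicksort(&mut v, |a, b| a < b);
/// assert_eq!(v, [1, 2, 3, 4]);
/// ```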
pub fn quicksort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Sorting has no meaningful behavior on zero-sized types.
    if mem::size_of::<T>() == 0 {
        return;
    }
    // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
    let limit = usize::BITS - v.len().leading_zeros();
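    // (Worked example, illustrative: on a 64-bit target with `v.len() == 1000`, `leading_zeros()`
    // is 54, so `limit == 64 - 54 == 10 == floor(log2(1000)) + 1`.)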
    recurse(v, &mut is_less, None, limit);
}
fn partition_at_index_loop<'a, T, F>(
    mut v: &'a mut [T],
    mut index: usize,
    is_less: &mut F,
    mut pred: Option<&'a T>,
) where
    F: FnMut(&T, &T) -> bool,
{
    loop {
        // For slices of up to this length it's probably faster to simply sort them.
        const MAX_INSERTION: usize = 10;
        if v.len() <= MAX_INSERTION {
            insertion_sort(v, is_less);
            return;
        }
        let (pivot, _) = choose_pivot(v, is_less);
        // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
        // slice. Partition the slice into elements equal to and elements greater than the pivot.
        // This case is usually hit when the slice contains many duplicate elements.
        if let Some(p) = pred {
            if !is_less(p, &v[pivot]) {
                let mid = partition_equal(v, pivot, is_less);
                // If we've passed our index, then we're good.
                if mid > index {
                    return;
                }

                // Otherwise, continue sorting elements greater than the pivot.
                v = &mut v[mid..];
                index = index - mid;
                pred = None;
                continue;
            }
        }
        let (mid, _) = partition(v, pivot, is_less);
        // Split the slice into `left`, `pivot`, and `right`.
        let (left, right) = { v }.split_at_mut(mid);
        let (pivot, right) = right.split_at_mut(1);
        let pivot = &pivot[0];
        if mid < index {
            v = right;
            index = index - mid - 1;
            pred = Some(pivot);
        } else if mid > index {
            v = left;
        } else {
            // If mid == index, then we're done, since `partition()` guaranteed that all elements
            // after `mid` are greater than or equal to the element at `mid`.
            return;
        }
    }
}
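// Illustrative contract of `partition_at_index` below (a sketch, not a doc test): for
// `v = [5, 1, 4, 2, 3]` and `index = 2`, the call leaves `v[2] == 3` (the value that position
// would hold if `v` were sorted), with every element of `v[..2]` less than or equal to `3` and
// every element of `v[3..]` greater than or equal to `3`.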
pub fn partition_at_index<T, F>(
    v: &mut [T],
    index: usize,
    mut is_less: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> bool,
{
    use cmp::Ordering::Greater;
    use cmp::Ordering::Less;
    if index >= v.len() {
        panic!("partition_at_index index {} greater than length of slice {}", index, v.len());
    }
::size_of
::<T
>() == 0 {
847 // Sorting has no meaningful behavior on zero-sized types. Do nothing.
848 } else if index
== v
.len() - 1 {
849 // Find max element and place it in the last position of the array. We're free to use
850 // `unwrap()` here because we know v must not be empty.
        let (max_index, _) = v
            .iter()
            .enumerate()
            .max_by(|&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater })
            .unwrap();
        v.swap(max_index, index);
    } else if index == 0 {
        // Find min element and place it in the first position of the array. We're free to use
        // `unwrap()` here because we know v must not be empty.
        let (min_index, _) = v
            .iter()
            .enumerate()
            .min_by(|&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater })
            .unwrap();
        v.swap(min_index, index);
    } else {
        partition_at_index_loop(v, index, &mut is_less, None);
    }
    let (left, right) = v.split_at_mut(index);
    let (pivot, right) = right.split_at_mut(1);
    let pivot = &mut pivot[0];
    (left, pivot, right)
}