//! A dynamically-sized view into a contiguous sequence, `[T]`.
//!
//! *[See also the slice primitive type](slice).*
//!
//! Slices are a view into a block of memory represented as a pointer and a
//! length.
//!
//! ```
//! // slicing a Vec
//! let vec = vec![1, 2, 3];
//! let int_slice = &vec[..];
//! // coercing an array to a slice
//! let str_slice: &[&str] = &["one", "two", "three"];
//! ```
//!
//! Slices are either mutable or shared. The shared slice type is `&[T]`,
//! while the mutable slice type is `&mut [T]`, where `T` represents the element
//! type. For example, you can mutate the block of memory that a mutable slice
//! points to:
//!
//! ```
//! let x = &mut [1, 2, 3];
//! x[1] = 7;
//! assert_eq!(x, &[1, 7, 3]);
//! ```
//!
//! Here are some of the things this module contains:
//!
//! ## Structs
//!
//! There are several structs that are useful for slices, such as [`Iter`], which
//! represents iteration over a slice.
//!
//! ## Trait Implementations
//!
//! There are several implementations of common traits for slices. Some examples
//! include:
//!
//! * [`Clone`]
//! * [`Eq`], [`Ord`] - for slices whose element type is [`Eq`] or [`Ord`].
//! * [`Hash`] - for slices whose element type is [`Hash`].
//!
//! ## Iteration
//!
//! Slices implement `IntoIterator`. The iterator yields references to the
//! slice elements.
//!
//! ```
//! let numbers = &[0, 1, 2];
//! for n in numbers {
//!     println!("{} is a number!", n);
//! }
//! ```
//!
//! The mutable slice yields mutable references to the elements:
//!
//! ```
//! let mut scores = [7, 8, 9];
//! for score in &mut scores[..] {
//!     *score += 1;
//! }
//! ```
//!
//! This iterator yields mutable references to the slice's elements, so while
//! the element type of the slice is `i32`, the element type of the iterator is
//! `&mut i32`.
//!
//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
//!   iterators.
//! * Further methods that return iterators are [`.split`], [`.splitn`],
//!   [`.chunks`], [`.windows`] (sketched below) and more.
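//!
//! For example, a quick sketch of `windows`, which yields overlapping subslices:
//!
//! ```
//! let seq = [1, 2, 3, 4];
//! let mut iter = seq.windows(2);
//! assert_eq!(iter.next(), Some(&[1, 2][..]));
//! assert_eq!(iter.next(), Some(&[2, 3][..]));
//! assert_eq!(iter.next(), Some(&[3, 4][..]));
//! assert_eq!(iter.next(), None);
//! ```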
//!
//! [`Hash`]: core::hash::Hash
//! [`.iter`]: slice::iter
//! [`.iter_mut`]: slice::iter_mut
//! [`.split`]: slice::split
//! [`.splitn`]: slice::splitn
//! [`.chunks`]: slice::chunks
//! [`.windows`]: slice::windows
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![cfg_attr(test, allow(unused_imports, dead_code))]

use core::borrow::{Borrow, BorrowMut};
use core::cmp::Ordering::{self, Less};
use core::mem::{self, size_of};
use core::ptr;

use crate::alloc::{Allocator, Global};
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;

#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunks;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunksMut;
#[unstable(feature = "array_windows", issue = "75027")]
pub use core::slice::ArrayWindows;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use core::slice::SliceIndex;
#[stable(feature = "from_ref", since = "1.28.0")]
pub use core::slice::{from_mut, from_ref};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Chunks, Windows};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use core::slice::{ChunksExact, ChunksExactMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{ChunksMut, Split, SplitMut};
#[unstable(feature = "slice_group_by", issue = "80552")]
pub use core::slice::{GroupBy, GroupByMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Iter, IterMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use core::slice::{RSplit, RSplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};

////////////////////////////////////////////////////////////////////////////////
// Basic slice extension methods
////////////////////////////////////////////////////////////////////////////////

// HACK(japaric) needed for the implementation of the `vec!` macro during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::into_vec;

// HACK(japaric) needed for the implementation of `Vec::clone` during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::to_vec;

// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
// functions are actually methods that are in `impl [T]` but not in
// `core::slice::SliceExt` - we need to supply these functions for the
// `test_permutations` test
mod hack {
    use core::alloc::Allocator;

    use crate::boxed::Box;
    use crate::vec::Vec;

    // We shouldn't add an inline attribute to this since it is mostly used by
    // the `vec!` macro, where inlining causes a perf regression. See #71204
    // for discussion and perf results.
    pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
        unsafe {
            let len = b.len();
            let (b, alloc) = Box::into_raw_with_allocator(b);
            Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
        }
    }

    #[inline]
    pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
        T::to_vec(s, alloc)
    }

    pub trait ConvertVec {
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
        where
            Self: Sized;
    }

    impl<T: Clone> ConvertVec for T {
        #[inline]
        default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            struct DropGuard<'a, T, A: Allocator> {
                vec: &'a mut Vec<T, A>,
                num_init: usize,
            }
            impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
                #[inline]
                fn drop(&mut self) {
                    // SAFETY:
                    // items were marked initialized in the loop below
                    unsafe {
                        self.vec.set_len(self.num_init);
                    }
                }
            }
            let mut vec = Vec::with_capacity_in(s.len(), alloc);
            let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
            let slots = guard.vec.spare_capacity_mut();
            // .take(slots.len()) is necessary for LLVM to remove bounds checks
            // and has better codegen than zip.
            for (i, b) in s.iter().enumerate().take(slots.len()) {
                guard.num_init = i;
                slots[i].write(b.clone());
            }
            core::mem::forget(guard);
            // SAFETY:
            // the vec was allocated and initialized above to at least this length.
            unsafe {
                vec.set_len(s.len());
            }
            vec
        }
    }

    impl<T: Copy> ConvertVec for T {
        #[inline]
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            let mut v = Vec::with_capacity_in(s.len(), alloc);
            // SAFETY:
            // allocated above with the capacity of `s`, and initialized to `s.len()` by
            // the `copy_to_nonoverlapping` below.
            unsafe {
                s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
                v.set_len(s.len());
            }
            v
        }
    }
}

#[lang = "slice_alloc"]
#[cfg_attr(not(test), rustc_diagnostic_item = "slice")]
#[cfg(not(test))]
impl<T> [T] {
    /// Sorts the slice.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable`](slice::sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.sort();
    /// assert!(v == [-5, -3, 1, 2, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort(&mut self)
    where
        T: Ord,
    {
        merge_sort(self, |a, b| a.lt(b));
    }

    /// Sorts the slice with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the slice. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
    ///
    /// ```
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by`](slice::sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.sort_by(|a, b| a.cmp(b));
    /// assert!(v == [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.sort_by(|a, b| b.cmp(a));
    /// assert!(v == [5, 4, 3, 2, 1]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort_by<F>(&mut self, mut compare: F)
    where
        F: FnMut(&T, &T) -> Ordering,
    {
        merge_sort(self, |a, b| compare(a, b) == Less);
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For expensive key functions (e.g. functions that are not simple property accesses or
    /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
    /// significantly faster, as it does not recompute element keys.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.sort_by_key(|k| k.abs());
    /// assert!(v == [1, 2, -3, 4, -5]);
    /// ```
    #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
    #[inline]
    pub fn sort_by_key<K, F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        merge_sort(self, |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// During sorting, the key function is called only once per element.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 32, -3, 2];
    ///
    /// v.sort_by_cached_key(|k| k.to_string());
    /// assert!(v == [-3, -5, 2, 32, 4]);
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
    #[inline]
    pub fn sort_by_cached_key<K, F>(&mut self, f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
        macro_rules! sort_by_key {
            ($t:ty, $slice:ident, $f:ident) => {{
                let mut indices: Vec<_> =
                    $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
                // The elements of `indices` are unique, as they are indexed, so any sort will be
                // stable with respect to the original slice. We use `sort_unstable` here because
                // it requires less memory allocation.
                indices.sort_unstable();
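                // Apply the computed permutation: after the sort, `indices[i].1` holds
                // the original position of the element that belongs at `i`. Earlier
                // iterations may have already swapped that element away, so the inner
                // loop follows the chain of recorded indices to its current slot.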
                for i in 0..$slice.len() {
                    let mut index = indices[i].1;
                    while (index as usize) < i {
                        index = indices[index as usize].1;
                    }
                    indices[i].1 = index;
                    $slice.swap(i, index as usize);
                }
            }};
        }

        let sz_u8 = mem::size_of::<(K, u8)>();
        let sz_u16 = mem::size_of::<(K, u16)>();
        let sz_u32 = mem::size_of::<(K, u32)>();
        let sz_usize = mem::size_of::<(K, usize)>();

        let len = self.len();
        if len < 2 {
            return;
        }
        if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
            return sort_by_key!(u8, self, f);
        }
        if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
            return sort_by_key!(u16, self, f);
        }
        if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
            return sort_by_key!(u32, self, f);
        }
        sort_by_key!(usize, self, f)
    }

    /// Copies `self` into a new `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = [10, 40, 30];
    /// let x = s.to_vec();
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[rustc_conversion_suggestion]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        self.to_vec_in(Global)
    }

    /// Copies `self` into a new `Vec` with an allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    ///
    /// let s = [10, 40, 30];
    /// let x = s.to_vec_in(System);
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
    where
        T: Clone,
    {
        // N.B., see the `hack` module in this file for more details.
        hack::to_vec(self, alloc)
    }

    /// Converts `self` into a vector without clones or allocation.
    ///
    /// The resulting vector can be converted back into a box via
    /// `Vec<T>`'s `into_boxed_slice` method.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: Box<[i32]> = Box::new([10, 40, 30]);
    /// let x = s.into_vec();
    /// // `s` cannot be used anymore because it has been converted into `x`.
    ///
    /// assert_eq!(x, vec![10, 40, 30]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
        // N.B., see the `hack` module in this file for more details.
        hack::into_vec(self)
    }

    /// Creates a vector by repeating a slice `n` times.
    ///
    /// # Panics
    ///
    /// This function will panic if the capacity would overflow.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
    /// ```
    ///
    /// A panic upon overflow:
    ///
    /// ```should_panic
    /// // this will panic at runtime
    /// b"0123456789abcdef".repeat(usize::MAX);
    /// ```
    #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
    pub fn repeat(&self, n: usize) -> Vec<T>
    where
        T: Copy,
    {
        if n == 0 {
            return Vec::new();
        }

        // If `n` is larger than zero, it can be split as
        // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
        // `2^expn` is the number represented by the leftmost '1' bit of `n`,
        // and `rem` is the remaining part of `n`.
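        // For example, `n = 6` splits as `6 = 2^2 + 2`: `buf` is doubled twice
        // (`len` -> `2 * len` -> `4 * len`) and then the remaining `2 * len`
        // elements are copied from the front of `buf` itself.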

        // Using `Vec` to access `set_len()`.
        let capacity = self.len().checked_mul(n).expect("capacity overflow");
        let mut buf = Vec::with_capacity(capacity);

        // `2^expn` repetition is done by doubling `buf` `expn`-times.
        buf.extend(self);
        {
            let mut m = n >> 1;
            // If `m > 0`, there are remaining bits up to the leftmost '1'.
            while m > 0 {
                // `buf.extend(buf)`:
                unsafe {
                    ptr::copy_nonoverlapping(
                        buf.as_ptr(),
                        (buf.as_mut_ptr() as *mut T).add(buf.len()),
                        buf.len(),
                    );
                    // `buf` has capacity of `self.len() * n`.
                    let buf_len = buf.len();
                    buf.set_len(buf_len * 2);
                }

                m >>= 1;
            }
        }

        // `rem` (`= n - 2^expn`) repetition is done by copying
        // the first `rem` repetitions from `buf` itself.
        let rem_len = capacity - buf.len(); // `self.len() * rem`
        if rem_len > 0 {
            // `buf.extend(buf[0 .. rem_len])`:
            unsafe {
                // This is non-overlapping since `2^expn > rem`.
                ptr::copy_nonoverlapping(
                    buf.as_ptr(),
                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
                    rem_len,
                );
                // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
                buf.set_len(capacity);
            }
        }
        buf
    }

    /// Flattens a slice of `T` into a single value `Self::Output`.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].concat(), "helloworld");
    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
    where
        Self: Concat<Item>,
    {
        Concat::concat(self)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].join(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
    /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
    /// ```
    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
    pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// assert_eq!(["hello", "world"].connect(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_deprecated(since = "1.3.0", reason = "renamed to join")]
    pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }
}

#[lang = "slice_u8_alloc"]
#[cfg(not(test))]
impl [u8] {
    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII upper case equivalent.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To uppercase the value in-place, use [`make_ascii_uppercase`].
    ///
    /// [`make_ascii_uppercase`]: u8::make_ascii_uppercase
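    ///
    /// # Examples
    ///
    /// Uppercasing a byte string leaves non-letter bytes unchanged:
    ///
    /// ```
    /// let s = b"hello, world!";
    /// assert_eq!(s.to_ascii_uppercase(), b"HELLO, WORLD!");
    /// ```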
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_uppercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_uppercase();
        me
    }

    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII lower case equivalent.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To lowercase the value in-place, use [`make_ascii_lowercase`].
    ///
    /// [`make_ascii_lowercase`]: u8::make_ascii_lowercase
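    ///
    /// # Examples
    ///
    /// Lowercasing a byte string leaves non-letter bytes unchanged:
    ///
    /// ```
    /// let s = b"HELLO, WORLD!";
    /// assert_eq!(s.to_ascii_lowercase(), b"hello, world!");
    /// ```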
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_lowercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_lowercase();
        me
    }
}

////////////////////////////////////////////////////////////////////////////////
// Extension traits for slices over specific kinds of data
////////////////////////////////////////////////////////////////////////////////

/// Helper trait for [`[T]::concat`](slice::concat).
///
/// Note: the `Item` type parameter is not used in this trait,
/// but it allows impls to be more generic.
/// Without it, we get this error:
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
///  --> src/liballoc/slice.rs:608:6
///   |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
///     |      ^ unconstrained type parameter
/// ```
///
/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
/// such that multiple `T` types would apply:
///
/// ```
/// # #[allow(dead_code)]
/// pub struct Foo(Vec<u32>, Vec<String>);
///
/// impl std::borrow::Borrow<[u32]> for Foo {
///     fn borrow(&self) -> &[u32] { &self.0 }
/// }
///
/// impl std::borrow::Borrow<[String]> for Foo {
///     fn borrow(&self) -> &[String] { &self.1 }
/// }
/// ```
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Concat<Item: ?Sized> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::concat`](slice::concat)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn concat(slice: &Self) -> Self::Output;
}

/// Helper trait for [`[T]::join`](slice::join)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Join<Separator> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::join`](slice::join)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn join(slice: &Self, sep: Separator) -> Self::Output;
}

#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
    type Output = Vec<T>;

    fn concat(slice: &Self) -> Vec<T> {
        let size = slice.iter().map(|slice| slice.borrow().len()).sum();
        let mut result = Vec::with_capacity(size);
        for v in slice {
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &T) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.push(sep.clone());
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &[T]) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size =
            slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.extend_from_slice(sep);
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

////////////////////////////////////////////////////////////////////////////////
// Standard trait implementations for slices
////////////////////////////////////////////////////////////////////////////////

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Borrow<[T]> for Vec<T> {
    fn borrow(&self) -> &[T] {
        &self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> BorrowMut<[T]> for Vec<T> {
    fn borrow_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
    type Owned = Vec<T>;
    #[cfg(not(test))]
    fn to_owned(&self) -> Vec<T> {
        self.to_vec()
    }

    #[cfg(test)]
    fn to_owned(&self) -> Vec<T> {
        hack::to_vec(self, Global)
    }

    fn clone_into(&self, target: &mut Vec<T>) {
        // drop anything in target that will not be overwritten
        target.truncate(self.len());

        // target.len <= self.len due to the truncate above, so the
        // slices here are always in-bounds.
        let (init, tail) = self.split_at(target.len());

        // reuse the contained values' allocations/resources.
        target.clone_from_slice(init);
        target.extend_from_slice(tail);
    }
}

////////////////////////////////////////////////////////////////////////////////
// Sorting
////////////////////////////////////////////////////////////////////////////////

/// Inserts `v[0]` into the pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
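///
/// For example, with `v = [3, 1, 2, 4]` (so `v[1..]` is already sorted), the head
/// element `3` is shifted into place, yielding `[1, 2, 3, 4]`.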
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    if v.len() >= 2 && is_less(&v[1], &v[0]) {
        unsafe {
            // There are three ways to implement insertion here:
            //
            // 1. Swap adjacent elements until the first one gets to its final destination.
            //    However, this way we copy data around more than is necessary. If elements are big
            //    structures (costly to copy), this method will be slow.
            //
            // 2. Iterate until the right place for the first element is found. Then shift the
            //    elements succeeding it to make room for it and finally place it into the
            //    remaining hole. This is a good method.
            //
            // 3. Copy the first element into a temporary variable. Iterate until the right place
            //    for it is found. As we go along, copy every traversed element into the slot
            //    preceding it. Finally, copy data from the temporary variable into the remaining
            //    hole. This method is very good. Benchmarks demonstrated slightly better
            //    performance than with the 2nd method.
            //
            // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
            let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));

            // Intermediate state of the insertion process is always tracked by `hole`, which
            // serves two purposes:
            // 1. Protects integrity of `v` from panics in `is_less`.
            // 2. Fills the remaining hole in `v` in the end.
            //
            // Panic safety:
            //
            // If `is_less` panics at any point during the process, `hole` will get dropped and
            // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
            // initially held exactly once.
            let mut hole = InsertionHole { src: &mut *tmp, dest: &mut v[1] };
            ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);

            for i in 2..v.len() {
                if !is_less(&v[i], &*tmp) {
                    break;
                }
                ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
                hole.dest = &mut v[i];
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }

    // When dropped, copies from `src` into `dest`.
    struct InsertionHole<T> {
        src: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for InsertionHole<T> {
        fn drop(&mut self) {
            unsafe {
                ptr::copy_nonoverlapping(self.src, self.dest, 1);
            }
        }
    }
}

/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
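///
/// For example, with `v = [1, 3, 5, 2, 4]` and `mid = 3`, the shorter right run `[2, 4]`
/// is copied into `buf` and merged backwards with `[1, 3, 5]`, leaving `[1, 2, 3, 4, 5]`.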
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    let v = v.as_mut_ptr();
    let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };

    // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
    // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
    // copying the lesser (or greater) one into `v`.
    //
    // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
    // consumed first, then we must copy whatever is left of the shorter run into the remaining
    // hole in `v`.
    //
    // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
    // 1. Protects integrity of `v` from panics in `is_less`.
    // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
    //
    // Panic safety:
    //
    // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
    // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
    // object it initially held exactly once.
    let mut hole;

    if mid <= len - mid {
        // The left run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v, buf, mid);
            hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
        }

        // Initially, these pointers point to the beginnings of their arrays.
        let left = &mut hole.start;
        let mut right = v_mid;
        let out = &mut hole.dest;

        while *left < hole.end && right < v_end {
            // Consume the lesser side.
            // If equal, prefer the left run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right, &**left) {
                    get_and_increment(&mut right)
                } else {
                    get_and_increment(left)
                };
                ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
            }
        }
    } else {
        // The right run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v_mid, buf, len - mid);
            hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
        }

        // Initially, these pointers point past the ends of their arrays.
        let left = &mut hole.dest;
        let right = &mut hole.end;
        let mut out = v_end;

        while v < *left && buf < *right {
            // Consume the greater side.
            // If equal, prefer the right run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
                    decrement_and_get(left)
                } else {
                    decrement_and_get(right)
                };
                ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
            }
        }
    }
    // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
    // it will now be copied into the hole in `v`.

    unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
        let old = *ptr;
        *ptr = unsafe { ptr.offset(1) };
        old
    }

    unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
        *ptr = unsafe { ptr.offset(-1) };
        *ptr
    }

    // When dropped, copies the range `start..end` into `dest..`.
    struct MergeHole<T> {
        start: *mut T,
        end: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for MergeHole<T> {
        fn drop(&mut self) {
            // `T` is not a zero-sized type, so it's okay to divide by its size.
            let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
            unsafe {
                ptr::copy_nonoverlapping(self.start, self.dest, len);
            }
        }
    }
}

/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
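///
/// For example, if runs of lengths 8 and 5 are on the stack and a run of length 4 is
/// pushed, invariant 2 is violated (8 <= 5 + 4), so the runs of lengths 5 and 4 are
/// merged into one of length 9; then invariant 1 is violated (8 <= 9), so the remaining
/// two runs are merged into a single run of length 17.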
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;
    // Very short runs are extended using insertion sort to span at least this many elements.
    const MIN_RUN: usize = 10;

    // Sorting has no meaningful behavior on zero-sized types.
    if size_of::<T>() == 0 {
        return;
    }

    let len = v.len();

    // Short arrays get sorted in-place via insertion sort to avoid allocations.
    if len <= MAX_INSERTION {
        if len >= 2 {
            for i in (0..len - 1).rev() {
                insert_head(&mut v[i..], &mut is_less);
            }
        }
        return;
    }

    // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
    // shallow copies of the contents of `v` without risking the dtors running on copies if
    // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
    // which will always have length at most `len / 2`.
    let mut buf = Vec::with_capacity(len / 2);

    // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
    // strange decision, but consider the fact that merges more often go in the opposite direction
    // (forwards). According to benchmarks, merging forwards is slightly faster than merging
    // backwards. To conclude, identifying runs by traversing backwards improves performance.
    let mut runs = vec![];
    let mut end = len;
    while end > 0 {
        // Find the next natural run, and reverse it if it's strictly descending.
        let mut start = end - 1;
        if start > 0 {
            start -= 1;
            unsafe {
                if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
                    while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
                        start -= 1;
                    }
                    v[start..end].reverse();
                } else {
                    while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
                    {
                        start -= 1;
                    }
                }
            }
        }

        // Insert some more elements into the run if it's too short. Insertion sort is faster than
        // merge sort on short sequences, so this significantly improves performance.
        while start > 0 && end - start < MIN_RUN {
            start -= 1;
            insert_head(&mut v[start..end], &mut is_less);
        }

        // Push this run onto the stack.
        runs.push(Run { start, len: end - start });
        end = start;

        // Merge some pairs of adjacent runs to satisfy the invariants.
        while let Some(r) = collapse(&runs) {
            let left = runs[r + 1];
            let right = runs[r];
            unsafe {
                merge(
                    &mut v[left.start..right.start + right.len],
                    left.len,
                    buf.as_mut_ptr(),
                    &mut is_less,
                );
            }
            runs[r] = Run { start: left.start, len: left.len + right.len };
            runs.remove(r + 1);
        }
    }

    // Finally, exactly one run must remain in the stack.
    debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);

    // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
    // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
    // algorithm should continue building a new run instead, `None` is returned.
    //
    // TimSort is infamous for its buggy implementations, as described here:
    // http://envisage-project.eu/timsort-specification-and-verification/
    //
    // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
    // Enforcing them on just the top three is not sufficient to ensure that the invariants will
    // still hold for *all* runs in the stack.
    //
    // This function correctly checks invariants for the top four runs. Additionally, if the top
    // run starts at index 0, it will always demand a merge operation until the stack is fully
    // collapsed, in order to complete the sort.
    #[inline]
    fn collapse(runs: &[Run]) -> Option<usize> {
        let n = runs.len();
        if n >= 2
            && (runs[n - 1].start == 0
                || runs[n - 2].len <= runs[n - 1].len
                || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
                || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
        {
            if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
        } else {
            None
        }
    }

    #[derive(Clone, Copy)]
    struct Run {
        start: usize,
        len: usize,
    }
}