// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A dynamically-sized view into a contiguous sequence, `[T]`.
//!
//! Slices are a view into a block of memory represented as a pointer and a
//! length.
//!
//! ```
//! // slicing a Vec
//! let vec = vec![1, 2, 3];
//! let int_slice = &vec[..];
//! // coercing an array to a slice
//! let str_slice: &[&str] = &["one", "two", "three"];
//! ```
//!
//! Slices are either mutable or shared. The shared slice type is `&[T]`,
//! while the mutable slice type is `&mut [T]`, where `T` represents the element
//! type. For example, you can mutate the block of memory that a mutable slice
//! points to:
//!
//! ```
//! let x = &mut [1, 2, 3];
//! x[1] = 7;
//! assert_eq!(x, &[1, 7, 3]);
//! ```
//!
//! Here are some of the things this module contains:
//!
//! ## Structs
//!
//! There are several structs that are useful for slices, such as [`Iter`], which
//! represents iteration over a slice.
//!
//! ## Trait Implementations
//!
//! There are several implementations of common traits for slices. Some examples
//! include:
//!
//! * [`Clone`]
//! * [`Eq`], [`Ord`] - for slices whose element type is [`Eq`] or [`Ord`].
//! * [`Hash`] - for slices whose element type is [`Hash`].
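//!
//! For example, slices of an [`Ord`] element type compare lexicographically:
//!
//! ```
//! let a: &[i32] = &[1, 2, 3];
//! let b: &[i32] = &[1, 2, 4];
//!
//! assert_ne!(a, b);
//! assert!(a < b);
//! ```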
//!
//! ## Iteration
//!
//! The slices implement `IntoIterator`. The iterator yields references to the
//! slice elements.
//!
//! ```
//! let numbers = &[0, 1, 2];
//! for n in numbers {
//!     println!("{} is a number!", n);
//! }
//! ```
//!
//! The mutable slice yields mutable references to the elements:
//!
//! ```
//! let mut scores = [7, 8, 9];
//! for score in &mut scores[..] {
//!     *score += 1;
//! }
//! ```
//!
//! This iterator yields mutable references to the slice's elements, so while
//! the element type of the slice is `i32`, the element type of the iterator is
//! `&mut i32`.
//!
//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
//!   iterators.
//! * Further methods that return iterators are [`.split`], [`.splitn`],
//!   [`.chunks`], [`.windows`] and more.
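//!
//! For example, [`.windows`] yields overlapping subslices of a given length,
//! while [`.chunks`] yields non-overlapping ones:
//!
//! ```
//! let slice = [1, 2, 3, 4, 5];
//!
//! // windows of length 2: [1, 2], [2, 3], [3, 4], [4, 5]
//! assert_eq!(slice.windows(2).count(), 4);
//!
//! // chunks of length 2: [1, 2], [3, 4], [5]
//! assert_eq!(slice.chunks(2).count(), 3);
//! ```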
//!
//! *[See also the slice primitive type](../../std/primitive.slice.html).*
//!
//! [`Clone`]: ../../std/clone/trait.Clone.html
//! [`Eq`]: ../../std/cmp/trait.Eq.html
//! [`Ord`]: ../../std/cmp/trait.Ord.html
//! [`Iter`]: struct.Iter.html
//! [`Hash`]: ../../std/hash/trait.Hash.html
//! [`.iter`]: ../../std/primitive.slice.html#method.iter
//! [`.iter_mut`]: ../../std/primitive.slice.html#method.iter_mut
//! [`.split`]: ../../std/primitive.slice.html#method.split
//! [`.splitn`]: ../../std/primitive.slice.html#method.splitn
//! [`.chunks`]: ../../std/primitive.slice.html#method.chunks
//! [`.windows`]: ../../std/primitive.slice.html#method.windows
#![stable(feature = "rust1", since = "1.0.0")]

// Many of the usings in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![cfg_attr(test, allow(unused_imports, dead_code))]

use core::cmp::Ordering::{self, Less};
use core::mem::size_of;
use core::mem;
use core::ptr;
#[cfg(stage0)] use core::slice::SliceExt;
use core::{u8, u16, u32};

use borrow::{Borrow, BorrowMut, ToOwned};
use boxed::Box;
use vec::Vec;

#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Chunks, Windows};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Iter, IterMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{SplitMut, ChunksMut, Split};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{SplitN, RSplitN, SplitNMut, RSplitNMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use core::slice::{RSplit, RSplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[unstable(feature = "from_ref", issue = "45703")]
pub use core::slice::{from_ref, from_ref_mut};
#[unstable(feature = "slice_get_slice", issue = "35729")]
pub use core::slice::SliceIndex;
#[unstable(feature = "exact_chunks", issue = "47115")]
pub use core::slice::{ExactChunks, ExactChunksMut};

////////////////////////////////////////////////////////////////////////////////
// Basic slice extension methods
////////////////////////////////////////////////////////////////////////////////

// HACK(japaric) needed for the implementation of `vec!` macro during testing
// NB see the hack module in this file for more details
#[cfg(test)]
pub use self::hack::into_vec;

// HACK(japaric) needed for the implementation of `Vec::clone` during testing
// NB see the hack module in this file for more details
#[cfg(test)]
pub use self::hack::to_vec;

// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
// functions are actually methods that are in `impl [T]` but not in
// `core::slice::SliceExt` - we need to supply these functions for the
// `test_permutations` test
mod hack {
    use boxed::Box;
    use core::mem;

    #[cfg(test)]
    use string::ToString;
    use vec::Vec;

    pub fn into_vec<T>(mut b: Box<[T]>) -> Vec<T> {
        unsafe {
            let xs = Vec::from_raw_parts(b.as_mut_ptr(), b.len(), b.len());
            mem::forget(b);
            xs
        }
    }

    #[inline]
    pub fn to_vec<T>(s: &[T]) -> Vec<T>
        where T: Clone
    {
        let mut vector = Vec::with_capacity(s.len());
        vector.extend_from_slice(s);
        vector
    }
}

#[cfg_attr(stage0, lang = "slice")]
#[cfg_attr(not(stage0), lang = "slice_alloc")]
#[cfg(not(test))]
impl<T> [T] {
    #[cfg(stage0)]
    slice_core_methods!();

    /// Sorts the slice.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable`](#method.sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.sort();
    /// assert!(v == [-5, -3, 1, 2, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort(&mut self)
        where T: Ord
    {
        merge_sort(self, |a, b| a.lt(b));
    }

    /// Sorts the slice with a comparator function.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by`](#method.sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.sort_by(|a, b| a.cmp(b));
    /// assert!(v == [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.sort_by(|a, b| b.cmp(a));
    /// assert!(v == [5, 4, 3, 2, 1]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort_by<F>(&mut self, mut compare: F)
        where F: FnMut(&T, &T) -> Ordering
    {
        merge_sort(self, |a, b| compare(a, b) == Less);
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(m n log(m n))`
    /// worst-case, where the key function is `O(m)`.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by_key`](#method.sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.sort_by_key(|k| k.abs());
    /// assert!(v == [1, 2, -3, 4, -5]);
    /// ```
    #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
    #[inline]
    pub fn sort_by_key<K, F>(&mut self, mut f: F)
        where F: FnMut(&T) -> K, K: Ord
    {
        merge_sort(self, |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// During sorting, the key function is called only once per element.
    ///
    /// This sort is stable (i.e. does not reorder equal elements) and `O(m n + n log n)`
    /// worst-case, where the key function is `O(m)`.
    ///
    /// For simple key functions (e.g. functions that are property accesses or
    /// basic operations), [`sort_by_key`](#method.sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(slice_sort_by_cached_key)]
    /// let mut v = [-5i32, 4, 32, -3, 2];
    ///
    /// v.sort_by_cached_key(|k| k.to_string());
    /// assert!(v == [-3, -5, 2, 32, 4]);
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[unstable(feature = "slice_sort_by_cached_key", issue = "34447")]
    #[inline]
    pub fn sort_by_cached_key<K, F>(&mut self, f: F)
        where F: FnMut(&T) -> K, K: Ord
    {
        // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
        macro_rules! sort_by_key {
            ($t:ty, $slice:ident, $f:ident) => ({
                let mut indices: Vec<_> =
                    $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
                // The elements of `indices` are unique, as they are indexed, so any sort will be
                // stable with respect to the original slice. We use `sort_unstable` here because
                // it requires less memory allocation.
                indices.sort_unstable();
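                // Apply the sorted order to the slice: `indices[i].1` is the original position
                // of the element that belongs at position `i`. If that element was already
                // displaced by an earlier swap (its recorded position is `< i`), follow the
                // chain of updated indices to find where it currently lives before swapping it
                // into place.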
                for i in 0..$slice.len() {
                    let mut index = indices[i].1;
                    while (index as usize) < i {
                        index = indices[index as usize].1;
                    }
                    indices[i].1 = index;
                    $slice.swap(i, index as usize);
                }
            })
        }

        let sz_u8    = mem::size_of::<(K, u8)>();
        let sz_u16   = mem::size_of::<(K, u16)>();
        let sz_u32   = mem::size_of::<(K, u32)>();
        let sz_usize = mem::size_of::<(K, usize)>();

        let len = self.len();
        if len < 2 { return }
        if sz_u8  < sz_u16   && len <= ( u8::MAX as usize) { return sort_by_key!( u8, self, f) }
        if sz_u16 < sz_u32   && len <= (u16::MAX as usize) { return sort_by_key!(u16, self, f) }
        if sz_u32 < sz_usize && len <= (u32::MAX as usize) { return sort_by_key!(u32, self, f) }
        sort_by_key!(usize, self, f)
    }

    /// Copies `self` into a new `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = [10, 40, 30];
    /// let x = s.to_vec();
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[rustc_conversion_suggestion]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn to_vec(&self) -> Vec<T>
        where T: Clone
    {
        // NB see hack module in this file
        hack::to_vec(self)
    }

    /// Converts `self` into a vector without clones or allocation.
    ///
    /// The resulting vector can be converted back into a box via
    /// `Vec<T>`'s `into_boxed_slice` method.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: Box<[i32]> = Box::new([10, 40, 30]);
    /// let x = s.into_vec();
    /// // `s` cannot be used anymore because it has been converted into `x`.
    ///
    /// assert_eq!(x, vec![10, 40, 30]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn into_vec(self: Box<Self>) -> Vec<T> {
        // NB see hack module in this file
        hack::into_vec(self)
    }

    /// Creates a vector by repeating a slice `n` times.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(repeat_generic_slice)]
    ///
    /// fn main() {
    ///     assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
    /// }
    /// ```
    #[unstable(feature = "repeat_generic_slice",
               reason = "it's on str, why not on slice?",
               issue = "48784")]
    pub fn repeat(&self, n: usize) -> Vec<T> where T: Copy {
        if n == 0 {
            return Vec::new();
        }

        // If `n` is larger than zero, it can be split as
        // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
        // `2^expn` is the number represented by the leftmost '1' bit of `n`,
        // and `rem` is the remaining part of `n`.
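        //
        // For example, `n = 5` gives `2^expn = 4` and `rem = 1`: `buf` starts as one copy of
        // `self`, is doubled twice (1 -> 2 -> 4 copies), and the final step copies the first
        // `self.len()` elements of `buf` once more, reaching 5 copies.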

        // Using `Vec` to access `set_len()`.
        let mut buf = Vec::with_capacity(self.len() * n);

        // `2^expn` repetition is done by doubling `buf` `expn`-times.
        buf.extend(self);
        {
            let mut m = n >> 1;
            // If `m > 0`, there are remaining bits up to the leftmost '1'.
            while m > 0 {
                // `buf.extend(buf)`:
                unsafe {
                    ptr::copy_nonoverlapping(
                        buf.as_ptr(),
                        (buf.as_mut_ptr() as *mut T).add(buf.len()),
                        buf.len(),
                    );
                    // `buf` has capacity of `self.len() * n`.
                    let buf_len = buf.len();
                    buf.set_len(buf_len * 2);
                }

                m >>= 1;
            }
        }

        // `rem` (`= n - 2^expn`) repetition is done by copying
        // first `rem` repetitions from `buf` itself.
        let rem_len = self.len() * n - buf.len(); // `self.len() * rem`
        if rem_len > 0 {
            // `buf.extend(buf[0 .. rem_len])`:
            unsafe {
                // This is non-overlapping since `2^expn > rem`.
                ptr::copy_nonoverlapping(
                    buf.as_ptr(),
                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
                    rem_len,
                );
                // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
                let buf_cap = buf.capacity();
                buf.set_len(buf_cap);
            }
        }
        buf
    }
}

#[cfg_attr(stage0, lang = "slice_u8")]
#[cfg_attr(not(stage0), lang = "slice_u8_alloc")]
#[cfg(not(test))]
impl [u8] {
    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII upper case equivalent.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To uppercase the value in-place, use [`make_ascii_uppercase`].
    ///
    /// [`make_ascii_uppercase`]: #method.make_ascii_uppercase
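    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let bytes = b"rust 1.27";
    /// assert_eq!(bytes.to_ascii_uppercase(), b"RUST 1.27".to_vec());
    /// ```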
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_uppercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_uppercase();
        me
    }

    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII lower case equivalent.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To lowercase the value in-place, use [`make_ascii_lowercase`].
    ///
    /// [`make_ascii_lowercase`]: #method.make_ascii_lowercase
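    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let bytes = b"RUST 1.27";
    /// assert_eq!(bytes.to_ascii_lowercase(), b"rust 1.27".to_vec());
    /// ```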
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_lowercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_lowercase();
        me
    }

    #[cfg(stage0)]
    slice_u8_core_methods!();
}

////////////////////////////////////////////////////////////////////////////////
// Extension traits for slices over specific kinds of data
////////////////////////////////////////////////////////////////////////////////
#[unstable(feature = "slice_concat_ext",
           reason = "trait should not have to exist",
           issue = "27747")]
/// An extension trait for concatenating slices
///
/// While this trait is unstable, the methods are stable. `SliceConcatExt` is
/// included in the [standard library prelude], so you can use [`join()`] and
/// [`concat()`] as if they existed on `[T]` itself.
///
/// [standard library prelude]: ../../std/prelude/index.html
/// [`join()`]: #tymethod.join
/// [`concat()`]: #tymethod.concat
pub trait SliceConcatExt<T: ?Sized> {
    #[unstable(feature = "slice_concat_ext",
               reason = "trait should not have to exist",
               issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Flattens a slice of `T` into a single value `Self::Output`.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].concat(), "helloworld");
    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn concat(&self) -> Self::Output;

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].join(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
    /// ```
    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
    fn join(&self, sep: &T) -> Self::Output;

    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_deprecated(since = "1.3.0", reason = "renamed to join")]
    fn connect(&self, sep: &T) -> Self::Output;
}

#[unstable(feature = "slice_concat_ext",
           reason = "trait should not have to exist",
           issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> SliceConcatExt<T> for [V] {
    type Output = Vec<T>;

    fn concat(&self) -> Vec<T> {
        let size = self.iter().fold(0, |acc, v| acc + v.borrow().len());
        let mut result = Vec::with_capacity(size);
        for v in self {
            result.extend_from_slice(v.borrow())
        }
        result
    }

    fn join(&self, sep: &T) -> Vec<T> {
        let size = self.iter().fold(0, |acc, v| acc + v.borrow().len());
        let mut result = Vec::with_capacity(size + self.len());
        let mut first = true;
        for v in self {
            if first {
                first = false
            } else {
                result.push(sep.clone())
            }
            result.extend_from_slice(v.borrow())
        }
        result
    }

    fn connect(&self, sep: &T) -> Vec<T> {
        self.join(sep)
    }
}

////////////////////////////////////////////////////////////////////////////////
// Standard trait implementations for slices
////////////////////////////////////////////////////////////////////////////////

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Borrow<[T]> for Vec<T> {
    fn borrow(&self) -> &[T] {
        &self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> BorrowMut<[T]> for Vec<T> {
    fn borrow_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
    type Owned = Vec<T>;
    #[cfg(not(test))]
    fn to_owned(&self) -> Vec<T> {
        self.to_vec()
    }

    #[cfg(test)]
    fn to_owned(&self) -> Vec<T> {
        hack::to_vec(self)
    }

    fn clone_into(&self, target: &mut Vec<T>) {
        // drop anything in target that will not be overwritten
        target.truncate(self.len());
        let len = target.len();

        // reuse the contained values' allocations/resources.
        target.clone_from_slice(&self[..len]);

        // target.len <= self.len due to the truncate above, so the
        // slice here is always in-bounds.
        target.extend_from_slice(&self[len..]);
    }
}

////////////////////////////////////////////////////////////////////////////////
// Sorting
////////////////////////////////////////////////////////////////////////////////

/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
    where F: FnMut(&T, &T) -> bool
{
    if v.len() >= 2 && is_less(&v[1], &v[0]) {
        unsafe {
            // There are three ways to implement insertion here:
            //
            // 1. Swap adjacent elements until the first one gets to its final destination.
            //    However, this way we copy data around more than is necessary. If elements are big
            //    structures (costly to copy), this method will be slow.
            //
            // 2. Iterate until the right place for the first element is found. Then shift the
            //    elements succeeding it to make room for it and finally place it into the
            //    remaining hole. This is a good method.
            //
            // 3. Copy the first element into a temporary variable. Iterate until the right place
            //    for it is found. As we go along, copy every traversed element into the slot
            //    preceding it. Finally, copy data from the temporary variable into the remaining
            //    hole. This method is very good. Benchmarks demonstrated slightly better
            //    performance than with the 2nd method.
            //
            // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
            let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));

            // Intermediate state of the insertion process is always tracked by `hole`, which
            // serves two purposes:
            // 1. Protects integrity of `v` from panics in `is_less`.
            // 2. Fills the remaining hole in `v` in the end.
            //
            // Panic safety:
            //
            // If `is_less` panics at any point during the process, `hole` will get dropped and
            // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
            // initially held exactly once.
            let mut hole = InsertionHole {
                src: &mut *tmp,
                dest: &mut v[1],
            };
            ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);

            for i in 2..v.len() {
                if !is_less(&v[i], &*tmp) {
                    break;
                }
                ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
                hole.dest = &mut v[i];
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }

    // When dropped, copies from `src` into `dest`.
    struct InsertionHole<T> {
        src: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for InsertionHole<T> {
        fn drop(&mut self) {
            unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); }
        }
    }
}

/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
    where F: FnMut(&T, &T) -> bool
{
    let len = v.len();
    let v = v.as_mut_ptr();
    let v_mid = v.offset(mid as isize);
    let v_end = v.offset(len as isize);

    // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
    // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
    // copying the lesser (or greater) one into `v`.
    //
    // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
    // consumed first, then we must copy whatever is left of the shorter run into the remaining
    // hole in `v`.
    //
    // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
    // 1. Protects integrity of `v` from panics in `is_less`.
    // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
    //
    // Panic safety:
    //
    // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
    // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
    // object it initially held exactly once.
    let mut hole;

    if mid <= len - mid {
        // The left run is shorter.
        ptr::copy_nonoverlapping(v, buf, mid);
        hole = MergeHole {
            start: buf,
            end: buf.offset(mid as isize),
            dest: v,
        };

        // Initially, these pointers point to the beginnings of their arrays.
        let left = &mut hole.start;
        let mut right = v_mid;
        let out = &mut hole.dest;

        while *left < hole.end && right < v_end {
            // Consume the lesser side.
            // If equal, prefer the left run to maintain stability.
            let to_copy = if is_less(&*right, &**left) {
                get_and_increment(&mut right)
            } else {
                get_and_increment(left)
            };
            ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
        }
    } else {
        // The right run is shorter.
        ptr::copy_nonoverlapping(v_mid, buf, len - mid);
        hole = MergeHole {
            start: buf,
            end: buf.offset((len - mid) as isize),
            dest: v_mid,
        };

        // Initially, these pointers point past the ends of their arrays.
        let left = &mut hole.dest;
        let right = &mut hole.end;
        let mut out = v_end;

        while v < *left && buf < *right {
            // Consume the greater side.
            // If equal, prefer the right run to maintain stability.
            let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
                decrement_and_get(left)
            } else {
                decrement_and_get(right)
            };
            ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
        }
    }
    // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
    // it will now be copied into the hole in `v`.

    unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
        let old = *ptr;
        *ptr = ptr.offset(1);
        old
    }

    unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
        *ptr = ptr.offset(-1);
        *ptr
    }

    // When dropped, copies the range `start..end` into `dest..`.
    struct MergeHole<T> {
        start: *mut T,
        end: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for MergeHole<T> {
        fn drop(&mut self) {
            // `T` is not a zero-sized type, so it's okay to divide by its size.
            let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
            unsafe { ptr::copy_nonoverlapping(self.start, self.dest, len); }
        }
    }
}

/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is `O(n log n)` worst-case.
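/// In particular, invariant 2 means the lengths of pending runs grow at least as fast as the
/// Fibonacci numbers going from the top of the stack towards the bottom, so the stack never
/// holds more than `O(log n)` runs at a time.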
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
    where F: FnMut(&T, &T) -> bool
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;
    // Very short runs are extended using insertion sort to span at least this many elements.
    const MIN_RUN: usize = 10;

    // Sorting has no meaningful behavior on zero-sized types.
    if size_of::<T>() == 0 {
        return;
    }

    let len = v.len();

    // Short arrays get sorted in-place via insertion sort to avoid allocations.
    if len <= MAX_INSERTION {
        if len >= 2 {
            for i in (0..len-1).rev() {
                insert_head(&mut v[i..], &mut is_less);
            }
        }
        return;
    }

    // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
    // shallow copies of the contents of `v` without risking the dtors running on copies if
    // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
    // which will always have length at most `len / 2`.
    let mut buf = Vec::with_capacity(len / 2);

    // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
    // strange decision, but consider the fact that merges more often go in the opposite direction
    // (forwards). According to benchmarks, merging forwards is slightly faster than merging
    // backwards. To conclude, identifying runs by traversing backwards improves performance.
    let mut runs = vec![];
    let mut end = len;
    while end > 0 {
        // Find the next natural run, and reverse it if it's strictly descending.
        let mut start = end - 1;
        if start > 0 {
            start -= 1;
            unsafe {
                if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
                    while start > 0 && is_less(v.get_unchecked(start),
                                               v.get_unchecked(start - 1)) {
                        start -= 1;
                    }
                    v[start..end].reverse();
                } else {
                    while start > 0 && !is_less(v.get_unchecked(start),
                                                v.get_unchecked(start - 1)) {
                        start -= 1;
                    }
                }
            }
        }

        // Insert some more elements into the run if it's too short. Insertion sort is faster than
        // merge sort on short sequences, so this significantly improves performance.
        while start > 0 && end - start < MIN_RUN {
            start -= 1;
            insert_head(&mut v[start..end], &mut is_less);
        }

        // Push this run onto the stack.
        runs.push(Run {
            start,
            len: end - start,
        });
        end = start;

        // Merge some pairs of adjacent runs to satisfy the invariants.
        while let Some(r) = collapse(&runs) {
            let left = runs[r + 1];
            let right = runs[r];
            unsafe {
                merge(&mut v[left.start .. right.start + right.len], left.len, buf.as_mut_ptr(),
                      &mut is_less);
            }
            runs[r] = Run {
                start: left.start,
                len: left.len + right.len,
            };
            runs.remove(r + 1);
        }
    }

    // Finally, exactly one run must remain in the stack.
    debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);

    // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
    // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
    // algorithm should continue building a new run instead, `None` is returned.
    //
    // TimSort is infamous for its buggy implementations, as described here:
    // http://envisage-project.eu/timsort-specification-and-verification/
    //
    // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
    // Enforcing them on just the top three is not sufficient to ensure that the invariants will
    // still hold for *all* runs in the stack.
    //
    // This function correctly checks invariants for the top four runs. Additionally, if the top
    // run starts at index 0, it will always demand a merge operation until the stack is fully
    // collapsed, in order to complete the sort.
    #[inline]
    fn collapse(runs: &[Run]) -> Option<usize> {
        let n = runs.len();
        if n >= 2 && (runs[n - 1].start == 0 ||
                      runs[n - 2].len <= runs[n - 1].len ||
                      (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) ||
                      (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) {
            if n >= 3 && runs[n - 3].len < runs[n - 1].len {
                Some(n - 3)
            } else {
                Some(n - 2)
            }
        } else {
            None
        }
    }

    #[derive(Clone, Copy)]
    struct Run {
        start: usize,
        len: usize,
    }
}