]> git.proxmox.com Git - rustc.git/blame - library/alloc/src/slice.rs
New upstream version 1.48.0~beta.8+dfsg1
[rustc.git] / library / alloc / src / slice.rs
CommitLineData
c1a9b12d 1//! A dynamically-sized view into a contiguous sequence, `[T]`.
1a4d82fc 2//!
94b46f34
XL
3//! *[See also the slice primitive type](../../std/primitive.slice.html).*
4//!
62682a34
SL
5//! Slices are a view into a block of memory represented as a pointer and a
6//! length.
1a4d82fc 7//!
c34b1796 8//! ```
1a4d82fc 9//! // slicing a Vec
c34b1796
AL
10//! let vec = vec![1, 2, 3];
11//! let int_slice = &vec[..];
1a4d82fc
JJ
12//! // coercing an array to a slice
13//! let str_slice: &[&str] = &["one", "two", "three"];
14//! ```
15//!
16//! Slices are either mutable or shared. The shared slice type is `&[T]`,
c34b1796
AL
17//! while the mutable slice type is `&mut [T]`, where `T` represents the element
18//! type. For example, you can mutate the block of memory that a mutable slice
19//! points to:
1a4d82fc 20//!
c34b1796
AL
21//! ```
22//! let x = &mut [1, 2, 3];
1a4d82fc 23//! x[1] = 7;
c34b1796 24//! assert_eq!(x, &[1, 7, 3]);
1a4d82fc
JJ
25//! ```
26//!
27//! Here are some of the things this module contains:
28//!
29//! ## Structs
30//!
c30ab7b3 31//! There are several structs that are useful for slices, such as [`Iter`], which
1a4d82fc
JJ
32//! represents iteration over a slice.
33//!
c34b1796 34//! ## Trait Implementations
1a4d82fc
JJ
35//!
36//! There are several implementations of common traits for slices. Some examples
37//! include:
38//!
c30ab7b3
SL
39//! * [`Clone`]
40//! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`].
41//! * [`Hash`] - for slices whose element type is [`Hash`].
1a4d82fc
JJ
42//!
43//! ## Iteration
44//!
c34b1796
AL
45//! The slices implement `IntoIterator`. The iterator yields references to the
46//! slice elements.
1a4d82fc 47//!
c34b1796
AL
48//! ```
49//! let numbers = &[0, 1, 2];
50//! for n in numbers {
51//! println!("{} is a number!", n);
1a4d82fc
JJ
52//! }
53//! ```
54//!
c34b1796
AL
55//! The mutable slice yields mutable references to the elements:
56//!
57//! ```
58//! let mut scores = [7, 8, 9];
59//! for score in &mut scores[..] {
60//! *score += 1;
61//! }
62//! ```
63//!
62682a34
SL
64//! This iterator yields mutable references to the slice's elements, so while
65//! the element type of the slice is `i32`, the element type of the iterator is
66//! `&mut i32`.
c34b1796 67//!
cc61c64b 68//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
c34b1796 69//! iterators.
cc61c64b
XL
70//! * Further methods that return iterators are [`.split`], [`.splitn`],
71//! [`.chunks`], [`.windows`] and more.
c1a9b12d 72//!
3dfed10e 73//! [`Hash`]: core::hash::Hash
cc61c64b
XL
74//! [`.iter`]: ../../std/primitive.slice.html#method.iter
75//! [`.iter_mut`]: ../../std/primitive.slice.html#method.iter_mut
76//! [`.split`]: ../../std/primitive.slice.html#method.split
77//! [`.splitn`]: ../../std/primitive.slice.html#method.splitn
78//! [`.chunks`]: ../../std/primitive.slice.html#method.chunks
79//! [`.windows`]: ../../std/primitive.slice.html#method.windows
85aaf69f 80#![stable(feature = "rust1", since = "1.0.0")]
62682a34
SL
81// Many of the usings in this module are only used in the test configuration.
82// It's cleaner to just turn off the unused_imports warning than to fix them.
7453a54e 83#![cfg_attr(test, allow(unused_imports, dead_code))]
62682a34 84
9fa01778 85use core::borrow::{Borrow, BorrowMut};
8bb4bdeb 86use core::cmp::Ordering::{self, Less};
9fa01778 87use core::mem::{self, size_of};
1a4d82fc 88use core::ptr;
1a4d82fc 89
9fa01778
XL
90use crate::borrow::ToOwned;
91use crate::boxed::Box;
92use crate::vec::Vec;
1a4d82fc 93
1b1a35ee
XL
94#[unstable(feature = "slice_check_range", issue = "76393")]
95pub use core::slice::check_range;
3dfed10e
XL
96#[unstable(feature = "array_chunks", issue = "74985")]
97pub use core::slice::ArrayChunks;
1b1a35ee
XL
98#[unstable(feature = "array_chunks", issue = "74985")]
99pub use core::slice::ArrayChunksMut;
100#[unstable(feature = "array_windows", issue = "75027")]
101pub use core::slice::ArrayWindows;
60c5eb7d
XL
102#[stable(feature = "slice_get_slice", since = "1.28.0")]
103pub use core::slice::SliceIndex;
104#[stable(feature = "from_ref", since = "1.28.0")]
105pub use core::slice::{from_mut, from_ref};
92a42be0 106#[stable(feature = "rust1", since = "1.0.0")]
60c5eb7d 107pub use core::slice::{from_raw_parts, from_raw_parts_mut};
92a42be0 108#[stable(feature = "rust1", since = "1.0.0")]
60c5eb7d
XL
109pub use core::slice::{Chunks, Windows};
110#[stable(feature = "chunks_exact", since = "1.31.0")]
111pub use core::slice::{ChunksExact, ChunksExactMut};
92a42be0 112#[stable(feature = "rust1", since = "1.0.0")]
60c5eb7d 113pub use core::slice::{ChunksMut, Split, SplitMut};
92a42be0 114#[stable(feature = "rust1", since = "1.0.0")]
60c5eb7d
XL
115pub use core::slice::{Iter, IterMut};
116#[stable(feature = "rchunks", since = "1.31.0")]
117pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
83c7162d 118#[stable(feature = "slice_rsplit", since = "1.27.0")]
cc61c64b 119pub use core::slice::{RSplit, RSplitMut};
92a42be0 120#[stable(feature = "rust1", since = "1.0.0")]
60c5eb7d 121pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
1a4d82fc
JJ
122
123////////////////////////////////////////////////////////////////////////////////
124// Basic slice extension methods
125////////////////////////////////////////////////////////////////////////////////
126
c34b1796 127// HACK(japaric) needed for the implementation of `vec!` macro during testing
dc9dc135 128// N.B., see the `hack` module in this file for more details.
c34b1796 129#[cfg(test)]
9fa01778 130pub use hack::into_vec;
c34b1796
AL
131
132// HACK(japaric) needed for the implementation of `Vec::clone` during testing
dc9dc135 133// N.B., see the `hack` module in this file for more details.
c34b1796 134#[cfg(test)]
9fa01778 135pub use hack::to_vec;
c34b1796
AL
136
137// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
138// functions are actually methods that are in `impl [T]` but not in
139// `core::slice::SliceExt` - we need to supply these functions for the
140// `test_permutations` test
141mod hack {
9fa01778 142 use crate::boxed::Box;
60c5eb7d 143 use crate::vec::Vec;
c34b1796 144
ba9703b0
XL
145 // We shouldn't add inline attribute to this since this is used in
146 // `vec!` macro mostly and causes perf regression. See #71204 for
147 // discussion and perf results.
dc9dc135 148 pub fn into_vec<T>(b: Box<[T]>) -> Vec<T> {
c34b1796 149 unsafe {
dc9dc135
XL
150 let len = b.len();
151 let b = Box::into_raw(b);
ba9703b0 152 Vec::from_raw_parts(b as *mut T, len, len)
c34b1796
AL
153 }
154 }
155
c34b1796 156 #[inline]
92a42be0 157 pub fn to_vec<T>(s: &[T]) -> Vec<T>
60c5eb7d
XL
158 where
159 T: Clone,
92a42be0 160 {
f035d41b
XL
161 let mut vec = Vec::with_capacity(s.len());
162 vec.extend_from_slice(s);
163 vec
c34b1796 164 }
c34b1796
AL
165}
166
#[lang = "slice_alloc"]
#[cfg(not(test))]
impl<T> [T] {
    /// Sorts the slice.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and `O(n * log(n))` worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable`](#method.sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.sort();
    /// assert!(v == [-5, -3, 1, 2, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort(&mut self)
    where
        T: Ord,
    {
        merge_sort(self, |a, b| a.lt(b));
    }

    /// Sorts the slice with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and `O(n * log(n))` worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the slice. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
    ///
    /// ```
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by`](#method.sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.sort_by(|a, b| a.cmp(b));
    /// assert!(v == [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.sort_by(|a, b| b.cmp(a));
    /// assert!(v == [5, 4, 3, 2, 1]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort_by<F>(&mut self, mut compare: F)
    where
        F: FnMut(&T, &T) -> Ordering,
    {
        merge_sort(self, |a, b| compare(a, b) == Less);
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and `O(m * n * log(n))`
    /// worst-case, where the key function is `O(m)`.
    ///
    /// For expensive key functions (e.g. functions that are not simple property accesses or
    /// basic operations), [`sort_by_cached_key`](#method.sort_by_cached_key) is likely to be
    /// significantly faster, as it does not recompute element keys.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by_key`](#method.sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.sort_by_key(|k| k.abs());
    /// assert!(v == [1, 2, -3, 4, -5]);
    /// ```
    #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
    #[inline]
    pub fn sort_by_key<K, F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        merge_sort(self, |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// During sorting, the key function is called only once per element.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and `O(m * n + n * log(n))`
    /// worst-case, where the key function is `O(m)`.
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`sort_by_key`](#method.sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 32, -3, 2];
    ///
    /// v.sort_by_cached_key(|k| k.to_string());
    /// assert!(v == [-3, -5, 2, 32, 4]);
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
    #[inline]
    pub fn sort_by_cached_key<K, F>(&mut self, f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
        macro_rules! sort_by_key {
            ($t:ty, $slice:ident, $f:ident) => {{
                let mut indices: Vec<_> =
                    $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
                // The elements of `indices` are unique, as they are indexed, so any sort will be
                // stable with respect to the original slice. We use `sort_unstable` here because
                // it requires less memory allocation.
                indices.sort_unstable();
                for i in 0..$slice.len() {
                    let mut index = indices[i].1;
                    // Follow the cycle of swaps until reaching an index that has not yet
                    // been moved before position `i`, so each element lands in place once.
                    while (index as usize) < i {
                        index = indices[index as usize].1;
                    }
                    indices[i].1 = index;
                    $slice.swap(i, index as usize);
                }
            }};
        }

        // Pick the narrowest index type whose `(K, index)` pair is actually smaller,
        // so the temporary vector uses as little memory as possible.
        let sz_u8 = mem::size_of::<(K, u8)>();
        let sz_u16 = mem::size_of::<(K, u16)>();
        let sz_u32 = mem::size_of::<(K, u32)>();
        let sz_usize = mem::size_of::<(K, usize)>();

        let len = self.len();
        if len < 2 {
            return;
        }
        if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
            return sort_by_key!(u8, self, f);
        }
        if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
            return sort_by_key!(u16, self, f);
        }
        if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
            return sort_by_key!(u32, self, f);
        }
        sort_by_key!(usize, self, f)
    }

    /// Copies `self` into a new `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = [10, 40, 30];
    /// let x = s.to_vec();
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[rustc_conversion_suggestion]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        // N.B., see the `hack` module in this file for more details.
        hack::to_vec(self)
    }

    /// Converts `self` into a vector without clones or allocation.
    ///
    /// The resulting vector can be converted back into a box via
    /// `Vec<T>`'s `into_boxed_slice` method.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: Box<[i32]> = Box::new([10, 40, 30]);
    /// let x = s.into_vec();
    /// // `s` cannot be used anymore because it has been converted into `x`.
    ///
    /// assert_eq!(x, vec![10, 40, 30]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn into_vec(self: Box<Self>) -> Vec<T> {
        // N.B., see the `hack` module in this file for more details.
        hack::into_vec(self)
    }

    /// Creates a vector by repeating a slice `n` times.
    ///
    /// # Panics
    ///
    /// This function will panic if the capacity would overflow.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
    /// ```
    ///
    /// A panic upon overflow:
    ///
    /// ```should_panic
    /// // this will panic at runtime
    /// b"0123456789abcdef".repeat(usize::MAX);
    /// ```
    #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
    pub fn repeat(&self, n: usize) -> Vec<T>
    where
        T: Copy,
    {
        if n == 0 {
            return Vec::new();
        }

        // If `n` is larger than zero, it can be split as
        // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
        // `2^expn` is the number represented by the leftmost '1' bit of `n`,
        // and `rem` is the remaining part of `n`.

        // Using `Vec` to access `set_len()`.
        let capacity = self.len().checked_mul(n).expect("capacity overflow");
        let mut buf = Vec::with_capacity(capacity);

        // `2^expn` repetition is done by doubling `buf` `expn`-times.
        buf.extend(self);
        {
            let mut m = n >> 1;
            // If `m > 0`, there are remaining bits up to the leftmost '1'.
            while m > 0 {
                // `buf.extend(buf)`:
                // SAFETY: the doubling never exceeds `capacity` (checked above via
                // `checked_mul`), so the destination range is within the allocation,
                // and `T: Copy` makes the raw byte copy produce valid elements.
                unsafe {
                    ptr::copy_nonoverlapping(
                        buf.as_ptr(),
                        (buf.as_mut_ptr() as *mut T).add(buf.len()),
                        buf.len(),
                    );
                    // `buf` has capacity of `self.len() * n`.
                    let buf_len = buf.len();
                    buf.set_len(buf_len * 2);
                }

                m >>= 1;
            }
        }

        // `rem` (`= n - 2^expn`) repetition is done by copying
        // first `rem` repetitions from `buf` itself.
        let rem_len = capacity - buf.len(); // `self.len() * rem`
        if rem_len > 0 {
            // `buf.extend(buf[0 .. rem_len])`:
            // SAFETY: `rem_len` elements fit in the remaining spare capacity by
            // construction (`capacity - buf.len()`), and the source range is fully
            // initialized.
            unsafe {
                // This is non-overlapping since `2^expn > rem`.
                ptr::copy_nonoverlapping(
                    buf.as_ptr(),
                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
                    rem_len,
                );
                // `buf.len() + rem_len` equals to `buf.capacity()` (`= self.len() * n`).
                buf.set_len(capacity);
            }
        }
        buf
    }

    /// Flattens a slice of `T` into a single value `Self::Output`.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].concat(), "helloworld");
    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
    where
        Self: Concat<Item>,
    {
        Concat::concat(self)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].join(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
    /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
    /// ```
    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
    pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// assert_eq!(["hello", "world"].connect(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_deprecated(since = "1.3.0", reason = "renamed to join")]
    pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }
}
1a4d82fc 553
94b46f34 554#[lang = "slice_u8_alloc"]
ff7c6d11 555#[cfg(not(test))]
abe05a73 556impl [u8] {
abe05a73
XL
557 /// Returns a vector containing a copy of this slice where each byte
558 /// is mapped to its ASCII upper case equivalent.
559 ///
560 /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
561 /// but non-ASCII letters are unchanged.
562 ///
563 /// To uppercase the value in-place, use [`make_ascii_uppercase`].
564 ///
3dfed10e 565 /// [`make_ascii_uppercase`]: u8::make_ascii_uppercase
ff7c6d11 566 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
abe05a73
XL
567 #[inline]
568 pub fn to_ascii_uppercase(&self) -> Vec<u8> {
569 let mut me = self.to_vec();
570 me.make_ascii_uppercase();
571 me
572 }
573
574 /// Returns a vector containing a copy of this slice where each byte
575 /// is mapped to its ASCII lower case equivalent.
576 ///
577 /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
578 /// but non-ASCII letters are unchanged.
579 ///
580 /// To lowercase the value in-place, use [`make_ascii_lowercase`].
581 ///
3dfed10e 582 /// [`make_ascii_lowercase`]: u8::make_ascii_lowercase
ff7c6d11 583 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
abe05a73
XL
584 #[inline]
585 pub fn to_ascii_lowercase(&self) -> Vec<u8> {
586 let mut me = self.to_vec();
587 me.make_ascii_lowercase();
588 me
589 }
abe05a73
XL
590}
591
1a4d82fc
JJ
592////////////////////////////////////////////////////////////////////////////////
593// Extension traits for slices over specific kinds of data
594////////////////////////////////////////////////////////////////////////////////
416331ca
XL
595
/// Helper trait for [`[T]::concat`](../../std/primitive.slice.html#method.concat).
///
/// Note: the `Item` type parameter is not used in this trait,
/// but it allows impls to be more generic.
/// Without it, we get this error:
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
///  --> src/liballoc/slice.rs:608:6
///   |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
///   |      ^ unconstrained type parameter
/// ```
///
/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
/// such that multiple `T` types would apply:
///
/// ```
/// # #[allow(dead_code)]
/// pub struct Foo(Vec<u32>, Vec<String>);
///
/// impl std::borrow::Borrow<[u32]> for Foo {
///     fn borrow(&self) -> &[u32] { &self.0 }
/// }
///
/// impl std::borrow::Borrow<[String]> for Foo {
///     fn borrow(&self) -> &[String] { &self.1 }
/// }
/// ```
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Concat<Item: ?Sized> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::concat`](../../std/primitive.slice.html#method.concat)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn concat(slice: &Self) -> Self::Output;
}
1a4d82fc 635
416331ca
XL
/// Helper trait for [`[T]::join`](../../std/primitive.slice.html#method.join)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Join<Separator> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::join`](../../std/primitive.slice.html#method.join)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn join(slice: &Self, sep: Separator) -> Self::Output;
}
647
416331ca
XL
648#[unstable(feature = "slice_concat_ext", issue = "27747")]
649impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
d9579d0f
AL
650 type Output = Vec<T>;
651
416331ca
XL
652 fn concat(slice: &Self) -> Vec<T> {
653 let size = slice.iter().map(|slice| slice.borrow().len()).sum();
1a4d82fc 654 let mut result = Vec::with_capacity(size);
416331ca 655 for v in slice {
92a42be0 656 result.extend_from_slice(v.borrow())
1a4d82fc
JJ
657 }
658 result
659 }
416331ca 660}
1a4d82fc 661
416331ca
XL
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &T) -> Vec<T> {
        let mut iter = slice.iter();
        // An empty outer slice joins to an empty vector.
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        // One clone of `sep` goes between each pair of inner slices,
        // hence `slice.len() - 1` separators in total (slice is non-empty here).
        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.push(sep.clone());
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
683
416331ca
XL
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &[T]) -> Vec<T> {
        let mut iter = slice.iter();
        // An empty outer slice joins to an empty vector.
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        // `sep.len()` elements are inserted between each pair of inner slices,
        // hence `sep.len() * (slice.len() - 1)` extra elements (slice is non-empty here).
        let size =
            slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.extend_from_slice(sep);
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
706
1a4d82fc
JJ
707////////////////////////////////////////////////////////////////////////////////
708// Standard trait implementations for slices
709////////////////////////////////////////////////////////////////////////////////
710
85aaf69f
SL
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Borrow<[T]> for Vec<T> {
    // A `Vec<T>` can be cheaply borrowed as a shared slice of its elements.
    fn borrow(&self) -> &[T] {
        &self[..]
    }
}
717
85aaf69f
SL
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> BorrowMut<[T]> for Vec<T> {
    // A `Vec<T>` can be cheaply borrowed as a mutable slice of its elements.
    fn borrow_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}
724
85aaf69f
SL
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
    type Owned = Vec<T>;
    // Under cfg(test) the inherent `[T]::to_vec` is unavailable (see the
    // `hack` module above), so the two cfg variants route to the same helper.
    #[cfg(not(test))]
    fn to_owned(&self) -> Vec<T> {
        self.to_vec()
    }

    #[cfg(test)]
    fn to_owned(&self) -> Vec<T> {
        hack::to_vec(self)
    }

    fn clone_into(&self, target: &mut Vec<T>) {
        // drop anything in target that will not be overwritten
        target.truncate(self.len());

        // target.len <= self.len due to the truncate above, so the
        // slices here are always in-bounds.
        let (init, tail) = self.split_at(target.len());

        // reuse the contained values' allocations/resources.
        target.clone_from_slice(init);
        target.extend_from_slice(tail);
    }
}
751
1a4d82fc
JJ
752////////////////////////////////////////////////////////////////////////////////
753// Sorting
754////////////////////////////////////////////////////////////////////////////////
755
476ff2be
SL
/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Nothing to do unless there are at least two elements and the head is out of order.
    if v.len() >= 2 && is_less(&v[1], &v[0]) {
        unsafe {
            // There are three ways to implement insertion here:
            //
            // 1. Swap adjacent elements until the first one gets to its final destination.
            //    However, this way we copy data around more than is necessary. If elements are big
            //    structures (costly to copy), this method will be slow.
            //
            // 2. Iterate until the right place for the first element is found. Then shift the
            //    elements succeeding it to make room for it and finally place it into the
            //    remaining hole. This is a good method.
            //
            // 3. Copy the first element into a temporary variable. Iterate until the right place
            //    for it is found. As we go along, copy every traversed element into the slot
            //    preceding it. Finally, copy data from the temporary variable into the remaining
            //    hole. This method is very good. Benchmarks demonstrated slightly better
            //    performance than with the 2nd method.
            //
            // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
            let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));

            // Intermediate state of the insertion process is always tracked by `hole`, which
            // serves two purposes:
            // 1. Protects integrity of `v` from panics in `is_less`.
            // 2. Fills the remaining hole in `v` in the end.
            //
            // Panic safety:
            //
            // If `is_less` panics at any point during the process, `hole` will get dropped and
            // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
            // initially held exactly once.
            let mut hole = InsertionHole { src: &mut *tmp, dest: &mut v[1] };
            ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);

            for i in 2..v.len() {
                if !is_less(&v[i], &*tmp) {
                    break;
                }
                // Shift the traversed element one slot to the left; the hole moves right.
                ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
                hole.dest = &mut v[i];
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }

    // When dropped, copies from `src` into `dest`.
    struct InsertionHole<T> {
        src: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for InsertionHole<T> {
        fn drop(&mut self) {
            unsafe {
                ptr::copy_nonoverlapping(self.src, self.dest, 1);
            }
        }
    }
}
822
476ff2be
SL
/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    let v = v.as_mut_ptr();
    // SAFETY: the caller guarantees `mid` is in bounds, so both derived pointers stay
    // within (or one past the end of) the slice's allocation.
    let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };

    // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
    // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
    // copying the lesser (or greater) one into `v`.
    //
    // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
    // consumed first, then we must copy whatever is left of the shorter run into the remaining
    // hole in `v`.
    //
    // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
    // 1. Protects integrity of `v` from panics in `is_less`.
    // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
    //
    // Panic safety:
    //
    // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
    // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
    // object it initially held exactly once.
    let mut hole;

    if mid <= len - mid {
        // The left run is shorter.
        // SAFETY: `buf` is large enough to hold the shorter run (caller contract), and the
        // copied elements are shallow duplicates tracked by `hole` until consumed.
        unsafe {
            ptr::copy_nonoverlapping(v, buf, mid);
            hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
        }

        // Initially, these pointers point to the beginnings of their arrays.
        let left = &mut hole.start;
        let mut right = v_mid;
        let out = &mut hole.dest;

        while *left < hole.end && right < v_end {
            // Consume the lesser side.
            // If equal, prefer the left run to maintain stability.
            // SAFETY: both pointers are in bounds (checked by the loop condition), and `out`
            // always trails the unconsumed inputs, so the 1-element copy cannot overlap.
            unsafe {
                let to_copy = if is_less(&*right, &**left) {
                    get_and_increment(&mut right)
                } else {
                    get_and_increment(left)
                };
                ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
            }
        }
    } else {
        // The right run is shorter.
        // SAFETY: `buf` is large enough to hold the shorter run (caller contract), and the
        // copied elements are shallow duplicates tracked by `hole` until consumed.
        unsafe {
            ptr::copy_nonoverlapping(v_mid, buf, len - mid);
            hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
        }

        // Initially, these pointers point past the ends of their arrays.
        let left = &mut hole.dest;
        let right = &mut hole.end;
        let mut out = v_end;

        while v < *left && buf < *right {
            // Consume the greater side.
            // If equal, prefer the right run to maintain stability.
            // SAFETY: the loop condition guarantees there is at least one unconsumed element
            // on each side, so the `offset(-1)` reads stay in bounds; `out` trails backwards
            // ahead of the unconsumed inputs, so the copy cannot overlap.
            unsafe {
                let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
                    decrement_and_get(left)
                } else {
                    decrement_and_get(right)
                };
                ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
            }
        }
    }
    // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
    // it will now be copied into the hole in `v`.

    // Returns the current value of `ptr`, then advances it by one element.
    unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
        let old = *ptr;
        // SAFETY: the caller guarantees stepping forward stays within the allocation.
        *ptr = unsafe { ptr.offset(1) };
        old
    }

    // Moves `ptr` back by one element, then returns the new value.
    unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
        // SAFETY: the caller guarantees stepping backward stays within the allocation.
        *ptr = unsafe { ptr.offset(-1) };
        *ptr
    }

    // When dropped, copies the range `start..end` into `dest..`.
    struct MergeHole<T> {
        start: *mut T,
        end: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for MergeHole<T> {
        fn drop(&mut self) {
            // `T` is not a zero-sized type, so it's okay to divide by its size.
            let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
            // SAFETY: `start..end` tracks exactly the still-unconsumed shallow copies in `buf`,
            // and `dest..` is the matching hole of the same length inside `v`.
            unsafe {
                ptr::copy_nonoverlapping(self.start, self.dest, len);
            }
        }
    }
}
1a4d82fc 937
476ff2be
SL
/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is `O(n * log(n))` worst-case.
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;
    // Very short runs are extended using insertion sort to span at least this many elements.
    const MIN_RUN: usize = 10;

    // Sorting has no meaningful behavior on zero-sized types.
    if size_of::<T>() == 0 {
        return;
    }

    let len = v.len();

    // Short arrays get sorted in-place via insertion sort to avoid allocations.
    if len <= MAX_INSERTION {
        if len >= 2 {
            // Insert elements right-to-left: each `insert_head` call sifts `v[i]` into the
            // already-sorted suffix `v[i + 1..]`.
            for i in (0..len - 1).rev() {
                insert_head(&mut v[i..], &mut is_less);
            }
        }
        return;
    }

    // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
    // shallow copies of the contents of `v` without risking the dtors running on copies if
    // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
    // which will always have length at most `len / 2`.
    let mut buf = Vec::with_capacity(len / 2);

    // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
    // strange decision, but consider the fact that merges more often go in the opposite direction
    // (forwards). According to benchmarks, merging forwards is slightly faster than merging
    // backwards. To conclude, identifying runs by traversing backwards improves performance.
    // Note: because of the backwards traversal, runs are pushed right-to-left, so a higher
    // index in `runs` corresponds to a run further to the left in `v`.
    let mut runs = vec![];
    let mut end = len;
    while end > 0 {
        // Find the next natural run, and reverse it if it's strictly descending.
        let mut start = end - 1;
        if start > 0 {
            start -= 1;
            // SAFETY: `start` and `start + 1` are both below `end <= len` throughout, and the
            // inner loops only decrement `start` while it stays positive.
            unsafe {
                if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
                    // Strictly descending run: extend it, then reverse to make it ascending.
                    // Strictness matters for stability — equal elements must not be reversed.
                    while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
                        start -= 1;
                    }
                    v[start..end].reverse();
                } else {
                    // Non-descending run: just extend it leftwards.
                    while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
                    {
                        start -= 1;
                    }
                }
            }
        }

        // Insert some more elements into the run if it's too short. Insertion sort is faster than
        // merge sort on short sequences, so this significantly improves performance.
        while start > 0 && end - start < MIN_RUN {
            start -= 1;
            insert_head(&mut v[start..end], &mut is_less);
        }

        // Push this run onto the stack.
        runs.push(Run { start, len: end - start });
        end = start;

        // Merge some pairs of adjacent runs to satisfy the invariants.
        while let Some(r) = collapse(&runs) {
            // `runs[r + 1]` lies immediately to the left of `runs[r]` in `v`
            // (left.start + left.len == right.start), so the pair forms one contiguous range.
            let left = runs[r + 1];
            let right = runs[r];
            // SAFETY: both halves of the merged range are non-empty runs within `v`, `left.len`
            // is in bounds, `buf` has capacity `len / 2` >= the shorter run, and `T` is not
            // zero-sized (checked at the top of this function).
            unsafe {
                merge(
                    &mut v[left.start..right.start + right.len],
                    left.len,
                    buf.as_mut_ptr(),
                    &mut is_less,
                );
            }
            runs[r] = Run { start: left.start, len: left.len + right.len };
            runs.remove(r + 1);
        }
    }

    // Finally, exactly one run must remain in the stack.
    debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);

    // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
    // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
    // algorithm should continue building a new run instead, `None` is returned.
    //
    // TimSort is infamous for its buggy implementations, as described here:
    // http://envisage-project.eu/timsort-specification-and-verification/
    //
    // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
    // Enforcing them on just top three is not sufficient to ensure that the invariants will still
    // hold for *all* runs in the stack.
    //
    // This function correctly checks invariants for the top four runs. Additionally, if the top
    // run starts at index 0, it will always demand a merge operation until the stack is fully
    // collapsed, in order to complete the sort.
    #[inline]
    fn collapse(runs: &[Run]) -> Option<usize> {
        let n = runs.len();
        if n >= 2
            && (runs[n - 1].start == 0
                || runs[n - 2].len <= runs[n - 1].len
                || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
                || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
        {
            // Merge the pair whose combined result is smaller, to keep run lengths balanced.
            if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
        } else {
            None
        }
    }

    // A half-open range `start..start + len` describing one sorted run inside `v`.
    #[derive(Clone, Copy)]
    struct Run {
        start: usize,
        len: usize,
    }
}