//! A dynamically-sized view into a contiguous sequence, `[T]`.
//!
//! *[See also the slice primitive type](slice).*
//!
//! Slices are a view into a block of memory represented as a pointer and a
//! length.
//!
//! ```
//! // slicing a Vec
//! let vec = vec![1, 2, 3];
//! let int_slice = &vec[..];
//! // coercing an array to a slice
//! let str_slice: &[&str] = &["one", "two", "three"];
//! ```
//!
//! Slices are either mutable or shared. The shared slice type is `&[T]`,
//! while the mutable slice type is `&mut [T]`, where `T` represents the element
//! type. For example, you can mutate the block of memory that a mutable slice
//! points to:
//!
//! ```
//! let x = &mut [1, 2, 3];
//! x[1] = 7;
//! assert_eq!(x, &[1, 7, 3]);
//! ```
//!
//! Here are some of the things this module contains:
//!
//! ## Structs
//!
//! There are several structs that are useful for slices, such as [`Iter`], which
//! represents iteration over a slice.
//!
//! ## Trait Implementations
//!
//! There are several implementations of common traits for slices. Some examples
//! include:
//!
//! * [`Clone`]
//! * [`Eq`], [`Ord`] - for slices whose element type is [`Eq`] or [`Ord`].
//! * [`Hash`] - for slices whose element type is [`Hash`].
//!
//! ## Iteration
//!
//! The slices implement `IntoIterator`. The iterator yields references to the
//! slice elements.
//!
//! ```
//! let numbers = &[0, 1, 2];
//! for n in numbers {
//!     println!("{} is a number!", n);
//! }
//! ```
//!
//! The mutable slice yields mutable references to the elements:
//!
//! ```
//! let mut scores = [7, 8, 9];
//! for score in &mut scores[..] {
//!     *score += 1;
//! }
//! ```
//!
//! This iterator yields mutable references to the slice's elements, so while
//! the element type of the slice is `i32`, the element type of the iterator is
//! `&mut i32`.
//!
//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
//!   iterators.
//! * Further methods that return iterators are [`.split`], [`.splitn`],
//!   [`.chunks`], [`.windows`] and more (see the sketch below).
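//!
//! As an illustrative sketch of two of these iterator adapters:
//!
//! ```
//! let v = [1, 2, 3, 4, 5];
//! // overlapping windows of length 2: [1, 2], [2, 3], [3, 4], [4, 5]
//! assert_eq!(v.windows(2).count(), 4);
//! // non-overlapping chunks of length 2: [1, 2], [3, 4], [5]
//! assert_eq!(v.chunks(2).count(), 3);
//! ```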
//!
//! [`Hash`]: core::hash::Hash
//! [`.iter`]: slice::iter
//! [`.iter_mut`]: slice::iter_mut
//! [`.split`]: slice::split
//! [`.splitn`]: slice::splitn
//! [`.chunks`]: slice::chunks
//! [`.windows`]: slice::windows
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the `use` declarations in this module are only used in the test
// configuration. It's cleaner to just turn off the unused_imports warning
// than to fix them.
#![cfg_attr(test, allow(unused_imports, dead_code))]
use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
use core::mem;
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of;
#[cfg(not(no_global_oom_handling))]
use core::ptr;

use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::Global;
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;
#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunks;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunksMut;
#[unstable(feature = "array_windows", issue = "75027")]
pub use core::slice::ArrayWindows;
#[unstable(feature = "inherent_ascii_escape", issue = "77174")]
pub use core::slice::EscapeAscii;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use core::slice::SliceIndex;
#[stable(feature = "from_ref", since = "1.28.0")]
pub use core::slice::{from_mut, from_ref};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Chunks, Windows};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use core::slice::{ChunksExact, ChunksExactMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{ChunksMut, Split, SplitMut};
#[unstable(feature = "slice_group_by", issue = "80552")]
pub use core::slice::{GroupBy, GroupByMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Iter, IterMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use core::slice::{RSplit, RSplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use core::slice::{SplitInclusive, SplitInclusiveMut};
////////////////////////////////////////////////////////////////////////////////
// Basic slice extension methods
////////////////////////////////////////////////////////////////////////////////

// HACK(japaric) needed for the implementation of `vec!` macro during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::into_vec;

// HACK(japaric) needed for the implementation of `Vec::clone` during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::to_vec;

// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
// functions are actually methods that are in `impl [T]` but not in
// `core::slice::SliceExt` - we need to supply these functions for the
// `test_permutations` test
pub(crate) mod hack {
    use core::alloc::Allocator;

    use crate::boxed::Box;
    use crate::vec::Vec;

    // We shouldn't add an inline attribute to this function: it is mostly used
    // via the `vec!` macro, and inlining it there causes a perf regression.
    // See #71204 for discussion and perf results.
    pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
        unsafe {
            let len = b.len();
            let (b, alloc) = Box::into_raw_with_allocator(b);
            Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
        }
    }
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
        T::to_vec(s, alloc)
    }
    #[cfg(not(no_global_oom_handling))]
    pub trait ConvertVec {
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
        where
            Self: Sized;
    }
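
    // `to_vec` dispatches through this trait so its implementation can be
    // specialized: the default `T: Clone` impl below clones element by element
    // behind a drop guard for panic safety, while the `T: Copy` impl replaces
    // that loop with a single `copy_to_nonoverlapping` (a raw memcpy).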
    #[cfg(not(no_global_oom_handling))]
    impl<T: Clone> ConvertVec for T {
        #[inline]
        default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            struct DropGuard<'a, T, A: Allocator> {
                vec: &'a mut Vec<T, A>,
                num_init: usize,
            }
            impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
                #[inline]
                fn drop(&mut self) {
                    // SAFETY:
                    // items were marked initialized in the loop below
                    unsafe {
                        self.vec.set_len(self.num_init);
                    }
                }
            }
            let mut vec = Vec::with_capacity_in(s.len(), alloc);
            let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
            let slots = guard.vec.spare_capacity_mut();
            // .take(slots.len()) is necessary for LLVM to remove bounds checks
            // and has better codegen than zip.
            for (i, b) in s.iter().enumerate().take(slots.len()) {
                guard.num_init = i;
                slots[i].write(b.clone());
            }
            core::mem::forget(guard);
            // SAFETY:
            // the vec was allocated and initialized above to at least this length.
            unsafe {
                vec.set_len(s.len());
            }
            vec
        }
    }
    #[cfg(not(no_global_oom_handling))]
    impl<T: Copy> ConvertVec for T {
        #[inline]
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            let mut v = Vec::with_capacity_in(s.len(), alloc);
            // SAFETY:
            // allocated above with the capacity of `s`, and initialized to `s.len()` by
            // `copy_to_nonoverlapping` below.
            unsafe {
                s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
                v.set_len(s.len());
            }
            v
        }
    }
}
#[lang = "slice_alloc"]
impl<T> [T] {
    /// Sorts the slice.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable`](slice::sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.sort();
    /// assert!(v == [-5, -3, 1, 2, 4]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort(&mut self)
    where
        T: Ord,
    {
        merge_sort(self, |a, b| a.lt(b));
    }
    /// Sorts the slice with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the slice. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
    ///
    /// ```
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by`](slice::sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.sort_by(|a, b| a.cmp(b));
    /// assert!(v == [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.sort_by(|a, b| b.cmp(a));
    /// assert!(v == [5, 4, 3, 2, 1]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort_by<F>(&mut self, mut compare: F)
    where
        F: FnMut(&T, &T) -> Ordering,
    {
        merge_sort(self, |a, b| compare(a, b) == Less);
    }
    /// Sorts the slice with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For expensive key functions (e.g. functions that are not simple property accesses or
    /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
    /// significantly faster, as it does not recompute element keys.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.sort_by_key(|k| k.abs());
    /// assert!(v == [1, 2, -3, 4, -5]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
    #[inline]
    pub fn sort_by_key<K, F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        merge_sort(self, |a, b| f(a).lt(&f(b)));
    }
    /// Sorts the slice with a key extraction function.
    ///
    /// During sorting, the key function is called only once per element.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 32, -3, 2];
    ///
    /// v.sort_by_cached_key(|k| k.to_string());
    /// assert!(v == [-3, -5, 2, 32, 4]);
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
    #[inline]
    pub fn sort_by_cached_key<K, F>(&mut self, f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
        macro_rules! sort_by_key {
            ($t:ty, $slice:ident, $f:ident) => {{
                let mut indices: Vec<_> =
                    $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
                // The elements of `indices` are unique, as they are indexed, so any sort will be
                // stable with respect to the original slice. We use `sort_unstable` here because
                // it requires less memory allocation.
                indices.sort_unstable();
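                // After the sort, `indices[i].1` is the original position of the
                // element that belongs at rank `i`. The loop below applies that
                // permutation in place: if the recorded position is `< i`, that slot
                // was already processed and its element has been swapped away, so we
                // follow the updated entries (each records where its element went)
                // until we reach the element's current slot, then swap it into place.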
                for i in 0..$slice.len() {
                    let mut index = indices[i].1;
                    while (index as usize) < i {
                        index = indices[index as usize].1;
                    }
                    indices[i].1 = index;
                    $slice.swap(i, index as usize);
                }
            }};
        }
        let sz_u8 = mem::size_of::<(K, u8)>();
        let sz_u16 = mem::size_of::<(K, u16)>();
        let sz_u32 = mem::size_of::<(K, u32)>();
        let sz_usize = mem::size_of::<(K, usize)>();

        let len = self.len();
        if len < 2 {
            return;
        }
        if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
            return sort_by_key!(u8, self, f);
        }
        if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
            return sort_by_key!(u16, self, f);
        }
        if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
            return sort_by_key!(u32, self, f);
        }
        sort_by_key!(usize, self, f)
    }
    /// Copies `self` into a new `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = [10, 40, 30];
    /// let x = s.to_vec();
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_conversion_suggestion]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        self.to_vec_in(Global)
    }
    /// Copies `self` into a new `Vec` with an allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    ///
    /// let s = [10, 40, 30];
    /// let x = s.to_vec_in(System);
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
    where
        T: Clone,
    {
        // N.B., see the `hack` module in this file for more details.
        hack::to_vec(self, alloc)
    }
    /// Converts `self` into a vector without clones or allocation.
    ///
    /// The resulting vector can be converted back into a box via
    /// `Vec<T>`'s `into_boxed_slice` method.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: Box<[i32]> = Box::new([10, 40, 30]);
    /// let x = s.into_vec();
    /// // `s` cannot be used anymore because it has been converted into `x`.
    ///
    /// assert_eq!(x, vec![10, 40, 30]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
        // N.B., see the `hack` module in this file for more details.
        hack::into_vec(self)
    }
    /// Creates a vector by repeating a slice `n` times.
    ///
    /// # Panics
    ///
    /// This function will panic if the capacity would overflow.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
    /// ```
    ///
    /// A panic upon overflow:
    ///
    /// ```should_panic
    /// // this will panic at runtime
    /// b"0123456789abcdef".repeat(usize::MAX);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
    pub fn repeat(&self, n: usize) -> Vec<T>
    where
        T: Copy,
    {
        if n == 0 {
            return Vec::new();
        }

        // If `n` is larger than zero, it can be split as
        // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
        // `2^expn` is the number represented by the leftmost '1' bit of `n`,
        // and `rem` is the remaining part of `n`.
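        //
        // For example, with `n = 11` the split is `11 = 2^3 + 3`: the doubling loop
        // below builds up 8 repetitions (1 -> 2 -> 4 -> 8), and the remaining 3
        // repetitions are then copied from the front of `buf` itself.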
        // Using `Vec` to access `set_len()`.
        let capacity = self.len().checked_mul(n).expect("capacity overflow");
        let mut buf = Vec::with_capacity(capacity);

        // `2^expn` repetition is done by doubling `buf` `expn`-times.
        buf.extend(self);
        {
            let mut m = n >> 1;
            // If `m > 0`, there are remaining bits up to the leftmost '1'.
            while m > 0 {
                // `buf.extend(buf)`:
                unsafe {
                    ptr::copy_nonoverlapping(
                        buf.as_ptr(),
                        (buf.as_mut_ptr() as *mut T).add(buf.len()),
                        buf.len(),
                    );
                    // `buf` has capacity of `self.len() * n`.
                    let buf_len = buf.len();
                    buf.set_len(buf_len * 2);
                }
                m >>= 1;
            }
        }

        // `rem` (`= n - 2^expn`) repetition is done by copying
        // first `rem` repetitions from `buf` itself.
        let rem_len = capacity - buf.len(); // `self.len() * rem`
        if rem_len > 0 {
            // `buf.extend(buf[0 .. rem_len])`:
            unsafe {
                // This is non-overlapping since `2^expn > rem`.
                ptr::copy_nonoverlapping(
                    buf.as_ptr(),
                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
                    rem_len,
                );
                // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
                buf.set_len(capacity);
            }
        }
        buf
    }
    /// Flattens a slice of `T` into a single value `Self::Output`.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].concat(), "helloworld");
    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
    where
        Self: Concat<Item>,
    {
        Concat::concat(self)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].join(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
    /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
    /// ```
    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
    pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }
    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// assert_eq!(["hello", "world"].connect(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_deprecated(since = "1.3.0", reason = "renamed to join")]
    pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }
}
#[lang = "slice_u8_alloc"]
impl [u8] {
    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII upper case equivalent.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To uppercase the value in-place, use [`make_ascii_uppercase`].
    ///
    /// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
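    ///
    /// # Examples
    ///
    /// A simple sketch of the mapping:
    ///
    /// ```
    /// assert_eq!(b"hello, world!".to_ascii_uppercase(), b"HELLO, WORLD!");
    /// ```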
    #[cfg(not(no_global_oom_handling))]
    #[must_use = "this returns the uppercase bytes as a new Vec, \
                  without modifying the original"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_uppercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_uppercase();
        me
    }
    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII lower case equivalent.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To lowercase the value in-place, use [`make_ascii_lowercase`].
    ///
    /// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
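    ///
    /// # Examples
    ///
    /// A simple sketch of the mapping:
    ///
    /// ```
    /// assert_eq!(b"HELLO, WORLD!".to_ascii_lowercase(), b"hello, world!");
    /// ```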
    #[cfg(not(no_global_oom_handling))]
    #[must_use = "this returns the lowercase bytes as a new Vec, \
                  without modifying the original"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_lowercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_lowercase();
        me
    }
}
////////////////////////////////////////////////////////////////////////////////
// Extension traits for slices over specific kinds of data
////////////////////////////////////////////////////////////////////////////////

/// Helper trait for [`[T]::concat`](slice::concat).
///
/// Note: the `Item` type parameter is not used in this trait,
/// but it allows impls to be more generic.
/// Without it, we get this error:
///
/// ```text
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
///    --> src/liballoc/slice.rs:608:6
///     |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
///     |      ^ unconstrained type parameter
/// ```
///
/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
/// such that multiple `T` types would apply:
///
/// ```
/// # #[allow(dead_code)]
/// pub struct Foo(Vec<u32>, Vec<String>);
///
/// impl std::borrow::Borrow<[u32]> for Foo {
///     fn borrow(&self) -> &[u32] { &self.0 }
/// }
///
/// impl std::borrow::Borrow<[String]> for Foo {
///     fn borrow(&self) -> &[String] { &self.1 }
/// }
/// ```
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Concat<Item: ?Sized> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::concat`](slice::concat)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn concat(slice: &Self) -> Self::Output;
}
/// Helper trait for [`[T]::join`](slice::join)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Join<Separator> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::join`](slice::join)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn join(slice: &Self, sep: Separator) -> Self::Output;
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
    type Output = Vec<T>;

    fn concat(slice: &Self) -> Vec<T> {
        let size = slice.iter().map(|slice| slice.borrow().len()).sum();
        let mut result = Vec::with_capacity(size);
        for v in slice {
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &T) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.push(sep.clone());
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &[T]) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size =
            slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.extend_from_slice(sep);
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
////////////////////////////////////////////////////////////////////////////////
// Standard trait implementations for slices
////////////////////////////////////////////////////////////////////////////////

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Borrow<[T]> for Vec<T> {
    fn borrow(&self) -> &[T] {
        &self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> BorrowMut<[T]> for Vec<T> {
    fn borrow_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
    type Owned = Vec<T>;

    #[cfg(not(test))]
    fn to_owned(&self) -> Vec<T> {
        self.to_vec()
    }

    #[cfg(test)]
    fn to_owned(&self) -> Vec<T> {
        hack::to_vec(self, Global)
    }

    fn clone_into(&self, target: &mut Vec<T>) {
        // drop anything in target that will not be overwritten
        target.truncate(self.len());

        // target.len <= self.len due to the truncate above, so the
        // slices here are always in-bounds.
        let (init, tail) = self.split_at(target.len());

        // reuse the contained values' allocations/resources.
        target.clone_from_slice(init);
        target.extend_from_slice(tail);
    }
}
////////////////////////////////////////////////////////////////////////////////
// Sorting
////////////////////////////////////////////////////////////////////////////////
/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
#[cfg(not(no_global_oom_handling))]
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    if v.len() >= 2 && is_less(&v[1], &v[0]) {
        unsafe {
            // There are three ways to implement insertion here:
            //
            // 1. Swap adjacent elements until the first one gets to its final destination.
            //    However, this way we copy data around more than is necessary. If elements are big
            //    structures (costly to copy), this method will be slow.
            //
            // 2. Iterate until the right place for the first element is found. Then shift the
            //    elements succeeding it to make room for it and finally place it into the
            //    remaining hole. This is a good method.
            //
            // 3. Copy the first element into a temporary variable. Iterate until the right place
            //    for it is found. As we go along, copy every traversed element into the slot
            //    preceding it. Finally, copy data from the temporary variable into the remaining
            //    hole. This method is very good. Benchmarks demonstrated slightly better
            //    performance than with the 2nd method.
            //
            // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
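            //
            // An illustrative trace of the 3rd method on `v = [3, 1, 2]`: `tmp = 3`;
            // `1` is copied into slot 0 and `2` into slot 1 as they are traversed;
            // the loop then ends, and dropping `hole` writes `tmp` into the last
            // slot, yielding `[1, 2, 3]`.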
            let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));

            // Intermediate state of the insertion process is always tracked by `hole`, which
            // serves two purposes:
            // 1. Protects integrity of `v` from panics in `is_less`.
            // 2. Fills the remaining hole in `v` in the end.
            //
            // Panic safety:
            //
            // If `is_less` panics at any point during the process, `hole` will get dropped and
            // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
            // initially held exactly once.
            let mut hole = InsertionHole { src: &mut *tmp, dest: &mut v[1] };
            ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
            for i in 2..v.len() {
                if !is_less(&v[i], &*tmp) {
                    break;
                }

                // Move `i`-th element one place to the left, thus shifting the hole to the right.
                ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
                hole.dest = &mut v[i];
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
    // When dropped, copies from `src` into `dest`.
    struct InsertionHole<T> {
        src: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for InsertionHole<T> {
        fn drop(&mut self) {
            unsafe {
                ptr::copy_nonoverlapping(self.src, self.dest, 1);
            }
        }
    }
}
/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
#[cfg(not(no_global_oom_handling))]
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    let v = v.as_mut_ptr();
    let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };

    // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
    // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
    // copying the lesser (or greater) one into `v`.
    //
    // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
    // consumed first, then we must copy whatever is left of the shorter run into the remaining
    // hole in `v`.
    //
    // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
    // 1. Protects integrity of `v` from panics in `is_less`.
    // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
    //
    // Panic safety:
    //
    // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
    // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
    // object it initially held exactly once.
    let mut hole;
    if mid <= len - mid {
        // The left run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v, buf, mid);
            hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
        }

        // Initially, these pointers point to the beginnings of their arrays.
        let left = &mut hole.start;
        let mut right = v_mid;
        let out = &mut hole.dest;

        while *left < hole.end && right < v_end {
            // Consume the lesser side.
            // If equal, prefer the left run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right, &**left) {
                    get_and_increment(&mut right)
                } else {
                    get_and_increment(left)
                };
                ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
            }
        }
    } else {
        // The right run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v_mid, buf, len - mid);
            hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
        }

        // Initially, these pointers point past the ends of their arrays.
        let left = &mut hole.dest;
        let right = &mut hole.end;
        let mut out = v_end;

        while v < *left && buf < *right {
            // Consume the greater side.
            // If equal, prefer the right run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
                    decrement_and_get(left)
                } else {
                    decrement_and_get(right)
                };
                ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
            }
        }
    }
    // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
    // it will now be copied into the hole in `v`.
    unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
        let old = *ptr;
        *ptr = unsafe { ptr.offset(1) };
        old
    }

    unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
        *ptr = unsafe { ptr.offset(-1) };
        *ptr
    }
    // When dropped, copies the range `start..end` into `dest..`.
    struct MergeHole<T> {
        start: *mut T,
        end: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for MergeHole<T> {
        fn drop(&mut self) {
            // `T` is not a zero-sized type, so it's okay to divide by its size.
            let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
            unsafe {
                ptr::copy_nonoverlapping(self.start, self.dest, len);
            }
        }
    }
}
/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
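///
/// (As a sketch of why this holds: invariant 2 forces the run lengths on the stack to grow at
/// least as fast as the Fibonacci numbers, so a slice of length *n* can have only *O*(log *n*)
/// pending runs, which bounds both the stack depth and the total merge work.)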
#[cfg(not(no_global_oom_handling))]
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;
    // Very short runs are extended using insertion sort to span at least this many elements.
    const MIN_RUN: usize = 10;

    // Sorting has no meaningful behavior on zero-sized types.
    if size_of::<T>() == 0 {
        return;
    }

    let len = v.len();

    // Short arrays get sorted in-place via insertion sort to avoid allocations.
    if len <= MAX_INSERTION {
        if len >= 2 {
            for i in (0..len - 1).rev() {
                insert_head(&mut v[i..], &mut is_less);
            }
        }
        return;
    }
    // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
    // shallow copies of the contents of `v` without risking the dtors running on copies if
    // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
    // which will always have length at most `len / 2`.
    let mut buf = Vec::with_capacity(len / 2);

    // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
    // strange decision, but consider the fact that merges more often go in the opposite direction
    // (forwards). According to benchmarks, merging forwards is slightly faster than merging
    // backwards. To conclude, identifying runs by traversing backwards improves performance.
    let mut runs = vec![];
    let mut end = len;
    while end > 0 {
        // Find the next natural run, and reverse it if it's strictly descending.
        let mut start = end - 1;
        if start > 0 {
            start -= 1;
            unsafe {
                if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
                    while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
                        start -= 1;
                    }
                    v[start..end].reverse();
                } else {
                    while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
                    {
                        start -= 1;
                    }
                }
            }
        }

        // Insert some more elements into the run if it's too short. Insertion sort is faster than
        // merge sort on short sequences, so this significantly improves performance.
        while start > 0 && end - start < MIN_RUN {
            start -= 1;
            insert_head(&mut v[start..end], &mut is_less);
        }

        // Push this run onto the stack.
        runs.push(Run { start, len: end - start });
        end = start;
        // Merge some pairs of adjacent runs to satisfy the invariants.
        while let Some(r) = collapse(&runs) {
            let left = runs[r + 1];
            let right = runs[r];
            unsafe {
                merge(
                    &mut v[left.start..right.start + right.len],
                    left.len,
                    buf.as_mut_ptr(),
                    &mut is_less,
                );
            }
            runs[r] = Run { start: left.start, len: left.len + right.len };
            runs.remove(r + 1);
        }
    }
    // Finally, exactly one run must remain in the stack.
    debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
    // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
    // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
    // algorithm should continue building a new run instead, `None` is returned.
    //
    // TimSort is infamous for its buggy implementations, as described here:
    // http://envisage-project.eu/timsort-specification-and-verification/
    //
    // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
    // Enforcing them on just the top three is not sufficient to ensure that the invariants will
    // still hold for *all* runs in the stack.
    //
    // This function correctly checks invariants for the top four runs. Additionally, if the top
    // run starts at index 0, it will always demand a merge operation until the stack is fully
    // collapsed, in order to complete the sort.
    fn collapse(runs: &[Run]) -> Option<usize> {
        let n = runs.len();
        if n >= 2
            && (runs[n - 1].start == 0
                || runs[n - 2].len <= runs[n - 1].len
                || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
                || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
        {
            if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
        } else {
            None
        }
    }
    #[derive(Clone, Copy)]
    struct Run {
        start: usize,
        len: usize,
    }
}