// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

//! A fixed capacity array sized to match some other type `T`.
//!
//! See [`InlineArray`](struct.InlineArray.html)

use core::borrow::{Borrow, BorrowMut};
use core::cmp::Ordering;
use core::fmt::{Debug, Error, Formatter};
use core::hash::{Hash, Hasher};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::ptr::NonNull;
use core::slice::{from_raw_parts, from_raw_parts_mut, Iter as SliceIter, IterMut as SliceIterMut};

mod iter;
pub use self::iter::{Drain, Iter};

/// A fixed capacity array sized to match some other type `T`.
///
/// This works like a vector, but allocated on the stack (and thus marginally
/// faster than `Vec`), with the allocated space exactly matching the size of
/// the given type `T`. The vector consists of a `usize` tracking its current
/// length and zero or more elements of type `A`. The capacity is thus
/// `(size_of::<T>() - size_of::<usize>()) / size_of::<A>()`. This can yield a
/// capacity of zero, if `size_of::<A>()` is greater than
/// `size_of::<T>() - size_of::<usize>()`, which is not an error and is
/// handled properly by the data structure.
///
/// If `size_of::<T>()` is less than `size_of::<usize>()`, meaning the vector
/// has no space to store its length, `InlineArray::new()` will panic.
///
/// This is meant to facilitate optimisations where a list data structure
/// allocates a fairly large struct for itself, allowing you to replace it with
/// an `InlineArray` until it grows beyond its capacity. This not only gives you
/// a performance boost at very small sizes, it also saves you from having to
/// allocate anything on the heap until absolutely necessary.
///
/// For instance, `im::Vector<A>` in its final form currently looks like this:
///
/// ```rust, ignore
/// struct RRB<A> {
///     length: usize,
///     tree_height: usize,
///     outer_head: Rc<Chunk<A>>,
///     inner_head: Rc<Chunk<A>>,
///     tree: Rc<TreeNode<A>>,
///     inner_tail: Rc<Chunk<A>>,
///     outer_tail: Rc<Chunk<A>>,
/// }
/// ```
///
/// That's two `usize`s and five `Rc`s, which comes in at 56 bytes on x86_64
/// architectures. With `InlineArray`, that leaves us with 56 -
/// `size_of::<usize>()` = 48 bytes we can use before having to expand into the
/// full data structure. If `A` is `u8`, that's 48 elements, and even if `A` is a
/// pointer we can still keep 6 of them inline before we run out of capacity.
///
/// We can declare an enum like this:
///
/// ```rust, ignore
/// enum VectorWrapper<A> {
///     Inline(InlineArray<A, RRB<A>>),
///     Full(RRB<A>),
/// }
/// ```
///
/// Both of these will have the same size, and we can swap the `Inline` case out
/// with the `Full` case once the `InlineArray` runs out of capacity.
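///
/// As a minimal usage sketch (the `[u64; 8]` backing type below is an
/// arbitrary stand-in for such a struct, and the capacity figure assumes a
/// 64-bit target):
///
/// ```rust, ignore
/// let mut array: InlineArray<usize, [u64; 8]> = InlineArray::new();
/// assert_eq!(7, InlineArray::<usize, [u64; 8]>::CAPACITY); // (64 - 8) / 8
/// array.push(1);
/// array.push(2);
/// assert_eq!(2, array.len());
/// assert_eq!(Some(2), array.pop());
/// ```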
#[repr(C)]
pub struct InlineArray<A, T> {
    // We need both the `_header_align` and `data` fields to be properly aligned in memory. We do
    // a few tricks to handle that.
    //
    // * An alignment is always a power of 2. Therefore, with a pair of alignments, one is always
    //   a multiple of the other (one way or the other).
    // * A struct is aligned to at least the max alignment of each of its fields.
    // * A `repr(C)` struct follows the order of fields and pushes each as close to the previous one
    //   as allowed by alignment.
    //
    // By placing two "fake" fields that have 0 size, but an alignment, first, we make sure that all
    // 3 start at the beginning of the struct and that all of them are aligned to their maximum
    // alignment.
    //
    // Unfortunately, we can't use `[A; 0]` to align to the actual alignment of the type `A`,
    // because it prevents use of `InlineArray` in recursive types.
    // We rely on the alignment of `u64`/`usize` or `T` to be sufficient, and panic otherwise. We
    // use `u64` to handle all common types on 32-bit systems too.
    //
    // Furthermore, because we don't know if `u64` or `A` has the bigger alignment, we decide on a
    // case-by-case basis if the header or the elements go first. By placing the one with higher
    // alignment requirements first, we align that one, and the other one will be aligned
    // "automatically" when placed just after it.
    //
    // To the best of our knowledge, this is all guaranteed by the compiler. But just to make sure,
    // we have a bunch of asserts in the constructor to check; as these are invariants enforced by
    // the compiler, it should be trivial for it to remove the checks, so they are free (if we
    // are correct) or will save us (if we are not).
    _header_align: [(u64, usize); 0],
    _phantom: PhantomData<A>,
    data: MaybeUninit<T>,
}

const fn capacity(
    host_size: usize,
    header_size: usize,
    element_size: usize,
    element_align: usize,
    container_align: usize,
) -> usize {
    if element_size == 0 {
        usize::MAX
    } else if element_align <= container_align && host_size > header_size {
        (host_size - header_size) / element_size
    } else {
        0 // larger alignment can't be guaranteed, so it'd be unsafe to store any elements
    }
}

impl<A, T> InlineArray<A, T> {
    const HOST_SIZE: usize = mem::size_of::<T>();
    const ELEMENT_SIZE: usize = mem::size_of::<A>();
    const HEADER_SIZE: usize = mem::size_of::<usize>();
    // Do we place the header before the elements or the other way around?
    const HEADER_FIRST: bool = mem::align_of::<usize>() >= mem::align_of::<A>();
    // Note: one of the following is always 0.
    // How many usizes to skip before the first element?
    const ELEMENT_SKIP: usize = Self::HEADER_FIRST as usize;
    // How many elements to skip before the header?
    const HEADER_SKIP: usize = Self::CAPACITY * (1 - Self::ELEMENT_SKIP);

    /// The maximum number of elements the `InlineArray` can hold.
    pub const CAPACITY: usize = capacity(
        Self::HOST_SIZE,
        Self::HEADER_SIZE,
        Self::ELEMENT_SIZE,
        mem::align_of::<A>(),
        mem::align_of::<Self>(),
    );

    unsafe fn len_const(&self) -> *const usize {
        let ptr = self
            .data
            .as_ptr()
            .cast::<A>()
            .add(Self::HEADER_SKIP)
            .cast::<usize>();
        debug_assert!(ptr as usize % mem::align_of::<usize>() == 0);
        ptr
    }

    pub(crate) unsafe fn len_mut(&mut self) -> *mut usize {
        let ptr = self
            .data
            .as_mut_ptr()
            .cast::<A>()
            .add(Self::HEADER_SKIP)
            .cast::<usize>();
        debug_assert!(ptr as usize % mem::align_of::<usize>() == 0);
        ptr
    }

    pub(crate) unsafe fn data(&self) -> *const A {
        if Self::CAPACITY == 0 {
            return NonNull::<A>::dangling().as_ptr();
        }
        let ptr = self
            .data
            .as_ptr()
            .cast::<usize>()
            .add(Self::ELEMENT_SKIP)
            .cast::<A>();
        debug_assert!(ptr as usize % mem::align_of::<A>() == 0);
        ptr
    }

    unsafe fn data_mut(&mut self) -> *mut A {
        if Self::CAPACITY == 0 {
            return NonNull::<A>::dangling().as_ptr();
        }
        let ptr = self
            .data
            .as_mut_ptr()
            .cast::<usize>()
            .add(Self::ELEMENT_SKIP)
            .cast::<A>();
        debug_assert!(ptr as usize % mem::align_of::<A>() == 0);
        ptr
    }

    unsafe fn ptr_at(&self, index: usize) -> *const A {
        debug_assert!(index < Self::CAPACITY);
        self.data().add(index)
    }

    unsafe fn ptr_at_mut(&mut self, index: usize) -> *mut A {
        debug_assert!(index < Self::CAPACITY);
        self.data_mut().add(index)
    }

    unsafe fn read_at(&self, index: usize) -> A {
        ptr::read(self.ptr_at(index))
    }

    unsafe fn write_at(&mut self, index: usize, value: A) {
        ptr::write(self.ptr_at_mut(index), value);
    }

    /// Get the length of the array.
    pub fn len(&self) -> usize {
        unsafe { *self.len_const() }
    }

    /// Test if the array is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Test if the array is at capacity.
    pub fn is_full(&self) -> bool {
        self.len() >= Self::CAPACITY
    }

    /// Construct a new empty array.
    ///
    /// # Panics
    ///
    /// Panics if the element type requires an alignment larger than both the
    /// alignment of `usize` and the alignment of the type that provides the capacity.
    pub fn new() -> Self {
        assert!(Self::HOST_SIZE > Self::HEADER_SIZE);
        assert!(
            (Self::CAPACITY == 0) || (mem::align_of::<Self>() % mem::align_of::<A>() == 0),
            "InlineArray can't satisfy alignment of {}",
            core::any::type_name::<A>()
        );

        let mut self_ = Self {
            _header_align: [],
            _phantom: PhantomData,
            data: MaybeUninit::uninit(),
        };
        // Sanity check our assumptions about what is guaranteed by the compiler. If we are right,
        // these should completely optimize out of the resulting binary.
        assert_eq!(
            &self_ as *const _ as usize,
            self_.data.as_ptr() as usize,
            "Padding at the start of struct",
        );
        assert_eq!(
            0,
            self_.data.as_ptr() as usize % mem::align_of::<usize>()
        );
        assert!(
            mem::size_of::<Self>() == mem::size_of::<T>()
                || mem::align_of::<T>() < mem::align_of::<Self>()
        );
        assert_eq!(0, unsafe { self_.data() } as usize % mem::align_of::<A>());
        assert_eq!(0, unsafe { self_.data_mut() } as usize % mem::align_of::<A>());
        assert!(Self::ELEMENT_SKIP == 0 || Self::HEADER_SKIP == 0);
        unsafe { ptr::write(self_.len_mut(), 0usize) };
        self_
    }

    /// Push an item to the back of the array.
    ///
    /// Panics if the capacity of the array is exceeded.
    ///
    /// Time: O(1)
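    ///
    /// For example (a sketch with an arbitrary `[usize; 2]` backing type,
    /// which yields a capacity of 1 on 64-bit targets):
    ///
    /// ```rust, ignore
    /// let mut array: InlineArray<usize, [usize; 2]> = InlineArray::new();
    /// array.push(1); // fills the only available slot
    /// assert!(array.is_full()); // a second push would panic
    /// ```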
    pub fn push(&mut self, value: A) {
        if self.is_full() {
            panic!("InlineArray::push: chunk size overflow");
        }
        unsafe {
            self.write_at(self.len(), value);
            *self.len_mut() += 1;
        }
    }

    /// Pop an item from the back of the array.
    ///
    /// Returns `None` if the array is empty.
    ///
    /// Time: O(1)
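    ///
    /// For example (arbitrary backing type):
    ///
    /// ```rust, ignore
    /// let mut array: InlineArray<u64, [usize; 4]> = InlineArray::new();
    /// array.push(10);
    /// assert_eq!(Some(10), array.pop());
    /// assert_eq!(None, array.pop()); // the array is empty again
    /// ```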
    pub fn pop(&mut self) -> Option<A> {
        if self.is_empty() {
            None
        } else {
            unsafe {
                *self.len_mut() -= 1;
                Some(self.read_at(self.len()))
            }
        }
    }

    /// Insert a new value at index `index`, shifting all the following values
    /// to the right.
    ///
    /// Panics if the index is out of bounds or the array is at capacity.
    ///
    /// Time: O(n) for the number of items shifted
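    ///
    /// A sketch of the shifting behaviour (arbitrary backing type):
    ///
    /// ```rust, ignore
    /// let mut array: InlineArray<u8, [usize; 4]> = InlineArray::new();
    /// array.extend(&[1, 2, 4]);
    /// array.insert(2, 3); // shifts the 4 one slot to the right
    /// assert_eq!(&[1, 2, 3, 4], &array[..]);
    /// ```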
    pub fn insert(&mut self, index: usize, value: A) {
        if self.is_full() {
            panic!("InlineArray::insert: chunk size overflow");
        }
        if index > self.len() {
            panic!("InlineArray::insert: index out of bounds");
        }
        unsafe {
            let src = self.ptr_at_mut(index);
            ptr::copy(src, src.add(1), self.len() - index);
            ptr::write(src, value);
            *self.len_mut() += 1;
        }
    }

    /// Remove the value at index `index`, shifting all the following values to
    /// the left.
    ///
    /// Returns the removed value, or `None` if the array is empty or the index
    /// is out of bounds.
    ///
    /// Time: O(n) for the number of items shifted
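    ///
    /// For example (arbitrary backing type):
    ///
    /// ```rust, ignore
    /// let mut array: InlineArray<u8, [usize; 4]> = InlineArray::new();
    /// array.extend(&[1, 2, 3]);
    /// assert_eq!(Some(2), array.remove(1));
    /// assert_eq!(&[1, 3], &array[..]);
    /// assert_eq!(None, array.remove(17)); // out of bounds: no panic, just `None`
    /// ```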
    pub fn remove(&mut self, index: usize) -> Option<A> {
        if index >= self.len() {
            None
        } else {
            unsafe {
                let src = self.ptr_at_mut(index);
                let value = ptr::read(src);
                *self.len_mut() -= 1;
                ptr::copy(src.add(1), src, self.len() - index);
                Some(value)
            }
        }
    }

    /// Split an array into two, the original array containing
    /// everything up to `index` and the returned array containing
    /// everything from `index` onwards.
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// Time: O(n) for the number of items in the new chunk
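    ///
    /// For example (arbitrary backing type):
    ///
    /// ```rust, ignore
    /// let mut left: InlineArray<u8, [usize; 4]> = InlineArray::new();
    /// left.extend(&[1, 2, 3]);
    /// let right = left.split_off(1);
    /// assert_eq!(&[1], &left[..]);
    /// assert_eq!(&[2, 3], &right[..]);
    /// ```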
    pub fn split_off(&mut self, index: usize) -> Self {
        if index > self.len() {
            panic!("InlineArray::split_off: index out of bounds");
        }
        let mut out = Self::new();
        if index < self.len() {
            unsafe {
                ptr::copy(self.ptr_at(index), out.data_mut(), self.len() - index);
                *out.len_mut() = self.len() - index;
                *self.len_mut() = index;
            }
        }
        out
    }

    unsafe fn drop_contents(&mut self) {
        ptr::drop_in_place::<[A]>(&mut **self) // uses DerefMut
    }

    /// Discard the contents of the array.
    ///
    /// Time: O(n)
    pub fn clear(&mut self) {
        unsafe {
            self.drop_contents();
            *self.len_mut() = 0;
        }
    }

    /// Construct an iterator that drains values from the front of the array.
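    ///
    /// For example (arbitrary backing type):
    ///
    /// ```rust, ignore
    /// let mut array: InlineArray<u8, [usize; 4]> = InlineArray::new();
    /// array.extend(&[1, 2, 3]);
    /// let drained: Vec<u8> = array.drain().collect();
    /// assert_eq!(vec![1, 2, 3], drained);
    /// assert!(array.is_empty());
    /// ```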
    pub fn drain(&mut self) -> Drain<'_, A, T> {
        Drain { array: self }
    }
}

impl<A, T> Drop for InlineArray<A, T> {
    fn drop(&mut self) {
        unsafe { self.drop_contents() }
    }
}

impl<A, T> Default for InlineArray<A, T> {
    fn default() -> Self {
        Self::new()
    }
}

// impl<A, T> Copy for InlineArray<A, T> where A: Copy {}

impl<A, T> Clone for InlineArray<A, T>
where
    A: Clone,
{
    fn clone(&self) -> Self {
        let mut copy = Self::new();
        for i in 0..self.len() {
            unsafe {
                copy.write_at(i, self.get_unchecked(i).clone());
            }
        }
        unsafe {
            *copy.len_mut() = self.len();
        }
        copy
    }
}

impl<A, T> Deref for InlineArray<A, T> {
    type Target = [A];
    fn deref(&self) -> &Self::Target {
        unsafe { from_raw_parts(self.data(), self.len()) }
    }
}

impl<A, T> DerefMut for InlineArray<A, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { from_raw_parts_mut(self.data_mut(), self.len()) }
    }
}

impl<A, T> Borrow<[A]> for InlineArray<A, T> {
    fn borrow(&self) -> &[A] {
        self.deref()
    }
}

impl<A, T> BorrowMut<[A]> for InlineArray<A, T> {
    fn borrow_mut(&mut self) -> &mut [A] {
        self.deref_mut()
    }
}

impl<A, T> AsRef<[A]> for InlineArray<A, T> {
    fn as_ref(&self) -> &[A] {
        self.deref()
    }
}

impl<A, T> AsMut<[A]> for InlineArray<A, T> {
    fn as_mut(&mut self) -> &mut [A] {
        self.deref_mut()
    }
}

impl<A, T, Slice> PartialEq<Slice> for InlineArray<A, T>
where
    Slice: Borrow<[A]>,
    A: PartialEq,
{
    fn eq(&self, other: &Slice) -> bool {
        self.deref() == other.borrow()
    }
}

impl<A, T> Eq for InlineArray<A, T> where A: Eq {}

impl<A, T> PartialOrd for InlineArray<A, T>
where
    A: PartialOrd,
{
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.iter().partial_cmp(other.iter())
    }
}

impl<A, T> Ord for InlineArray<A, T>
where
    A: Ord,
{
    fn cmp(&self, other: &Self) -> Ordering {
        self.iter().cmp(other.iter())
    }
}

impl<A, T> Debug for InlineArray<A, T>
where
    A: Debug,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.write_str("Chunk")?;
        f.debug_list().entries(self.iter()).finish()
    }
}

impl<A, T> Hash for InlineArray<A, T>
where
    A: Hash,
{
    fn hash<H>(&self, hasher: &mut H)
    where
        H: Hasher,
    {
        for item in self {
            item.hash(hasher)
        }
    }
}

impl<A, T> IntoIterator for InlineArray<A, T> {
    type Item = A;
    type IntoIter = Iter<A, T>;
    fn into_iter(self) -> Self::IntoIter {
        Iter { array: self }
    }
}

impl<A, T> FromIterator<A> for InlineArray<A, T> {
    fn from_iter<I>(it: I) -> Self
    where
        I: IntoIterator<Item = A>,
    {
        let mut chunk = Self::new();
        for item in it {
            chunk.push(item);
        }
        chunk
    }
}

impl<'a, A, T> IntoIterator for &'a InlineArray<A, T> {
    type Item = &'a A;
    type IntoIter = SliceIter<'a, A>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, A, T> IntoIterator for &'a mut InlineArray<A, T> {
    type Item = &'a mut A;
    type IntoIter = SliceIterMut<'a, A>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

impl<A, T> Extend<A> for InlineArray<A, T> {
    /// Append the contents of the iterator to the back of the array.
    ///
    /// Panics if the array exceeds its capacity.
    ///
    /// Time: O(n) for the length of the iterator
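    ///
    /// For example (arbitrary backing type):
    ///
    /// ```rust, ignore
    /// let mut array: InlineArray<u8, [usize; 4]> = InlineArray::new();
    /// array.extend(vec![1, 2, 3]);
    /// assert_eq!(&[1, 2, 3], &array[..]);
    /// ```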
    fn extend<I>(&mut self, it: I)
    where
        I: IntoIterator<Item = A>,
    {
        for item in it {
            self.push(item);
        }
    }
}

impl<'a, A, T> Extend<&'a A> for InlineArray<A, T>
where
    A: 'a + Copy,
{
    /// Append the contents of the iterator to the back of the array.
    ///
    /// Panics if the array exceeds its capacity.
    ///
    /// Time: O(n) for the length of the iterator
    fn extend<I>(&mut self, it: I)
    where
        I: IntoIterator<Item = &'a A>,
    {
        for item in it {
            self.push(*item);
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::tests::DropTest;
    use std::sync::atomic::{AtomicUsize, Ordering};

    #[test]
    fn dropping() {
        let counter = AtomicUsize::new(0);
        {
            let mut chunk: InlineArray<DropTest<'_>, [usize; 32]> = InlineArray::new();
            for _i in 0..16 {
                chunk.push(DropTest::new(&counter));
            }
            assert_eq!(16, counter.load(Ordering::Relaxed));
            for _i in 0..8 {
                chunk.pop();
            }
            assert_eq!(8, counter.load(Ordering::Relaxed));
        }
        assert_eq!(0, counter.load(Ordering::Relaxed));
    }

    #[test]
    fn zero_sized_values() {
        let mut chunk: InlineArray<(), [usize; 32]> = InlineArray::new();
        for _i in 0..65536 {
            chunk.push(());
        }
        assert_eq!(65536, chunk.len());
        assert_eq!(Some(()), chunk.pop());
    }

    #[test]
    fn low_align_base() {
        let mut chunk: InlineArray<String, [u8; 512]> = InlineArray::new();
        chunk.push("Hello".to_owned());
        assert_eq!(chunk[0], "Hello");

        let mut chunk: InlineArray<String, [u16; 512]> = InlineArray::new();
        chunk.push("Hello".to_owned());
        assert_eq!(chunk[0], "Hello");
    }

    #[test]
    fn float_align() {
        let mut chunk: InlineArray<f64, [u8; 16]> = InlineArray::new();
        chunk.push(1234.);
        assert_eq!(chunk[0], 1234.);

        let mut chunk: InlineArray<f64, [u8; 17]> = InlineArray::new();
        chunk.push(1234.);
        assert_eq!(chunk[0], 1234.);
    }

    #[test]
    fn recursive_types_compile() {
        #[allow(dead_code)]
        enum Recursive {
            A(InlineArray<Recursive, u64>),
            B,
        }
    }

    #[test]
    fn insufficient_alignment1() {
        #[repr(align(256))]
        struct BigAlign(u8);
        #[repr(align(32))]
        struct MediumAlign(u8);

        assert_eq!(0, InlineArray::<BigAlign, [usize; 256]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [u64; 256]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [f64; 256]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [MediumAlign; 256]>::CAPACITY);
    }

    #[test]
    fn insufficient_alignment2() {
        #[repr(align(256))]
        struct BigAlign(usize);

        let mut bad: InlineArray<BigAlign, [usize; 256]> = InlineArray::new();
        assert_eq!(0, InlineArray::<BigAlign, [usize; 256]>::CAPACITY);
        assert_eq!(0, bad.len());
        assert_eq!(0, bad[..].len());
        assert_eq!(true, bad.is_full());
        assert_eq!(0, bad.drain().count());
        assert!(bad.pop().is_none());
        assert!(bad.remove(0).is_none());
        assert!(bad.split_off(0).is_full());
        bad.clear();
    }

    #[test]
    fn sufficient_alignment1() {
        #[repr(align(256))]
        struct BigAlign(u8);

        assert_eq!(13, InlineArray::<BigAlign, [BigAlign; 14]>::CAPACITY);
        assert_eq!(1, InlineArray::<BigAlign, [BigAlign; 2]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [BigAlign; 1]>::CAPACITY);

        let mut chunk: InlineArray<BigAlign, [BigAlign; 2]> = InlineArray::new();
        chunk.push(BigAlign(42));
        assert_eq!(
            chunk.get(0).unwrap() as *const _ as usize % mem::align_of::<BigAlign>(),
            0
        );
    }

    #[test]
    fn sufficient_alignment2() {
        #[repr(align(128))]
        struct BigAlign([u8; 64]);
        #[repr(align(256))]
        struct BiggerAlign(u8);
        assert_eq!(128, mem::align_of::<BigAlign>());
        assert_eq!(256, mem::align_of::<BiggerAlign>());

        assert_eq!(199, InlineArray::<BigAlign, [BiggerAlign; 100]>::CAPACITY);
        assert_eq!(3, InlineArray::<BigAlign, [BiggerAlign; 2]>::CAPACITY);
        assert_eq!(1, InlineArray::<BigAlign, [BiggerAlign; 1]>::CAPACITY);
        assert_eq!(0, InlineArray::<BigAlign, [BiggerAlign; 0]>::CAPACITY);

        let mut chunk: InlineArray<BigAlign, [BiggerAlign; 1]> = InlineArray::new();
        chunk.push(BigAlign([0; 64]));
        assert_eq!(
            chunk.get(0).unwrap() as *const _ as usize % mem::align_of::<BigAlign>(),
            0
        );
    }
}