We've actually reached an interesting situation here: we've duplicated the logic
for specifying a buffer and freeing its memory in Vec and IntoIter. Now that
we've implemented it and identified *actual* logic duplication, this is a good
time to perform some logic compression.
We're going to abstract out the `(ptr, cap)` pair and give them the logic for
allocating, growing, and freeing:
15 _marker: PhantomData<T>,
18 unsafe impl<T: Send> Send for RawVec<T> {}
19 unsafe impl<T: Sync> Sync for RawVec<T> {}
23 assert!(mem::size_of::<T>() != 0, "TODO: implement ZST support");
25 ptr: NonNull::dangling(),
32 let (new_cap, new_layout) = if self.cap == 0 {
33 (1, Layout::array::<T>(1).unwrap())
35 // This can't overflow because we ensure self.cap <= isize::MAX.
36 let new_cap = 2 * self.cap;
38 // Layout::array checks that the number of bytes is <= usize::MAX,
39 // but this is redundant since old_layout.size() <= isize::MAX,
40 // so the `unwrap` should never fail.
41 let new_layout = Layout::array::<T>(new_cap).unwrap();
45 // Ensure that the new allocation doesn't exceed `isize::MAX` bytes.
46 assert!(new_layout.size() <= isize::MAX as usize, "Allocation too large");
48 let new_ptr = if self.cap == 0 {
49 unsafe { alloc::alloc(new_layout) }
51 let old_layout = Layout::array::<T>(self.cap).unwrap();
52 let old_ptr = self.ptr.as_ptr() as *mut u8;
53 unsafe { alloc::realloc(old_ptr, old_layout, new_layout.size()) }
56 // If allocation fails, `new_ptr` will be null, in which case we abort.
57 self.ptr = match NonNull::new(new_ptr as *mut T) {
59 None => alloc::handle_alloc_error(new_layout),
65 impl<T> Drop for RawVec<T> {
68 let layout = Layout::array::<T>(self.cap).unwrap();
70 alloc::dealloc(self.ptr.as_ptr() as *mut u8, layout);
And change Vec as follows:
86 fn ptr(&self) -> *mut T {
90 fn cap(&self) -> usize {
94 pub fn new() -> Self {
101 // push/pop/insert/remove largely unchanged:
102 // * `self.ptr.as_ptr() -> self.ptr()`
103 // * `self.cap -> self.cap()`
104 // * `self.grow() -> self.buf.grow()`
107 impl<T> Drop for Vec<T> {
109 while let Some(_) = self.pop() {}
110 // deallocation is handled by RawVec
And finally we can really simplify IntoIter:
118 pub struct IntoIter<T> {
119 _buf: RawVec<T>, // we don't actually care about this. Just need it to live.
124 // next and next_back literally unchanged since they never referred to the buf
126 impl<T> Drop for IntoIter<T> {
128 // only need to ensure all our elements are read;
129 // buffer will clean itself up afterwards.
130 for _ in &mut *self {}
135 pub fn into_iter(self) -> IntoIter<T> {
137 // need to use ptr::read to unsafely move the buf out since it's
138 // not Copy, and Vec implements Drop (so we can't destructure it).
139 let buf = ptr::read(&self.buf);
144 start: buf.ptr.as_ptr(),
145 end: buf.ptr.as_ptr().add(len),