We've actually reached an interesting situation here: we've duplicated the logic
for specifying a buffer and freeing its memory in Vec and IntoIter. Now that
we've implemented it and identified *actual* logic duplication, this is a good
time to perform some logic compression.
We're going to abstract out the `(ptr, cap)` pair and give them the logic for
allocating, growing, and freeing:
<!-- ignore: simplified code -->
16 _marker: PhantomData<T>,
19 unsafe impl<T: Send> Send for RawVec<T> {}
20 unsafe impl<T: Sync> Sync for RawVec<T> {}
24 assert!(mem::size_of::<T>() != 0, "TODO: implement ZST support");
26 ptr: NonNull::dangling(),
33 let (new_cap, new_layout) = if self.cap == 0 {
34 (1, Layout::array::<T>(1).unwrap())
36 // This can't overflow because we ensure self.cap <= isize::MAX.
37 let new_cap = 2 * self.cap;
39 // Layout::array checks that the number of bytes is <= usize::MAX,
40 // but this is redundant since old_layout.size() <= isize::MAX,
41 // so the `unwrap` should never fail.
42 let new_layout = Layout::array::<T>(new_cap).unwrap();
46 // Ensure that the new allocation doesn't exceed `isize::MAX` bytes.
47 assert!(new_layout.size() <= isize::MAX as usize, "Allocation too large");
49 let new_ptr = if self.cap == 0 {
50 unsafe { alloc::alloc(new_layout) }
52 let old_layout = Layout::array::<T>(self.cap).unwrap();
53 let old_ptr = self.ptr.as_ptr() as *mut u8;
54 unsafe { alloc::realloc(old_ptr, old_layout, new_layout.size()) }
57 // If allocation fails, `new_ptr` will be null, in which case we abort.
58 self.ptr = match NonNull::new(new_ptr as *mut T) {
60 None => alloc::handle_alloc_error(new_layout),
66 impl<T> Drop for RawVec<T> {
69 let layout = Layout::array::<T>(self.cap).unwrap();
71 alloc::dealloc(self.ptr.as_ptr() as *mut u8, layout);
And change Vec as follows:
<!-- ignore: simplified code -->
88 fn ptr(&self) -> *mut T {
92 fn cap(&self) -> usize {
96 pub fn new() -> Self {
103 // push/pop/insert/remove largely unchanged:
104 // * `self.ptr.as_ptr() -> self.ptr()`
105 // * `self.cap -> self.cap()`
106 // * `self.grow() -> self.buf.grow()`
109 impl<T> Drop for Vec<T> {
111 while let Some(_) = self.pop() {}
112 // deallocation is handled by RawVec
And finally we can really simplify IntoIter:
<!-- ignore: simplified code -->
121 pub struct IntoIter<T> {
122 _buf: RawVec<T>, // we don't actually care about this. Just need it to live.
127 // next and next_back literally unchanged since they never referred to the buf
129 impl<T> Drop for IntoIter<T> {
131 // only need to ensure all our elements are read;
132 // buffer will clean itself up afterwards.
133 for _ in &mut *self {}
138 pub fn into_iter(self) -> IntoIter<T> {
140 // need to use ptr::read to unsafely move the buf out since it's
141 // not Copy, and Vec implements Drop (so we can't destructure it).
142 let buf = ptr::read(&self.buf);
147 start: buf.ptr.as_ptr(),
148 end: buf.ptr.as_ptr().add(len),