# RawVec

We've actually reached an interesting situation here: we've duplicated the logic
for specifying a buffer and freeing its memory in Vec and IntoIter. Now that
we've implemented it and identified *actual* logic duplication, this is a good
time to perform some logic compression.

We're going to abstract out the `(ptr, cap)` pair and give them the logic for
allocating, growing, and freeing:

```rust,ignore
struct RawVec<T> {
    ptr: NonNull<T>,
    cap: usize,
    _marker: PhantomData<T>,
}

unsafe impl<T: Send> Send for RawVec<T> {}
unsafe impl<T: Sync> Sync for RawVec<T> {}

impl<T> RawVec<T> {
    fn new() -> Self {
        assert!(mem::size_of::<T>() != 0, "TODO: implement ZST support");
        RawVec {
            ptr: NonNull::dangling(),
            cap: 0,
            _marker: PhantomData,
        }
    }

    fn grow(&mut self) {
        let (new_cap, new_layout) = if self.cap == 0 {
            (1, Layout::array::<T>(1).unwrap())
        } else {
            // This can't overflow because we ensure self.cap <= isize::MAX.
            let new_cap = 2 * self.cap;

            // Layout::array checks that the number of bytes is <= usize::MAX,
            // but this is redundant since old_layout.size() <= isize::MAX,
            // so the `unwrap` should never fail.
            let new_layout = Layout::array::<T>(new_cap).unwrap();
            (new_cap, new_layout)
        };

        // Ensure that the new allocation doesn't exceed `isize::MAX` bytes.
        assert!(new_layout.size() <= isize::MAX as usize, "Allocation too large");

        let new_ptr = if self.cap == 0 {
            unsafe { alloc::alloc(new_layout) }
        } else {
            let old_layout = Layout::array::<T>(self.cap).unwrap();
            let old_ptr = self.ptr.as_ptr() as *mut u8;
            unsafe { alloc::realloc(old_ptr, old_layout, new_layout.size()) }
        };

        // If allocation fails, `new_ptr` will be null, in which case we abort.
        self.ptr = match NonNull::new(new_ptr as *mut T) {
            Some(p) => p,
            None => alloc::handle_alloc_error(new_layout),
        };
        self.cap = new_cap;
    }
}

impl<T> Drop for RawVec<T> {
    fn drop(&mut self) {
        if self.cap != 0 {
            let layout = Layout::array::<T>(self.cap).unwrap();
            unsafe {
                alloc::dealloc(self.ptr.as_ptr() as *mut u8, layout);
            }
        }
    }
}
```

And change Vec as follows:

```rust,ignore
pub struct Vec<T> {
    buf: RawVec<T>,
    len: usize,
}

impl<T> Vec<T> {
    fn ptr(&self) -> *mut T {
        self.buf.ptr.as_ptr()
    }

    fn cap(&self) -> usize {
        self.buf.cap
    }

    pub fn new() -> Self {
        Vec {
            buf: RawVec::new(),
            len: 0,
        }
    }

    // push/pop/insert/remove largely unchanged:
    // * `self.ptr.as_ptr() -> self.ptr()`
    // * `self.cap -> self.cap()`
    // * `self.grow() -> self.buf.grow()`
}

impl<T> Drop for Vec<T> {
    fn drop(&mut self) {
        while let Some(_) = self.pop() {}
        // deallocation is handled by RawVec
    }
}
```
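
To make that renaming concrete, here is roughly what `push` and `pop` from the
earlier chapters look like on top of `RawVec`. This is a sketch of the same
logic as before, just routed through the new accessors:

```rust,ignore
impl<T> Vec<T> {
    pub fn push(&mut self, elem: T) {
        // Out of room? Let RawVec handle the reallocation.
        if self.len == self.cap() {
            self.buf.grow();
        }

        unsafe {
            ptr::write(self.ptr().add(self.len), elem);
        }

        // Can't overflow, we'll OOM first.
        self.len += 1;
    }

    pub fn pop(&mut self) -> Option<T> {
        if self.len == 0 {
            None
        } else {
            self.len -= 1;
            unsafe { Some(ptr::read(self.ptr().add(self.len))) }
        }
    }
}
```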

And finally we can really simplify IntoIter:

```rust,ignore
pub struct IntoIter<T> {
    _buf: RawVec<T>, // we don't actually care about this. Just need it to live.
    start: *const T,
    end: *const T,
}

// next and next_back literally unchanged since they never referred to the buf

impl<T> Drop for IntoIter<T> {
    fn drop(&mut self) {
        // only need to ensure all our elements are read;
        // buffer will clean itself up afterwards.
        for _ in &mut *self {}
    }
}

impl<T> Vec<T> {
    pub fn into_iter(self) -> IntoIter<T> {
        unsafe {
            // need to use ptr::read to unsafely move the buf out since it's
            // not Copy, and Vec implements Drop (so we can't destructure it).
            let buf = ptr::read(&self.buf);
            let len = self.len;
            mem::forget(self);

            IntoIter {
                start: buf.ptr.as_ptr(),
                end: buf.ptr.as_ptr().add(len),
                _buf: buf,
            }
        }
    }
}
```
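
For reference, those unchanged `next` and `next_back` impls are roughly the
ones from the previous chapter; nothing in them touches the buffer itself:

```rust,ignore
impl<T> Iterator for IntoIter<T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        if self.start == self.end {
            None
        } else {
            unsafe {
                // Read out the front element, then bump the start pointer.
                let result = ptr::read(self.start);
                self.start = self.start.offset(1);
                Some(result)
            }
        }
    }
}

impl<T> DoubleEndedIterator for IntoIter<T> {
    fn next_back(&mut self) -> Option<T> {
        if self.start == self.end {
            None
        } else {
            unsafe {
                // Move the end pointer back, then read out the last element.
                self.end = self.end.offset(-1);
                Some(ptr::read(self.end))
            }
        }
    }
}
```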

Much better.
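
As a quick sanity check, here is a hedged usage sketch (assuming the `push`,
`pop`, and `into_iter` methods built up across these chapters) showing that
the refactored type still behaves like a vector:

```rust,ignore
fn main() {
    // Uses the Vec defined in these chapters, not std's.
    let mut v = Vec::new();
    v.push(1);
    v.push(2);
    v.push(3);

    assert_eq!(v.pop(), Some(3));

    // IntoIter drains the remaining elements; RawVec's Drop then frees the buffer.
    let remaining: std::vec::Vec<i32> = v.into_iter().collect();
    assert_eq!(remaining, [1, 2]);
}
```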