// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use core::ptr::Unique;
use core::mem;
use core::slice::{self, SliceExt};
use heap;
use super::oom;
use super::boxed::Box;
use core::ops::Drop;

/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces heap::EMPTY on zero-sized types
/// * Produces heap::EMPTY on zero-length allocations
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
/// * Guards against 32-bit systems allocating more than isize::MAX bytes
/// * Guards against overflowing your length
/// * Aborts on OOM
/// * Avoids freeing heap::EMPTY
/// * Contains a ptr::Unique and thus endows the user with all related benefits
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
/// to handle the actual things *stored* inside of a RawVec.
///
/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
/// This enables you to use the capacity-growing logic to catch the overflows in your length
/// that might occur with zero-sized types.
///
/// However, this means that you need to be careful when round-tripping this type
/// with a `Box<[T]>`: `cap()` won't yield the len. However, `with_capacity`,
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[unsafe_no_drop_flag]
pub struct RawVec<T> {
    ptr: Unique<T>,
    cap: usize,
}

impl<T> RawVec<T> {
    /// Creates the biggest possible RawVec without allocating. If T has positive
    /// size, then this makes a RawVec with capacity 0. If T has 0 size, then it
    /// makes a RawVec with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
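    ///
    /// # Examples
    ///
    /// A minimal sketch of delayed allocation; the `MyVec` wrapper here is
    /// hypothetical and not part of this module:
    ///
    /// ```ignore
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn new() -> Self {
    ///         // No allocation happens here; the buffer is grown lazily
    ///         // later, e.g. by `double` or `reserve` on the first push.
    ///         MyVec { buf: RawVec::new(), len: 0 }
    ///     }
    /// }
    /// ```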
    pub fn new() -> Self {
        unsafe {
            // !0 is usize::MAX. This branch should be stripped at compile time.
            let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };

            // heap::EMPTY doubles as "unallocated" and "zero-sized allocation"
            RawVec { ptr: Unique::new(heap::EMPTY as *mut T), cap: cap }
        }
    }

    /// Creates a RawVec with exactly the capacity and alignment requirements
    /// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0
    /// or T is zero-sized. Note that if `T` is zero-sized this means you will *not*
    /// get a RawVec with the requested capacity!
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
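    ///
    /// # Examples
    ///
    /// A minimal sketch for a sized `T` (for zero-sized types `cap()` would
    /// report `usize::MAX` instead of the requested capacity):
    ///
    /// ```ignore
    /// // Requests space for exactly 16 `u32`s from the allocator.
    /// let buf: RawVec<u32> = RawVec::with_capacity(16);
    /// assert_eq!(buf.cap(), 16);
    /// ```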
    pub fn with_capacity(cap: usize) -> Self {
        unsafe {
            let elem_size = mem::size_of::<T>();

            let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
            alloc_guard(alloc_size);

            // handles ZSTs and `cap = 0` alike
            let ptr = if alloc_size == 0 {
                heap::EMPTY as *mut u8
            } else {
                let align = mem::align_of::<T>();
                let ptr = heap::allocate(alloc_size, align);
                if ptr.is_null() { oom() }
                ptr
            };

            RawVec { ptr: Unique::new(ptr as *mut _), cap: cap }
        }
    }

    /// Reconstitutes a RawVec from a pointer and capacity.
    ///
    /// # Undefined Behaviour
    ///
    /// The ptr must be allocated, and with the given capacity. The
    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
    /// If the ptr and capacity come from a RawVec, then this is guaranteed.
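    ///
    /// # Examples
    ///
    /// A minimal sketch of splitting a RawVec into its raw parts and
    /// reassembling it (purely illustrative):
    ///
    /// ```ignore
    /// let buf: RawVec<u8> = RawVec::with_capacity(8);
    /// let (ptr, cap) = (buf.ptr(), buf.cap());
    /// mem::forget(buf);
    /// // Reconstitute ownership; the allocation is freed exactly once
    /// // when `buf` goes out of scope.
    /// let buf = unsafe { RawVec::from_raw_parts(ptr, cap) };
    /// ```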
    pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
        RawVec { ptr: Unique::new(ptr), cap: cap }
    }

    /// Converts a `Box<[T]>` into a `RawVec<T>`.
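    ///
    /// # Examples
    ///
    /// A minimal sketch; the resulting capacity matches the slice's length:
    ///
    /// ```ignore
    /// let slice: Box<[u32]> = vec![1, 2, 3].into_boxed_slice();
    /// let buf = RawVec::from_box(slice);
    /// // The RawVec now owns the allocation; note that dropping it will
    /// // *not* drop the elements, only free the memory.
    /// assert_eq!(buf.cap(), 3);
    /// ```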
    pub fn from_box(mut slice: Box<[T]>) -> Self {
        unsafe {
            let result = RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len());
            mem::forget(slice);
            result
        }
    }
}

impl<T> RawVec<T> {
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// heap::EMPTY if `cap = 0` or T is zero-sized. In the former case, you must
    /// be careful.
    pub fn ptr(&self) -> *mut T {
        *self.ptr
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    pub fn cap(&self) -> usize {
        if mem::size_of::<T>() == 0 { !0 } else { self.cap }
    }

    /// Doubles the size of the type's backing allocation. This is common enough
    /// to warrant a dedicated method, and slightly more efficient logic can be
    /// provided for it than for the general case.
    ///
    /// This function is ideal for pushing elements one at a time because you
    /// don't need to incur the cost of the more general computations `reserve`
    /// needs to do to guard against overflow. You do, however, need to manually
    /// check whether `len == cap`.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn push(&mut self, elem: T) {
    ///         if self.len == self.buf.cap() { self.buf.double(); }
    ///         // double would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         unsafe {
    ///             ptr::write(self.buf.ptr().offset(self.len as isize), elem);
    ///         }
    ///         self.len += 1;
    ///     }
    /// }
    /// ```
    #[inline(never)]
    #[cold]
    pub fn double(&mut self) {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // Since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            let align = mem::align_of::<T>();

            let (new_cap, ptr) = if self.cap == 0 {
                // Skip to 4 because tiny Vecs are dumb; but not if that would cause overflow.
                let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
                let ptr = heap::allocate(new_cap * elem_size, align);
                (new_cap, ptr)
            } else {
                // Since we guarantee that we never allocate more than isize::MAX bytes,
                // `elem_size * self.cap <= isize::MAX` is a precondition, so this can't overflow.
                let new_cap = 2 * self.cap;
                let new_alloc_size = new_cap * elem_size;
                alloc_guard(new_alloc_size);
                let ptr = heap::reallocate(self.ptr() as *mut _,
                                           self.cap * elem_size,
                                           new_alloc_size,
                                           align);
                (new_cap, ptr)
            };

            // If allocate or reallocate fail, we'll get `null` back
            if ptr.is_null() { oom() }

            self.ptr = Unique::new(ptr as *mut _);
            self.cap = new_cap;
        }
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already,
    /// will reallocate the minimum possible amount of memory necessary.
    /// Generally this will be exactly the amount of memory necessary,
    /// but in principle the allocator is free to give back more than
    /// we asked for.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behaviour of this function may break.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
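    ///
    /// # Examples
    ///
    /// A minimal sketch mirroring the `reserve` example below; `MyVec` is a
    /// hypothetical wrapper:
    ///
    /// ```ignore
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T: Clone> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         // Ask for exactly as much extra space as needed.
    ///         self.buf.reserve_exact(self.len, elems.len());
    ///         // reserve_exact would have aborted or panicked if the len
    ///         // exceeded `isize::MAX`, so the writes below stay in bounds.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// ```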
    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let align = mem::align_of::<T>();

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they gave a bad `used_cap`.
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { return; }

            // Nothing we can really do about these checks :(
            let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow");
            let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
            alloc_guard(new_alloc_size);

            let ptr = if self.cap == 0 {
                heap::allocate(new_alloc_size, align)
            } else {
                heap::reallocate(self.ptr() as *mut _,
                                 self.cap * elem_size,
                                 new_alloc_size,
                                 align)
            };

            // If allocate or reallocate fail, we'll get `null` back
            if ptr.is_null() { oom() }

            self.ptr = Unique::new(ptr as *mut _);
            self.cap = new_cap;
        }
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate enough space plus comfortable slack
    /// space to get amortized `O(1)` behaviour. Will limit this behaviour
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behaviour of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T: Clone> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// ```
    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let align = mem::align_of::<T>();

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they gave a bad `used_cap`.
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { return; }

            // Nothing we can really do about these checks :(
            let new_cap = used_cap.checked_add(needed_extra_cap)
                                  .and_then(|cap| cap.checked_mul(2))
                                  .expect("capacity overflow");
            let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_alloc_size);

            let ptr = if self.cap == 0 {
                heap::allocate(new_alloc_size, align)
            } else {
                heap::reallocate(self.ptr() as *mut _,
                                 self.cap * elem_size,
                                 new_alloc_size,
                                 align)
            };

            // If allocate or reallocate fail, we'll get `null` back
            if ptr.is_null() { oom() }

            self.ptr = Unique::new(ptr as *mut _);
            self.cap = new_cap;
        }
    }

    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, this completely deallocates the buffer.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
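    ///
    /// # Examples
    ///
    /// A minimal sketch for a sized `T` (zero-sized types only have their
    /// private capacity field updated):
    ///
    /// ```ignore
    /// let mut buf: RawVec<u8> = RawVec::with_capacity(100);
    /// // Suppose only 10 slots are actually in use; shrink the allocation
    /// // to match. Shrinking to 0 would free the buffer entirely.
    /// buf.shrink_to_fit(10);
    /// assert_eq!(buf.cap(), 10);
    /// ```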
    pub fn shrink_to_fit(&mut self, amount: usize) {
        let elem_size = mem::size_of::<T>();
        let align = mem::align_of::<T>();

        // Set the `cap` because they might be about to promote to a `Box<[T]>`
        if elem_size == 0 {
            self.cap = amount;
            return;
        }

        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");

        if amount == 0 {
            mem::replace(self, RawVec::new());
        } else if self.cap != amount {
            unsafe {
                // Overflow check is unnecessary as the vector is already at
                // least this large.
                let ptr = heap::reallocate(self.ptr() as *mut _,
                                           self.cap * elem_size,
                                           amount * elem_size,
                                           align);
                if ptr.is_null() { oom() }
                self.ptr = Unique::new(ptr as *mut _);
            }
            self.cap = amount;
        }
    }

    /// Converts the entire buffer into a `Box<[T]>`.
    ///
    /// While it is not *strictly* Undefined Behaviour to call
    /// this procedure while some of the RawVec is uninitialized,
    /// it certainly makes it trivial to trigger it.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See the type's description for details.)
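    ///
    /// # Examples
    ///
    /// A minimal sketch; every slot must be initialized before the call:
    ///
    /// ```ignore
    /// let buf: RawVec<u8> = RawVec::with_capacity(4);
    /// unsafe {
    ///     for i in 0..4 {
    ///         ptr::write(buf.ptr().offset(i as isize), i as u8);
    ///     }
    ///     let boxed: Box<[u8]> = buf.into_box();
    ///     assert_eq!(boxed.len(), 4);
    /// }
    /// ```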
    pub unsafe fn into_box(self) -> Box<[T]> {
        // NOTE: not calling `cap()` here, actually using the real `cap` field!
        let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
        let output: Box<[T]> = Box::from_raw(slice);
        mem::forget(self);
        output
    }

    /// This is a stupid name in the hopes that someone will find this in the
    /// not too distant future and remove it with the rest of
    /// #[unsafe_no_drop_flag]
    pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool {
        self.cap != mem::POST_DROP_USIZE
    }
}

impl<T> Drop for RawVec<T> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    fn drop(&mut self) {
        let elem_size = mem::size_of::<T>();
        if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() {
            let align = mem::align_of::<T>();

            let num_bytes = elem_size * self.cap;
            unsafe {
                heap::deallocate(*self.ptr as *mut _, num_bytes, align);
            }
        }
    }
}

// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects
// * We don't overflow `usize::MAX` and actually allocate too little
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit we need to add an extra
// guard for this in case we're running on a platform which can use all 4GB in
// user-space, e.g. PAE or x32.

#[inline]
#[cfg(target_pointer_width = "64")]
fn alloc_guard(_alloc_size: usize) { }

#[inline]
#[cfg(target_pointer_width = "32")]
fn alloc_guard(alloc_size: usize) {
    assert!(alloc_size <= ::core::isize::MAX as usize, "capacity overflow");
}