]> git.proxmox.com Git - rustc.git/blame - src/liballoc/raw_vec.rs
Imported Upstream version 1.6.0+dfsg1
[rustc.git] / src / liballoc / raw_vec.rs
CommitLineData
c1a9b12d
SL
1// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2// file at the top-level directory of this distribution and at
3// http://rust-lang.org/COPYRIGHT.
4//
5// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8// option. This file may not be copied, modified, or distributed
9// except according to those terms.
10
11use core::ptr::Unique;
12use core::mem;
92a42be0 13use core::slice;
c1a9b12d
SL
14use heap;
15use super::oom;
16use super::boxed::Box;
17use core::ops::Drop;
92a42be0 18use core::cmp;
e9174d1e 19use core;
c1a9b12d
SL
20
21/// A low-level utility for more ergonomically allocating, reallocating, and deallocating a
22/// buffer of memory on the heap without having to worry about all the corner cases
23/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
24/// In particular:
25///
26/// * Produces heap::EMPTY on zero-sized types
27/// * Produces heap::EMPTY on zero-length allocations
28/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
29/// * Guards against 32-bit systems allocating more than isize::MAX bytes
30/// * Guards against overflowing your length
31/// * Aborts on OOM
32/// * Avoids freeing heap::EMPTY
33/// * Contains a ptr::Unique and thus endows the user with all related benefits
34///
35/// This type does not in any way inspect the memory that it manages. When dropped it *will*
36/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
37/// to handle the actual things *stored* inside of a RawVec.
38///
39/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
40/// This enables you to use capacity growing logic to catch the overflows in your length
41/// that might occur with zero-sized types.
42///
43/// However this means that you need to be careful when roundtripping this type
44/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
45/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
46/// field. This allows zero-sized types to not be special-cased by consumers of
47/// this type.
#[unsafe_no_drop_flag]
pub struct RawVec<T> {
    // Pointer to the backing buffer; set to heap::EMPTY (not null) when the
    // RawVec is unallocated or T is zero-sized (see `new`/`with_capacity`).
    ptr: Unique<T>,
    // Number of elements the allocation can hold. For zero-sized T this
    // private field may differ from what `cap()` reports (`cap()` always
    // returns usize::MAX for ZSTs) — see the type-level docs on roundtripping
    // with `Box<[T]>`.
    cap: usize,
}
53
impl<T> RawVec<T> {
    /// Creates the biggest possible RawVec without allocating. If T has positive
    /// size, then this makes a RawVec with capacity 0. If T has 0 size, then it
    /// makes a RawVec with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    pub fn new() -> Self {
        unsafe {
            // !0 is usize::MAX. This branch should be stripped at compile time.
            let cap = if mem::size_of::<T>() == 0 {
                !0
            } else {
                0
            };

            // heap::EMPTY doubles as "unallocated" and "zero-sized allocation":
            // no memory is requested here in either case.
            RawVec {
                ptr: Unique::new(heap::EMPTY as *mut T),
                cap: cap,
            }
        }
    }

    /// Creates a RawVec with exactly the capacity and alignment requirements
    /// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0
    /// or T is zero-sized. Note that if `T` is zero-sized this means you will *not*
    /// get a RawVec with the requested capacity!
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    pub fn with_capacity(cap: usize) -> Self {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // cap * elem_size, promoting overflow to a "capacity overflow" panic.
            let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
            // Enforce the `<= isize::MAX` byte limit on 32-bit targets.
            alloc_guard(alloc_size);

            // handles ZSTs and `cap = 0` alike: both produce alloc_size == 0,
            // and we never actually allocate in that case.
            let ptr = if alloc_size == 0 {
                heap::EMPTY as *mut u8
            } else {
                let align = mem::align_of::<T>();
                let ptr = heap::allocate(alloc_size, align);
                if ptr.is_null() {
                    oom()
                }
                ptr
            };

            RawVec {
                ptr: Unique::new(ptr as *mut _),
                cap: cap,
            }
        }
    }

    /// Reconstitutes a RawVec from a pointer and capacity.
    ///
    /// # Undefined Behavior
    ///
    /// The ptr must be allocated, and with the given capacity. The
    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
    /// If the ptr and capacity come from a RawVec, then this is guaranteed.
    pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
        RawVec {
            ptr: Unique::new(ptr),
            cap: cap,
        }
    }

    /// Converts a `Box<[T]>` into a `RawVec<T>`.
    pub fn from_box(mut slice: Box<[T]>) -> Self {
        unsafe {
            // The boxed slice's length becomes the capacity.
            let result = RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len());
            // Forget the Box so its destructor doesn't free the buffer the
            // RawVec now owns (which would be a double-free on drop).
            mem::forget(slice);
            result
        }
    }
}
139
impl<T> RawVec<T> {
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// heap::EMPTY if `cap = 0` or T is zero-sized. In the former case, you must
    /// be careful.
    pub fn ptr(&self) -> *mut T {
        *self.ptr
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    pub fn cap(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            !0
        } else {
            self.cap
        }
    }

    /// Doubles the size of the type's backing allocation. This is common enough
    /// to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// This function is ideal for when pushing elements one-at-a-time because
    /// you don't need to incur the costs of the more general computations
    /// reserve needs to do to guard against overflow. You do however need to
    /// manually check if your `len == cap`.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn push(&mut self, elem: T) {
    ///         if self.len == self.buf.cap() { self.buf.double(); }
    ///         // double would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         unsafe {
    ///             ptr::write(self.buf.ptr().offset(self.len as isize), elem);
    ///         }
    ///         self.len += 1;
    ///     }
    /// }
    /// ```
    #[inline(never)]
    #[cold]
    pub fn double(&mut self) {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            let align = mem::align_of::<T>();

            let (new_cap, ptr) = if self.cap == 0 {
                // skip to 4 because tiny Vecs are dumb; but not if that
                // would cause overflow of `new_cap * elem_size` below
                let new_cap = if elem_size > (!0) / 8 {
                    1
                } else {
                    4
                };
                let ptr = heap::allocate(new_cap * elem_size, align);
                (new_cap, ptr)
            } else {
                // Since we guarantee that we never allocate more than isize::MAX bytes,
                // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
                let new_cap = 2 * self.cap;
                let new_alloc_size = new_cap * elem_size;
                // Still need to guard the doubled size on 32-bit targets.
                alloc_guard(new_alloc_size);
                let ptr = heap::reallocate(self.ptr() as *mut _,
                                           self.cap * elem_size,
                                           new_alloc_size,
                                           align);
                (new_cap, ptr)
            };

            // If allocate or reallocate fail, we'll get `null` back
            if ptr.is_null() {
                oom()
            }

            self.ptr = Unique::new(ptr as *mut _);
            self.cap = new_cap;
        }
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already,
    /// will reallocate the minimum possible amount of memory necessary.
    /// Generally this will be exactly the amount of memory necessary,
    /// but in principle the allocator is free to give back more than
    /// we asked for.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let align = mem::align_of::<T>();

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they gave a bad `used_cap`.
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return;
            }

            // Nothing we can really do about these checks :(
            let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow");
            let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
            alloc_guard(new_alloc_size);

            // First allocation vs. growing an existing one.
            let ptr = if self.cap == 0 {
                heap::allocate(new_alloc_size, align)
            } else {
                heap::reallocate(self.ptr() as *mut _,
                                 self.cap * elem_size,
                                 new_alloc_size,
                                 align)
            };

            // If allocate or reallocate fail, we'll get `null` back
            if ptr.is_null() {
                oom()
            }

            self.ptr = Unique::new(ptr as *mut _);
            self.cap = new_cap;
        }
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// ```
    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let align = mem::align_of::<T>();

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they give a bad `used_cap`
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return;
            }

            // Nothing we can really do about these checks :(
            let required_cap = used_cap.checked_add(needed_extra_cap)
                                       .expect("capacity overflow");

            // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
            let double_cap = self.cap * 2;

            // `double_cap` guarantees exponential growth; take whichever of
            // "double" and "exactly enough" is larger.
            let new_cap = cmp::max(double_cap, required_cap);

            let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_alloc_size);

            // First allocation vs. growing an existing one.
            let ptr = if self.cap == 0 {
                heap::allocate(new_alloc_size, align)
            } else {
                heap::reallocate(self.ptr() as *mut _,
                                 self.cap * elem_size,
                                 new_alloc_size,
                                 align)
            };

            // If allocate or reallocate fail, we'll get `null` back
            if ptr.is_null() {
                oom()
            }

            self.ptr = Unique::new(ptr as *mut _);
            self.cap = new_cap;
        }
    }

    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    pub fn shrink_to_fit(&mut self, amount: usize) {
        let elem_size = mem::size_of::<T>();
        let align = mem::align_of::<T>();

        // Set the `cap` because they might be about to promote to a `Box<[T]>`
        if elem_size == 0 {
            self.cap = amount;
            return;
        }

        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");

        if amount == 0 {
            // Dropping the old self (via replace) frees the whole allocation.
            mem::replace(self, RawVec::new());
        } else if self.cap != amount {
            unsafe {
                // Overflow check is unnecessary as the vector is already at
                // least this large.
                let ptr = heap::reallocate(self.ptr() as *mut _,
                                           self.cap * elem_size,
                                           amount * elem_size,
                                           align);
                if ptr.is_null() {
                    oom()
                }
                self.ptr = Unique::new(ptr as *mut _);
            }
            self.cap = amount;
        }
    }

    /// Converts the entire buffer into `Box<[T]>`.
    ///
    /// While it is not *strictly* Undefined Behavior to call
    /// this procedure while some of the RawVec is uninitialized,
    /// it certainly makes it trivial to trigger it.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (see description of type for details)
    pub unsafe fn into_box(self) -> Box<[T]> {
        // NOTE: not calling `cap()` here, actually using the real `cap` field!
        // (`cap()` would report usize::MAX for ZSTs, which is not the length
        // the resulting boxed slice should have.)
        let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
        let output: Box<[T]> = Box::from_raw(slice);
        // Forget self so our Drop impl doesn't free the buffer the Box now owns.
        mem::forget(self);
        output
    }

    /// This is a stupid name in the hopes that someone will find this in the
    /// not too distant future and remove it with the rest of
    /// #[unsafe_no_drop_flag]
    pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool {
        // POST_DROP_USIZE is the filler value the drop-flag machinery writes
        // over `cap` after a drop; seeing it means we were already dropped.
        self.cap != mem::POST_DROP_USIZE
    }
}
461
impl<T> Drop for RawVec<T> {
    #[unsafe_destructor_blind_to_params]
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    fn drop(&mut self) {
        let elem_size = mem::size_of::<T>();
        // Nothing to free for ZSTs or never-allocated vecs (both hold
        // heap::EMPTY); the drop-flag check guards against freeing twice
        // under #[unsafe_no_drop_flag].
        if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() {
            let align = mem::align_of::<T>();

            // Can't overflow: this same product was computed when allocating.
            let num_bytes = elem_size * self.cap;
            unsafe {
                heap::deallocate(*self.ptr as *mut _, num_bytes, align);
            }
        }
    }
}
477
478
479
480// We need to guarantee the following:
481// * We don't ever allocate `> isize::MAX` byte-size objects
482// * We don't overflow `usize::MAX` and actually allocate too little
483//
484// On 64-bit we just need to check for overflow since trying to allocate
485// `> isize::MAX` bytes will surely fail. On 32-bit we need to add an extra
486// guard for this in case we're running on a platform which can use all 4GB in
487// user-space. e.g. PAE or x32
488
489#[inline]
c1a9b12d 490fn alloc_guard(alloc_size: usize) {
e9174d1e 491 if core::usize::BITS < 64 {
b039eaaf
SL
492 assert!(alloc_size <= ::core::isize::MAX as usize,
493 "capacity overflow");
e9174d1e 494 }
c1a9b12d 495}
92a42be0
SL
496
497
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn reserve_does_not_overallocate() {
        {
            // The very first `reserve` on a fresh RawVec allocates exactly
            // what was asked for, like `reserve_exact`.
            let mut buf: RawVec<u32> = RawVec::new();
            buf.reserve(0, 9);
            assert_eq!(9, buf.cap());
        }

        {
            let mut buf: RawVec<u32> = RawVec::new();
            buf.reserve(0, 7);
            assert_eq!(7, buf.cap());
            // 97 is more than double of 7, so `reserve` should work
            // like `reserve_exact` here as well.
            buf.reserve(7, 90);
            assert_eq!(97, buf.cap());
        }

        {
            let mut buf: RawVec<u32> = RawVec::new();
            buf.reserve(0, 12);
            assert_eq!(12, buf.cap());
            buf.reserve(12, 3);
            // 3 is less than half of 12, so `reserve` must grow
            // exponentially. At the time of writing this test the growth
            // factor is 2, giving a new capacity of 24, but a factor of
            // 1.5 would be acceptable too. Hence `>= 18` in the assert.
            assert!(buf.cap() >= 12 + 12 / 2);
        }
    }
}