// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use core::cmp;
use core::mem;
use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;

use alloc::{Alloc, Layout, Global, oom};
use alloc::CollectionAllocErr;
use alloc::CollectionAllocErr::*;
use boxed::Box;

/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces Unique::empty() on zero-sized types
/// * Produces Unique::empty() on zero-length allocations
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
/// * Guards against 32-bit systems allocating more than isize::MAX bytes
/// * Guards against overflowing your length
/// * Aborts on OOM
/// * Avoids freeing Unique::empty()
/// * Contains a ptr::Unique and thus endows the user with all related benefits
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
/// to handle the actual things *stored* inside of a RawVec.
///
/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
/// This enables you to use capacity-growing logic to catch the overflows in your length
/// that might occur with zero-sized types.
///
/// However this means that you need to be careful when roundtripping this type
/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
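///
/// # Examples
///
/// A minimal sketch of delayed allocation (assuming the nightly `alloc`
/// feature, as in the other examples in this file):
///
/// ```
/// # #![feature(alloc)]
/// # extern crate alloc;
/// # use alloc::raw_vec::RawVec;
/// # fn main() {
/// let mut buf: RawVec<u32> = RawVec::new(); // no allocation yet
/// buf.reserve(0, 16);                       // now space for at least 16 elements
/// assert!(buf.cap() >= 16);
/// # }
/// ```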
#[allow(missing_debug_implementations)]
pub struct RawVec<T, A: Alloc = Global> {
    ptr: Unique<T>,
    cap: usize,
    a: A,
}

impl<T, A: Alloc> RawVec<T, A> {
    /// Like `new` but parameterized over the choice of allocator for
    /// the returned RawVec.
    pub const fn new_in(a: A) -> Self {
        // !0 is usize::MAX. This branch should be stripped at compile time.
        // FIXME(mark-i-m): use this line when `if`s are allowed in `const`
        //let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };

        // Unique::empty() doubles as "unallocated" and "zero-sized allocation"
        RawVec {
            ptr: Unique::empty(),
            // FIXME(mark-i-m): use `cap` when ifs are allowed in const
            cap: [0, !0][(mem::size_of::<T>() == 0) as usize],
            a,
        }
    }

    /// Like `with_capacity` but parameterized over the choice of
    /// allocator for the returned RawVec.
    #[inline]
    pub fn with_capacity_in(cap: usize, a: A) -> Self {
        RawVec::allocate_in(cap, false, a)
    }

    /// Like `with_capacity_zeroed` but parameterized over the choice
    /// of allocator for the returned RawVec.
    #[inline]
    pub fn with_capacity_zeroed_in(cap: usize, a: A) -> Self {
        RawVec::allocate_in(cap, true, a)
    }

    fn allocate_in(cap: usize, zeroed: bool, mut a: A) -> Self {
        unsafe {
            let elem_size = mem::size_of::<T>();

            let alloc_size = cap.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow());
            alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());

            // handles ZSTs and `cap = 0` alike
            let ptr = if alloc_size == 0 {
                NonNull::<T>::dangling().as_opaque()
            } else {
                let align = mem::align_of::<T>();
                let result = if zeroed {
                    a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap())
                } else {
                    a.alloc(Layout::from_size_align(alloc_size, align).unwrap())
                };
                match result {
                    Ok(ptr) => ptr,
                    Err(_) => oom(),
                }
            };

            RawVec {
                ptr: ptr.cast().into(),
                cap,
                a,
            }
        }
    }
}

impl<T> RawVec<T, Global> {
    /// Creates the biggest possible RawVec (on the system heap)
    /// without allocating. If T has positive size, then this makes a
    /// RawVec with capacity 0. If T has 0 size, then it makes a
    /// RawVec with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    pub const fn new() -> Self {
        Self::new_in(Global)
    }

    /// Creates a RawVec (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; cap]`. This is
    /// equivalent to calling RawVec::new when `cap` is 0 or T is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a RawVec with the requested capacity!
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    /// `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
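    ///
    /// # Examples
    ///
    /// A minimal sketch (for a sized `T`, the capacity is exactly as requested):
    ///
    /// ```
    /// # #![feature(alloc)]
    /// # extern crate alloc;
    /// # use alloc::raw_vec::RawVec;
    /// # fn main() {
    /// let buf: RawVec<u16> = RawVec::with_capacity(8);
    /// assert_eq!(buf.cap(), 8);
    /// # }
    /// ```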
    #[inline]
    pub fn with_capacity(cap: usize) -> Self {
        RawVec::allocate_in(cap, false, Global)
    }

    /// Like `with_capacity` but guarantees the buffer is zeroed.
    #[inline]
    pub fn with_capacity_zeroed(cap: usize) -> Self {
        RawVec::allocate_in(cap, true, Global)
    }
}

impl<T, A: Alloc> RawVec<T, A> {
    /// Reconstitutes a RawVec from a pointer, capacity, and allocator.
    ///
    /// # Undefined Behavior
    ///
    /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
    /// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed.
    pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: A) -> Self {
        RawVec {
            ptr: Unique::new_unchecked(ptr),
            cap,
            a,
        }
    }
}

impl<T> RawVec<T, Global> {
    /// Reconstitutes a RawVec from a pointer and capacity.
    ///
    /// # Undefined Behavior
    ///
    /// The ptr must be allocated (on the system heap), and with the given capacity. The
    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
    /// If the ptr and capacity come from a RawVec, then this is guaranteed.
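    ///
    /// # Examples
    ///
    /// A minimal round-trip sketch; the destructuring is hand-rolled here since
    /// RawVec exposes no `into_raw_parts` helper:
    ///
    /// ```
    /// # #![feature(alloc)]
    /// # extern crate alloc;
    /// # use std::mem;
    /// # use alloc::raw_vec::RawVec;
    /// # fn main() {
    /// let buf: RawVec<u8> = RawVec::with_capacity(4);
    /// let (ptr, cap) = (buf.ptr(), buf.cap());
    /// mem::forget(buf); // don't free; ownership moves to the raw parts
    /// let buf = unsafe { RawVec::<u8>::from_raw_parts(ptr, cap) };
    /// assert_eq!(buf.cap(), 4);
    /// # }
    /// ```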
    pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
        RawVec {
            ptr: Unique::new_unchecked(ptr),
            cap,
            a: Global,
        }
    }

    /// Converts a `Box<[T]>` into a `RawVec<T>`.
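    ///
    /// # Examples
    ///
    /// A minimal sketch; per the type-level docs, the resulting RawVec reports
    /// the box's length as its capacity:
    ///
    /// ```
    /// # #![feature(alloc)]
    /// # extern crate alloc;
    /// # use alloc::raw_vec::RawVec;
    /// # fn main() {
    /// let boxed: Box<[u8]> = vec![1, 2, 3].into_boxed_slice();
    /// let raw: RawVec<u8> = RawVec::from_box(boxed);
    /// assert_eq!(raw.cap(), 3);
    /// # }
    /// ```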
    pub fn from_box(mut slice: Box<[T]>) -> Self {
        unsafe {
            let result = RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len());
            mem::forget(slice);
            result
        }
    }
}

impl<T, A: Alloc> RawVec<T, A> {
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// Unique::empty() if `cap = 0` or T is zero-sized. In the former case, you must
    /// be careful.
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
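    ///
    /// # Examples
    ///
    /// A minimal sketch of the zero-sized-type behavior:
    ///
    /// ```
    /// # #![feature(alloc)]
    /// # extern crate alloc;
    /// # use alloc::raw_vec::RawVec;
    /// # fn main() {
    /// let v: RawVec<()> = RawVec::new();
    /// assert_eq!(v.cap(), usize::max_value());
    /// # }
    /// ```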
    #[inline(always)]
    pub fn cap(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            !0
        } else {
            self.cap
        }
    }

    /// Returns a shared reference to the allocator backing this RawVec.
    pub fn alloc(&self) -> &A {
        &self.a
    }

    /// Returns a mutable reference to the allocator backing this RawVec.
    pub fn alloc_mut(&mut self) -> &mut A {
        &mut self.a
    }

    fn current_layout(&self) -> Option<Layout> {
        if self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                let align = mem::align_of::<T>();
                let size = mem::size_of::<T>() * self.cap;
                Some(Layout::from_size_align_unchecked(size, align))
            }
        }
    }

    /// Doubles the size of the type's backing allocation. This is common enough
    /// to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// This function is ideal when pushing elements one at a time because
    /// you don't need to incur the costs of the more general computations
    /// reserve needs to do to guard against overflow. You do however need to
    /// manually check if your `len == cap`.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    /// all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    /// `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(alloc)]
    /// # extern crate alloc;
    /// # use std::ptr;
    /// # use alloc::raw_vec::RawVec;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn push(&mut self, elem: T) {
    ///         if self.len == self.buf.cap() { self.buf.double(); }
    ///         // double would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         unsafe {
    ///             ptr::write(self.buf.ptr().offset(self.len as isize), elem);
    ///         }
    ///         self.len += 1;
    ///     }
    /// }
    /// # fn main() {
    /// # let mut vec = MyVec { buf: RawVec::new(), len: 0 };
    /// # vec.push(1);
    /// # }
    /// ```
    #[inline(never)]
    #[cold]
    pub fn double(&mut self) {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            let (new_cap, uniq) = match self.current_layout() {
                Some(cur) => {
                    // Since we guarantee that we never allocate more than
                    // isize::MAX bytes, `elem_size * self.cap <= isize::MAX` as
                    // a precondition, so this can't overflow. Additionally the
                    // alignment will never be too large as to "not be
                    // satisfiable", so `Layout::from_size_align` will always
                    // return `Some`.
                    //
                    // tl;dr: we bypass runtime checks due to dynamic assertions
                    // in this module, allowing us to use
                    // `from_size_align_unchecked`.
                    let new_cap = 2 * self.cap;
                    let new_size = new_cap * elem_size;
                    alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
                    let ptr_res = self.a.realloc(NonNull::from(self.ptr).as_opaque(),
                                                 cur,
                                                 new_size);
                    match ptr_res {
                        Ok(ptr) => (new_cap, ptr.cast().into()),
                        Err(_) => oom(),
                    }
                }
                None => {
                    // skip to 4 because tiny Vecs are dumb; but not if that
                    // would cause overflow
                    let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
                    match self.a.alloc_array::<T>(new_cap) {
                        Ok(ptr) => (new_cap, ptr.into()),
                        Err(_) => oom(),
                    }
                }
            };
            self.ptr = uniq;
            self.cap = new_cap;
        }
    }

    /// Attempts to double the size of the type's backing allocation in place. This is common
    /// enough to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// Returns true if the reallocation attempt has succeeded, or false otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    /// all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    /// `isize::MAX` bytes.
    #[inline(never)]
    #[cold]
    pub fn double_in_place(&mut self) -> bool {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false, // nothing to double
            };

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            // Since we guarantee that we never allocate more than isize::MAX
            // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
            // this can't overflow.
            //
            // Similarly to `double` above, we can go straight to
            // `Layout::from_size_align_unchecked` as we know this won't
            // overflow and the alignment is sufficiently small.
            let new_cap = 2 * self.cap;
            let new_size = new_cap * elem_size;
            alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
            match self.a.grow_in_place(NonNull::from(self.ptr).as_opaque(), old_layout, new_size) {
                Ok(_) => {
                    // We can't directly divide `size`.
                    self.cap = new_cap;
                    true
                }
                Err(_) => {
                    false
                }
            }
        }
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already,
    /// will reallocate the minimum possible amount of memory necessary.
    /// Generally this will be exactly the amount of memory necessary,
    /// but in principle the allocator is free to give back more than
    /// we asked for.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    /// `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
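    ///
    /// # Examples
    ///
    /// A minimal sketch of the fallible path; errors come back as
    /// `CollectionAllocErr` instead of panicking or aborting:
    ///
    /// ```
    /// # #![feature(alloc)]
    /// # extern crate alloc;
    /// # use alloc::raw_vec::RawVec;
    /// # fn main() {
    /// let mut buf: RawVec<u8> = RawVec::new();
    /// assert!(buf.try_reserve_exact(0, 8).is_ok());
    /// assert!(buf.cap() >= 8);
    /// # }
    /// ```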
    pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize)
        -> Result<(), CollectionAllocErr> {

        unsafe {
            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they gave a bad `used_cap`.
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return Ok(());
            }

            // Nothing we can really do about these checks :(
            let new_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?;
            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;

            alloc_guard(new_layout.size())?;

            let res = match self.current_layout() {
                Some(layout) => {
                    debug_assert!(new_layout.align() == layout.align());
                    self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
                }
                None => self.a.alloc(new_layout),
            };

            self.ptr = res?.cast().into();
            self.cap = new_cap;

            Ok(())
        }
    }

    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
        match self.try_reserve_exact(used_cap, needed_extra_cap) {
            Err(CapacityOverflow) => capacity_overflow(),
            Err(AllocErr) => oom(),
            Ok(()) => { /* yay */ }
        }
    }

    /// Calculates the buffer's new capacity given that it'll hold `used_cap +
    /// needed_extra_cap` elements. This logic is used in amortized reserve methods.
    /// Returns the new capacity.
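    ///
    /// For example (illustrative numbers): with `self.cap = 8`, `used_cap = 8`,
    /// and `needed_extra_cap = 1`, the required capacity is 9 but the doubled
    /// capacity 16 is larger, so growth stays exponential.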
    fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize)
        -> Result<usize, CollectionAllocErr> {

        // Nothing we can really do about these checks :(
        let required_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?;
        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
        let double_cap = self.cap * 2;
        // `double_cap` guarantees exponential growth.
        Ok(cmp::max(double_cap, required_cap))
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    /// `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(alloc)]
    /// # extern crate alloc;
    /// # use std::ptr;
    /// # use alloc::raw_vec::RawVec;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T: Clone> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// # fn main() {
    /// # let mut vector = MyVec { buf: RawVec::new(), len: 0 };
    /// # vector.push_all(&[1, 3, 5, 7, 9]);
    /// # }
    /// ```
    pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize)
        -> Result<(), CollectionAllocErr> {
        unsafe {
            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they give a bad `used_cap`
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return Ok(());
            }

            let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)?;
            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;

            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_layout.size())?;

            let res = match self.current_layout() {
                Some(layout) => {
                    debug_assert!(new_layout.align() == layout.align());
                    self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
                }
                None => self.a.alloc(new_layout),
            };

            self.ptr = res?.cast().into();
            self.cap = new_cap;

            Ok(())
        }
    }

    /// The same as `try_reserve`, but lowers errors: `CapacityOverflow` becomes a
    /// "capacity overflow" panic and `AllocErr` becomes a call to `oom()`.
    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
        match self.try_reserve(used_cap, needed_extra_cap) {
            Err(CapacityOverflow) => capacity_overflow(),
            Err(AllocErr) => oom(),
            Ok(()) => { /* yay */ }
        }
    }

    /// Attempts to ensure that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate in place enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// Returns true if the reallocation attempt has succeeded, or false otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    /// `isize::MAX` bytes.
    pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
        unsafe {
            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity. If the current `cap` is 0, we can't
            // reallocate in place.
            // Wrapping in case they give a bad `used_cap`
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false,
            };
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return false;
            }

            let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)
                .unwrap_or_else(|_| capacity_overflow());

            // Here, `cap < used_cap + needed_extra_cap <= new_cap`
            // (regardless of whether `self.cap - used_cap` wrapped).
            // Therefore we can safely call grow_in_place.

            let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
            match self.a.grow_in_place(
                NonNull::from(self.ptr).as_opaque(), old_layout, new_layout.size(),
            ) {
                Ok(_) => {
                    self.cap = new_cap;
                    true
                }
                Err(_) => {
                    false
                }
            }
        }
    }

    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
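    ///
    /// # Examples
    ///
    /// A minimal sketch with illustrative values:
    ///
    /// ```
    /// # #![feature(alloc)]
    /// # extern crate alloc;
    /// # use alloc::raw_vec::RawVec;
    /// # fn main() {
    /// let mut buf: RawVec<u64> = RawVec::with_capacity(10);
    /// buf.shrink_to_fit(4);
    /// assert_eq!(buf.cap(), 4);
    /// # }
    /// ```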
    pub fn shrink_to_fit(&mut self, amount: usize) {
        let elem_size = mem::size_of::<T>();

        // Set the `cap` because they might be about to promote to a `Box<[T]>`
        if elem_size == 0 {
            self.cap = amount;
            return;
        }

        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");

        if amount == 0 {
            // We want to create a new zero-length vector within the
            // same allocator. We use ptr::write to avoid an
            // erroneous attempt to drop the contents, and we use
            // ptr::read to sidestep the restriction against destructuring
            // types that implement Drop.

            unsafe {
                let a = ptr::read(&self.a as *const A);
                self.dealloc_buffer();
                ptr::write(self, RawVec::new_in(a));
            }
        } else if self.cap != amount {
            unsafe {
                // We know here that our `amount` is greater than zero. This
                // implies, via the assert above, that capacity is also greater
                // than zero, which means that we've got a current layout that
                // "fits"
                //
                // We also know that `self.cap` is greater than `amount`, and
                // consequently we don't need runtime checks for creating either
                // layout
                let old_size = elem_size * self.cap;
                let new_size = elem_size * amount;
                let align = mem::align_of::<T>();
                let old_layout = Layout::from_size_align_unchecked(old_size, align);
                match self.a.realloc(NonNull::from(self.ptr).as_opaque(),
                                     old_layout,
                                     new_size) {
                    Ok(p) => self.ptr = p.cast().into(),
                    Err(_) => oom(),
                }
            }
            self.cap = amount;
        }
    }
}

impl<T> RawVec<T, Global> {
    /// Converts the entire buffer into `Box<[T]>`.
    ///
    /// While it is not *strictly* Undefined Behavior to call
    /// this procedure while some of the RawVec is uninitialized,
    /// it certainly makes it trivial to trigger it.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (see description of type for details)
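    ///
    /// # Examples
    ///
    /// A minimal sketch; the caller must have initialized all `cap` elements
    /// before converting:
    ///
    /// ```
    /// # #![feature(alloc)]
    /// # extern crate alloc;
    /// # use std::ptr;
    /// # use alloc::raw_vec::RawVec;
    /// # fn main() {
    /// let buf: RawVec<u32> = RawVec::with_capacity(2);
    /// unsafe {
    ///     ptr::write(buf.ptr(), 1);
    ///     ptr::write(buf.ptr().offset(1), 2);
    ///     let boxed = buf.into_box();
    ///     assert_eq!(&boxed[..], &[1, 2][..]);
    /// }
    /// # }
    /// ```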
    pub unsafe fn into_box(self) -> Box<[T]> {
        // NOTE: not calling `cap()` here, actually using the real `cap` field!
        let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
        let output: Box<[T]> = Box::from_raw(slice);
        mem::forget(self);
        output
    }
}

impl<T, A: Alloc> RawVec<T, A> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    pub unsafe fn dealloc_buffer(&mut self) {
        let elem_size = mem::size_of::<T>();
        if elem_size != 0 {
            if let Some(layout) = self.current_layout() {
                self.a.dealloc(NonNull::from(self.ptr).as_opaque(), layout);
            }
        }
    }
}

unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec<T, A> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    fn drop(&mut self) {
        unsafe { self.dealloc_buffer(); }
    }
}


// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects
// * We don't overflow `usize::MAX` and actually allocate too little
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space. e.g. PAE or x32
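//
// For example (illustrative): on a 32-bit target `alloc_guard((isize::MAX as
// usize) + 1)` reports `CapacityOverflow`, while on 64-bit it returns `Ok(())`
// and the allocator itself is trusted to reject absurd requests.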

#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> {
    if mem::size_of::<usize>() < 8 && alloc_size > ::core::isize::MAX as usize {
        Err(CapacityOverflow)
    } else {
        Ok(())
    }
}

// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
fn capacity_overflow() -> ! {
    panic!("capacity overflow")
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloc::Opaque;

    #[test]
    fn allocator_param() {
        use allocator::{Alloc, AllocErr};

        // Writing a test of integration between third-party
        // allocators and RawVec is a little tricky because the RawVec
        // API does not expose fallible allocation methods, so we
        // cannot check what happens when allocator is exhausted
        // (beyond detecting a panic).
        //
        // Instead, this just checks that the RawVec methods do at
        // least go through the Allocator API when it reserves
        // storage.

        // A dumb allocator that consumes a fixed amount of fuel
        // before allocation attempts start failing.
        struct BoundedAlloc { fuel: usize }
        unsafe impl Alloc for BoundedAlloc {
            unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<Opaque>, AllocErr> {
                let size = layout.size();
                if size > self.fuel {
                    return Err(AllocErr);
                }
                match Global.alloc(layout) {
                    ok @ Ok(_) => { self.fuel -= size; ok }
                    err @ Err(_) => err,
                }
            }
            unsafe fn dealloc(&mut self, ptr: NonNull<Opaque>, layout: Layout) {
                Global.dealloc(ptr, layout)
            }
        }

        let a = BoundedAlloc { fuel: 500 };
        let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
        assert_eq!(v.a.fuel, 450);
        v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
        assert_eq!(v.a.fuel, 250);
    }

    #[test]
    fn reserve_does_not_overallocate() {
        {
            let mut v: RawVec<u32> = RawVec::new();
            // First `reserve` allocates like `reserve_exact`
            v.reserve(0, 9);
            assert_eq!(9, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new();
            v.reserve(0, 7);
            assert_eq!(7, v.cap());
            // 97 is more than double of 7, so `reserve` should work
            // like `reserve_exact`.
            v.reserve(7, 90);
            assert_eq!(97, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new();
            v.reserve(0, 12);
            assert_eq!(12, v.cap());
            v.reserve(12, 3);
            // 3 is less than half of 12, so `reserve` must grow
            // exponentially. At the time of writing this test grow
            // factor is 2, so new capacity is 24, however, grow factor
            // of 1.5 is OK too. Hence `>= 18` in assert.
            assert!(v.cap() >= 12 + 12 / 2);
        }
    }
}