]>
Commit | Line | Data |
---|---|---|
c1a9b12d SL |
1 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT |
2 | // file at the top-level directory of this distribution and at | |
3 | // http://rust-lang.org/COPYRIGHT. | |
4 | // | |
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | |
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | |
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | |
8 | // option. This file may not be copied, modified, or distributed | |
9 | // except according to those terms. | |
10 | ||
11 | use core::ptr::Unique; | |
12 | use core::mem; | |
92a42be0 | 13 | use core::slice; |
c1a9b12d SL |
14 | use heap; |
15 | use super::oom; | |
16 | use super::boxed::Box; | |
17 | use core::ops::Drop; | |
92a42be0 | 18 | use core::cmp; |
c1a9b12d | 19 | |
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces heap::EMPTY on zero-sized types
/// * Produces heap::EMPTY on zero-length allocations
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
/// * Guards against 32-bit systems allocating more than isize::MAX bytes
/// * Guards against overflowing your length
/// * Aborts on OOM
/// * Avoids freeing heap::EMPTY
/// * Contains a ptr::Unique and thus endows the user with all related benefits
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
/// to handle the actual things *stored* inside of a RawVec.
///
/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
/// This enables you to use capacity growing logic to catch the overflows in your length
/// that might occur with zero-sized types.
///
/// However this means that you need to be careful when roundtripping this type
/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[unsafe_no_drop_flag]
pub struct RawVec<T> {
    // Pointer to the buffer; heap::EMPTY when unallocated or T is zero-sized.
    ptr: Unique<T>,
    // Capacity in elements. NOTE: for ZSTs `cap()` reports usize::MAX
    // regardless of this field's value (see type-level docs).
    cap: usize,
}
52 | ||
53 | impl<T> RawVec<T> { | |
54 | /// Creates the biggest possible RawVec without allocating. If T has positive | |
55 | /// size, then this makes a RawVec with capacity 0. If T has 0 size, then it | |
56 | /// it makes a RawVec with capacity `usize::MAX`. Useful for implementing | |
57 | /// delayed allocation. | |
58 | pub fn new() -> Self { | |
59 | unsafe { | |
60 | // !0 is usize::MAX. This branch should be stripped at compile time. | |
b039eaaf SL |
61 | let cap = if mem::size_of::<T>() == 0 { |
62 | !0 | |
63 | } else { | |
64 | 0 | |
65 | }; | |
c1a9b12d SL |
66 | |
67 | // heap::EMPTY doubles as "unallocated" and "zero-sized allocation" | |
b039eaaf SL |
68 | RawVec { |
69 | ptr: Unique::new(heap::EMPTY as *mut T), | |
70 | cap: cap, | |
71 | } | |
c1a9b12d SL |
72 | } |
73 | } | |
74 | ||
75 | /// Creates a RawVec with exactly the capacity and alignment requirements | |
76 | /// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0 | |
77 | /// or T is zero-sized. Note that if `T` is zero-sized this means you will *not* | |
78 | /// get a RawVec with the requested capacity! | |
79 | /// | |
80 | /// # Panics | |
81 | /// | |
82 | /// * Panics if the requested capacity exceeds `usize::MAX` bytes. | |
83 | /// * Panics on 32-bit platforms if the requested capacity exceeds | |
84 | /// `isize::MAX` bytes. | |
85 | /// | |
86 | /// # Aborts | |
87 | /// | |
88 | /// Aborts on OOM | |
89 | pub fn with_capacity(cap: usize) -> Self { | |
90 | unsafe { | |
91 | let elem_size = mem::size_of::<T>(); | |
92 | ||
93 | let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow"); | |
94 | alloc_guard(alloc_size); | |
95 | ||
96 | // handles ZSTs and `cap = 0` alike | |
97 | let ptr = if alloc_size == 0 { | |
98 | heap::EMPTY as *mut u8 | |
99 | } else { | |
100 | let align = mem::align_of::<T>(); | |
101 | let ptr = heap::allocate(alloc_size, align); | |
b039eaaf SL |
102 | if ptr.is_null() { |
103 | oom() | |
104 | } | |
c1a9b12d SL |
105 | ptr |
106 | }; | |
107 | ||
b039eaaf SL |
108 | RawVec { |
109 | ptr: Unique::new(ptr as *mut _), | |
110 | cap: cap, | |
111 | } | |
c1a9b12d SL |
112 | } |
113 | } | |
114 | ||
115 | /// Reconstitutes a RawVec from a pointer and capacity. | |
116 | /// | |
b039eaaf | 117 | /// # Undefined Behavior |
c1a9b12d SL |
118 | /// |
119 | /// The ptr must be allocated, and with the given capacity. The | |
120 | /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems). | |
121 | /// If the ptr and capacity come from a RawVec, then this is guaranteed. | |
122 | pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self { | |
b039eaaf SL |
123 | RawVec { |
124 | ptr: Unique::new(ptr), | |
125 | cap: cap, | |
126 | } | |
c1a9b12d SL |
127 | } |
128 | ||
129 | /// Converts a `Box<[T]>` into a `RawVec<T>`. | |
130 | pub fn from_box(mut slice: Box<[T]>) -> Self { | |
131 | unsafe { | |
132 | let result = RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len()); | |
133 | mem::forget(slice); | |
134 | result | |
135 | } | |
136 | } | |
137 | } | |
138 | ||
impl<T> RawVec<T> {
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// heap::EMPTY if `cap = 0` or T is zero-sized. In the former case, you must
    /// be careful.
    pub fn ptr(&self) -> *mut T {
        *self.ptr
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn cap(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            !0
        } else {
            self.cap
        }
    }

    /// Doubles the size of the type's backing allocation. This is common enough
    /// to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// This function is ideal for when pushing elements one-at-a-time because
    /// you don't need to incur the costs of the more general computations
    /// reserve needs to do to guard against overflow. You do however need to
    /// manually check if your `len == cap`.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn push(&mut self, elem: T) {
    ///         if self.len == self.buf.cap() { self.buf.double(); }
    ///         // double would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         unsafe {
    ///             ptr::write(self.buf.ptr().offset(self.len as isize), elem);
    ///         }
    ///         self.len += 1;
    ///     }
    /// }
    /// ```
    #[inline(never)]
    #[cold]
    pub fn double(&mut self) {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            let align = mem::align_of::<T>();

            let (new_cap, ptr) = if self.cap == 0 {
                // skip to 4 because tiny Vec's are dumb; but not if that would cause overflow
                let new_cap = if elem_size > (!0) / 8 {
                    1
                } else {
                    4
                };
                let ptr = heap::allocate(new_cap * elem_size, align);
                (new_cap, ptr)
            } else {
                // Since we guarantee that we never allocate more than isize::MAX bytes,
                // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
                let new_cap = 2 * self.cap;
                let new_alloc_size = new_cap * elem_size;
                alloc_guard(new_alloc_size);
                let ptr = heap::reallocate(self.ptr() as *mut _,
                                           self.cap * elem_size,
                                           new_alloc_size,
                                           align);
                (new_cap, ptr)
            };

            // If allocate or reallocate fail, we'll get `null` back
            if ptr.is_null() {
                oom()
            }

            self.ptr = Unique::new(ptr as *mut _);
            self.cap = new_cap;
        }
    }

    /// Attempts to double the size of the type's backing allocation in place. This is common
    /// enough to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// Returns true if the reallocation attempt has succeeded, or false otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    #[inline(never)]
    #[cold]
    pub fn double_in_place(&mut self) -> bool {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let align = mem::align_of::<T>();

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            // Since we guarantee that we never allocate more than isize::MAX bytes,
            // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
            let new_cap = 2 * self.cap;
            let new_alloc_size = new_cap * elem_size;

            alloc_guard(new_alloc_size);
            // reallocate_inplace returns the usable size of the (possibly
            // unchanged) allocation; success means it grew to at least the
            // requested size.
            let size = heap::reallocate_inplace(self.ptr() as *mut _,
                                                self.cap * elem_size,
                                                new_alloc_size,
                                                align);
            if size >= new_alloc_size {
                // We can't directly divide `size`.
                self.cap = new_cap;
            }
            size >= new_alloc_size
        }
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already,
    /// will reallocate the minimum possible amount of memory necessary.
    /// Generally this will be exactly the amount of memory necessary,
    /// but in principle the allocator is free to give back more than
    /// we asked for.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let align = mem::align_of::<T>();

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they gave a bad `used_cap`.
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return;
            }

            // Nothing we can really do about these checks :(
            let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow");
            let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
            alloc_guard(new_alloc_size);

            // `cap == 0` means we've never allocated, so this is a fresh
            // allocation rather than a reallocation.
            let ptr = if self.cap == 0 {
                heap::allocate(new_alloc_size, align)
            } else {
                heap::reallocate(self.ptr() as *mut _,
                                 self.cap * elem_size,
                                 new_alloc_size,
                                 align)
            };

            // If allocate or reallocate fail, we'll get `null` back
            if ptr.is_null() {
                oom()
            }

            self.ptr = Unique::new(ptr as *mut _);
            self.cap = new_cap;
        }
    }

    /// Calculates the buffer's new size given that it'll hold `used_cap +
    /// needed_extra_cap` elements. This logic is used in amortized reserve methods.
    /// Returns `(new_capacity, new_alloc_size)`.
    fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> (usize, usize) {
        let elem_size = mem::size_of::<T>();
        // Nothing we can really do about these checks :(
        let required_cap = used_cap.checked_add(needed_extra_cap)
                                   .expect("capacity overflow");
        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
        let double_cap = self.cap * 2;
        // `double_cap` guarantees exponential growth.
        let new_cap = cmp::max(double_cap, required_cap);
        let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
        (new_cap, new_alloc_size)
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// ```
    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let align = mem::align_of::<T>();

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they give a bad `used_cap`
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return;
            }

            let (new_cap, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_alloc_size);

            // `cap == 0` means we've never allocated, so this is a fresh
            // allocation rather than a reallocation.
            let ptr = if self.cap == 0 {
                heap::allocate(new_alloc_size, align)
            } else {
                heap::reallocate(self.ptr() as *mut _,
                                 self.cap * elem_size,
                                 new_alloc_size,
                                 align)
            };

            // If allocate or reallocate fail, we'll get `null` back
            if ptr.is_null() {
                oom()
            }

            self.ptr = Unique::new(ptr as *mut _);
            self.cap = new_cap;
        }
    }

    /// Attempts to ensure that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate in place enough space plus comfortable slack
    /// space to get amortized `O(1)` behaviour. Will limit this behaviour
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behaviour of this function may break.
    ///
    /// Returns true if the reallocation attempt has succeeded, or false otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let align = mem::align_of::<T>();

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity. If the current `cap` is 0, we can't
            // reallocate in place.
            // Wrapping in case they give a bad `used_cap`
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap || self.cap == 0 {
                return false;
            }

            let (_, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_alloc_size);

            let size = heap::reallocate_inplace(self.ptr() as *mut _,
                                                self.cap * elem_size,
                                                new_alloc_size,
                                                align);
            if size >= new_alloc_size {
                // Note: unlike `double_in_place`, the capacity here is derived
                // from the requested allocation size, not a precomputed cap.
                self.cap = new_alloc_size / elem_size;
            }
            size >= new_alloc_size
        }
    }

    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    pub fn shrink_to_fit(&mut self, amount: usize) {
        let elem_size = mem::size_of::<T>();
        let align = mem::align_of::<T>();

        // Set the `cap` because they might be about to promote to a `Box<[T]>`
        if elem_size == 0 {
            self.cap = amount;
            return;
        }

        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");

        if amount == 0 {
            // Replacing with a fresh RawVec drops (frees) the old allocation.
            mem::replace(self, RawVec::new());
        } else if self.cap != amount {
            unsafe {
                // Overflow check is unnecessary as the vector is already at
                // least this large.
                let ptr = heap::reallocate(self.ptr() as *mut _,
                                           self.cap * elem_size,
                                           amount * elem_size,
                                           align);
                if ptr.is_null() {
                    oom()
                }
                self.ptr = Unique::new(ptr as *mut _);
            }
            self.cap = amount;
        }
    }

    /// Converts the entire buffer into `Box<[T]>`.
    ///
    /// While it is not *strictly* Undefined Behavior to call
    /// this procedure while some of the RawVec is uninitialized,
    /// it certainly makes it trivial to trigger it.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (see description of type for details)
    pub unsafe fn into_box(self) -> Box<[T]> {
        // NOTE: not calling `cap()` here, actually using the real `cap` field!
        let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
        let output: Box<[T]> = Box::from_raw(slice);
        mem::forget(self);
        output
    }

    /// This is a stupid name in the hopes that someone will find this in the
    /// not too distant future and remove it with the rest of
    /// #[unsafe_no_drop_flag]
    pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool {
        // POST_DROP_USIZE is the sentinel the compiler writes into dropped
        // values under #[unsafe_no_drop_flag]; seeing it means "already dropped".
        self.cap != mem::POST_DROP_USIZE
    }
}
557 | ||
558 | impl<T> Drop for RawVec<T> { | |
b039eaaf | 559 | #[unsafe_destructor_blind_to_params] |
c1a9b12d SL |
560 | /// Frees the memory owned by the RawVec *without* trying to Drop its contents. |
561 | fn drop(&mut self) { | |
562 | let elem_size = mem::size_of::<T>(); | |
563 | if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() { | |
564 | let align = mem::align_of::<T>(); | |
565 | ||
566 | let num_bytes = elem_size * self.cap; | |
567 | unsafe { | |
568 | heap::deallocate(*self.ptr as *mut _, num_bytes, align); | |
569 | } | |
570 | } | |
571 | } | |
572 | } | |
573 | ||
574 | ||
575 | ||
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects
// * We don't overflow `usize::MAX` and actually allocate too little
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space. e.g. PAE or x32

#[inline]
fn alloc_guard(alloc_size: usize) {
    // Only narrow (sub-64-bit) pointer widths need the explicit isize::MAX
    // check; the comparison against size_of::<usize>() folds away at
    // compile time.
    let narrow_platform = mem::size_of::<usize>() < 8;
    if narrow_platform {
        let byte_limit = ::core::isize::MAX as usize;
        assert!(alloc_size <= byte_limit, "capacity overflow");
    }
}
92a42be0 SL |
592 | |
593 | ||
#[cfg(test)]
mod tests {
    use super::*;

    // Checks that `reserve` behaves like `reserve_exact` on the first
    // allocation and on large jumps, but grows exponentially for small
    // increments.
    #[test]
    fn reserve_does_not_overallocate() {
        {
            let mut v: RawVec<u32> = RawVec::new();
            // First `reserve` allocates like `reserve_exact`
            v.reserve(0, 9);
            assert_eq!(9, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new();
            v.reserve(0, 7);
            assert_eq!(7, v.cap());
            // 97 is more than double of 7, so `reserve` should work
            // like `reserve_exact`.
            v.reserve(7, 90);
            assert_eq!(97, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new();
            v.reserve(0, 12);
            assert_eq!(12, v.cap());
            v.reserve(12, 3);
            // 3 is less than half of 12, so `reserve` must grow
            // exponentially. At the time of writing this test grow
            // factor is 2, so new capacity is 24, however, grow factor
            // of 1.5 is OK too. Hence `>= 18` in assert.
            assert!(v.cap() >= 12 + 12 / 2);
        }
    }

}