//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
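//!
//! A minimal usage sketch (illustrative, using the two arena types defined
//! below):
//!
//! ```ignore (illustrative)
//! use rustc_arena::{DroplessArena, TypedArena};
//!
//! // `TypedArena` holds values of a single type and drops them when the
//! // arena itself is dropped.
//! let typed: TypedArena<String> = TypedArena::default();
//! let s: &mut String = typed.alloc(String::from("hello"));
//! s.push_str(" arena");
//!
//! // `DroplessArena` holds values of many types, provided they don't need
//! // to be dropped.
//! let dropless = DroplessArena::default();
//! let n: &mut u32 = dropless.alloc(42);
//! let xs: &mut [u32] = dropless.alloc_slice(&[1, 2, 3]);
//! ```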

#![doc(
    html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
    test(no_crate_inject, attr(deny(warnings)))
)]
#![feature(dropck_eyepatch)]
#![feature(new_uninit)]
#![feature(maybe_uninit_slice)]
#![feature(min_specialization)]
#![feature(decl_macro)]
#![feature(rustc_attrs)]
#![cfg_attr(test, feature(test))]
#![feature(strict_provenance)]

use smallvec::SmallVec;

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::marker::{PhantomData, Send};
use std::mem::{self, MaybeUninit};
use std::ptr;
use std::slice;

#[inline(never)]
#[cold]
fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: Box<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk { storage: Box::new_uninit_slice(capacity), entries: 0 }
    }

    /// Destroys this arena chunk.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<u8> takes linear time.
        if mem::needs_drop::<T>() {
            ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut self.storage[..len]));
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        MaybeUninit::slice_as_mut_ptr(&mut self.storage)
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if mem::size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::invalid_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
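// For example (derived from the constants below and `grow()`), a
// `TypedArena<u64>` with 8-byte elements grows through chunk capacities of
// 512, 1024, 2048, ... elements, capping at 262144 elements (2 MiB) per chunk.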
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

trait IterExt<T> {
    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T];
}

impl<I, T> IterExt<T> for I
where
    I: IntoIterator<Item = T>,
{
    // This default collects into a `SmallVec` and then allocates by copying
    // from it. The specializations below for types like `Vec` are more
    // efficient, copying directly without the intermediate collecting step.
    // This default could be made more efficient, like
    // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
    #[inline]
    default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
        let vec: SmallVec<[_; 8]> = self.into_iter().collect();
        vec.alloc_from_iter(arena)
    }
}

impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
    #[inline]
    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
        let len = self.len();
        if len == 0 {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it.
        unsafe {
            let start_ptr = arena.alloc_raw_slice(len);
            self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
            mem::forget(self);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }
}

impl<T> IterExt<T> for Vec<T> {
    #[inline]
    fn alloc_from_iter(mut self, arena: &TypedArena<T>) -> &mut [T] {
        let len = self.len();
        if len == 0 {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it.
        unsafe {
            let start_ptr = arena.alloc_raw_slice(len);
            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            self.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }
}

impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
    #[inline]
    fn alloc_from_iter(mut self, arena: &TypedArena<A::Item>) -> &mut [A::Item] {
        let len = self.len();
        if len == 0 {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it.
        unsafe {
            let start_ptr = arena.alloc_raw_slice(len);
            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            self.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if mem::size_of::<T>() == 0 {
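                // For ZSTs, `ptr` never points into a chunk: its address is
                // just a count of how many values have been allocated, while
                // the write below goes through an aligned dangling pointer,
                // which is a no-op for zero-sized values.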
                self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T);
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().offset(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    /// Ensures there's enough space in the current chunk to fit `additional` objects.
    #[inline]
    fn ensure_capacity(&self, additional: usize) {
        if !self.can_allocate(additional) {
            self.grow(additional);
            debug_assert!(self.can_allocate(additional));
        }
    }

    #[inline]
    unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(mem::size_of::<T>() != 0);
        assert!(len != 0);

        self.ensure_capacity(len);

        let start_ptr = self.ptr.get();
        self.ptr.set(start_ptr.add(len));
        start_ptr
    }

    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        assert!(mem::size_of::<T>() != 0);
        iter.alloc_from_iter(self)
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, mem::size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / mem::size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if mem::size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / mem::size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

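// `#[may_dangle]` below promises the drop checker that this `Drop` impl only
// drops the stored `T`s and never otherwise reads borrowed data inside them,
// so arena-allocated values may contain references that don't strictly
// outlive the arena itself.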
unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(new_cap);
            self.start.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    /// Allocates a byte slice with specified layout from the current memory
    /// chunk. Returns `None` if there is no free space left to satisfy the
    /// request.
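    ///
    /// As a worked example (illustrative numbers): with `end = 1000` and a
    /// request of 9 bytes at alignment 8, `new_end = (1000 - 9) & !7 = 984`,
    /// which is 8-aligned; bytes `984..1000` are consumed from the chunk,
    /// provided `start <= 984`.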
    #[inline]
    fn alloc_raw_without_grow(&self, layout: Layout) -> Option<*mut u8> {
        let start = self.start.get().addr();
        let old_end = self.end.get();
        let end = old_end.addr();

        let align = layout.align();
        let bytes = layout.size();

        let new_end = end.checked_sub(bytes)? & !(align - 1);
        if start <= new_end {
            let new_end = old_end.with_addr(new_end);
            self.end.set(new_end);
            Some(new_end)
        } else {
            None
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);
        loop {
            if let Some(a) = self.alloc_raw_without_grow(layout) {
                break a;
            }
            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout.size());
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());

        let mem = self.alloc_raw(Layout::for_value::<T>(&object)) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`,
    /// returning a mutable reference to it.
    ///
    /// Panics:
    ///
    /// - Zero-sized types
    /// - Zero-length slices
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(mem::size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators.
        loop {
            let value = iter.next();
            if i >= len || value.is_none() {
                // We only return as many items as the iterator gave us, even
                // though it was supposed to give us `len`.
                return slice::from_raw_parts_mut(mem, i);
            }
            ptr::write(mem.add(i), value.unwrap());
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        let iter = iter.into_iter();
        assert!(mem::size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

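        // Iterators over slices and arrays give an exact hint (`min == max`)
        // and take the in-place path below; length-changing adapters such as
        // `filter` report differing bounds and fall back to collecting into
        // a `SmallVec` first.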
        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator will produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => {
                cold_path(move || -> &mut [T] {
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec.
                    unsafe {
                        let len = vec.len();
                        let start_ptr =
                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}

/// Declares an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
///
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
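/// A hypothetical invocation sketch (the field names and types are
/// illustrative, not from this crate):
///
/// ```ignore (illustrative)
/// declare_arena!([
///     [] strings: String,
///     [] string_vecs: Vec<String>,
/// ]);
/// // This expands to a `pub struct Arena<'tcx>` holding a `dropless` arena
/// // plus one `TypedArena` per listed type, with `alloc`, `alloc_slice`, and
/// // `alloc_from_iter` methods that route each value to the right arena.
/// ```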
#[rustc_macro_transparency = "semitransparent"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self;
        fn allocate_from_iter<'a>(
            arena: &'a Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'a mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        fn allocate_from_iter<'a>(
            arena: &'a Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'a mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            fn allocate_from_iter<'a>(
                arena: &'a Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'a mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, C>, C>(
            &'a self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &'a mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;
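//
// At an `arena.alloc(x)` call site, type inference picks `C = IsCopy` for
// `Copy` types (via the blanket impl in `declare_arena`) and `C = IsNotCopy`
// for the listed `!Copy` types, so the two impl families never collide.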

#[cfg(test)]
mod tests;