]> git.proxmox.com Git - rustc.git/blame - vendor/chalk-ir-0.80.0/src/fold/in_place.rs
New upstream version 1.66.0+dfsg1
[rustc.git] / vendor / chalk-ir-0.80.0 / src / fold / in_place.rs
CommitLineData
5869c6ff
XL
1//! Subroutines to help implementers of `Fold` avoid unnecessary heap allocations.
2
3use std::marker::PhantomData;
4use std::{mem, ptr};
5
/// Returns `true` if `T` is a zero-sized type.
///
/// `const fn`: this is a pure compile-time layout query, so the branch that
/// guards the in-place optimizations can be evaluated at compile time.
const fn is_zst<T>() -> bool {
    mem::size_of::<T>() == 0
}
9
/// Returns `true` if `T` and `U` have identical size *and* alignment, meaning
/// a heap allocation for `T`s may be reused to store `U`s.
///
/// `const fn`: a pure compile-time layout query, usable in constant contexts.
const fn is_layout_identical<T, U>() -> bool {
    mem::size_of::<T>() == mem::size_of::<U>() && mem::align_of::<T>() == mem::align_of::<U>()
}
13
/// Maps a `Box<T>` to a `Box<U>`, reusing the underlying storage if possible.
pub(super) fn fallible_map_box<T, U, E>(
    b: Box<T>,
    map: impl FnOnce(T) -> Result<U, E>,
) -> Result<Box<U>, E> {
    // This optimization is only valid when `T` and `U` have the same size/alignment and is not
    // useful for ZSTs.
    if !is_layout_identical::<T, U>() || is_zst::<T>() {
        return map(*b).map(Box::new);
    }

    let raw = Box::into_raw(b);
    unsafe {
        // SAFETY: `raw` came from `Box::into_raw`, so it points to a valid, initialized `T`.
        // Ownership of the value is moved out here; the allocation itself is handled below.
        let val = ptr::read(raw);

        // Box<T> -> Box<MaybeUninit<U>>
        // SAFETY: `T` and `U` have identical size/alignment (checked above), so the allocation
        // is valid for a `U`. Wrapping in `MaybeUninit` prevents a drop of the moved-out `T`.
        let mut raw: Box<mem::MaybeUninit<U>> = Box::from_raw(raw.cast());

        // If `map` panics or returns an error, `raw` will free the memory associated with `b`, but
        // not drop the boxed value itself since it is wrapped in `MaybeUninit`. This is what we
        // want since the boxed value was moved into `map`.
        let mapped_val = map(val)?;
        ptr::write(raw.as_mut_ptr(), mapped_val);

        // Box<MaybeUninit<U>> -> Box<U>
        // SAFETY: the `U` was just written, so the box contents are fully initialized.
        Ok(Box::from_raw(Box::into_raw(raw).cast()))
    }
}
42
/// Maps a `Vec<T>` to a `Vec<U>`, reusing the underlying storage if possible.
pub(super) fn fallible_map_vec<T, U, E>(
    vec: Vec<T>,
    mut map: impl FnMut(T) -> Result<U, E>,
) -> Result<Vec<U>, E> {
    // This optimization is only valid when `T` and `U` have the same size/alignment and is not
    // useful for ZSTs.
    if !is_layout_identical::<T, U>() || is_zst::<T>() {
        return vec.into_iter().map(map).collect();
    }

    // `VecMappedInPlace` takes ownership of the buffer; its `Drop` impl cleans up a
    // partially-mapped buffer if `map` panics or returns `Err` below.
    let mut vec = VecMappedInPlace::<T, U>::new(vec);

    unsafe {
        for i in 0..vec.len {
            // SAFETY: `i < vec.len`, so `place` points to an initialized element. Elements at
            // indices `>= i` are still `T`s; ownership of this one is moved out by `ptr::read`.
            let place = vec.ptr.add(i);
            let val = ptr::read(place);

            // Set `map_in_progress` so the drop impl for `VecMappedInPlace` can handle the other
            // elements correctly in case `map` panics or returns an error.
            vec.map_in_progress = i;
            let mapped_val = map(val)?;

            // SAFETY: writing a `U` into slot `i` is valid because `T` and `U` have identical
            // layout (checked above), and the old `T` was already moved out of this slot.
            ptr::write(place as *mut U, mapped_val);
        }

        // SAFETY: the loop overwrote every element with an initialized `U`, so the buffer may
        // now be reinterpreted as a `Vec<U>`.
        Ok(vec.finish())
    }
}
72
/// Takes ownership of a `Vec` that is being mapped in place, cleaning up if the map fails.
struct VecMappedInPlace<T, U> {
    // Raw parts of the original `Vec<T>` (as for `Vec::from_raw_parts`).
    ptr: *mut T,
    len: usize,
    cap: usize,

    // Index of the element currently being passed to `map`. Elements before this index have
    // been mapped to `U`; elements after it are still `T`; this one has been moved out.
    map_in_progress: usize,
    // Expresses ownership of both element types without storing either.
    _elem_tys: PhantomData<(T, U)>,
}
82
impl<T, U> VecMappedInPlace<T, U> {
    /// Decomposes `vec` into its raw parts and takes over responsibility for the buffer.
    ///
    /// Panics if `T` and `U` do not have identical layout, since the whole point of this
    /// type is to reuse the buffer for `U`s.
    fn new(mut vec: Vec<T>) -> Self {
        assert!(is_layout_identical::<T, U>());

        // FIXME: This is just `Vec::into_raw_parts`. Use that instead when it is stabilized.
        let ptr = vec.as_mut_ptr();
        let len = vec.len();
        let cap = vec.capacity();
        // Suppress `vec`'s destructor: this struct now owns the buffer and its elements.
        mem::forget(vec);

        VecMappedInPlace {
            ptr,
            len,
            cap,

            map_in_progress: 0,
            _elem_tys: PhantomData,
        }
    }

    /// Converts back into a `Vec` once the map is complete.
    ///
    /// # Safety
    ///
    /// All `len` elements of the buffer must have been overwritten with initialized `U`s.
    unsafe fn finish(self) -> Vec<U> {
        // `ManuallyDrop` suppresses our `Drop` impl so the elements are not dropped here;
        // ownership of buffer and elements transfers to the returned `Vec<U>`.
        let this = mem::ManuallyDrop::new(self);
        Vec::from_raw_parts(this.ptr as *mut U, this.len, this.cap)
    }
}
109
/// `VecMappedInPlace` drops everything but the element that was passed to `map` when it panicked or
/// returned an error. Everything before that index in the vector has type `U` (it has been mapped)
/// and everything after it has type `T` (it has not been mapped).
///
/// ```text
///  mapped         not yet mapped
/// |-----|         |-------|
/// [U U U U U x T T T T T T]
///            ^
///            `map_in_progress` (not dropped: it was moved into `map`)
/// ```
impl<T, U> Drop for VecMappedInPlace<T, U> {
    fn drop(&mut self) {
        // Drop mapped elements of type `U`.
        for i in 0..self.map_in_progress {
            // SAFETY: slots before `map_in_progress` hold initialized `U`s (already mapped).
            unsafe {
                ptr::drop_in_place(self.ptr.add(i) as *mut U);
            }
        }

        // Drop unmapped elements of type `T`.
        // Note the range starts one past `map_in_progress`: that slot's value was moved out.
        for i in (self.map_in_progress + 1)..self.len {
            // SAFETY: slots after `map_in_progress` still hold initialized `T`s.
            unsafe {
                ptr::drop_in_place(self.ptr.add(i));
            }
        }

        // Free the underlying storage for the `Vec`.
        // `len` is 0 because the elements were handled above.
        unsafe {
            Vec::from_raw_parts(self.ptr, 0, self.cap);
        }
    }
}
145
#[cfg(test)]
mod tests {
    use std::fmt;
    use std::sync::{Arc, Mutex};

    /// A wrapper around `T` that appends its id to a shared log when dropped.
    struct RecordDrop<T: fmt::Display> {
        id: T,
        drops: Arc<Mutex<Vec<String>>>,
    }

    impl<T: fmt::Display> RecordDrop<T> {
        fn new(id: T, drops: &Arc<Mutex<Vec<String>>>) -> Self {
            Self {
                id,
                drops: Arc::clone(drops),
            }
        }
    }

    impl RecordDrop<u8> {
        /// Consumes `self` without logging a drop and re-wraps its id as a letter.
        fn map_to_char(self) -> RecordDrop<char> {
            let inner = std::mem::ManuallyDrop::new(self);
            RecordDrop {
                id: (inner.id + b'A') as char,
                drops: Arc::clone(&inner.drops),
            }
        }
    }

    impl<T: fmt::Display> Drop for RecordDrop<T> {
        fn drop(&mut self) {
            let entry = format!("{}", self.id);
            self.drops.lock().unwrap().push(entry);
        }
    }

    #[test]
    fn vec_no_cleanup_after_success() {
        let log = Arc::new(Mutex::new(Vec::new()));
        let input: Vec<_> = (0u8..5).map(|i| RecordDrop::new(i, &log)).collect();

        let res: Result<_, ()> = super::fallible_map_vec(input, |x| Ok(x.map_to_char()));

        assert!(res.is_ok());
        // Nothing was dropped on the success path: all elements moved into the result.
        assert!(log.lock().unwrap().is_empty());
    }

    #[test]
    fn vec_cleanup_after_panic() {
        let log = Arc::new(Mutex::new(Vec::new()));
        let input: Vec<_> = (0u8..5).map(|i| RecordDrop::new(i, &log)).collect();

        let caught = std::panic::catch_unwind(|| {
            let _: Result<_, ()> = super::fallible_map_vec(input, |x| {
                if x.id == 3 {
                    panic!();
                }

                Ok(x.map_to_char())
            });
        });

        assert!(caught.is_err());
        // Element 3 dropped inside `map`, then mapped elements A-C, then unmapped 4.
        assert_eq!(*log.lock().unwrap(), &["3", "A", "B", "C", "4"]);
    }

    #[test]
    fn vec_cleanup_after_early_return() {
        let log = Arc::new(Mutex::new(Vec::new()));
        let input: Vec<_> = (0u8..5).map(|i| RecordDrop::new(i, &log)).collect();

        let res = super::fallible_map_vec(input, |x| {
            if x.id == 2 {
                return Err(());
            }

            Ok(x.map_to_char())
        });

        assert!(res.is_err());
        // Element 2 dropped inside `map`, then mapped elements A-B, then unmapped 3-4.
        assert_eq!(*log.lock().unwrap(), &["2", "A", "B", "3", "4"]);
    }

    #[test]
    fn box_no_cleanup_after_success() {
        let log = Arc::new(Mutex::new(Vec::new()));
        let input = Box::new(RecordDrop::new(0, &log));

        let res: Result<Box<_>, ()> = super::fallible_map_box(input, |x| Ok(x.map_to_char()));

        assert!(res.is_ok());
        assert!(log.lock().unwrap().is_empty());
    }

    #[test]
    fn box_cleanup_after_panic() {
        let log = Arc::new(Mutex::new(Vec::new()));
        let input = Box::new(RecordDrop::new(0, &log));

        let caught = std::panic::catch_unwind(|| {
            let _: Result<Box<()>, ()> = super::fallible_map_box(input, |_| panic!());
        });

        assert!(caught.is_err());
        assert_eq!(*log.lock().unwrap(), &["0"]);
    }

    #[test]
    fn box_cleanup_after_early_return() {
        let log = Arc::new(Mutex::new(Vec::new()));
        let input = Box::new(RecordDrop::new(0, &log));

        let res: Result<Box<()>, _> = super::fallible_map_box(input, |_| Err(()));

        assert!(res.is_err());
        assert_eq!(*log.lock().unwrap(), &["0"]);
    }
}