]>
Commit | Line | Data |
---|---|---|
0bf4aa26 | 1 | //! Manually manage memory through raw pointers. |
1a4d82fc | 2 | //! |
54a0048b | 3 | //! *[See also the pointer primitive types](../../std/primitive.pointer.html).* |
0bf4aa26 XL |
4 | //! |
5 | //! # Safety | |
6 | //! | |
7 | //! Many functions in this module take raw pointers as arguments and read from | |
8 | //! or write to them. For this to be safe, these pointers must be *valid*. | |
9 | //! Whether a pointer is valid depends on the operation it is used for | |
10 | //! (read or write), and the extent of the memory that is accessed (i.e., | |
11 | //! how many bytes are read/written). Most functions use `*mut T` and `*const T` | |
12 | //! to access only a single value, in which case the documentation omits the size | |
13 | //! and implicitly assumes it to be `size_of::<T>()` bytes. | |
14 | //! | |
9fa01778 | 15 | //! The precise rules for validity are not determined yet. The guarantees that are |
0bf4aa26 XL |
16 | //! provided at this point are very minimal: |
17 | //! | |
18 | //! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst]. | |
19 | //! * All pointers (except for the null pointer) are valid for all operations of | |
20 | //! [size zero][zst]. | |
60c5eb7d | 21 | //! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer |
74b04a01 | 22 | //! be *dereferenceable*: the memory range of the given size starting at the pointer must all be |
60c5eb7d XL |
23 | //! within the bounds of a single allocated object. Note that in Rust, |
24 | //! every (stack-allocated) variable is considered a separate allocated object. | |
0bf4aa26 XL |
25 | //! * All accesses performed by functions in this module are *non-atomic* in the sense |
26 | //! of [atomic operations] used to synchronize between threads. This means it is | |
27 | //! undefined behavior to perform two concurrent accesses to the same location from different | |
28 | //! threads unless both accesses only read from memory. Notice that this explicitly | |
29 | //! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot | |
30 | //! be used for inter-thread synchronization. | |
31 | //! * The result of casting a reference to a pointer is valid for as long as the | |
32 | //! underlying object is live and no reference (just raw pointers) is used to | |
33 | //! access the same memory. | |
34 | //! | |
35 | //! These axioms, along with careful use of [`offset`] for pointer arithmetic, | |
36 | //! are enough to correctly implement many useful things in unsafe code. Stronger guarantees | |
37 | //! will be provided eventually, as the [aliasing] rules are being determined. For more | |
38 | //! information, see the [book] as well as the section in the reference devoted | |
39 | //! to [undefined behavior][ub]. | |
40 | //! | |
41 | //! ## Alignment | |
42 | //! | |
43 | //! Valid raw pointers as defined above are not necessarily properly aligned (where | |
44 | //! "proper" alignment is defined by the pointee type, i.e., `*const T` must be | |
45 | //! aligned to `mem::align_of::<T>()`). However, most functions require their | |
46 | //! arguments to be properly aligned, and will explicitly state | |
47 | //! this requirement in their documentation. Notable exceptions to this are | |
48 | //! [`read_unaligned`] and [`write_unaligned`]. | |
49 | //! | |
50 | //! When a function requires proper alignment, it does so even if the access | |
51 | //! has size 0, i.e., even if memory is not actually touched. Consider using | |
52 | //! [`NonNull::dangling`] in such cases. | |
53 | //! | |
54 | //! [aliasing]: ../../nomicon/aliasing.html | |
13cf67c4 | 55 | //! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer |
0bf4aa26 XL |
56 | //! [ub]: ../../reference/behavior-considered-undefined.html |
57 | //! [null]: ./fn.null.html | |
58 | //! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts | |
59 | //! [atomic operations]: ../../std/sync/atomic/index.html | |
60 | //! [`copy`]: ../../std/ptr/fn.copy.html | |
61 | //! [`offset`]: ../../std/primitive.pointer.html#method.offset | |
62 | //! [`read_unaligned`]: ./fn.read_unaligned.html | |
63 | //! [`write_unaligned`]: ./fn.write_unaligned.html | |
64 | //! [`read_volatile`]: ./fn.read_volatile.html | |
65 | //! [`write_volatile`]: ./fn.write_volatile.html | |
66 | //! [`NonNull::dangling`]: ./struct.NonNull.html#method.dangling | |
1a4d82fc | 67 | |
60c5eb7d XL |
68 | // ignore-tidy-undocumented-unsafe |
69 | ||
85aaf69f | 70 | #![stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 71 | |
dfeec247 | 72 | use crate::cmp::Ordering; |
48663c56 XL |
73 | use crate::fmt; |
74 | use crate::hash; | |
74b04a01 | 75 | use crate::intrinsics::{self, is_aligned_and_not_null, is_nonoverlapping}; |
48663c56 | 76 | use crate::mem::{self, MaybeUninit}; |
1a4d82fc | 77 | |
c34b1796 | 78 | #[stable(feature = "rust1", since = "1.0.0")] |
48663c56 | 79 | pub use crate::intrinsics::copy_nonoverlapping; |
1a4d82fc | 80 | |
c34b1796 | 81 | #[stable(feature = "rust1", since = "1.0.0")] |
48663c56 | 82 | pub use crate::intrinsics::copy; |
1a4d82fc | 83 | |
c34b1796 | 84 | #[stable(feature = "rust1", since = "1.0.0")] |
48663c56 | 85 | pub use crate::intrinsics::write_bytes; |
1a4d82fc | 86 | |
dc9dc135 XL |
87 | mod non_null; |
88 | #[stable(feature = "nonnull", since = "1.25.0")] | |
89 | pub use non_null::NonNull; | |
90 | ||
91 | mod unique; | |
dfeec247 | 92 | #[unstable(feature = "ptr_internals", issue = "none")] |
dc9dc135 XL |
93 | pub use unique::Unique; |
94 | ||
dfeec247 XL |
95 | mod const_ptr; |
96 | mod mut_ptr; | |
97 | ||
cc61c64b XL |
98 | /// Executes the destructor (if any) of the pointed-to value. |
99 | /// | |
0bf4aa26 XL |
100 | /// This is semantically equivalent to calling [`ptr::read`] and discarding |
101 | /// the result, but has the following advantages: | |
cc61c64b XL |
102 | /// |
103 | /// * It is *required* to use `drop_in_place` to drop unsized types like | |
104 | /// trait objects, because they can't be read out onto the stack and | |
105 | /// dropped normally. | |
106 | /// | |
0bf4aa26 | 107 | /// * It is friendlier to the optimizer to do this over [`ptr::read`] when |
0731742a | 108 | /// dropping manually allocated memory (e.g., when writing Box/Rc/Vec), |
cc61c64b XL |
109 | /// as the compiler doesn't need to prove that it's sound to elide the |
110 | /// copy. | |
111 | /// | |
416331ca XL |
112 | /// Unaligned values cannot be dropped in place, they must be copied to an aligned |
113 | /// location first using [`ptr::read_unaligned`]. | |
114 | /// | |
0bf4aa26 | 115 | /// [`ptr::read`]: ../ptr/fn.read.html |
416331ca | 116 | /// [`ptr::read_unaligned`]: ../ptr/fn.read_unaligned.html |
0bf4aa26 | 117 | /// |
ea8adc8c | 118 | /// # Safety |
cc61c64b | 119 | /// |
0bf4aa26 XL |
120 | /// Behavior is undefined if any of the following conditions are violated: |
121 | /// | |
74b04a01 | 122 | /// * `to_drop` must be [valid] for both reads and writes. |
0bf4aa26 | 123 | /// |
416331ca | 124 | /// * `to_drop` must be properly aligned. |
0bf4aa26 | 125 | /// |
74b04a01 XL |
126 | /// * The value `to_drop` points to must be valid for dropping, which may mean it must uphold |
127 | /// additional invariants - this is type-dependent. | |
128 | /// | |
0bf4aa26 XL |
129 | /// Additionally, if `T` is not [`Copy`], using the pointed-to value after |
130 | /// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop = | |
a1dfa0c6 | 131 | /// foo` counts as a use because it will cause the value to be dropped |
0bf4aa26 XL |
132 | /// again. [`write`] can be used to overwrite data without causing it to be |
133 | /// dropped. | |
134 | /// | |
135 | /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. | |
136 | /// | |
137 | /// [valid]: ../ptr/index.html#safety | |
138 | /// [`Copy`]: ../marker/trait.Copy.html | |
139 | /// [`write`]: ../ptr/fn.write.html | |
140 | /// | |
141 | /// # Examples | |
142 | /// | |
143 | /// Manually remove the last item from a vector: | |
144 | /// | |
145 | /// ``` | |
146 | /// use std::ptr; | |
147 | /// use std::rc::Rc; | |
148 | /// | |
149 | /// let last = Rc::new(1); | |
150 | /// let weak = Rc::downgrade(&last); | |
151 | /// | |
152 | /// let mut v = vec![Rc::new(0), last]; | |
153 | /// | |
154 | /// unsafe { | |
155 | /// // Get a raw pointer to the last element in `v`. | |
156 | /// let ptr = &mut v[1] as *mut _; | |
9fa01778 | 157 | /// // Shorten `v` to prevent the last item from being dropped. We do that first, |
0bf4aa26 XL |
158 | /// // to prevent issues if the `drop_in_place` below panics. |
159 | /// v.set_len(1); | |
160 | /// // Without a call `drop_in_place`, the last item would never be dropped, | |
161 | /// // and the memory it manages would be leaked. | |
162 | /// ptr::drop_in_place(ptr); | |
163 | /// } | |
164 | /// | |
165 | /// assert_eq!(v, &[0.into()]); | |
166 | /// | |
167 | /// // Ensure that the last item was dropped. | |
168 | /// assert!(weak.upgrade().is_none()); | |
169 | /// ``` | |
170 | /// | |
0bf4aa26 XL |
171 | /// Notice that the compiler performs this copy automatically when dropping packed structs, |
172 | /// i.e., you do not usually have to worry about such issues unless you call `drop_in_place` | |
173 | /// manually. | |
// NOTE: this is a compiler lang item. Calls to `drop_in_place` are never
// dispatched to the body below; codegen replaces them with type-specific
// drop glue. The apparently-infinite recursion is therefore never executed
// (hence `#[allow(unconditional_recursion)]`).
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
    // Code here does not matter - this is replaced by the
    // real drop glue by the compiler.
    drop_in_place(to_drop)
}
182 | ||
1a4d82fc JJ |
183 | /// Creates a null raw pointer. |
184 | /// | |
185 | /// # Examples | |
186 | /// | |
187 | /// ``` | |
188 | /// use std::ptr; | |
189 | /// | |
85aaf69f | 190 | /// let p: *const i32 = ptr::null(); |
1a4d82fc JJ |
191 | /// assert!(p.is_null()); |
192 | /// ``` | |
// Creates a null raw pointer (`*const T`).
//
// NOTE: kept as a single cast expression — this is a `#[rustc_promotable]`
// `const fn`, so the body must remain a minimal const-evaluable expression.
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
#[rustc_const_stable(feature = "const_ptr_null", since = "1.32.0")]
pub const fn null<T>() -> *const T {
    // The all-zero address is the canonical null pointer.
    0 as *const T
}
1a4d82fc JJ |
200 | |
201 | /// Creates a null mutable raw pointer. | |
202 | /// | |
203 | /// # Examples | |
204 | /// | |
205 | /// ``` | |
206 | /// use std::ptr; | |
207 | /// | |
85aaf69f | 208 | /// let p: *mut i32 = ptr::null_mut(); |
1a4d82fc JJ |
209 | /// assert!(p.is_null()); |
210 | /// ``` | |
// Creates a null mutable raw pointer (`*mut T`).
//
// NOTE: kept as a single cast expression — this is a `#[rustc_promotable]`
// `const fn`, so the body must remain a minimal const-evaluable expression.
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
#[rustc_const_stable(feature = "const_ptr_null", since = "1.32.0")]
pub const fn null_mut<T>() -> *mut T {
    // The all-zero address is the canonical null pointer.
    0 as *mut T
}
1a4d82fc | 218 | |
dc9dc135 XL |
// Internal layout-punning machinery for constructing raw slice pointers
// (`*const [T]` / `*mut [T]`) from a (data, len) pair without `transmute`.
// `#[repr(C)]` gives all three union variants the same layout, so writing
// the `raw` variant and reading `rust` / `rust_mut` reinterprets the pair
// as a fat pointer. Used by `slice_from_raw_parts{,_mut}` below.
#[repr(C)]
pub(crate) union Repr<T> {
    pub(crate) rust: *const [T],
    rust_mut: *mut [T],
    pub(crate) raw: FatPtr<T>,
}

// The two words of a fat (slice) pointer: element pointer, then element
// count. Field order is assumed to match the compiler's slice-pointer
// layout — that assumption is what makes the `Repr` pun above work.
#[repr(C)]
pub(crate) struct FatPtr<T> {
    data: *const T,
    pub(crate) len: usize,
}
231 | ||
60c5eb7d | 232 | /// Forms a raw slice from a pointer and a length. |
dc9dc135 XL |
233 | /// |
234 | /// The `len` argument is the number of **elements**, not the number of bytes. | |
235 | /// | |
60c5eb7d XL |
236 | /// This function is safe, but actually using the return value is unsafe. |
237 | /// See the documentation of [`from_raw_parts`] for slice safety requirements. | |
238 | /// | |
239 | /// [`from_raw_parts`]: ../../std/slice/fn.from_raw_parts.html | |
240 | /// | |
dc9dc135 XL |
241 | /// # Examples |
242 | /// | |
243 | /// ```rust | |
dc9dc135 XL |
244 | /// use std::ptr; |
245 | /// | |
246 | /// // create a slice pointer when starting out with a pointer to the first element | |
dfeec247 XL |
247 | /// let x = [5, 6, 7]; |
248 | /// let ptr = x.as_ptr(); | |
249 | /// let slice = ptr::slice_from_raw_parts(ptr, 3); | |
dc9dc135 XL |
250 | /// assert_eq!(unsafe { &*slice }[2], 7); |
251 | /// ``` | |
#[inline]
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
    // Pun the (data, len) pair into a fat `*const [T]` via the `#[repr(C)]`
    // `Repr` union. Constructing the pointer is safe; *using* it is not —
    // see `slice::from_raw_parts` for the validity requirements.
    unsafe { Repr { raw: FatPtr { data, len } }.rust }
}
258 | ||
60c5eb7d XL |
259 | /// Performs the same functionality as [`slice_from_raw_parts`], except that a |
260 | /// raw mutable slice is returned, as opposed to a raw immutable slice. | |
dc9dc135 | 261 | /// |
60c5eb7d | 262 | /// See the documentation of [`slice_from_raw_parts`] for more details. |
dc9dc135 | 263 | /// |
60c5eb7d XL |
264 | /// This function is safe, but actually using the return value is unsafe. |
265 | /// See the documentation of [`from_raw_parts_mut`] for slice safety requirements. | |
266 | /// | |
267 | /// [`slice_from_raw_parts`]: fn.slice_from_raw_parts.html | |
268 | /// [`from_raw_parts_mut`]: ../../std/slice/fn.from_raw_parts_mut.html | |
#[inline]
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
    // Mutable counterpart of `slice_from_raw_parts`: same `Repr` pun, but
    // read back through the `rust_mut` variant to get a `*mut [T]`.
    unsafe { Repr { raw: FatPtr { data, len } }.rust_mut }
}
275 | ||
1a4d82fc | 276 | /// Swaps the values at two mutable locations of the same type, without |
ff7c6d11 XL |
277 | /// deinitializing either. |
278 | /// | |
0bf4aa26 XL |
279 | /// But for the following two exceptions, this function is semantically |
280 | /// equivalent to [`mem::swap`]: | |
281 | /// | |
282 | /// * It operates on raw pointers instead of references. When references are | |
283 | /// available, [`mem::swap`] should be preferred. | |
284 | /// | |
285 | /// * The two pointed-to values may overlap. If the values do overlap, then the | |
286 | /// overlapping region of memory from `x` will be used. This is demonstrated | |
287 | /// in the second example below. | |
288 | /// | |
289 | /// [`mem::swap`]: ../mem/fn.swap.html | |
1a4d82fc JJ |
290 | /// |
291 | /// # Safety | |
292 | /// | |
0bf4aa26 XL |
293 | /// Behavior is undefined if any of the following conditions are violated: |
294 | /// | |
74b04a01 | 295 | /// * Both `x` and `y` must be [valid] for both reads and writes. |
0bf4aa26 XL |
296 | /// |
297 | /// * Both `x` and `y` must be properly aligned. | |
298 | /// | |
299 | /// Note that even if `T` has size `0`, the pointers must be non-NULL and properly aligned. | |
32a655c1 | 300 | /// |
0bf4aa26 | 301 | /// [valid]: ../ptr/index.html#safety |
ff7c6d11 XL |
302 | /// |
303 | /// # Examples | |
304 | /// | |
305 | /// Swapping two non-overlapping regions: | |
306 | /// | |
307 | /// ``` | |
308 | /// use std::ptr; | |
309 | /// | |
310 | /// let mut array = [0, 1, 2, 3]; | |
311 | /// | |
0bf4aa26 XL |
312 | /// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]` |
313 | /// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]` | |
ff7c6d11 XL |
314 | /// |
315 | /// unsafe { | |
316 | /// ptr::swap(x, y); | |
317 | /// assert_eq!([2, 3, 0, 1], array); | |
318 | /// } | |
319 | /// ``` | |
320 | /// | |
321 | /// Swapping two overlapping regions: | |
322 | /// | |
323 | /// ``` | |
324 | /// use std::ptr; | |
325 | /// | |
326 | /// let mut array = [0, 1, 2, 3]; | |
327 | /// | |
0bf4aa26 XL |
328 | /// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; // this is `array[0..3]` |
329 | /// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; // this is `array[1..4]` | |
ff7c6d11 XL |
330 | /// |
331 | /// unsafe { | |
332 | /// ptr::swap(x, y); | |
0bf4aa26 XL |
333 | /// // The indices `1..3` of the slice overlap between `x` and `y`. |
334 | /// // Reasonable results would be for to them be `[2, 3]`, so that indices `0..3` are | |
335 | /// // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]` | |
336 | /// // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`). | |
337 | /// // This implementation is defined to make the latter choice. | |
ff7c6d11 XL |
338 | /// assert_eq!([1, 0, 1, 2], array); |
339 | /// } | |
340 | /// ``` | |
1a4d82fc | 341 | #[inline] |
85aaf69f | 342 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 343 | pub unsafe fn swap<T>(x: *mut T, y: *mut T) { |
a1dfa0c6 XL |
344 | // Give ourselves some scratch space to work with. |
345 | // We do not have to worry about drops: `MaybeUninit` does nothing when dropped. | |
532ac7d7 | 346 | let mut tmp = MaybeUninit::<T>::uninit(); |
1a4d82fc JJ |
347 | |
348 | // Perform the swap | |
a1dfa0c6 | 349 | copy_nonoverlapping(x, tmp.as_mut_ptr(), 1); |
c34b1796 | 350 | copy(y, x, 1); // `x` and `y` may overlap |
532ac7d7 | 351 | copy_nonoverlapping(tmp.as_ptr(), y, 1); |
1a4d82fc JJ |
352 | } |
353 | ||
0bf4aa26 XL |
354 | /// Swaps `count * size_of::<T>()` bytes between the two regions of memory |
355 | /// beginning at `x` and `y`. The two regions must *not* overlap. | |
041b39d2 XL |
356 | /// |
357 | /// # Safety | |
358 | /// | |
0bf4aa26 XL |
359 | /// Behavior is undefined if any of the following conditions are violated: |
360 | /// | |
74b04a01 | 361 | /// * Both `x` and `y` must be [valid] for both reads and writes of `count * |
0bf4aa26 XL |
362 | /// size_of::<T>()` bytes. |
363 | /// | |
364 | /// * Both `x` and `y` must be properly aligned. | |
365 | /// | |
366 | /// * The region of memory beginning at `x` with a size of `count * | |
367 | /// size_of::<T>()` bytes must *not* overlap with the region of memory | |
368 | /// beginning at `y` with the same size. | |
369 | /// | |
370 | /// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`, | |
371 | /// the pointers must be non-NULL and properly aligned. | |
372 | /// | |
373 | /// [valid]: ../ptr/index.html#safety | |
041b39d2 XL |
374 | /// |
375 | /// # Examples | |
376 | /// | |
377 | /// Basic usage: | |
378 | /// | |
379 | /// ``` | |
041b39d2 XL |
380 | /// use std::ptr; |
381 | /// | |
382 | /// let mut x = [1, 2, 3, 4]; | |
383 | /// let mut y = [7, 8, 9]; | |
384 | /// | |
385 | /// unsafe { | |
386 | /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2); | |
387 | /// } | |
388 | /// | |
389 | /// assert_eq!(x, [7, 8, 3, 4]); | |
390 | /// assert_eq!(y, [1, 2, 9]); | |
391 | /// ``` | |
392 | #[inline] | |
83c7162d | 393 | #[stable(feature = "swap_nonoverlapping", since = "1.27.0")] |
041b39d2 | 394 | pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) { |
74b04a01 XL |
395 | debug_assert!(is_aligned_and_not_null(x), "attempt to swap unaligned or null pointer"); |
396 | debug_assert!(is_aligned_and_not_null(y), "attempt to swap unaligned or null pointer"); | |
397 | debug_assert!(is_nonoverlapping(x, y, count), "attempt to swap overlapping memory"); | |
398 | ||
041b39d2 XL |
399 | let x = x as *mut u8; |
400 | let y = y as *mut u8; | |
401 | let len = mem::size_of::<T>() * count; | |
402 | swap_nonoverlapping_bytes(x, y, len) | |
403 | } | |
404 | ||
8faf50e0 XL |
// Swaps exactly one value of type `T` between `x` and `y`, which must not
// overlap. Small types bypass the SIMD block machinery entirely.
#[inline]
pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
    if mem::size_of::<T>() >= 32 {
        // Large enough for the block-swap optimization to pay off.
        swap_nonoverlapping(x, y, 1);
    } else {
        // Below the block threshold, a plain three-step exchange produces
        // better codegen than the byte-block loop.
        let tmp = read(x);
        copy_nonoverlapping(y, x, 1);
        write(y, tmp);
    }
}
417 | ||
041b39d2 XL |
// Byte-granularity backend for `swap_nonoverlapping`: exchanges `len`
// bytes between the non-overlapping regions starting at `x` and `y`.
#[inline]
unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox")), repr(simd))]
    struct Block(u64, u64, u64, u64);
    struct UnalignedBlock(u64, u64, u64, u64);

    // 32 bytes per iteration (4 x u64).
    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t = mem::MaybeUninit::<Block>::uninit();
        let t = t.as_mut_ptr() as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        // Swap a block of bytes of x & y, using t as a temporary buffer
        // This should be optimized into efficient SIMD operations where available
        copy_nonoverlapping(x, t, block_size);
        copy_nonoverlapping(y, x, block_size);
        copy_nonoverlapping(t, y, block_size);
        i += block_size;
    }

    if i < len {
        // Swap any remaining bytes (fewer than `block_size` of them)
        // through a plain, non-SIMD scratch block.
        let mut t = mem::MaybeUninit::<UnalignedBlock>::uninit();
        let rem = len - i;

        let t = t.as_mut_ptr() as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        copy_nonoverlapping(x, t, rem);
        copy_nonoverlapping(y, x, rem);
        copy_nonoverlapping(t, y, rem);
    }
}
466 | ||
0bf4aa26 | 467 | /// Moves `src` into the pointed `dst`, returning the previous `dst` value. |
94b46f34 XL |
468 | /// |
469 | /// Neither value is dropped. | |
1a4d82fc | 470 | /// |
0bf4aa26 XL |
471 | /// This function is semantically equivalent to [`mem::replace`] except that it |
472 | /// operates on raw pointers instead of references. When references are | |
473 | /// available, [`mem::replace`] should be preferred. | |
474 | /// | |
475 | /// [`mem::replace`]: ../mem/fn.replace.html | |
476 | /// | |
1a4d82fc JJ |
477 | /// # Safety |
478 | /// | |
0bf4aa26 XL |
479 | /// Behavior is undefined if any of the following conditions are violated: |
480 | /// | |
74b04a01 | 481 | /// * `dst` must be [valid] for both reads and writes. |
0bf4aa26 XL |
482 | /// |
483 | /// * `dst` must be properly aligned. | |
484 | /// | |
74b04a01 XL |
485 | /// * `dst` must point to a properly initialized value of type `T`. |
486 | /// | |
0bf4aa26 XL |
487 | /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. |
488 | /// | |
489 | /// [valid]: ../ptr/index.html#safety | |
490 | /// | |
491 | /// # Examples | |
492 | /// | |
493 | /// ``` | |
494 | /// use std::ptr; | |
495 | /// | |
496 | /// let mut rust = vec!['b', 'u', 's', 't']; | |
497 | /// | |
498 | /// // `mem::replace` would have the same effect without requiring the unsafe | |
499 | /// // block. | |
500 | /// let b = unsafe { | |
501 | /// ptr::replace(&mut rust[0], 'r') | |
502 | /// }; | |
503 | /// | |
504 | /// assert_eq!(b, 'b'); | |
505 | /// assert_eq!(rust, &['r', 'u', 's', 't']); | |
506 | /// ``` | |
1a4d82fc | 507 | #[inline] |
85aaf69f | 508 | #[stable(feature = "rust1", since = "1.0.0")] |
0bf4aa26 XL |
509 | pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T { |
510 | mem::swap(&mut *dst, &mut src); // cannot overlap | |
1a4d82fc JJ |
511 | src |
512 | } | |
513 | ||
85aaf69f | 514 | /// Reads the value from `src` without moving it. This leaves the |
1a4d82fc JJ |
515 | /// memory in `src` unchanged. |
516 | /// | |
517 | /// # Safety | |
518 | /// | |
0bf4aa26 XL |
519 | /// Behavior is undefined if any of the following conditions are violated: |
520 | /// | |
521 | /// * `src` must be [valid] for reads. | |
a7813a04 | 522 | /// |
0bf4aa26 XL |
523 | /// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the |
524 | /// case. | |
525 | /// | |
74b04a01 XL |
526 | /// * `src` must point to a properly initialized value of type `T`. |
527 | /// | |
0bf4aa26 | 528 | /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. |
476ff2be | 529 | /// |
a7813a04 XL |
530 | /// # Examples |
531 | /// | |
532 | /// Basic usage: | |
533 | /// | |
534 | /// ``` | |
535 | /// let x = 12; | |
536 | /// let y = &x as *const i32; | |
537 | /// | |
9e0c209e SL |
538 | /// unsafe { |
539 | /// assert_eq!(std::ptr::read(y), 12); | |
540 | /// } | |
a7813a04 | 541 | /// ``` |
0bf4aa26 XL |
542 | /// |
543 | /// Manually implement [`mem::swap`]: | |
544 | /// | |
545 | /// ``` | |
546 | /// use std::ptr; | |
547 | /// | |
548 | /// fn swap<T>(a: &mut T, b: &mut T) { | |
549 | /// unsafe { | |
550 | /// // Create a bitwise copy of the value at `a` in `tmp`. | |
551 | /// let tmp = ptr::read(a); | |
552 | /// | |
553 | /// // Exiting at this point (either by explicitly returning or by | |
554 | /// // calling a function which panics) would cause the value in `tmp` to | |
555 | /// // be dropped while the same value is still referenced by `a`. This | |
556 | /// // could trigger undefined behavior if `T` is not `Copy`. | |
557 | /// | |
558 | /// // Create a bitwise copy of the value at `b` in `a`. | |
559 | /// // This is safe because mutable references cannot alias. | |
560 | /// ptr::copy_nonoverlapping(b, a, 1); | |
561 | /// | |
562 | /// // As above, exiting here could trigger undefined behavior because | |
563 | /// // the same value is referenced by `a` and `b`. | |
564 | /// | |
565 | /// // Move `tmp` into `b`. | |
566 | /// ptr::write(b, tmp); | |
567 | /// | |
568 | /// // `tmp` has been moved (`write` takes ownership of its second argument), | |
569 | /// // so nothing is dropped implicitly here. | |
570 | /// } | |
571 | /// } | |
572 | /// | |
573 | /// let mut foo = "foo".to_owned(); | |
574 | /// let mut bar = "bar".to_owned(); | |
575 | /// | |
576 | /// swap(&mut foo, &mut bar); | |
577 | /// | |
578 | /// assert_eq!(foo, "bar"); | |
579 | /// assert_eq!(bar, "foo"); | |
580 | /// ``` | |
581 | /// | |
582 | /// ## Ownership of the Returned Value | |
583 | /// | |
584 | /// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`]. | |
585 | /// If `T` is not [`Copy`], using both the returned value and the value at | |
9fa01778 | 586 | /// `*src` can violate memory safety. Note that assigning to `*src` counts as a |
0bf4aa26 XL |
587 | /// use because it will attempt to drop the value at `*src`. |
588 | /// | |
589 | /// [`write`] can be used to overwrite data without causing it to be dropped. | |
590 | /// | |
591 | /// ``` | |
592 | /// use std::ptr; | |
593 | /// | |
594 | /// let mut s = String::from("foo"); | |
595 | /// unsafe { | |
596 | /// // `s2` now points to the same underlying memory as `s`. | |
597 | /// let mut s2: String = ptr::read(&s); | |
598 | /// | |
599 | /// assert_eq!(s2, "foo"); | |
600 | /// | |
601 | /// // Assigning to `s2` causes its original value to be dropped. Beyond | |
602 | /// // this point, `s` must no longer be used, as the underlying memory has | |
603 | /// // been freed. | |
604 | /// s2 = String::default(); | |
605 | /// assert_eq!(s2, ""); | |
606 | /// | |
607 | /// // Assigning to `s` would cause the old value to be dropped again, | |
608 | /// // resulting in undefined behavior. | |
609 | /// // s = String::from("bar"); // ERROR | |
610 | /// | |
611 | /// // `ptr::write` can be used to overwrite a value without dropping it. | |
612 | /// ptr::write(&mut s, String::from("bar")); | |
613 | /// } | |
614 | /// | |
615 | /// assert_eq!(s, "bar"); | |
616 | /// ``` | |
617 | /// | |
618 | /// [`mem::swap`]: ../mem/fn.swap.html | |
619 | /// [valid]: ../ptr/index.html#safety | |
620 | /// [`Copy`]: ../marker/trait.Copy.html | |
621 | /// [`read_unaligned`]: ./fn.read_unaligned.html | |
622 | /// [`write`]: ./fn.write.html | |
3b2f2976 | 623 | #[inline] |
85aaf69f | 624 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 625 | pub unsafe fn read<T>(src: *const T) -> T { |
74b04a01 | 626 | // `copy_nonoverlapping` takes care of debug_assert. |
532ac7d7 | 627 | let mut tmp = MaybeUninit::<T>::uninit(); |
a1dfa0c6 | 628 | copy_nonoverlapping(src, tmp.as_mut_ptr(), 1); |
532ac7d7 | 629 | tmp.assume_init() |
1a4d82fc JJ |
630 | } |
631 | ||
476ff2be SL |
632 | /// Reads the value from `src` without moving it. This leaves the |
633 | /// memory in `src` unchanged. | |
634 | /// | |
0bf4aa26 | 635 | /// Unlike [`read`], `read_unaligned` works with unaligned pointers. |
476ff2be SL |
636 | /// |
637 | /// # Safety | |
638 | /// | |
0bf4aa26 XL |
639 | /// Behavior is undefined if any of the following conditions are violated: |
640 | /// | |
641 | /// * `src` must be [valid] for reads. | |
642 | /// | |
74b04a01 XL |
643 | /// * `src` must point to a properly initialized value of type `T`. |
644 | /// | |
0bf4aa26 | 645 | /// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of |
9fa01778 | 646 | /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned |
0bf4aa26 XL |
647 | /// value and the value at `*src` can [violate memory safety][read-ownership]. |
648 | /// | |
0731742a | 649 | /// Note that even if `T` has size `0`, the pointer must be non-NULL. |
0bf4aa26 XL |
650 | /// |
651 | /// [`Copy`]: ../marker/trait.Copy.html | |
652 | /// [`read`]: ./fn.read.html | |
653 | /// [`write_unaligned`]: ./fn.write_unaligned.html | |
654 | /// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value | |
655 | /// [valid]: ../ptr/index.html#safety | |
476ff2be | 656 | /// |
416331ca | 657 | /// ## On `packed` structs |
476ff2be | 658 | /// |
416331ca XL |
659 | /// It is currently impossible to create raw pointers to unaligned fields |
660 | /// of a packed struct. | |
476ff2be | 661 | /// |
416331ca XL |
662 | /// Attempting to create a raw pointer to an `unaligned` struct field with |
663 | /// an expression such as `&packed.unaligned as *const FieldType` creates an | |
664 | /// intermediate unaligned reference before converting that to a raw pointer. | |
665 | /// That this reference is temporary and immediately cast is inconsequential | |
666 | /// as the compiler always expects references to be properly aligned. | |
667 | /// As a result, using `&packed.unaligned as *const FieldType` causes immediate | |
668 | /// *undefined behavior* in your program. | |
476ff2be | 669 | /// |
416331ca XL |
670 | /// An example of what not to do and how this relates to `read_unaligned` is: |
671 | /// | |
672 | /// ```no_run | |
0bf4aa26 XL |
673 | /// #[repr(packed, C)] |
674 | /// struct Packed { | |
675 | /// _padding: u8, | |
676 | /// unaligned: u32, | |
476ff2be | 677 | /// } |
0bf4aa26 | 678 | /// |
416331ca | 679 | /// let packed = Packed { |
0bf4aa26 XL |
680 | /// _padding: 0x00, |
681 | /// unaligned: 0x01020304, | |
682 | /// }; | |
683 | /// | |
684 | /// let v = unsafe { | |
416331ca XL |
685 | /// // Here we attempt to take the address of a 32-bit integer which is not aligned. |
686 | /// let unaligned = | |
687 | /// // A temporary unaligned reference is created here which results in | |
688 | /// // undefined behavior regardless of whether the reference is used or not. | |
689 | /// &packed.unaligned | |
690 | /// // Casting to a raw pointer doesn't help; the mistake already happened. | |
691 | /// as *const u32; | |
0bf4aa26 | 692 | /// |
416331ca | 693 | /// let v = std::ptr::read_unaligned(unaligned); |
0bf4aa26 XL |
694 | /// |
695 | /// v | |
696 | /// }; | |
416331ca XL |
697 | /// ``` |
698 | /// | |
699 | /// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however. | |
700 | // FIXME: Update docs based on outcome of RFC #2582 and friends. | |
701 | /// | |
702 | /// # Examples | |
703 | /// | |
704 | /// Read a usize value from a byte buffer: |
0bf4aa26 | 705 | /// |
416331ca XL |
706 | /// ``` |
707 | /// use std::mem; | |
708 | /// | |
709 | /// fn read_usize(x: &[u8]) -> usize { | |
710 | /// assert!(x.len() >= mem::size_of::<usize>()); | |
711 | /// | |
712 | /// let ptr = x.as_ptr() as *const usize; | |
713 | /// | |
714 | /// unsafe { ptr.read_unaligned() } | |
715 | /// } | |
476ff2be | 716 | /// ``` |
3b2f2976 | 717 | #[inline] |
8bb4bdeb | 718 | #[stable(feature = "ptr_unaligned", since = "1.17.0")] |
476ff2be | 719 | pub unsafe fn read_unaligned<T>(src: *const T) -> T { |
74b04a01 | 720 | // `copy_nonoverlapping` takes care of debug_assert. |
532ac7d7 | 721 | let mut tmp = MaybeUninit::<T>::uninit(); |
60c5eb7d | 722 | copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>()); |
532ac7d7 | 723 | tmp.assume_init() |
476ff2be SL |
724 | } |
725 | ||
1a4d82fc JJ |
726 | /// Overwrites a memory location with the given value without reading or |
727 | /// dropping the old value. | |
728 | /// | |
0bf4aa26 XL |
729 | /// `write` does not drop the contents of `dst`. This is safe, but it could leak |
730 | /// allocations or resources, so care should be taken not to overwrite an object | |
b039eaaf | 731 | /// that should be dropped. |
1a4d82fc | 732 | /// |
cc61c64b XL |
733 | /// Additionally, it does not drop `src`. Semantically, `src` is moved into the |
734 | /// location pointed to by `dst`. | |
8bb4bdeb | 735 | /// |
1a4d82fc | 736 | /// This is appropriate for initializing uninitialized memory, or overwriting |
0bf4aa26 XL |
737 | /// memory that has previously been [`read`] from. |
738 | /// | |
739 | /// [`read`]: ./fn.read.html | |
a7813a04 | 740 | /// |
0bf4aa26 XL |
741 | /// # Safety |
742 | /// | |
743 | /// Behavior is undefined if any of the following conditions are violated: | |
744 | /// | |
745 | /// * `dst` must be [valid] for writes. | |
746 | /// | |
747 | /// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the | |
748 | /// case. | |
749 | /// | |
750 | /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. | |
751 | /// | |
752 | /// [valid]: ../ptr/index.html#safety | |
753 | /// [`write_unaligned`]: ./fn.write_unaligned.html | |
476ff2be | 754 | /// |
a7813a04 XL |
755 | /// # Examples |
756 | /// | |
757 | /// Basic usage: | |
758 | /// | |
759 | /// ``` | |
760 | /// let mut x = 0; | |
761 | /// let y = &mut x as *mut i32; | |
762 | /// let z = 12; | |
763 | /// | |
764 | /// unsafe { | |
765 | /// std::ptr::write(y, z); | |
9e0c209e | 766 | /// assert_eq!(std::ptr::read(y), 12); |
a7813a04 XL |
767 | /// } |
768 | /// ``` | |
0bf4aa26 XL |
769 | /// |
770 | /// Manually implement [`mem::swap`]: | |
771 | /// | |
772 | /// ``` | |
773 | /// use std::ptr; | |
774 | /// | |
775 | /// fn swap<T>(a: &mut T, b: &mut T) { | |
776 | /// unsafe { | |
777 | /// // Create a bitwise copy of the value at `a` in `tmp`. | |
778 | /// let tmp = ptr::read(a); | |
779 | /// | |
780 | /// // Exiting at this point (either by explicitly returning or by | |
781 | /// // calling a function which panics) would cause the value in `tmp` to | |
782 | /// // be dropped while the same value is still referenced by `a`. This | |
783 | /// // could trigger undefined behavior if `T` is not `Copy`. | |
784 | /// | |
785 | /// // Create a bitwise copy of the value at `b` in `a`. | |
786 | /// // This is safe because mutable references cannot alias. | |
787 | /// ptr::copy_nonoverlapping(b, a, 1); | |
788 | /// | |
789 | /// // As above, exiting here could trigger undefined behavior because | |
790 | /// // the same value is referenced by `a` and `b`. | |
791 | /// | |
792 | /// // Move `tmp` into `b`. | |
793 | /// ptr::write(b, tmp); | |
794 | /// | |
795 | /// // `tmp` has been moved (`write` takes ownership of its second argument), | |
796 | /// // so nothing is dropped implicitly here. | |
797 | /// } | |
798 | /// } | |
799 | /// | |
800 | /// let mut foo = "foo".to_owned(); | |
801 | /// let mut bar = "bar".to_owned(); | |
802 | /// | |
803 | /// swap(&mut foo, &mut bar); | |
804 | /// | |
805 | /// assert_eq!(foo, "bar"); | |
806 | /// assert_eq!(bar, "foo"); | |
807 | /// ``` | |
808 | /// | |
809 | /// [`mem::swap`]: ../mem/fn.swap.html | |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
    // Debug-only sanity check: writing through an unaligned or null pointer is UB,
    // so catch blatant misuse early in debug builds.
    debug_assert!(is_aligned_and_not_null(dst), "attempt to write to unaligned or null pointer");
    // `move_val_init` moves `src` into `*dst` without reading or dropping the old
    // contents of `*dst`, and without running `src`'s destructor here (per the
    // contract documented above: `src` is semantically moved into `*dst`).
    intrinsics::move_val_init(&mut *dst, src)
}
816 | ||
476ff2be SL |
817 | /// Overwrites a memory location with the given value without reading or |
818 | /// dropping the old value. | |
819 | /// | |
0bf4aa26 | 820 | /// Unlike [`write`], the pointer may be unaligned. |
476ff2be | 821 | /// |
0bf4aa26 XL |
822 | /// `write_unaligned` does not drop the contents of `dst`. This is safe, but it |
823 | /// could leak allocations or resources, so care should be taken not to overwrite | |
824 | /// an object that should be dropped. | |
476ff2be | 825 | /// |
cc61c64b XL |
826 | /// Additionally, it does not drop `src`. Semantically, `src` is moved into the |
827 | /// location pointed to by `dst`. | |
828 | /// | |
476ff2be | 829 | /// This is appropriate for initializing uninitialized memory, or overwriting |
0bf4aa26 XL |
830 | /// memory that has previously been read with [`read_unaligned`]. |
831 | /// | |
832 | /// [`write`]: ./fn.write.html | |
833 | /// [`read_unaligned`]: ./fn.read_unaligned.html | |
834 | /// | |
835 | /// # Safety | |
836 | /// | |
837 | /// Behavior is undefined if any of the following conditions are violated: | |
838 | /// | |
839 | /// * `dst` must be [valid] for writes. | |
840 | /// | |
0731742a | 841 | /// Note that even if `T` has size `0`, the pointer must be non-NULL. |
0bf4aa26 XL |
842 | /// |
843 | /// [valid]: ../ptr/index.html#safety | |
476ff2be | 844 | /// |
416331ca | 845 | /// ## On `packed` structs |
476ff2be | 846 | /// |
416331ca XL |
847 | /// It is currently impossible to create raw pointers to unaligned fields |
848 | /// of a packed struct. | |
476ff2be | 849 | /// |
416331ca XL |
850 | /// Attempting to create a raw pointer to an `unaligned` struct field with |
851 | /// an expression such as `&packed.unaligned as *const FieldType` creates an | |
852 | /// intermediate unaligned reference before converting that to a raw pointer. | |
853 | /// That this reference is temporary and immediately cast is inconsequential | |
854 | /// as the compiler always expects references to be properly aligned. | |
855 | /// As a result, using `&packed.unaligned as *const FieldType` causes immediate | |
856 | /// *undefined behavior* in your program. | |
857 | /// | |
858 | /// An example of what not to do and how this relates to `write_unaligned` is: | |
0bf4aa26 | 859 | /// |
416331ca | 860 | /// ```no_run |
0bf4aa26 | 861 | /// #[repr(packed, C)] |
0bf4aa26 XL |
862 | /// struct Packed { |
863 | /// _padding: u8, | |
864 | /// unaligned: u32, | |
865 | /// } | |
866 | /// | |
867 | /// let v = 0x01020304; | |
416331ca | 868 | /// let mut packed: Packed = unsafe { std::mem::zeroed() }; |
476ff2be | 869 | /// |
416331ca XL |
870 | /// let v = unsafe { |
871 | /// // Here we attempt to take the address of a 32-bit integer which is not aligned. | |
872 | /// let unaligned = | |
873 | /// // A temporary unaligned reference is created here which results in | |
874 | /// // undefined behavior regardless of whether the reference is used or not. | |
875 | /// &mut packed.unaligned | |
876 | /// // Casting to a raw pointer doesn't help; the mistake already happened. | |
877 | /// as *mut u32; | |
0bf4aa26 | 878 | /// |
416331ca | 879 | /// std::ptr::write_unaligned(unaligned, v); |
0bf4aa26 | 880 | /// |
416331ca XL |
881 | /// v |
882 | /// }; | |
883 | /// ``` | |
884 | /// | |
885 | /// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however. | |
886 | // FIXME: Update docs based on outcome of RFC #2582 and friends. | |
887 | /// | |
888 | /// # Examples | |
889 | /// | |
890 | /// Write a usize value to a byte buffer: |
891 | /// | |
892 | /// ``` | |
893 | /// use std::mem; | |
894 | /// | |
895 | /// fn write_usize(x: &mut [u8], val: usize) { | |
896 | /// assert!(x.len() >= mem::size_of::<usize>()); | |
897 | /// | |
898 | /// let ptr = x.as_mut_ptr() as *mut usize; | |
0bf4aa26 | 899 | /// |
416331ca XL |
900 | /// unsafe { ptr.write_unaligned(val) } |
901 | /// } | |
476ff2be SL |
902 | /// ``` |
903 | #[inline] | |
8bb4bdeb | 904 | #[stable(feature = "ptr_unaligned", since = "1.17.0")] |
476ff2be | 905 | pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) { |
74b04a01 | 906 | // `copy_nonoverlapping` takes care of debug_assert. |
60c5eb7d | 907 | copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>()); |
476ff2be SL |
908 | mem::forget(src); |
909 | } | |
910 | ||
7453a54e SL |
911 | /// Performs a volatile read of the value from `src` without moving it. This |
912 | /// leaves the memory in `src` unchanged. | |
913 | /// | |
914 | /// Volatile operations are intended to act on I/O memory, and are guaranteed | |
915 | /// to not be elided or reordered by the compiler across other volatile | |
54a0048b | 916 | /// operations. |
7453a54e | 917 | /// |
0bf4aa26 XL |
918 | /// [`write_volatile`]: ./fn.write_volatile.html |
919 | /// | |
54a0048b SL |
920 | /// # Notes |
921 | /// | |
922 | /// Rust does not currently have a rigorously and formally defined memory model, | |
923 | /// so the precise semantics of what "volatile" means here is subject to change | |
924 | /// over time. That being said, the semantics will almost always end up pretty | |
925 | /// similar to [C11's definition of volatile][c11]. | |
926 | /// | |
3b2f2976 XL |
927 | /// The compiler shouldn't change the relative order or number of volatile |
928 | /// memory operations. However, volatile memory operations on zero-sized types | |
9fa01778 | 929 | /// (e.g., if a zero-sized type is passed to `read_volatile`) are noops |
3b2f2976 XL |
930 | /// and may be ignored. |
931 | /// | |
54a0048b | 932 | /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf |
7453a54e SL |
933 | /// |
934 | /// # Safety | |
935 | /// | |
0bf4aa26 XL |
936 | /// Behavior is undefined if any of the following conditions are violated: |
937 | /// | |
938 | /// * `src` must be [valid] for reads. | |
939 | /// | |
940 | /// * `src` must be properly aligned. | |
941 | /// | |
74b04a01 XL |
942 | /// * `src` must point to a properly initialized value of type `T`. |
943 | /// | |
48663c56 | 944 | /// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of |
9fa01778 | 945 | /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned |
0bf4aa26 XL |
946 | /// value and the value at `*src` can [violate memory safety][read-ownership]. |
947 | /// However, storing non-[`Copy`] types in volatile memory is almost certainly | |
948 | /// incorrect. | |
949 | /// | |
950 | /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. | |
951 | /// | |
952 | /// [valid]: ../ptr/index.html#safety | |
953 | /// [`Copy`]: ../marker/trait.Copy.html | |
954 | /// [`read`]: ./fn.read.html | |
0731742a | 955 | /// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value |
a7813a04 | 956 | /// |
b7449926 XL |
957 | /// Just like in C, whether an operation is volatile has no bearing whatsoever |
958 | /// on questions involving concurrent access from multiple threads. Volatile | |
959 | /// accesses behave exactly like non-atomic accesses in that regard. In particular, | |
960 | /// a race between a `read_volatile` and any write operation to the same location | |
961 | /// is undefined behavior. | |
962 | /// | |
a7813a04 XL |
963 | /// # Examples |
964 | /// | |
965 | /// Basic usage: | |
966 | /// | |
967 | /// ``` | |
968 | /// let x = 12; | |
969 | /// let y = &x as *const i32; | |
970 | /// | |
9e0c209e SL |
971 | /// unsafe { |
972 | /// assert_eq!(std::ptr::read_volatile(y), 12); | |
973 | /// } | |
a7813a04 | 974 | /// ``` |
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    // Debug-only sanity check: reading through an unaligned or null pointer is UB,
    // so catch blatant misuse early in debug builds.
    debug_assert!(is_aligned_and_not_null(src), "attempt to read from unaligned or null pointer");
    // Volatile load: per the documentation above, the compiler must not elide this
    // load or reorder it across other volatile operations.
    intrinsics::volatile_load(src)
}
981 | ||
982 | /// Performs a volatile write of a memory location with the given value without | |
983 | /// reading or dropping the old value. | |
984 | /// | |
985 | /// Volatile operations are intended to act on I/O memory, and are guaranteed | |
986 | /// to not be elided or reordered by the compiler across other volatile | |
54a0048b SL |
987 | /// operations. |
988 | /// | |
0bf4aa26 XL |
989 | /// `write_volatile` does not drop the contents of `dst`. This is safe, but it |
990 | /// could leak allocations or resources, so care should be taken not to overwrite | |
991 | /// an object that should be dropped. | |
992 | /// | |
993 | /// Additionally, it does not drop `src`. Semantically, `src` is moved into the | |
994 | /// location pointed to by `dst`. | |
995 | /// | |
996 | /// [`read_volatile`]: ./fn.read_volatile.html | |
997 | /// | |
54a0048b SL |
998 | /// # Notes |
999 | /// | |
1000 | /// Rust does not currently have a rigorously and formally defined memory model, | |
1001 | /// so the precise semantics of what "volatile" means here is subject to change | |
1002 | /// over time. That being said, the semantics will almost always end up pretty | |
1003 | /// similar to [C11's definition of volatile][c11]. | |
7453a54e | 1004 | /// |
3b2f2976 XL |
1005 | /// The compiler shouldn't change the relative order or number of volatile |
1006 | /// memory operations. However, volatile memory operations on zero-sized types | |
9fa01778 | 1007 | /// (e.g., if a zero-sized type is passed to `write_volatile`) are noops |
3b2f2976 XL |
1008 | /// and may be ignored. |
1009 | /// | |
54a0048b | 1010 | /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf |
7453a54e SL |
1011 | /// |
1012 | /// # Safety | |
1013 | /// | |
0bf4aa26 | 1014 | /// Behavior is undefined if any of the following conditions are violated: |
7453a54e | 1015 | /// |
0bf4aa26 | 1016 | /// * `dst` must be [valid] for writes. |
7453a54e | 1017 | /// |
0bf4aa26 XL |
1018 | /// * `dst` must be properly aligned. |
1019 | /// | |
1020 | /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. | |
1021 | /// | |
1022 | /// [valid]: ../ptr/index.html#safety | |
a7813a04 | 1023 | /// |
b7449926 XL |
1024 | /// Just like in C, whether an operation is volatile has no bearing whatsoever |
1025 | /// on questions involving concurrent access from multiple threads. Volatile | |
1026 | /// accesses behave exactly like non-atomic accesses in that regard. In particular, | |
1027 | /// a race between a `write_volatile` and any other operation (reading or writing) | |
1028 | /// on the same location is undefined behavior. | |
1029 | /// | |
a7813a04 XL |
1030 | /// # Examples |
1031 | /// | |
1032 | /// Basic usage: | |
1033 | /// | |
1034 | /// ``` | |
1035 | /// let mut x = 0; | |
1036 | /// let y = &mut x as *mut i32; | |
1037 | /// let z = 12; | |
1038 | /// | |
1039 | /// unsafe { | |
1040 | /// std::ptr::write_volatile(y, z); | |
9e0c209e | 1041 | /// assert_eq!(std::ptr::read_volatile(y), 12); |
a7813a04 XL |
1042 | /// } |
1043 | /// ``` | |
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    // Debug-only sanity check: writing through an unaligned or null pointer is UB,
    // so catch blatant misuse early in debug builds.
    debug_assert!(is_aligned_and_not_null(dst), "attempt to write to unaligned or null pointer");
    // Volatile store: per the documentation above, the compiler must not elide this
    // store or reorder it across other volatile operations. The old contents of
    // `*dst` are not read or dropped.
    intrinsics::volatile_store(dst, src);
}
1050 | ||
dfeec247 XL |
/// Align pointer `p`.
///
/// Calculate offset (in terms of elements of `stride` stride) that has to be applied
/// to pointer `p` so that pointer `p` would get aligned to `a`.
///
/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated
/// constants.
///
/// If we ever decide to make it possible to call the intrinsic with `a` that is not a
/// power-of-two, it will probably be more prudent to just change to a naive implementation rather
/// than trying to adapt this to accommodate that change.
///
/// Any questions go to @nagisa.
#[lang = "align_offset"]
pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
    /// Calculate multiplicative modular inverse of `x` modulo `m`.
    ///
    /// This implementation is tailored for align_offset and has following preconditions:
    ///
    /// * `m` is a power-of-two;
    /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
    ///
    /// Implementation of this function shall not panic. Ever.
    #[inline]
    fn mod_inv(x: usize, m: usize) -> usize {
        /// Multiplicative modular inverse table modulo 2⁴ = 16.
        ///
        /// Note, that this table does not contain values where inverse does not exist (i.e., for
        /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
        const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
        /// Modulo for which the `INV_TABLE_MOD_16` is intended.
        const INV_TABLE_MOD: usize = 16;
        /// INV_TABLE_MOD²
        const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;

        // `x` is odd for every entry that exists in the table, so `(x mod 16) >> 1`
        // indexes the 8-entry table without ever going out of bounds.
        let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
        if m <= INV_TABLE_MOD {
            table_inverse & (m - 1)
        } else {
            // We iterate "up" using the following formula:
            //
            // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
            //
            // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
            let mut inverse = table_inverse;
            let mut going_mod = INV_TABLE_MOD_SQUARED;
            loop {
                // y = y * (2 - xy) mod n
                //
                // Note, that we use wrapping operations here intentionally – the original formula
                // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
                // usize::max_value()` instead, because we take the result `mod n` at the end
                // anyway.
                inverse = inverse.wrapping_mul(2usize.wrapping_sub(x.wrapping_mul(inverse)));
                if going_mod >= m {
                    return inverse & (m - 1);
                }
                going_mod = going_mod.wrapping_mul(going_mod);
            }
        }
    }

    let stride = mem::size_of::<T>();
    // `a` is a power of two (and thus nonzero), so `a - 1` is a valid all-low-bits
    // mask; `wrapping_sub` keeps this panic-free in all builds.
    let a_minus_one = a.wrapping_sub(1);
    let pmoda = p as usize & a_minus_one;

    if pmoda == 0 {
        // Already aligned. Yay!
        return 0;
    }

    if stride <= 1 {
        return if stride == 0 {
            // If the pointer is not aligned, and the element is zero-sized, then no amount of
            // elements will ever align the pointer.
            !0
        } else {
            // stride == 1: each element advances the address by one byte, so the
            // distance to the next multiple of `a` is the answer.
            a.wrapping_sub(pmoda)
        };
    }

    let smoda = stride & a_minus_one;
    // a is power-of-two so cannot be 0. stride = 0 is handled above.
    let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a));
    let gcd = 1usize << gcdpow;

    if p as usize & (gcd.wrapping_sub(1)) == 0 {
        // This branch solves for the following linear congruence equation:
        //
        // ` p + so = 0 mod a `
        //
        // `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the
        // requested alignment.
        //
        // With `g = gcd(a, s)`, and the above asserting that `p` is also divisible by `g`, we can
        // denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to:
        //
        // ` p' + s'o = 0 mod a' `
        // ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
        //
        // The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the second
        // term is "how does incrementing `p` by `s` bytes change the relative alignment of `p`" (again
        // divided by `g`).
        // Division by `g` is necessary to make the inverse well formed if `a` and `s` are not
        // co-prime.
        //
        // Furthermore, the result produced by this solution is not "minimal", so it is necessary
        // to take the result `o mod lcm(s, a)`. We can replace `lcm(s, a)` with just a `a'`.
        let a2 = a >> gcdpow;
        let a2minus1 = a2.wrapping_sub(1);
        let s2 = smoda >> gcdpow;
        let minusp2 = a2.wrapping_sub(pmoda >> gcdpow);
        return (minusp2.wrapping_mul(mod_inv(s2, a2))) & a2minus1;
    }

    // Cannot be aligned at all.
    usize::max_value()
}
1a4d82fc | 1170 | |
9fa01778 | 1171 | /// Compares raw pointers for equality. |
9e0c209e SL |
1172 | /// |
1173 | /// This is the same as using the `==` operator, but less generic: | |
1174 | /// the arguments have to be `*const T` raw pointers, | |
1175 | /// not anything that implements `PartialEq`. | |
1176 | /// | |
1177 | /// This can be used to compare `&T` references (which coerce to `*const T` implicitly) | |
1178 | /// by their address rather than comparing the values they point to | |
1179 | /// (which is what the `PartialEq for &T` implementation does). | |
1180 | /// | |
1181 | /// # Examples | |
1182 | /// | |
1183 | /// ``` | |
9e0c209e SL |
1184 | /// use std::ptr; |
1185 | /// | |
1186 | /// let five = 5; | |
1187 | /// let other_five = 5; | |
1188 | /// let five_ref = &five; | |
1189 | /// let same_five_ref = &five; | |
1190 | /// let other_five_ref = &other_five; | |
1191 | /// | |
1192 | /// assert!(five_ref == same_five_ref); | |
9e0c209e | 1193 | /// assert!(ptr::eq(five_ref, same_five_ref)); |
532ac7d7 XL |
1194 | /// |
1195 | /// assert!(five_ref == other_five_ref); | |
9e0c209e SL |
1196 | /// assert!(!ptr::eq(five_ref, other_five_ref)); |
1197 | /// ``` | |
532ac7d7 XL |
1198 | /// |
1199 | /// Slices are also compared by their length (fat pointers): | |
1200 | /// | |
1201 | /// ``` | |
1202 | /// let a = [1, 2, 3]; | |
1203 | /// assert!(std::ptr::eq(&a[..3], &a[..3])); | |
1204 | /// assert!(!std::ptr::eq(&a[..2], &a[..3])); | |
1205 | /// assert!(!std::ptr::eq(&a[0..2], &a[1..3])); | |
1206 | /// ``` | |
1207 | /// | |
1208 | /// Traits are also compared by their implementation: | |
1209 | /// | |
1210 | /// ``` | |
1211 | /// #[repr(transparent)] | |
1212 | /// struct Wrapper { member: i32 } | |
1213 | /// | |
1214 | /// trait Trait {} | |
1215 | /// impl Trait for Wrapper {} | |
1216 | /// impl Trait for i32 {} | |
1217 | /// | |
e74abb32 XL |
1218 | /// let wrapper = Wrapper { member: 10 }; |
1219 | /// | |
1220 | /// // Pointers have equal addresses. | |
1221 | /// assert!(std::ptr::eq( | |
1222 | /// &wrapper as *const Wrapper as *const u8, | |
1223 | /// &wrapper.member as *const i32 as *const u8 | |
1224 | /// )); | |
1225 | /// | |
1226 | /// // Objects have equal addresses, but `Trait` has different implementations. | |
1227 | /// assert!(!std::ptr::eq( | |
1228 | /// &wrapper as &dyn Trait, | |
1229 | /// &wrapper.member as &dyn Trait, | |
1230 | /// )); | |
1231 | /// assert!(!std::ptr::eq( | |
1232 | /// &wrapper as &dyn Trait as *const dyn Trait, | |
1233 | /// &wrapper.member as &dyn Trait as *const dyn Trait, | |
1234 | /// )); | |
1235 | /// | |
1236 | /// // Converting the reference to a `*const u8` compares by address. | |
1237 | /// assert!(std::ptr::eq( | |
1238 | /// &wrapper as &dyn Trait as *const dyn Trait as *const u8, | |
1239 | /// &wrapper.member as &dyn Trait as *const dyn Trait as *const u8, | |
1240 | /// )); | |
532ac7d7 | 1241 | /// ``` |
8bb4bdeb | 1242 | #[stable(feature = "ptr_eq", since = "1.17.0")] |
9e0c209e SL |
1243 | #[inline] |
1244 | pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool { | |
1245 | a == b | |
1246 | } | |
1247 | ||
0731742a XL |
1248 | /// Hash a raw pointer. |
1249 | /// | |
1250 | /// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly) | |
1251 | /// by its address rather than the value it points to | |
1252 | /// (which is what the `Hash for &T` implementation does). | |
1253 | /// | |
1254 | /// # Examples | |
1255 | /// | |
1256 | /// ``` | |
0731742a XL |
1257 | /// use std::collections::hash_map::DefaultHasher; |
1258 | /// use std::hash::{Hash, Hasher}; | |
1259 | /// use std::ptr; | |
1260 | /// | |
1261 | /// let five = 5; | |
1262 | /// let five_ref = &five; | |
1263 | /// | |
1264 | /// let mut hasher = DefaultHasher::new(); | |
1265 | /// ptr::hash(five_ref, &mut hasher); | |
1266 | /// let actual = hasher.finish(); | |
1267 | /// | |
1268 | /// let mut hasher = DefaultHasher::new(); | |
1269 | /// (five_ref as *const i32).hash(&mut hasher); | |
1270 | /// let expected = hasher.finish(); | |
1271 | /// | |
1272 | /// assert_eq!(actual, expected); | |
1273 | /// ``` | |
#[stable(feature = "ptr_hash", since = "1.35.0")]
pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
    // Dispatch to the `Hash` impl for raw pointers, i.e. hash by address rather
    // than by the value pointed to (see the function documentation above).
    use crate::hash::Hash;
    hashee.hash(into);
}
1279 | ||
e9174d1e SL |
// Impls for function pointers
//
// Generates the comparison/hash/formatting trait impls for one concrete function
// pointer type `$FnTy`. All of them operate on the pointer cast to `usize`, i.e.
// by code address, never by calling the function.
macro_rules! fnptr_impls_safety_abi {
    ($FnTy: ty, $($Arg: ident),*) => {
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialEq for $FnTy {
            #[inline]
            fn eq(&self, other: &Self) -> bool {
                // Equal iff both point at the same code address.
                *self as usize == *other as usize
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Eq for $FnTy {}

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialOrd for $FnTy {
            #[inline]
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                // Ordered by address; delegates to `usize`'s total order.
                (*self as usize).partial_cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Ord for $FnTy {
            #[inline]
            fn cmp(&self, other: &Self) -> Ordering {
                (*self as usize).cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> hash::Hash for $FnTy {
            fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
                // Hash the address as a plain `usize`.
                state.write_usize(*self as usize)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // `Debug` output for fn pointers is the same as `Pointer` output.
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }
    }
}
1332 | ||
// For one arity, expand `fnptr_impls_safety_abi!` for every supported combination
// of safety (`fn` / `unsafe fn`) and ABI (`extern "Rust"` / `extern "C"`, plus the
// C-variadic `...` forms where at least one fixed parameter exists).
macro_rules! fnptr_impls_args {
    ($($Arg: ident),+) => {
        fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
    };
    () => {
        // No variadic functions with 0 parameters
        fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
    };
}
1350 | ||
// Instantiate the function-pointer trait impls for arities 0 through 12.
fnptr_impls_args! {}
fnptr_impls_args! { A }
fnptr_impls_args! { A, B }
fnptr_impls_args! { A, B, C }
fnptr_impls_args! { A, B, C, D }
fnptr_impls_args! { A, B, C, D, E }
fnptr_impls_args! { A, B, C, D, E, F }
fnptr_impls_args! { A, B, C, D, E, F, G }
fnptr_impls_args! { A, B, C, D, E, F, G, H }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }