//! Manually manage memory through raw pointers.
//!
//! *[See also the pointer primitive types](pointer).*
//!
//! # Safety
//!
//! Many functions in this module take raw pointers as arguments and read from
//! or write to them. For this to be safe, these pointers must be *valid*.
//! Whether a pointer is valid depends on the operation it is used for
//! (read or write), and the extent of the memory that is accessed (i.e.,
//! how many bytes are read/written). Most functions use `*mut T` and `*const T`
//! to access only a single value, in which case the documentation omits the size
//! and implicitly assumes it to be `size_of::<T>()` bytes.
//!
//! The precise rules for validity are not determined yet. The guarantees that are
//! provided at this point are very minimal:
//!
//! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst].
//! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer
//!   be *dereferenceable*: the memory range of the given size starting at the pointer must all be
//!   within the bounds of a single allocated object. Note that in Rust,
//!   every (stack-allocated) variable is considered a separate allocated object.
//! * Even for operations of [size zero][zst], the pointer must not be pointing to deallocated
//!   memory, i.e., deallocation makes pointers invalid even for zero-sized operations. However,
//!   casting any non-zero integer *literal* to a pointer is valid for zero-sized accesses, even if
//!   some memory happens to exist at that address and gets deallocated. This corresponds to writing
//!   your own allocator: allocating zero-sized objects is not very hard. The canonical way to
//!   obtain a pointer that is valid for zero-sized accesses is [`NonNull::dangling`].
//! * All accesses performed by functions in this module are *non-atomic* in the sense
//!   of [atomic operations] used to synchronize between threads. This means it is
//!   undefined behavior to perform two concurrent accesses to the same location from different
//!   threads unless both accesses only read from memory. Notice that this explicitly
//!   includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot
//!   be used for inter-thread synchronization.
//! * The result of casting a reference to a pointer is valid for as long as the
//!   underlying object is live and no reference (just raw pointers) is used to
//!   access the same memory.
//!
//! These axioms, along with careful use of [`offset`] for pointer arithmetic,
//! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
//! will be provided eventually, as the [aliasing] rules are being determined. For more
//! information, see the [book] as well as the section in the reference devoted
//! to [undefined behavior][ub].
//!
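//! For instance, here is a minimal sketch of the zero-size rule above: a
//! well-aligned [`NonNull::dangling`] pointer is valid for zero-sized reads,
//! even though it points into no allocation:
//!
//! ```
//! use std::ptr::NonNull;
//!
//! let p: NonNull<u8> = NonNull::dangling();
//! // A zero-sized read through a non-null, properly aligned pointer is allowed.
//! let zst: [u8; 0] = unsafe { std::ptr::read(p.as_ptr() as *const [u8; 0]) };
//! assert_eq!(zst.len(), 0);
//! ```
//!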
//! ## Alignment
//!
//! Valid raw pointers as defined above are not necessarily properly aligned (where
//! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
//! aligned to `mem::align_of::<T>()`). However, most functions require their
//! arguments to be properly aligned, and will explicitly state
//! this requirement in their documentation. Notable exceptions to this are
//! [`read_unaligned`] and [`write_unaligned`].
//!
//! When a function requires proper alignment, it does so even if the access
//! has size 0, i.e., even if memory is not actually touched. Consider using
//! [`NonNull::dangling`] in such cases.
//!
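//! As a sketch of the distinction, reading a `u32` through a deliberately
//! misaligned (but valid) pointer requires [`read_unaligned`]:
//!
//! ```
//! let bytes = [0u8; 8];
//! // In bounds, but offset by one byte, so (usually) not aligned for `u32`.
//! let p = unsafe { bytes.as_ptr().add(1) } as *const u32;
//! // `read` would require proper alignment; `read_unaligned` does not.
//! let v = unsafe { std::ptr::read_unaligned(p) };
//! assert_eq!(v, 0);
//! ```
//!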
//! ## Allocated object
//!
//! For several operations, such as [`offset`] or field projections (`expr.field`), the notion of an
//! "allocated object" becomes relevant. An allocated object is a contiguous region of memory.
//! Common examples of allocated objects include stack-allocated variables (each variable is a
//! separate allocated object), heap allocations (each allocation created by the global allocator is
//! a separate allocated object), and `static` variables.
//!
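//! For example (an illustrative sketch), pointer arithmetic with [`offset`] or
//! `add` must stay within the bounds of (or one byte past the end of) a single
//! allocated object, such as one array:
//!
//! ```
//! let a = [1u8, 2, 3];
//! let p = a.as_ptr();
//! // In-bounds arithmetic within the single allocated object `a`.
//! let q = unsafe { p.add(2) };
//! assert_eq!(unsafe { *q }, 3);
//! ```
//!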
//! [aliasing]: ../../nomicon/aliasing.html
//! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
//! [ub]: ../../reference/behavior-considered-undefined.html
//! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts
//! [atomic operations]: crate::sync::atomic
//! [`offset`]: pointer::offset

#![stable(feature = "rust1", since = "1.0.0")]

use crate::cmp::Ordering;
use crate::fmt;
use crate::hash;
use crate::intrinsics::{self, abort, is_aligned_and_not_null};
use crate::mem::{self, MaybeUninit};

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::copy_nonoverlapping;

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::copy;

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::write_bytes;

mod metadata;
pub(crate) use metadata::PtrRepr;
#[unstable(feature = "ptr_metadata", issue = "81513")]
pub use metadata::{from_raw_parts, from_raw_parts_mut, metadata, DynMetadata, Pointee, Thin};

mod non_null;
#[stable(feature = "nonnull", since = "1.25.0")]
pub use non_null::NonNull;

mod unique;
#[unstable(feature = "ptr_internals", issue = "none")]
pub use unique::Unique;

mod const_ptr;
mod mut_ptr;

/// Executes the destructor (if any) of the pointed-to value.
///
/// This is semantically equivalent to calling [`ptr::read`] and discarding
/// the result, but has the following advantages:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
///   trait objects, because they can't be read out onto the stack and
///   dropped normally.
///
/// * It is friendlier to the optimizer to do this over [`ptr::read`] when
///   dropping manually allocated memory (e.g., in the implementations of
///   `Box`/`Rc`/`Vec`), as the compiler doesn't need to prove that it's
///   sound to elide the copy.
///
/// * It can be used to drop [pinned] data when `T` is not `repr(packed)`
///   (pinned data must not be moved before it is dropped).
///
/// Unaligned values cannot be dropped in place; they must be copied to an aligned
/// location first using [`ptr::read_unaligned`]. For packed structs, this move is
/// done automatically by the compiler. This means the fields of packed structs
/// are not dropped in-place.
///
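/// For example, here is a sketch (with an illustrative `Packed` type) of manually
/// moving a value out of a packed struct to an aligned location before dropping it:
///
/// ```
/// use std::ptr;
///
/// #[repr(packed)]
/// struct Packed {
///     _pad: u8,
///     s: String,
/// }
///
/// let p = Packed { _pad: 0, s: String::from("hello") };
/// unsafe {
///     // Move the possibly-unaligned field to an aligned local...
///     let s = ptr::addr_of!(p.s).read_unaligned();
///     // ...drop it there...
///     drop(s);
///     // ...and make sure the original is not dropped a second time.
///     std::mem::forget(p);
/// }
/// ```
///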
/// [`ptr::read`]: self::read
/// [`ptr::read_unaligned`]: self::read_unaligned
/// [pinned]: crate::pin
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `to_drop` must be [valid] for both reads and writes.
///
/// * `to_drop` must be properly aligned.
///
/// * The value `to_drop` points to must be valid for dropping, which may mean it must uphold
///   additional invariants - this is type-dependent.
///
/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
/// foo` counts as a use because it will cause the value to be dropped
/// again. [`write()`] can be used to overwrite data without causing it to be
/// dropped.
///
/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Manually remove the last item from a vector:
///
/// ```
/// use std::ptr;
/// use std::rc::Rc;
///
/// let last = Rc::new(1);
/// let weak = Rc::downgrade(&last);
///
/// let mut v = vec![Rc::new(0), last];
///
/// unsafe {
///     // Get a raw pointer to the last element in `v`.
///     let ptr = &mut v[1] as *mut _;
///     // Shorten `v` to prevent the last item from being dropped. We do that first,
///     // to prevent issues if the `drop_in_place` below panics.
///     v.set_len(1);
///     // Without a call to `drop_in_place`, the last item would never be dropped,
///     // and the memory it manages would be leaked.
///     ptr::drop_in_place(ptr);
/// }
///
/// assert_eq!(v, &[0.into()]);
///
/// // Ensure that the last item was dropped.
/// assert!(weak.upgrade().is_none());
/// ```
///
/// Notice that the compiler performs this copy automatically when dropping packed structs,
/// i.e., you do not usually have to worry about such issues unless you call `drop_in_place`
/// manually.
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
    // Code here does not matter - this is replaced by the
    // real drop glue by the compiler.

    // SAFETY: see comment above
    unsafe { drop_in_place(to_drop) }
}

/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
#[rustc_diagnostic_item = "ptr_null"]
pub const fn null<T>() -> *const T {
    0 as *const T
}

/// Creates a null mutable raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
#[rustc_diagnostic_item = "ptr_null_mut"]
pub const fn null_mut<T>() -> *mut T {
    0 as *mut T
}

/// Forms a raw slice from a pointer and a length.
///
/// The `len` argument is the number of **elements**, not the number of bytes.
///
/// This function is safe, but actually using the return value is unsafe.
/// See the documentation of [`slice::from_raw_parts`] for slice safety requirements.
///
/// [`slice::from_raw_parts`]: crate::slice::from_raw_parts
///
/// # Examples
///
/// ```rust
/// use std::ptr;
///
/// // create a slice pointer when starting out with a pointer to the first element
/// let x = [5, 6, 7];
/// let raw_pointer = x.as_ptr();
/// let slice = ptr::slice_from_raw_parts(raw_pointer, 3);
/// assert_eq!(unsafe { &*slice }[2], 7);
/// ```
#[inline]
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
    from_raw_parts(data.cast(), len)
}

/// Performs the same functionality as [`slice_from_raw_parts`], except that a
/// raw mutable slice is returned, as opposed to a raw immutable slice.
///
/// See the documentation of [`slice_from_raw_parts`] for more details.
///
/// This function is safe, but actually using the return value is unsafe.
/// See the documentation of [`slice::from_raw_parts_mut`] for slice safety requirements.
///
/// [`slice::from_raw_parts_mut`]: crate::slice::from_raw_parts_mut
///
/// # Examples
///
/// ```rust
/// use std::ptr;
///
/// let x = &mut [5, 6, 7];
/// let raw_pointer = x.as_mut_ptr();
/// let slice = ptr::slice_from_raw_parts_mut(raw_pointer, 3);
///
/// unsafe {
///     (*slice)[2] = 99; // assign a value at an index in the slice
/// };
///
/// assert_eq!(unsafe { &*slice }[2], 99);
/// ```
#[inline]
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
    from_raw_parts_mut(data.cast(), len)
}

/// Swaps the values at two mutable locations of the same type, without
/// deinitializing either.
///
/// But for the following two exceptions, this function is semantically
/// equivalent to [`mem::swap`]:
///
/// * It operates on raw pointers instead of references. When references are
///   available, [`mem::swap`] should be preferred.
///
/// * The two pointed-to values may overlap. If the values do overlap, then the
///   overlapping region of memory from `x` will be used. This is demonstrated
///   in the second example below.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * Both `x` and `y` must be [valid] for both reads and writes.
///
/// * Both `x` and `y` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointers must be non-null and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Swapping two non-overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
///
/// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]`
/// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]`
///
/// unsafe {
///     ptr::swap(x, y);
///     assert_eq!([2, 3, 0, 1], array);
/// }
/// ```
///
/// Swapping two overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array: [i32; 4] = [0, 1, 2, 3];
///
/// let array_ptr: *mut i32 = array.as_mut_ptr();
///
/// let x = array_ptr as *mut [i32; 3]; // this is `array[0..3]`
/// let y = unsafe { array_ptr.add(1) } as *mut [i32; 3]; // this is `array[1..4]`
///
/// unsafe {
///     ptr::swap(x, y);
///     // The indices `1..3` of the slice overlap between `x` and `y`.
///     // Reasonable results would be for them to be `[2, 3]`, so that indices `0..3` are
///     // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]`
///     // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`).
///     // This implementation is defined to make the latter choice.
///     assert_eq!([1, 0, 1, 2], array);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
    // Give ourselves some scratch space to work with.
    // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
    let mut tmp = MaybeUninit::<T>::uninit();

    // Perform the swap
    // SAFETY: the caller must guarantee that `x` and `y` are
    // valid for writes and properly aligned. `tmp` cannot be
    // overlapping either `x` or `y` because `tmp` was just allocated
    // on the stack as a separate allocated object.
    unsafe {
        copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
        copy(y, x, 1); // `x` and `y` may overlap
        copy_nonoverlapping(tmp.as_ptr(), y, 1);
    }
}

/// Swaps `count * size_of::<T>()` bytes between the two regions of memory
/// beginning at `x` and `y`. The two regions must *not* overlap.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * Both `x` and `y` must be [valid] for both reads and writes of `count *
///   size_of::<T>()` bytes.
///
/// * Both `x` and `y` must be properly aligned.
///
/// * The region of memory beginning at `x` with a size of `count *
///   size_of::<T>()` bytes must *not* overlap with the region of memory
///   beginning at `y` with the same size.
///
/// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`,
/// the pointers must be non-null and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::ptr;
///
/// let mut x = [1, 2, 3, 4];
/// let mut y = [7, 8, 9];
///
/// unsafe {
///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
/// }
///
/// assert_eq!(x, [7, 8, 3, 4]);
/// assert_eq!(y, [1, 2, 9]);
/// ```
#[inline]
#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
    let x = x as *mut u8;
    let y = y as *mut u8;
    let len = mem::size_of::<T>() * count;
    // SAFETY: the caller must guarantee that `x` and `y` are
    // valid for writes and properly aligned.
    unsafe { swap_nonoverlapping_bytes(x, y, len) }
}

#[inline]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub(crate) const unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
    // NOTE(eddyb) SPIR-V's Logical addressing model doesn't allow for arbitrary
    // reinterpretation of values as (chunkable) byte arrays, and the loop in the
    // block optimization in `swap_nonoverlapping_bytes` is hard to rewrite back
    // into the (unoptimized) direct swapping implementation, so we disable it.
    // FIXME(eddyb) the block optimization also prevents MIR optimizations from
    // understanding `mem::replace`, `Option::take`, etc. - a better overall
    // solution might be to make `swap_nonoverlapping` into an intrinsic, which
    // a backend can choose to implement using the block optimization, or not.
    #[cfg(not(target_arch = "spirv"))]
    {
        // Only apply the block optimization in `swap_nonoverlapping_bytes` for types
        // at least as large as the block size, to avoid pessimizing codegen.
        if mem::size_of::<T>() >= 32 {
            // SAFETY: the caller must uphold the safety contract for `swap_nonoverlapping`.
            unsafe { swap_nonoverlapping(x, y, 1) };
            return;
        }
    }

    // Direct swapping, for the cases not going through the block optimization.
    // SAFETY: the caller must guarantee that `x` and `y` are valid
    // for writes, properly aligned, and non-overlapping.
    unsafe {
        let z = read(x);
        copy_nonoverlapping(y, x, 1);
        write(y, z);
    }
}

#[inline]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
const unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize SIMD to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox")), repr(simd))]
    struct Block(u64, u64, u64, u64);
    struct UnalignedBlock(u64, u64, u64, u64);

    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t = mem::MaybeUninit::<Block>::uninit();
        let t = t.as_mut_ptr() as *mut u8;

        // SAFETY: As `i < len`, and as the caller must guarantee that `x` and `y` are valid
        // for `len` bytes, `x + i` and `y + i` must be valid addresses, which fulfills the
        // safety contract for `add`.
        //
        // Also, the caller must guarantee that `x` and `y` are valid for writes, properly aligned,
        // and non-overlapping, which fulfills the safety contract for `copy_nonoverlapping`.
        unsafe {
            let x = x.add(i);
            let y = y.add(i);

            // Swap a block of bytes of x & y, using t as a temporary buffer
            // This should be optimized into efficient SIMD operations where available
            copy_nonoverlapping(x, t, block_size);
            copy_nonoverlapping(y, x, block_size);
            copy_nonoverlapping(t, y, block_size);
        }
        i += block_size;
    }

    if i < len {
        // Swap any remaining bytes
        let mut t = mem::MaybeUninit::<UnalignedBlock>::uninit();
        let rem = len - i;

        let t = t.as_mut_ptr() as *mut u8;

        // SAFETY: see previous safety comment.
        unsafe {
            let x = x.add(i);
            let y = y.add(i);

            copy_nonoverlapping(x, t, rem);
            copy_nonoverlapping(y, x, rem);
            copy_nonoverlapping(t, y, rem);
        }
    }
}

/// Moves `src` into the pointed `dst`, returning the previous `dst` value.
///
/// Neither value is dropped.
///
/// This function is semantically equivalent to [`mem::replace`] except that it
/// operates on raw pointers instead of references. When references are
/// available, [`mem::replace`] should be preferred.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for both reads and writes.
///
/// * `dst` must be properly aligned.
///
/// * `dst` must point to a properly initialized value of type `T`.
///
/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let mut rust = vec!['b', 'u', 's', 't'];
///
/// // `mem::replace` would have the same effect without requiring the unsafe
/// // block.
/// let b = unsafe {
///     ptr::replace(&mut rust[0], 'r')
/// };
///
/// assert_eq!(b, 'b');
/// assert_eq!(rust, &['r', 'u', 's', 't']);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_replace", issue = "83164")]
pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
    // SAFETY: the caller must guarantee that `dst` is valid to be
    // cast to a mutable reference (valid for writes, aligned, initialized),
    // and cannot overlap `src` since `dst` must point to a distinct
    // allocated object.
    unsafe {
        mem::swap(&mut *dst, &mut src); // cannot overlap
    }
    src
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
///   case.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
///
/// ## Ownership of the Returned Value
///
/// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
/// If `T` is not [`Copy`], using both the returned value and the value at
/// `*src` can violate memory safety. Note that assigning to `*src` counts as a
/// use because it will attempt to drop the value at `*src`.
///
/// [`write()`] can be used to overwrite data without causing it to be dropped.
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("foo");
/// unsafe {
///     // `s2` now points to the same underlying memory as `s`.
///     let mut s2: String = ptr::read(&s);
///
///     assert_eq!(s2, "foo");
///
///     // Assigning to `s2` causes its original value to be dropped. Beyond
///     // this point, `s` must no longer be used, as the underlying memory has
///     // been freed.
///     s2 = String::default();
///     assert_eq!(s2, "");
///
///     // Assigning to `s` would cause the old value to be dropped again,
///     // resulting in undefined behavior.
///     // s = String::from("bar"); // ERROR
///
///     // `ptr::write` can be used to overwrite a value without dropping it.
///     ptr::write(&mut s, String::from("bar"));
/// }
///
/// assert_eq!(s, "bar");
/// ```
///
/// [valid]: self#safety
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
pub const unsafe fn read<T>(src: *const T) -> T {
    let mut tmp = MaybeUninit::<T>::uninit();
    // SAFETY: the caller must guarantee that `src` is valid for reads.
    // `src` cannot overlap `tmp` because `tmp` was just allocated on
    // the stack as a separate allocated object.
    //
    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
    // to be properly initialized.
    unsafe {
        copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
        tmp.assume_init()
    }
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// Unlike [`read`], `read_unaligned` works with unaligned pointers.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
///
/// Note that even if `T` has size `0`, the pointer must be non-null.
///
/// [read-ownership]: read#ownership-of-the-returned-value
/// [valid]: self#safety
///
/// ## On `packed` structs
///
/// Attempting to create a raw pointer to an `unaligned` struct field with
/// an expression such as `&packed.unaligned as *const FieldType` creates an
/// intermediate unaligned reference before converting that to a raw pointer.
/// That this reference is temporary and immediately cast is inconsequential
/// as the compiler always expects references to be properly aligned.
/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
/// *undefined behavior* in your program.
///
/// Instead you must use the [`ptr::addr_of!`](addr_of) macro to
/// create the pointer. You may use that returned pointer together with this
/// function.
///
/// An example of what not to do and how this relates to `read_unaligned` is:
///
/// ```
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let packed = Packed {
///     _padding: 0x00,
///     unaligned: 0x01020304,
/// };
///
/// // Take the address of a 32-bit integer which is not aligned.
/// // In contrast to `&packed.unaligned as *const _`, this has no undefined behavior.
/// let unaligned = std::ptr::addr_of!(packed.unaligned);
///
/// let v = unsafe { std::ptr::read_unaligned(unaligned) };
/// assert_eq!(v, 0x01020304);
/// ```
///
/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
///
/// # Examples
///
/// Read a `usize` value from a byte buffer:
///
/// ```
/// use std::mem;
///
/// fn read_usize(x: &[u8]) -> usize {
///     assert!(x.len() >= mem::size_of::<usize>());
///
///     let ptr = x.as_ptr() as *const usize;
///
///     unsafe { ptr.read_unaligned() }
/// }
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
    let mut tmp = MaybeUninit::<T>::uninit();
    // SAFETY: the caller must guarantee that `src` is valid for reads.
    // `src` cannot overlap `tmp` because `tmp` was just allocated on
    // the stack as a separate allocated object.
    //
    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
    // to be properly initialized.
    unsafe {
        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
        tmp.assume_init()
    }
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// `write` does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care should be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been [`read`] from.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
///   case.
///
/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
pub const unsafe fn write<T>(dst: *mut T, src: T) {
    // We are calling the intrinsics directly to avoid function calls in the generated code
    // as `intrinsics::copy_nonoverlapping` is a wrapper function.
    extern "rust-intrinsic" {
        #[rustc_const_unstable(feature = "const_intrinsic_copy", issue = "80697")]
        fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
    }

    // SAFETY: the caller must guarantee that `dst` is valid for writes.
    // `dst` cannot overlap `src` because the caller has mutable access
    // to `dst` while `src` is owned by this function.
    unsafe {
        copy_nonoverlapping(&src as *const T, dst, 1);
        intrinsics::forget(src);
    }
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike [`write()`], the pointer may be unaligned.
///
/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been read with [`read_unaligned`].
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// Note that even if `T` has size `0`, the pointer must be non-null.
///
/// [valid]: self#safety
///
/// ## On `packed` structs
///
/// Attempting to create a raw pointer to an `unaligned` struct field with
/// an expression such as `&packed.unaligned as *const FieldType` creates an
/// intermediate unaligned reference before converting that to a raw pointer.
/// That this reference is temporary and immediately cast is inconsequential
/// as the compiler always expects references to be properly aligned.
/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
/// *undefined behavior* in your program.
///
/// Instead you must use the [`ptr::addr_of_mut!`](addr_of_mut)
/// macro to create the pointer. You may use that returned pointer together with
/// this function.
///
/// An example of how to do it and how this relates to `write_unaligned` is:
///
/// ```
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let mut packed: Packed = unsafe { std::mem::zeroed() };
///
/// // Take the address of a 32-bit integer which is not aligned.
/// // In contrast to `&packed.unaligned as *mut _`, this has no undefined behavior.
/// let unaligned = std::ptr::addr_of_mut!(packed.unaligned);
///
/// unsafe { std::ptr::write_unaligned(unaligned, 42) };
///
/// assert_eq!({packed.unaligned}, 42); // `{...}` forces copying the field instead of creating a reference.
/// ```
///
/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however
/// (as can be seen in the `assert_eq!` above).
///
/// # Examples
///
/// Write a `usize` value to a byte buffer:
///
/// ```
/// use std::mem;
///
/// fn write_usize(x: &mut [u8], val: usize) {
///     assert!(x.len() >= mem::size_of::<usize>());
///
///     let ptr = x.as_mut_ptr() as *mut usize;
///
///     unsafe { ptr.write_unaligned(val) }
/// }
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
    // SAFETY: the caller must guarantee that `dst` is valid for writes.
    // `dst` cannot overlap `src` because the caller has mutable access
    // to `dst` while `src` is owned by this function.
    unsafe {
        copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
        // We are calling the intrinsic directly to avoid function calls in the generated code.
        intrinsics::forget(src);
    }
}

/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `read_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
///
/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
///
/// [valid]: self#safety
/// [read-ownership]: read#ownership-of-the-returned-value
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `read_volatile` and any write operation to the same location
/// is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    if cfg!(debug_assertions) && !is_aligned_and_not_null(src) {
        // Not panicking to keep codegen impact smaller.
        abort();
    }
    // SAFETY: the caller must uphold the safety contract for `volatile_load`.
    unsafe { intrinsics::volatile_load(src) }
}

/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `write_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
///
/// [valid]: self#safety
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    if cfg!(debug_assertions) && !is_aligned_and_not_null(dst) {
        // Not panicking to keep codegen impact smaller.
        abort();
    }
    // SAFETY: the caller must uphold the safety contract for `volatile_store`.
    unsafe {
        intrinsics::volatile_store(dst, src);
    }
}

/// Align pointer `p`.
///
/// Calculate the offset (in terms of elements of size `stride`) that has to be applied
/// to pointer `p` so that pointer `p` would get aligned to `a`.
///
/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated
/// constants.
///
/// If we ever decide to make it possible to call the intrinsic with `a` that is not a
/// power-of-two, it will probably be more prudent to just change to a naive implementation rather
/// than trying to adapt this to accommodate that change.
///
/// Any questions go to @nagisa.
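///
/// A sketch of the contract being computed here, shown through the stable
/// `<*const T>::align_offset` wrapper (illustrative only):
///
/// ```
/// let x = [0u8; 16];
/// let p = x.as_ptr();
/// let off = p.align_offset(4);
/// // For a byte pointer, an offset aligning it to 4 always exists and is < 4.
/// assert!(off < 4);
/// assert_eq!((p as usize).wrapping_add(off) % 4, 0);
/// ```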
#[lang = "align_offset"]
pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
    // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
    // 1, where the method versions of these operations are not inlined.
    use intrinsics::{
        unchecked_shl, unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
    };

    /// Calculate the multiplicative modular inverse of `x` modulo `m`.
    ///
    /// This implementation is tailored for `align_offset` and has the following preconditions:
    ///
    /// * `m` is a power-of-two;
    /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
    ///
    /// Implementation of this function shall not panic. Ever.
    #[inline]
    unsafe fn mod_inv(x: usize, m: usize) -> usize {
        /// Multiplicative modular inverse table modulo 2⁴ = 16.
        ///
        /// Note that this table does not contain values where the inverse does not exist (i.e., for
        /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
        const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
        /// Modulo for which the `INV_TABLE_MOD_16` is intended.
        const INV_TABLE_MOD: usize = 16;
        /// INV_TABLE_MOD²
        const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;

        let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
        // SAFETY: `m` is required to be a power-of-two, hence non-zero.
        let m_minus_one = unsafe { unchecked_sub(m, 1) };
        if m <= INV_TABLE_MOD {
            table_inverse & m_minus_one
        } else {
            // We iterate "up" using the following formula:
            //
            // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
            //
            // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
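            //
            // (Why this works: if `xy = 1 + k·2ⁿ`, then `xy·(2 - xy) = (1 + k·2ⁿ)(1 - k·2ⁿ)
            // = 1 - k²·2²ⁿ ≡ 1 (mod 2²ⁿ)`, so each step doubles the number of correct low bits.)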
            let mut inverse = table_inverse;
            let mut going_mod = INV_TABLE_MOD_SQUARED;
            loop {
                // y = y * (2 - xy) mod n
                //
                // Note that we use wrapping operations here intentionally – the original formula
                // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
                // usize::MAX` instead, because we take the result `mod n` at the end
                // anyway.
                inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
                if going_mod >= m {
                    return inverse & m_minus_one;
                }
                going_mod = wrapping_mul(going_mod, going_mod);
            }
        }
    }

    let stride = mem::size_of::<T>();
    // SAFETY: `a` is a power-of-two, therefore non-zero.
    let a_minus_one = unsafe { unchecked_sub(a, 1) };
    if stride == 1 {
        // `stride == 1` case can be computed more simply through `-p (mod a)`, but doing so
        // inhibits LLVM's ability to select instructions like `lea`. Instead we compute
        //
        //    round_up_to_next_alignment(p, a) - p
        //
        // which distributes operations around the load-bearing, but pessimizing `and` sufficiently
        // for LLVM to be able to utilize the various optimizations it knows about.
        return wrapping_sub(
            wrapping_add(p as usize, a_minus_one) & wrapping_sub(0, a),
            p as usize,
        );
    }

    let pmoda = p as usize & a_minus_one;
    if pmoda == 0 {
        // Already aligned. Yay!
        return 0;
    } else if stride == 0 {
        // If the pointer is not aligned, and the element is zero-sized, then no amount of
        // elements will ever align the pointer.
        return usize::MAX;
    }

    let smoda = stride & a_minus_one;
    // SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
    let gcdpow = unsafe { intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)) };
    // SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a usize.
    let gcd = unsafe { unchecked_shl(1usize, gcdpow) };

    // SAFETY: gcd is always greater or equal to 1.
    if p as usize & unsafe { unchecked_sub(gcd, 1) } == 0 {
        // This branch solves for the following linear congruence equation:
        //
        // ` p + so = 0 mod a `
        //
        // `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the
        // requested alignment.
        //
        // With `g = gcd(a, s)`, and the above condition asserting that `p` is also divisible by
        // `g`, we can denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to:
        //
        // ` p' + s'o = 0 mod a' `
        // ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
        //
        // The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the second
        // term is "how does incrementing `p` by `s` bytes change the relative alignment of `p`" (again
        // divided by `g`).
        // Division by `g` is necessary to make the inverse well formed if `a` and `s` are not
        // co-prime.
        //
        // Furthermore, the result produced by this solution is not "minimal", so it is necessary
        // to take the result `o mod lcm(s, a)`. We can replace `lcm(s, a)` with just `a'`.

        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
        // `a`.
        let a2 = unsafe { unchecked_shr(a, gcdpow) };
        // SAFETY: `a2` is non-zero. Shifting `a` by `gcdpow` cannot shift out any of the set bits
        // in `a` (of which it has exactly one).
        let a2minus1 = unsafe { unchecked_sub(a2, 1) };
        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
        // `a`.
        let s2 = unsafe { unchecked_shr(smoda, gcdpow) };
        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
        // `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will
        // always be strictly greater than `(p % a) >> gcdpow`.
        let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(pmoda, gcdpow)) };
        // SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2`
        // because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`.
        return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1;
    }

    // Cannot be aligned at all.
    usize::MAX
}

/// Compares raw pointers for equality.
///
/// This is the same as using the `==` operator, but less generic:
/// the arguments have to be `*const T` raw pointers,
/// not anything that implements `PartialEq`.
///
/// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
/// by their address rather than comparing the values they point to
/// (which is what the `PartialEq for &T` implementation does).
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let five = 5;
/// let other_five = 5;
/// let five_ref = &five;
/// let same_five_ref = &five;
/// let other_five_ref = &other_five;
///
/// assert!(five_ref == same_five_ref);
/// assert!(ptr::eq(five_ref, same_five_ref));
///
/// assert!(five_ref == other_five_ref);
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
///
/// Slices are also compared by their length (fat pointers):
///
/// ```
/// let a = [1, 2, 3];
/// assert!(std::ptr::eq(&a[..3], &a[..3]));
/// assert!(!std::ptr::eq(&a[..2], &a[..3]));
/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
/// ```
///
/// Traits are also compared by their implementation:
///
/// ```
/// #[repr(transparent)]
/// struct Wrapper { member: i32 }
///
/// trait Trait {}
/// impl Trait for Wrapper {}
/// impl Trait for i32 {}
///
/// let wrapper = Wrapper { member: 10 };
///
/// // Pointers have equal addresses.
/// assert!(std::ptr::eq(
///     &wrapper as *const Wrapper as *const u8,
///     &wrapper.member as *const i32 as *const u8
/// ));
///
/// // Objects have equal addresses, but `Trait` has different implementations.
/// assert!(!std::ptr::eq(
///     &wrapper as &dyn Trait,
///     &wrapper.member as &dyn Trait,
/// ));
/// assert!(!std::ptr::eq(
///     &wrapper as &dyn Trait as *const dyn Trait,
///     &wrapper.member as &dyn Trait as *const dyn Trait,
/// ));
///
/// // Converting the reference to a `*const u8` compares by address.
/// assert!(std::ptr::eq(
///     &wrapper as &dyn Trait as *const dyn Trait as *const u8,
///     &wrapper.member as &dyn Trait as *const dyn Trait as *const u8,
/// ));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
    a == b
}

0731742a
XL
1354/// Hash a raw pointer.
1355///
1356/// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly)
1357/// by its address rather than the value it points to
1358/// (which is what the `Hash for &T` implementation does).
1359///
1360/// # Examples
1361///
1362/// ```
0731742a
XL
1363/// use std::collections::hash_map::DefaultHasher;
1364/// use std::hash::{Hash, Hasher};
1365/// use std::ptr;
1366///
1367/// let five = 5;
1368/// let five_ref = &five;
1369///
1370/// let mut hasher = DefaultHasher::new();
1371/// ptr::hash(five_ref, &mut hasher);
1372/// let actual = hasher.finish();
1373///
1374/// let mut hasher = DefaultHasher::new();
1375/// (five_ref as *const i32).hash(&mut hasher);
1376/// let expected = hasher.finish();
1377///
1378/// assert_eq!(actual, expected);
1379/// ```
532ac7d7 1380#[stable(feature = "ptr_hash", since = "1.35.0")]
0731742a 1381pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
48663c56 1382 use crate::hash::Hash;
0731742a
XL
1383 hashee.hash(into);
1384}

// Impls for function pointers
macro_rules! fnptr_impls_safety_abi {
    ($FnTy: ty, $($Arg: ident),*) => {
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialEq for $FnTy {
            #[inline]
            fn eq(&self, other: &Self) -> bool {
                *self as usize == *other as usize
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Eq for $FnTy {}

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialOrd for $FnTy {
            #[inline]
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                (*self as usize).partial_cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Ord for $FnTy {
            #[inline]
            fn cmp(&self, other: &Self) -> Ordering {
                (*self as usize).cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> hash::Hash for $FnTy {
            fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
                state.write_usize(*self as usize)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // HACK: The intermediate cast as usize is required for AVR
                // so that the address space of the source function pointer
                // is preserved in the final function pointer.
                //
                // https://github.com/avr-rust/rust/issues/143
                fmt::Pointer::fmt(&(*self as usize as *const ()), f)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // HACK: The intermediate cast as usize is required for AVR
                // so that the address space of the source function pointer
                // is preserved in the final function pointer.
                //
                // https://github.com/avr-rust/rust/issues/143
                fmt::Pointer::fmt(&(*self as usize as *const ()), f)
            }
        }
    }
}
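
// Editorial sketch (not part of the library): substituting a concrete arity,
// e.g. `fnptr_impls_safety_abi! { extern "C" fn(A) -> Ret, A }`, the macro
// above expands to impls of the form
//
//     #[stable(feature = "fnptr_impls", since = "1.4.0")]
//     impl<Ret, A> PartialEq for extern "C" fn(A) -> Ret {
//         #[inline]
//         fn eq(&self, other: &Self) -> bool {
//             *self as usize == *other as usize
//         }
//     }
//
// i.e., every comparison, ordering, and hashing impl funnels through the
// function pointer's address as a `usize`.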

macro_rules! fnptr_impls_args {
    ($($Arg: ident),+) => {
        fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
    };
    () => {
        // No variadic functions with 0 parameters
        fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
    };
}

fnptr_impls_args! {}
fnptr_impls_args! { A }
fnptr_impls_args! { A, B }
fnptr_impls_args! { A, B, C }
fnptr_impls_args! { A, B, C, D }
fnptr_impls_args! { A, B, C, D, E }
fnptr_impls_args! { A, B, C, D, E, F }
fnptr_impls_args! { A, B, C, D, E, F, G }
fnptr_impls_args! { A, B, C, D, E, F, G, H }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
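
// Editorial sketch of the behavior the impls generated above provide (not
// part of the library): function pointers compare, order, and hash by code
// address.
//
//     fn foo(x: i32) -> i32 { x }
//     fn bar(x: i32) -> i32 { x + 1 }
//     let f: fn(i32) -> i32 = foo;
//     let g: fn(i32) -> i32 = bar;
//     assert!(f == f);
//     // Whether `f == g` is false is not guaranteed in general: the compiler
//     // may merge or duplicate function bodies, changing their addresses.
//     println!("{:p}", f); // `fmt::Pointer` prints the address.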

/// Create a `const` raw pointer to a place, without creating an intermediate reference.
///
/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
/// and points to initialized data. For cases where those requirements do not hold,
/// raw pointers should be used instead. However, `&expr as *const _` creates a reference
/// before casting it to a raw pointer, and that reference is subject to the same rules
/// as all other references. This macro can create a raw pointer *without* creating
/// a reference first.
///
/// Note, however, that the `expr` in `addr_of!(expr)` is still subject to all
/// the usual rules. In particular, `addr_of!(*ptr::null())` is Undefined
/// Behavior because it dereferences a null pointer.
///
/// # Example
///
/// ```
/// use std::ptr;
///
/// #[repr(packed)]
/// struct Packed {
///     f1: u8,
///     f2: u16,
/// }
///
/// let packed = Packed { f1: 1, f2: 2 };
/// // `&packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
/// let raw_f2 = ptr::addr_of!(packed.f2);
/// assert_eq!(unsafe { raw_f2.read_unaligned() }, 2);
/// ```
///
/// See [`addr_of_mut`] for how to create a pointer to uninitialized data.
/// Doing that with `addr_of` would not make much sense since one could only
/// read the data, and that would be Undefined Behavior.
#[stable(feature = "raw_ref_macros", since = "1.51.0")]
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(raw_ref_op)]
pub macro addr_of($place:expr) {
    &raw const $place
}

/// Create a `mut` raw pointer to a place, without creating an intermediate reference.
///
/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
/// and points to initialized data. For cases where those requirements do not hold,
/// raw pointers should be used instead. However, `&mut expr as *mut _` creates a reference
/// before casting it to a raw pointer, and that reference is subject to the same rules
/// as all other references. This macro can create a raw pointer *without* creating
/// a reference first.
///
/// Note, however, that the `expr` in `addr_of_mut!(expr)` is still subject to all
/// the usual rules. In particular, `addr_of_mut!(*ptr::null_mut())` is Undefined
/// Behavior because it dereferences a null pointer.
///
/// # Examples
///
/// **Creating a pointer to unaligned data:**
///
/// ```
/// use std::ptr;
///
/// #[repr(packed)]
/// struct Packed {
///     f1: u8,
///     f2: u16,
/// }
///
/// let mut packed = Packed { f1: 1, f2: 2 };
/// // `&mut packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
/// let raw_f2 = ptr::addr_of_mut!(packed.f2);
/// unsafe { raw_f2.write_unaligned(42); }
/// assert_eq!({packed.f2}, 42); // `{...}` forces copying the field instead of creating a reference.
/// ```
///
/// **Creating a pointer to uninitialized data:**
///
/// ```rust
/// use std::{ptr, mem::MaybeUninit};
///
/// struct Demo {
///     field: bool,
/// }
///
/// let mut uninit = MaybeUninit::<Demo>::uninit();
/// // Taking `&mut (*uninit.as_mut_ptr()).field` would create a reference to an
/// // uninitialized `bool`, and thus be Undefined Behavior!
/// let f1_ptr = unsafe { ptr::addr_of_mut!((*uninit.as_mut_ptr()).field) };
/// unsafe { f1_ptr.write(true); }
/// let init = unsafe { uninit.assume_init() };
/// ```
#[stable(feature = "raw_ref_macros", since = "1.51.0")]
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(raw_ref_op)]
pub macro addr_of_mut($place:expr) {
    &raw mut $place
}