//! Manually manage memory through raw pointers.
//!
//! *[See also the pointer primitive types](pointer).*
//!
//! # Safety
//!
//! Many functions in this module take raw pointers as arguments and read from
//! or write to them. For this to be safe, these pointers must be *valid*.
//! Whether a pointer is valid depends on the operation it is used for
//! (read or write), and the extent of the memory that is accessed (i.e.,
//! how many bytes are read/written). Most functions use `*mut T` and `*const T`
//! to access only a single value, in which case the documentation omits the size
//! and implicitly assumes it to be `size_of::<T>()` bytes.
//!
//! The precise rules for validity are not determined yet. The guarantees that are
//! provided at this point are very minimal:
//!
//! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst].
//! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer
//!   be *dereferenceable*: the memory range of the given size starting at the pointer must all be
//!   within the bounds of a single allocated object. Note that in Rust,
//!   every (stack-allocated) variable is considered a separate allocated object.
//! * Even for operations of [size zero][zst], the pointer must not be pointing to deallocated
//!   memory, i.e., deallocation makes pointers invalid even for zero-sized operations. However,
//!   casting any non-zero integer *literal* to a pointer is valid for zero-sized accesses, even if
//!   some memory happens to exist at that address and gets deallocated. This corresponds to writing
//!   your own allocator: allocating zero-sized objects is not very hard. The canonical way to
//!   obtain a pointer that is valid for zero-sized accesses is [`NonNull::dangling`].
//! * All accesses performed by functions in this module are *non-atomic* in the sense
//!   of [atomic operations] used to synchronize between threads. This means it is
//!   undefined behavior to perform two concurrent accesses to the same location from different
//!   threads unless both accesses only read from memory. Notice that this explicitly
//!   includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot
//!   be used for inter-thread synchronization.
//! * The result of casting a reference to a pointer is valid for as long as the
//!   underlying object is live and no reference (just raw pointers) is used to
//!   access the same memory.
//!
//! These axioms, along with careful use of [`offset`] for pointer arithmetic,
//! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
//! will be provided eventually, as the [aliasing] rules are being determined. For more
//! information, see the [book] as well as the section in the reference devoted
//! to [undefined behavior][ub].
//!
//! ## Alignment
//!
//! Valid raw pointers as defined above are not necessarily properly aligned (where
//! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
//! aligned to `mem::align_of::<T>()`). However, most functions require their
//! arguments to be properly aligned, and will explicitly state
//! this requirement in their documentation. Notable exceptions to this are
//! [`read_unaligned`] and [`write_unaligned`].
//!
//! When a function requires proper alignment, it does so even if the access
//! has size 0, i.e., even if memory is not actually touched. Consider using
//! [`NonNull::dangling`] in such cases.
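//!
//! For illustration, a minimal sketch of the pattern named above: a dangling
//! but well-aligned pointer that is valid only for zero-sized accesses.
//!
//! ```
//! use std::ptr::NonNull;
//!
//! // `NonNull::dangling()` is non-null and aligned for `u64`, but points to no
//! // allocation: it must never actually be read from or written to.
//! let p: NonNull<u64> = NonNull::dangling();
//! assert_eq!(p.as_ptr() as usize % std::mem::align_of::<u64>(), 0);
//! assert!(!p.as_ptr().is_null());
//! ```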
//!
//! [aliasing]: ../../nomicon/aliasing.html
//! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
//! [ub]: ../../reference/behavior-considered-undefined.html
//! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts
//! [atomic operations]: crate::sync::atomic
//! [`offset`]: pointer::offset

#![stable(feature = "rust1", since = "1.0.0")]

use crate::cmp::Ordering;
use crate::fmt;
use crate::hash;
use crate::intrinsics::{self, abort, is_aligned_and_not_null};
use crate::mem::{self, MaybeUninit};

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::copy_nonoverlapping;

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::copy;

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::write_bytes;

#[cfg(not(bootstrap))]
mod metadata;
#[cfg(not(bootstrap))]
pub(crate) use metadata::PtrRepr;
#[cfg(not(bootstrap))]
#[unstable(feature = "ptr_metadata", issue = "81513")]
pub use metadata::{from_raw_parts, from_raw_parts_mut, metadata, DynMetadata, Pointee, Thin};

mod non_null;
#[stable(feature = "nonnull", since = "1.25.0")]
pub use non_null::NonNull;

mod unique;
#[unstable(feature = "ptr_internals", issue = "none")]
pub use unique::Unique;

mod const_ptr;
mod mut_ptr;

/// Executes the destructor (if any) of the pointed-to value.
///
/// This is semantically equivalent to calling [`ptr::read`] and discarding
/// the result, but has the following advantages:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
///   trait objects, because they can't be read out onto the stack and
///   dropped normally.
///
/// * It is friendlier to the optimizer to do this over [`ptr::read`] when
///   dropping manually allocated memory (e.g., in the implementations of
///   `Box`/`Rc`/`Vec`), as the compiler doesn't need to prove that it's
///   sound to elide the copy.
///
/// * It can be used to drop [pinned] data when `T` is not `repr(packed)`
///   (pinned data must not be moved before it is dropped).
///
/// Unaligned values cannot be dropped in place; they must be copied to an aligned
/// location first using [`ptr::read_unaligned`]. For packed structs, this move is
/// done automatically by the compiler. This means the fields of packed structs
/// are not dropped in-place.
///
/// [`ptr::read`]: self::read
/// [`ptr::read_unaligned`]: self::read_unaligned
/// [pinned]: crate::pin
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `to_drop` must be [valid] for both reads and writes.
///
/// * `to_drop` must be properly aligned.
///
/// * The value `to_drop` points to must be valid for dropping, which may mean it must uphold
///   additional invariants - this is type-dependent.
///
/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
/// foo` counts as a use because it will cause the value to be dropped
/// again. [`write()`] can be used to overwrite data without causing it to be
/// dropped.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Manually remove the last item from a vector:
///
/// ```
/// use std::ptr;
/// use std::rc::Rc;
///
/// let last = Rc::new(1);
/// let weak = Rc::downgrade(&last);
///
/// let mut v = vec![Rc::new(0), last];
///
/// unsafe {
///     // Get a raw pointer to the last element in `v`.
///     let ptr = &mut v[1] as *mut _;
///     // Shorten `v` to prevent the last item from being dropped. We do that first,
///     // to prevent issues if the `drop_in_place` below panics.
///     v.set_len(1);
///     // Without a call to `drop_in_place`, the last item would never be dropped,
///     // and the memory it manages would be leaked.
///     ptr::drop_in_place(ptr);
/// }
///
/// assert_eq!(v, &[0.into()]);
///
/// // Ensure that the last item was dropped.
/// assert!(weak.upgrade().is_none());
/// ```
///
/// Notice that the compiler performs this copy automatically when dropping packed structs,
/// i.e., you do not usually have to worry about such issues unless you call `drop_in_place`
/// manually.
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
    // Code here does not matter - this is replaced by the
    // real drop glue by the compiler.

    // SAFETY: see comment above
    unsafe { drop_in_place(to_drop) }
}

/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
#[rustc_const_stable(feature = "const_ptr_null", since = "1.32.0")]
pub const fn null<T>() -> *const T {
    0 as *const T
}

/// Creates a null mutable raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
#[rustc_const_stable(feature = "const_ptr_null", since = "1.32.0")]
pub const fn null_mut<T>() -> *mut T {
    0 as *mut T
}

#[cfg(bootstrap)]
#[repr(C)]
pub(crate) union Repr<T> {
    pub(crate) rust: *const [T],
    rust_mut: *mut [T],
    pub(crate) raw: FatPtr<T>,
}

#[cfg(bootstrap)]
#[repr(C)]
pub(crate) struct FatPtr<T> {
    data: *const T,
    pub(crate) len: usize,
}

#[cfg(bootstrap)]
// Manual impl needed to avoid `T: Clone` bound.
impl<T> Clone for FatPtr<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[cfg(bootstrap)]
// Manual impl needed to avoid `T: Copy` bound.
impl<T> Copy for FatPtr<T> {}

/// Forms a raw slice from a pointer and a length.
///
/// The `len` argument is the number of **elements**, not the number of bytes.
///
/// This function is safe, but actually using the return value is unsafe.
/// See the documentation of [`slice::from_raw_parts`] for slice safety requirements.
///
/// [`slice::from_raw_parts`]: crate::slice::from_raw_parts
///
/// # Examples
///
/// ```rust
/// use std::ptr;
///
/// // create a slice pointer when starting out with a pointer to the first element
/// let x = [5, 6, 7];
/// let raw_pointer = x.as_ptr();
/// let slice = ptr::slice_from_raw_parts(raw_pointer, 3);
/// assert_eq!(unsafe { &*slice }[2], 7);
/// ```
#[inline]
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
    #[cfg(bootstrap)]
    {
        // SAFETY: Accessing the value from the `Repr` union is safe since *const [T]
        // and FatPtr have the same memory layouts. Only std can make this
        // guarantee.
        unsafe { Repr { raw: FatPtr { data, len } }.rust }
    }
    #[cfg(not(bootstrap))]
    from_raw_parts(data.cast(), len)
}

/// Performs the same functionality as [`slice_from_raw_parts`], except that a
/// raw mutable slice is returned, as opposed to a raw immutable slice.
///
/// See the documentation of [`slice_from_raw_parts`] for more details.
///
/// This function is safe, but actually using the return value is unsafe.
/// See the documentation of [`slice::from_raw_parts_mut`] for slice safety requirements.
///
/// [`slice::from_raw_parts_mut`]: crate::slice::from_raw_parts_mut
///
/// # Examples
///
/// ```rust
/// use std::ptr;
///
/// let x = &mut [5, 6, 7];
/// let raw_pointer = x.as_mut_ptr();
/// let slice = ptr::slice_from_raw_parts_mut(raw_pointer, 3);
///
/// unsafe {
///     (*slice)[2] = 99; // assign a value at an index in the slice
/// };
///
/// assert_eq!(unsafe { &*slice }[2], 99);
/// ```
#[inline]
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
    #[cfg(bootstrap)]
    {
        // SAFETY: Accessing the value from the `Repr` union is safe since *mut [T]
        // and FatPtr have the same memory layouts
        unsafe { Repr { raw: FatPtr { data, len } }.rust_mut }
    }
    #[cfg(not(bootstrap))]
    from_raw_parts_mut(data.cast(), len)
}

/// Swaps the values at two mutable locations of the same type, without
/// deinitializing either.
///
/// But for the following two exceptions, this function is semantically
/// equivalent to [`mem::swap`]:
///
/// * It operates on raw pointers instead of references. When references are
///   available, [`mem::swap`] should be preferred.
///
/// * The two pointed-to values may overlap. If the values do overlap, then the
///   overlapping region of memory from `x` will be used. This is demonstrated
///   in the second example below.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * Both `x` and `y` must be [valid] for both reads and writes.
///
/// * Both `x` and `y` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointers must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Swapping two non-overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
///
/// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]`
/// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]`
///
/// unsafe {
///     ptr::swap(x, y);
///     assert_eq!([2, 3, 0, 1], array);
/// }
/// ```
///
/// Swapping two overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
///
/// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; // this is `array[0..3]`
/// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; // this is `array[1..4]`
///
/// unsafe {
///     ptr::swap(x, y);
///     // The indices `1..3` of the slice overlap between `x` and `y`.
///     // Reasonable results would be for them to be `[2, 3]`, so that indices `0..3` are
///     // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]`
///     // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`).
///     // This implementation is defined to make the latter choice.
///     assert_eq!([1, 0, 1, 2], array);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
    // Give ourselves some scratch space to work with.
    // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
    let mut tmp = MaybeUninit::<T>::uninit();

    // Perform the swap
    // SAFETY: the caller must guarantee that `x` and `y` are
    // valid for writes and properly aligned. `tmp` cannot be
    // overlapping either `x` or `y` because `tmp` was just allocated
    // on the stack as a separate allocated object.
    unsafe {
        copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
        copy(y, x, 1); // `x` and `y` may overlap
        copy_nonoverlapping(tmp.as_ptr(), y, 1);
    }
}

/// Swaps `count * size_of::<T>()` bytes between the two regions of memory
/// beginning at `x` and `y`. The two regions must *not* overlap.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * Both `x` and `y` must be [valid] for both reads and writes of `count *
///   size_of::<T>()` bytes.
///
/// * Both `x` and `y` must be properly aligned.
///
/// * The region of memory beginning at `x` with a size of `count *
///   size_of::<T>()` bytes must *not* overlap with the region of memory
///   beginning at `y` with the same size.
///
/// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`,
/// the pointers must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::ptr;
///
/// let mut x = [1, 2, 3, 4];
/// let mut y = [7, 8, 9];
///
/// unsafe {
///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
/// }
///
/// assert_eq!(x, [7, 8, 3, 4]);
/// assert_eq!(y, [1, 2, 9]);
/// ```
#[inline]
#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
    let x = x as *mut u8;
    let y = y as *mut u8;
    let len = mem::size_of::<T>() * count;
    // SAFETY: the caller must guarantee that `x` and `y` are
    // valid for writes and properly aligned.
    unsafe { swap_nonoverlapping_bytes(x, y, len) }
}

#[inline]
pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
    // For types smaller than the block optimization below,
    // just swap directly to avoid pessimizing codegen.
    if mem::size_of::<T>() < 32 {
        // SAFETY: the caller must guarantee that `x` and `y` are valid
        // for writes, properly aligned, and non-overlapping.
        unsafe {
            let z = read(x);
            copy_nonoverlapping(y, x, 1);
            write(y, z);
        }
    } else {
        // SAFETY: the caller must uphold the safety contract for `swap_nonoverlapping`.
        unsafe { swap_nonoverlapping(x, y, 1) };
    }
}

#[inline]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
const unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize SIMD to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox")), repr(simd))]
    struct Block(u64, u64, u64, u64);
    struct UnalignedBlock(u64, u64, u64, u64);

    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t = mem::MaybeUninit::<Block>::uninit();
        let t = t.as_mut_ptr() as *mut u8;

        // SAFETY: As `i < len`, and as the caller must guarantee that `x` and `y` are valid
        // for `len` bytes, `x + i` and `y + i` must be valid addresses, which fulfills the
        // safety contract for `add`.
        //
        // Also, the caller must guarantee that `x` and `y` are valid for writes, properly aligned,
        // and non-overlapping, which fulfills the safety contract for `copy_nonoverlapping`.
        unsafe {
            let x = x.add(i);
            let y = y.add(i);

            // Swap a block of bytes of x & y, using t as a temporary buffer
            // This should be optimized into efficient SIMD operations where available
            copy_nonoverlapping(x, t, block_size);
            copy_nonoverlapping(y, x, block_size);
            copy_nonoverlapping(t, y, block_size);
        }
        i += block_size;
    }

    if i < len {
        // Swap any remaining bytes
        let mut t = mem::MaybeUninit::<UnalignedBlock>::uninit();
        let rem = len - i;

        let t = t.as_mut_ptr() as *mut u8;

        // SAFETY: see previous safety comment.
        unsafe {
            let x = x.add(i);
            let y = y.add(i);

            copy_nonoverlapping(x, t, rem);
            copy_nonoverlapping(y, x, rem);
            copy_nonoverlapping(t, y, rem);
        }
    }
}

/// Moves `src` into the pointed-to location `dst`, returning the previous `dst` value.
///
/// Neither value is dropped.
///
/// This function is semantically equivalent to [`mem::replace`] except that it
/// operates on raw pointers instead of references. When references are
/// available, [`mem::replace`] should be preferred.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for both reads and writes.
///
/// * `dst` must be properly aligned.
///
/// * `dst` must point to a properly initialized value of type `T`.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let mut rust = vec!['b', 'u', 's', 't'];
///
/// // `mem::replace` would have the same effect without requiring the unsafe
/// // block.
/// let b = unsafe {
///     ptr::replace(&mut rust[0], 'r')
/// };
///
/// assert_eq!(b, 'b');
/// assert_eq!(rust, &['r', 'u', 's', 't']);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
    // SAFETY: the caller must guarantee that `dst` is valid to be
    // cast to a mutable reference (valid for writes, aligned, initialized),
    // and cannot overlap `src` since `dst` must point to a distinct
    // allocated object.
    unsafe {
        mem::swap(&mut *dst, &mut src); // cannot overlap
    }
    src
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
///   case.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
///
/// ## Ownership of the Returned Value
///
/// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
/// If `T` is not [`Copy`], using both the returned value and the value at
/// `*src` can violate memory safety. Note that assigning to `*src` counts as a
/// use because it will attempt to drop the value at `*src`.
///
/// [`write()`] can be used to overwrite data without causing it to be dropped.
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("foo");
/// unsafe {
///     // `s2` now points to the same underlying memory as `s`.
///     let mut s2: String = ptr::read(&s);
///
///     assert_eq!(s2, "foo");
///
///     // Assigning to `s2` causes its original value to be dropped. Beyond
///     // this point, `s` must no longer be used, as the underlying memory has
///     // been freed.
///     s2 = String::default();
///     assert_eq!(s2, "");
///
///     // Assigning to `s` would cause the old value to be dropped again,
///     // resulting in undefined behavior.
///     // s = String::from("bar"); // ERROR
///
///     // `ptr::write` can be used to overwrite a value without dropping it.
///     ptr::write(&mut s, String::from("bar"));
/// }
///
/// assert_eq!(s, "bar");
/// ```
///
/// [valid]: self#safety
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
pub const unsafe fn read<T>(src: *const T) -> T {
    let mut tmp = MaybeUninit::<T>::uninit();
    // SAFETY: the caller must guarantee that `src` is valid for reads.
    // `src` cannot overlap `tmp` because `tmp` was just allocated on
    // the stack as a separate allocated object.
    //
    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
    // to be properly initialized.
    unsafe {
        copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
        tmp.assume_init()
    }
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// Unlike [`read`], `read_unaligned` works with unaligned pointers.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
///
/// [read-ownership]: read#ownership-of-the-returned-value
/// [valid]: self#safety
///
/// ## On `packed` structs
///
/// It is currently impossible to create raw pointers to unaligned fields
/// of a packed struct.
///
/// Attempting to create a raw pointer to an `unaligned` struct field with
/// an expression such as `&packed.unaligned as *const FieldType` creates an
/// intermediate unaligned reference before converting that to a raw pointer.
/// That this reference is temporary and immediately cast is inconsequential
/// as the compiler always expects references to be properly aligned.
/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
/// *undefined behavior* in your program.
///
/// An example of what not to do and how this relates to `read_unaligned` is:
///
/// ```no_run
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let packed = Packed {
///     _padding: 0x00,
///     unaligned: 0x01020304,
/// };
///
/// let v = unsafe {
///     // Here we attempt to take the address of a 32-bit integer which is not aligned.
///     let unaligned =
///         // A temporary unaligned reference is created here which results in
///         // undefined behavior regardless of whether the reference is used or not.
///         &packed.unaligned
///         // Casting to a raw pointer doesn't help; the mistake already happened.
///         as *const u32;
///
///     let v = std::ptr::read_unaligned(unaligned);
///
///     v
/// };
/// ```
///
/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
// FIXME: Update docs based on outcome of RFC #2582 and friends.
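///
/// A correct way to obtain such a pointer is to take the field's address
/// without going through a reference, e.g. with the `addr_of!` macro defined
/// at the end of this module (a sketch of the repaired example above):
///
/// ```
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let packed = Packed { _padding: 0x00, unaligned: 0x01020304 };
/// // `addr_of!` produces the raw pointer directly; no unaligned reference is formed.
/// let unaligned = std::ptr::addr_of!(packed.unaligned);
/// let v = unsafe { std::ptr::read_unaligned(unaligned) };
/// assert_eq!(v, 0x01020304);
/// ```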
///
/// # Examples
///
/// Read a usize value from a byte buffer:
///
/// ```
/// use std::mem;
///
/// fn read_usize(x: &[u8]) -> usize {
///     assert!(x.len() >= mem::size_of::<usize>());
///
///     let ptr = x.as_ptr() as *const usize;
///
///     unsafe { ptr.read_unaligned() }
/// }
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
    let mut tmp = MaybeUninit::<T>::uninit();
    // SAFETY: the caller must guarantee that `src` is valid for reads.
    // `src` cannot overlap `tmp` because `tmp` was just allocated on
    // the stack as a separate allocated object.
    //
    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
    // to be properly initialized.
    unsafe {
        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
        tmp.assume_init()
    }
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// `write` does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care should be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been [`read`] from.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
///   case.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
    // We are calling the intrinsics directly to avoid function calls in the generated code
    // as `intrinsics::copy_nonoverlapping` is a wrapper function.
    extern "rust-intrinsic" {
        fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
    }

    // SAFETY: the caller must guarantee that `dst` is valid for writes.
    // `dst` cannot overlap `src` because the caller has mutable access
    // to `dst` while `src` is owned by this function.
    unsafe {
        copy_nonoverlapping(&src as *const T, dst, 1);
        intrinsics::forget(src);
    }
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike [`write()`], the pointer may be unaligned.
///
/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been read with [`read_unaligned`].
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
///
/// [valid]: self#safety
///
/// ## On `packed` structs
///
/// It is currently impossible to create raw pointers to unaligned fields
/// of a packed struct.
///
/// Attempting to create a raw pointer to an `unaligned` struct field with
/// an expression such as `&packed.unaligned as *const FieldType` creates an
/// intermediate unaligned reference before converting that to a raw pointer.
/// That this reference is temporary and immediately cast is inconsequential
/// as the compiler always expects references to be properly aligned.
/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
/// *undefined behavior* in your program.
///
/// An example of what not to do and how this relates to `write_unaligned` is:
///
/// ```no_run
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let v = 0x01020304;
/// let mut packed: Packed = unsafe { std::mem::zeroed() };
///
/// let v = unsafe {
///     // Here we attempt to take the address of a 32-bit integer which is not aligned.
///     let unaligned =
///         // A temporary unaligned reference is created here which results in
///         // undefined behavior regardless of whether the reference is used or not.
///         &mut packed.unaligned
///         // Casting to a raw pointer doesn't help; the mistake already happened.
///         as *mut u32;
///
///     std::ptr::write_unaligned(unaligned, v);
///
///     v
/// };
/// ```
///
/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
// FIXME: Update docs based on outcome of RFC #2582 and friends.
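///
/// As with reads, the field's address can be taken without an intermediate
/// reference, e.g. with the `addr_of_mut!` macro defined at the end of this
/// module (a sketch mirroring the example above):
///
/// ```
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let mut packed: Packed = unsafe { std::mem::zeroed() };
/// // `addr_of_mut!` produces the raw pointer directly; no unaligned reference is formed.
/// let unaligned = std::ptr::addr_of_mut!(packed.unaligned);
/// unsafe { std::ptr::write_unaligned(unaligned, 0x01020304) };
/// assert_eq!({packed.unaligned}, 0x01020304); // `{...}` copies the field out.
/// ```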
///
/// # Examples
///
/// Write a usize value to a byte buffer:
///
/// ```
/// use std::mem;
///
/// fn write_usize(x: &mut [u8], val: usize) {
///     assert!(x.len() >= mem::size_of::<usize>());
///
///     let ptr = x.as_mut_ptr() as *mut usize;
///
///     unsafe { ptr.write_unaligned(val) }
/// }
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "none")]
pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
    // SAFETY: the caller must guarantee that `dst` is valid for writes.
    // `dst` cannot overlap `src` because the caller has mutable access
    // to `dst` while `src` is owned by this function.
    unsafe {
        copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
        // We are calling the intrinsic directly to avoid function calls in the generated code.
        intrinsics::forget(src);
    }
}

/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `read_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
/// [read-ownership]: read#ownership-of-the-returned-value
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `read_volatile` and any write operation to the same location
/// is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    if cfg!(debug_assertions) && !is_aligned_and_not_null(src) {
        // Not panicking to keep codegen impact smaller.
        abort();
    }
    // SAFETY: the caller must uphold the safety contract for `volatile_load`.
    unsafe { intrinsics::volatile_load(src) }
}

/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `write_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    if cfg!(debug_assertions) && !is_aligned_and_not_null(dst) {
        // Not panicking to keep codegen impact smaller.
        abort();
    }
    // SAFETY: the caller must uphold the safety contract for `volatile_store`.
    unsafe {
        intrinsics::volatile_store(dst, src);
    }
}

/// Align pointer `p`.
///
/// Calculate the offset (in terms of elements of size `stride`) that has to be applied
/// to pointer `p` so that `p` becomes aligned to `a`.
///
/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated
/// constants.
///
/// If we ever decide to make it possible to call the intrinsic with `a` that is not a
/// power-of-two, it will probably be more prudent to just change to a naive implementation rather
/// than trying to adapt this to accommodate that change.
///
/// Any questions go to @nagisa.
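///
/// For illustration only: this is the lang item that backs the public
/// `<*const T>::align_offset` method, whose intended use looks like the
/// following sketch (not a doctest; this function itself is crate-private):
///
/// ```ignore (illustrative)
/// let x = [0u8; 16];
/// let ptr = x.as_ptr();
/// let offset = ptr.align_offset(mem::align_of::<u32>());
/// if offset < 16 - (mem::size_of::<u32>() - 1) {
///     // SAFETY: `ptr + offset` stays in bounds of `x` and is 4-byte aligned.
///     let aligned = unsafe { ptr.add(offset) };
/// }
/// ```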
#[lang = "align_offset"]
pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
    // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
    // 1, where the method versions of these operations are not inlined.
    use intrinsics::{
        unchecked_shl, unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
    };

    /// Calculate multiplicative modular inverse of `x` modulo `m`.
    ///
    /// This implementation is tailored for `align_offset` and has the following preconditions:
    ///
    /// * `m` is a power-of-two;
    /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
    ///
    /// Implementation of this function shall not panic. Ever.
    #[inline]
    unsafe fn mod_inv(x: usize, m: usize) -> usize {
        /// Multiplicative modular inverse table modulo 2⁴ = 16.
        ///
        /// Note that this table does not contain values where the inverse does not exist (i.e.,
        /// for `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
        const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
        /// Modulo for which the `INV_TABLE_MOD_16` is intended.
        const INV_TABLE_MOD: usize = 16;
        /// INV_TABLE_MOD²
        const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;

        let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
        // SAFETY: `m` is required to be a power-of-two, hence non-zero.
        let m_minus_one = unsafe { unchecked_sub(m, 1) };
        if m <= INV_TABLE_MOD {
            table_inverse & m_minus_one
        } else {
            // We iterate "up" using the following formula:
            //
            // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
            //
            // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
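            //
            // A worked instance of one lifting step (illustrative): the table gives
            // 3⁻¹ ≡ 11 (mod 2⁴), and 11 · (2 - 3·11) = -341 ≡ 171 (mod 2⁸),
            // which checks out: 3 · 171 = 513 ≡ 1 (mod 2⁸).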
            let mut inverse = table_inverse;
            let mut going_mod = INV_TABLE_MOD_SQUARED;
            loop {
                // y = y * (2 - xy) mod n
                //
                // Note that we use wrapping operations here intentionally – the original formula
                // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
                // usize::MAX` instead, because we take the result `mod n` at the end
                // anyway.
                inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
                if going_mod >= m {
                    return inverse & m_minus_one;
                }
                going_mod = wrapping_mul(going_mod, going_mod);
            }
        }
    }

    let stride = mem::size_of::<T>();
    // SAFETY: `a` is a power-of-two, therefore non-zero.
    let a_minus_one = unsafe { unchecked_sub(a, 1) };
    if stride == 1 {
        // `stride == 1` case can be computed more simply through `-p (mod a)`, but doing so
        // inhibits LLVM's ability to select instructions like `lea`. Instead we compute
        //
        //    round_up_to_next_alignment(p, a) - p
        //
        // which distributes operations around the load-bearing, but pessimizing `and` sufficiently
        // for LLVM to be able to utilize the various optimizations it knows about.
        return wrapping_sub(
            wrapping_add(p as usize, a_minus_one) & wrapping_sub(0, a),
            p as usize,
        );
    }

    let pmoda = p as usize & a_minus_one;
    if pmoda == 0 {
        // Already aligned. Yay!
        return 0;
    } else if stride == 0 {
        // If the pointer is not aligned, and the element is zero-sized, then no amount of
        // elements will ever align the pointer.
        return usize::MAX;
    }

    let smoda = stride & a_minus_one;
    // SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
    let gcdpow = unsafe { intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)) };
    // SAFETY: gcdpow has an upper-bound that's at most the number of bits in a usize.
    let gcd = unsafe { unchecked_shl(1usize, gcdpow) };

    // SAFETY: gcd is always greater or equal to 1.
    if p as usize & unsafe { unchecked_sub(gcd, 1) } == 0 {
        // This branch solves for the following linear congruence equation:
        //
        // ` p + so = 0 mod a `
        //
        // `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the
        // requested alignment.
        //
        // With `g = gcd(a, s)`, and the above condition asserting that `p` is also divisible by
        // `g`, we can denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to:
        //
        // ` p' + s'o = 0 mod a' `
        // ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
        //
        // The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the second
        // term is "how does incrementing `p` by `s` bytes change the relative alignment of `p`" (again
        // divided by `g`).
        // Division by `g` is necessary to make the inverse well formed if `a` and `s` are not
        // co-prime.
        //
        // Furthermore, the result produced by this solution is not "minimal", so it is necessary
        // to take the result `o mod lcm(s, a)`. We can replace `lcm(s, a)` with just a `a'`.
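        //
        // A worked instance (illustrative): for stride `s = 2`, alignment `a = 8` and
        // `p = 6`, we get `g = 2`, `a' = 4`, `s' = 1`, `p' = 3`, so
        // `o = (4 - 3) * (1⁻¹ mod 4) = 1`; indeed `p + s * o = 8` is 8-byte aligned.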

        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
        // `a`.
        let a2 = unsafe { unchecked_shr(a, gcdpow) };
        // SAFETY: `a2` is non-zero. Shifting `a` by `gcdpow` cannot shift out any of the set bits
        // in `a` (of which it has exactly one).
        let a2minus1 = unsafe { unchecked_sub(a2, 1) };
        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
        // `a`.
        let s2 = unsafe { unchecked_shr(smoda, gcdpow) };
        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
        // `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will
        // always be strictly greater than `(p % a) >> gcdpow`.
        let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(pmoda, gcdpow)) };
        // SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2`
        // because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`.
        return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1;
    }

    // Cannot be aligned at all.
    usize::MAX
}

/// Compares raw pointers for equality.
///
/// This is the same as using the `==` operator, but less generic:
/// the arguments have to be `*const T` raw pointers,
/// not anything that implements `PartialEq`.
///
/// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
/// by their address rather than comparing the values they point to
/// (which is what the `PartialEq for &T` implementation does).
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let five = 5;
/// let other_five = 5;
/// let five_ref = &five;
/// let same_five_ref = &five;
/// let other_five_ref = &other_five;
///
/// assert!(five_ref == same_five_ref);
/// assert!(ptr::eq(five_ref, same_five_ref));
///
/// assert!(five_ref == other_five_ref);
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
///
/// Slices are also compared by their length (fat pointers):
///
/// ```
/// let a = [1, 2, 3];
/// assert!(std::ptr::eq(&a[..3], &a[..3]));
/// assert!(!std::ptr::eq(&a[..2], &a[..3]));
/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
/// ```
///
/// Traits are also compared by their implementation:
///
/// ```
/// #[repr(transparent)]
/// struct Wrapper { member: i32 }
///
/// trait Trait {}
/// impl Trait for Wrapper {}
/// impl Trait for i32 {}
///
/// let wrapper = Wrapper { member: 10 };
///
/// // Pointers have equal addresses.
/// assert!(std::ptr::eq(
///     &wrapper as *const Wrapper as *const u8,
///     &wrapper.member as *const i32 as *const u8
/// ));
///
/// // Objects have equal addresses, but `Trait` has different implementations.
/// assert!(!std::ptr::eq(
///     &wrapper as &dyn Trait,
///     &wrapper.member as &dyn Trait,
/// ));
/// assert!(!std::ptr::eq(
///     &wrapper as &dyn Trait as *const dyn Trait,
///     &wrapper.member as &dyn Trait as *const dyn Trait,
/// ));
///
/// // Converting the reference to a `*const u8` compares by address.
/// assert!(std::ptr::eq(
///     &wrapper as &dyn Trait as *const dyn Trait as *const u8,
///     &wrapper.member as &dyn Trait as *const dyn Trait as *const u8,
/// ));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
    a == b
}

/// Hash a raw pointer.
///
/// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly)
/// by its address rather than the value it points to
/// (which is what the `Hash for &T` implementation does).
///
/// # Examples
///
/// ```
/// use std::collections::hash_map::DefaultHasher;
/// use std::hash::{Hash, Hasher};
/// use std::ptr;
///
/// let five = 5;
/// let five_ref = &five;
///
/// let mut hasher = DefaultHasher::new();
/// ptr::hash(five_ref, &mut hasher);
/// let actual = hasher.finish();
///
/// let mut hasher = DefaultHasher::new();
/// (five_ref as *const i32).hash(&mut hasher);
/// let expected = hasher.finish();
///
/// assert_eq!(actual, expected);
/// ```
#[stable(feature = "ptr_hash", since = "1.35.0")]
pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
    use crate::hash::Hash;
    hashee.hash(into);
}

// Impls for function pointers
macro_rules! fnptr_impls_safety_abi {
    ($FnTy: ty, $($Arg: ident),*) => {
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialEq for $FnTy {
            #[inline]
            fn eq(&self, other: &Self) -> bool {
                *self as usize == *other as usize
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Eq for $FnTy {}

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialOrd for $FnTy {
            #[inline]
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                (*self as usize).partial_cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Ord for $FnTy {
            #[inline]
            fn cmp(&self, other: &Self) -> Ordering {
                (*self as usize).cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> hash::Hash for $FnTy {
            fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
                state.write_usize(*self as usize)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // HACK: The intermediate cast as usize is required for AVR
                // so that the address space of the source function pointer
                // is preserved in the final function pointer.
                //
                // https://github.com/avr-rust/rust/issues/143
                fmt::Pointer::fmt(&(*self as usize as *const ()), f)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // HACK: The intermediate cast as usize is required for AVR
                // so that the address space of the source function pointer
                // is preserved in the final function pointer.
                //
                // https://github.com/avr-rust/rust/issues/143
                fmt::Pointer::fmt(&(*self as usize as *const ()), f)
            }
        }
    }
}

macro_rules! fnptr_impls_args {
    ($($Arg: ident),+) => {
        fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
    };
    () => {
        // No variadic functions with 0 parameters
        fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
    };
}

fnptr_impls_args! {}
fnptr_impls_args! { A }
fnptr_impls_args! { A, B }
fnptr_impls_args! { A, B, C }
fnptr_impls_args! { A, B, C, D }
fnptr_impls_args! { A, B, C, D, E }
fnptr_impls_args! { A, B, C, D, E, F }
fnptr_impls_args! { A, B, C, D, E, F, G }
fnptr_impls_args! { A, B, C, D, E, F, G, H }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
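
// For illustration, the impls generated above are what make plain function
// pointers comparable, orderable, and hashable by their address, e.g.
// (a minimal sketch):
//
//     fn inc(x: i32) -> i32 { x + 1 }
//     let p: fn(i32) -> i32 = inc;
//     assert!(p == inc as fn(i32) -> i32);
//     println!("{:p}", p); // formatted via the `fmt::Pointer` impl above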

/// Create a `const` raw pointer to a place, without creating an intermediate reference.
///
/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
/// and points to initialized data. For cases where those requirements do not hold,
/// raw pointers should be used instead. However, `&expr as *const _` creates a reference
/// before casting it to a raw pointer, and that reference is subject to the same rules
/// as all other references. This macro can create a raw pointer *without* creating
/// a reference first.
///
/// # Example
///
/// ```
/// use std::ptr;
///
/// #[repr(packed)]
/// struct Packed {
///     f1: u8,
///     f2: u16,
/// }
///
/// let packed = Packed { f1: 1, f2: 2 };
/// // `&packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
/// let raw_f2 = ptr::addr_of!(packed.f2);
/// assert_eq!(unsafe { raw_f2.read_unaligned() }, 2);
/// ```
#[stable(feature = "raw_ref_macros", since = "1.51.0")]
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(raw_ref_op)]
pub macro addr_of($place:expr) {
    &raw const $place
}

/// Create a `mut` raw pointer to a place, without creating an intermediate reference.
///
/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
/// and points to initialized data. For cases where those requirements do not hold,
/// raw pointers should be used instead. However, `&mut expr as *mut _` creates a reference
/// before casting it to a raw pointer, and that reference is subject to the same rules
/// as all other references. This macro can create a raw pointer *without* creating
/// a reference first.
///
/// # Example
///
/// ```
/// use std::ptr;
///
/// #[repr(packed)]
/// struct Packed {
///     f1: u8,
///     f2: u16,
/// }
///
/// let mut packed = Packed { f1: 1, f2: 2 };
/// // `&mut packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
/// let raw_f2 = ptr::addr_of_mut!(packed.f2);
/// unsafe { raw_f2.write_unaligned(42); }
/// assert_eq!({packed.f2}, 42); // `{...}` forces copying the field instead of creating a reference.
/// ```
#[stable(feature = "raw_ref_macros", since = "1.51.0")]
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(raw_ref_op)]
pub macro addr_of_mut($place:expr) {
    &raw mut $place
}