//! Manually manage memory through raw pointers.
//!
//! *[See also the pointer primitive types](pointer).*
//!
//! # Safety
//!
//! Many functions in this module take raw pointers as arguments and read from
//! or write to them. For this to be safe, these pointers must be *valid*.
//! Whether a pointer is valid depends on the operation it is used for
//! (read or write), and the extent of the memory that is accessed (i.e.,
//! how many bytes are read/written). Most functions use `*mut T` and `*const T`
//! to access only a single value, in which case the documentation omits the size
//! and implicitly assumes it to be `size_of::<T>()` bytes.
//!
//! The precise rules for validity are not determined yet. The guarantees that are
//! provided at this point are very minimal:
//!
//! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst].
//! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer
//!   be *dereferenceable*: the memory range of the given size starting at the pointer must all be
//!   within the bounds of a single allocated object. Note that in Rust,
//!   every (stack-allocated) variable is considered a separate allocated object.
//! * Even for operations of [size zero][zst], the pointer must not be pointing to deallocated
//!   memory, i.e., deallocation makes pointers invalid even for zero-sized operations. However,
//!   casting any non-zero integer *literal* to a pointer is valid for zero-sized accesses, even if
//!   some memory happens to exist at that address and gets deallocated. This corresponds to writing
//!   your own allocator: allocating zero-sized objects is not very hard. The canonical way to
//!   obtain a pointer that is valid for zero-sized accesses is [`NonNull::dangling`].
//! * All accesses performed by functions in this module are *non-atomic* in the sense
//!   of [atomic operations] used to synchronize between threads. This means it is
//!   undefined behavior to perform two concurrent accesses to the same location from different
//!   threads unless both accesses only read from memory. Notice that this explicitly
//!   includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot
//!   be used for inter-thread synchronization.
//! * The result of casting a reference to a pointer is valid for as long as the
//!   underlying object is live and no reference (just raw pointers) is used to
//!   access the same memory.
//!
//! These axioms, along with careful use of [`offset`] for pointer arithmetic,
//! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
//! will be provided eventually, as the [aliasing] rules are being determined. For more
//! information, see the [book] as well as the section in the reference devoted
//! to [undefined behavior][ub].
//!
//! ## Alignment
//!
//! Valid raw pointers as defined above are not necessarily properly aligned (where
//! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
//! aligned to `mem::align_of::<T>()`). However, most functions require their
//! arguments to be properly aligned, and will explicitly state
//! this requirement in their documentation. Notable exceptions to this are
//! [`read_unaligned`] and [`write_unaligned`].
//!
//! When a function requires proper alignment, it does so even if the access
//! has size 0, i.e., even if memory is not actually touched. Consider using
//! [`NonNull::dangling`] in such cases.
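//!
//! For illustration, a minimal sketch of obtaining a pointer that is valid
//! (and aligned) for zero-sized accesses without allocating anything:
//!
//! ```
//! use std::ptr::NonNull;
//!
//! // `NonNull::dangling` returns a non-null, well-aligned pointer that must
//! // not actually be read from or written to, but is fine for zero-sized
//! // operations and satisfies the alignment requirement described above.
//! let p: NonNull<u32> = NonNull::dangling();
//! assert!(!p.as_ptr().is_null());
//! assert_eq!(p.as_ptr() as usize % std::mem::align_of::<u32>(), 0);
//! ```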
//!
//! [aliasing]: ../../nomicon/aliasing.html
//! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
//! [ub]: ../../reference/behavior-considered-undefined.html
//! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts
//! [atomic operations]: crate::sync::atomic
//! [`offset`]: pointer::offset

#![stable(feature = "rust1", since = "1.0.0")]

use crate::cmp::Ordering;
use crate::fmt;
use crate::hash;
use crate::intrinsics::{self, abort, is_aligned_and_not_null};
use crate::mem::{self, MaybeUninit};

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::copy_nonoverlapping;

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::copy;

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::write_bytes;

#[cfg(not(bootstrap))]
mod metadata;
#[cfg(not(bootstrap))]
pub(crate) use metadata::PtrRepr;
#[cfg(not(bootstrap))]
#[unstable(feature = "ptr_metadata", issue = "81513")]
pub use metadata::{from_raw_parts, from_raw_parts_mut, metadata, DynMetadata, Pointee, Thin};

mod non_null;
#[stable(feature = "nonnull", since = "1.25.0")]
pub use non_null::NonNull;

mod unique;
#[unstable(feature = "ptr_internals", issue = "none")]
pub use unique::Unique;

mod const_ptr;
mod mut_ptr;
/// Executes the destructor (if any) of the pointed-to value.
///
/// This is semantically equivalent to calling [`ptr::read`] and discarding
/// the result, but has the following advantages:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
///   trait objects, because they can't be read out onto the stack and
///   dropped normally.
///
/// * It is friendlier to the optimizer to do this over [`ptr::read`] when
///   dropping manually allocated memory (e.g., in the implementations of
///   `Box`/`Rc`/`Vec`), as the compiler doesn't need to prove that it's
///   sound to elide the copy.
///
/// * It can be used to drop [pinned] data when `T` is not `repr(packed)`
///   (pinned data must not be moved before it is dropped).
///
/// Unaligned values cannot be dropped in place, they must be copied to an aligned
/// location first using [`ptr::read_unaligned`]. For packed structs, this move is
/// done automatically by the compiler. This means the fields of packed structs
/// are not dropped in-place.
///
/// [`ptr::read`]: self::read
/// [`ptr::read_unaligned`]: self::read_unaligned
/// [pinned]: crate::pin
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `to_drop` must be [valid] for both reads and writes.
///
/// * `to_drop` must be properly aligned.
///
/// * The value `to_drop` points to must be valid for dropping, which may mean it must uphold
///   additional invariants - this is type-dependent.
///
/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
/// foo` counts as a use because it will cause the value to be dropped
/// again. [`write()`] can be used to overwrite data without causing it to be
/// dropped.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Manually remove the last item from a vector:
///
/// ```
/// use std::ptr;
/// use std::rc::Rc;
///
/// let last = Rc::new(1);
/// let weak = Rc::downgrade(&last);
///
/// let mut v = vec![Rc::new(0), last];
///
/// unsafe {
///     // Get a raw pointer to the last element in `v`.
///     let ptr = &mut v[1] as *mut _;
///     // Shorten `v` to prevent the last item from being dropped. We do that first,
///     // to prevent issues if the `drop_in_place` below panics.
///     v.set_len(1);
///     // Without a call to `drop_in_place`, the last item would never be dropped,
///     // and the memory it manages would be leaked.
///     ptr::drop_in_place(ptr);
/// }
///
/// assert_eq!(v, &[0.into()]);
///
/// // Ensure that the last item was dropped.
/// assert!(weak.upgrade().is_none());
/// ```
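///
/// Dropping a trait object in place, which [`ptr::read`] cannot do because the
/// value is unsized (a minimal sketch; `ManuallyDrop` prevents a double drop):
///
/// ```
/// use std::mem::ManuallyDrop;
/// use std::ptr;
///
/// let mut s = ManuallyDrop::new(String::from("hello"));
/// // Unsize the raw pointer to a trait object; the vtable carries the drop glue.
/// let p: *mut dyn ToString = &mut *s as *mut String;
/// unsafe {
///     ptr::drop_in_place(p);
/// }
/// // `s` must not be used from here on; its destructor has already run.
/// ```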
///
/// Notice that the compiler performs this copy automatically when dropping packed structs,
/// i.e., you do not usually have to worry about such issues unless you call `drop_in_place`
/// manually.
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
    // Code here does not matter - this is replaced by the
    // real drop glue by the compiler.

    // SAFETY: see comment above
    unsafe { drop_in_place(to_drop) }
}

/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
#[rustc_const_stable(feature = "const_ptr_null", since = "1.32.0")]
pub const fn null<T>() -> *const T {
    0 as *const T
}

/// Creates a null mutable raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
#[rustc_const_stable(feature = "const_ptr_null", since = "1.32.0")]
pub const fn null_mut<T>() -> *mut T {
    0 as *mut T
}

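// During bootstrap, raw slice pointers are assembled through this union: a
// `*const [T]` has the same memory layout as `FatPtr<T>`, i.e. a data pointer
// paired with an element count.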
#[cfg(bootstrap)]
#[repr(C)]
pub(crate) union Repr<T> {
    pub(crate) rust: *const [T],
    rust_mut: *mut [T],
    pub(crate) raw: FatPtr<T>,
}

#[cfg(bootstrap)]
#[repr(C)]
pub(crate) struct FatPtr<T> {
    data: *const T,
    pub(crate) len: usize,
}

#[cfg(bootstrap)]
// Manual impl needed to avoid `T: Clone` bound.
impl<T> Clone for FatPtr<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[cfg(bootstrap)]
// Manual impl needed to avoid `T: Copy` bound.
impl<T> Copy for FatPtr<T> {}

/// Forms a raw slice from a pointer and a length.
///
/// The `len` argument is the number of **elements**, not the number of bytes.
///
/// This function is safe, but actually using the return value is unsafe.
/// See the documentation of [`slice::from_raw_parts`] for slice safety requirements.
///
/// [`slice::from_raw_parts`]: crate::slice::from_raw_parts
///
/// # Examples
///
/// ```rust
/// use std::ptr;
///
/// // create a slice pointer when starting out with a pointer to the first element
/// let x = [5, 6, 7];
/// let raw_pointer = x.as_ptr();
/// let slice = ptr::slice_from_raw_parts(raw_pointer, 3);
/// assert_eq!(unsafe { &*slice }[2], 7);
/// ```
#[inline]
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
    #[cfg(bootstrap)]
    {
        // SAFETY: Accessing the value from the `Repr` union is safe since *const [T]
        // and FatPtr have the same memory layouts. Only std can make this
        // guarantee.
        unsafe { Repr { raw: FatPtr { data, len } }.rust }
    }
    #[cfg(not(bootstrap))]
    from_raw_parts(data.cast(), len)
}

/// Performs the same functionality as [`slice_from_raw_parts`], except that a
/// raw mutable slice is returned, as opposed to a raw immutable slice.
///
/// See the documentation of [`slice_from_raw_parts`] for more details.
///
/// This function is safe, but actually using the return value is unsafe.
/// See the documentation of [`slice::from_raw_parts_mut`] for slice safety requirements.
///
/// [`slice::from_raw_parts_mut`]: crate::slice::from_raw_parts_mut
///
/// # Examples
///
/// ```rust
/// use std::ptr;
///
/// let x = &mut [5, 6, 7];
/// let raw_pointer = x.as_mut_ptr();
/// let slice = ptr::slice_from_raw_parts_mut(raw_pointer, 3);
///
/// unsafe {
///     (*slice)[2] = 99; // assign a value at an index in the slice
/// };
///
/// assert_eq!(unsafe { &*slice }[2], 99);
/// ```
#[inline]
#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
    #[cfg(bootstrap)]
    {
        // SAFETY: Accessing the value from the `Repr` union is safe since *mut [T]
        // and FatPtr have the same memory layouts
        unsafe { Repr { raw: FatPtr { data, len } }.rust_mut }
    }
    #[cfg(not(bootstrap))]
    from_raw_parts_mut(data.cast(), len)
}

/// Swaps the values at two mutable locations of the same type, without
/// deinitializing either.
///
/// Aside from the following two exceptions, this function is semantically
/// equivalent to [`mem::swap`]:
///
/// * It operates on raw pointers instead of references. When references are
///   available, [`mem::swap`] should be preferred.
///
/// * The two pointed-to values may overlap. If the values do overlap, then the
///   overlapping region of memory from `x` will be used. This is demonstrated
///   in the second example below.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * Both `x` and `y` must be [valid] for both reads and writes.
///
/// * Both `x` and `y` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointers must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Swapping two non-overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
///
/// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]`
/// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]`
///
/// unsafe {
///     ptr::swap(x, y);
///     assert_eq!([2, 3, 0, 1], array);
/// }
/// ```
///
/// Swapping two overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
///
/// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; // this is `array[0..3]`
/// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; // this is `array[1..4]`
///
/// unsafe {
///     ptr::swap(x, y);
///     // The indices `1..3` of the slice overlap between `x` and `y`.
///     // Reasonable results would be for them to be `[2, 3]`, so that indices `0..3` are
///     // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]`
///     // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`).
///     // This implementation is defined to make the latter choice.
///     assert_eq!([1, 0, 1, 2], array);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
    // Give ourselves some scratch space to work with.
    // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
    let mut tmp = MaybeUninit::<T>::uninit();

    // Perform the swap
    // SAFETY: the caller must guarantee that `x` and `y` are
    // valid for writes and properly aligned. `tmp` cannot be
    // overlapping either `x` or `y` because `tmp` was just allocated
    // on the stack as a separate allocated object.
    unsafe {
        copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
        copy(y, x, 1); // `x` and `y` may overlap
        copy_nonoverlapping(tmp.as_ptr(), y, 1);
    }
}

/// Swaps `count * size_of::<T>()` bytes between the two regions of memory
/// beginning at `x` and `y`. The two regions must *not* overlap.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * Both `x` and `y` must be [valid] for both reads and writes of `count *
///   size_of::<T>()` bytes.
///
/// * Both `x` and `y` must be properly aligned.
///
/// * The region of memory beginning at `x` with a size of `count *
///   size_of::<T>()` bytes must *not* overlap with the region of memory
///   beginning at `y` with the same size.
///
/// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`,
/// the pointers must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::ptr;
///
/// let mut x = [1, 2, 3, 4];
/// let mut y = [7, 8, 9];
///
/// unsafe {
///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
/// }
///
/// assert_eq!(x, [7, 8, 3, 4]);
/// assert_eq!(y, [1, 2, 9]);
/// ```
#[inline]
#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
    let x = x as *mut u8;
    let y = y as *mut u8;
    let len = mem::size_of::<T>() * count;
    // SAFETY: the caller must guarantee that `x` and `y` are
    // valid for writes and properly aligned.
    unsafe { swap_nonoverlapping_bytes(x, y, len) }
}

#[inline]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub(crate) const unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
    // For types smaller than the block optimization below,
    // just swap directly to avoid pessimizing codegen.
    if mem::size_of::<T>() < 32 {
        // SAFETY: the caller must guarantee that `x` and `y` are valid
        // for writes, properly aligned, and non-overlapping.
        unsafe {
            let z = read(x);
            copy_nonoverlapping(y, x, 1);
            write(y, z);
        }
    } else {
        // SAFETY: the caller must uphold the safety contract for `swap_nonoverlapping`.
        unsafe { swap_nonoverlapping(x, y, 1) };
    }
}

#[inline]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
const unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox")), repr(simd))]
    struct Block(u64, u64, u64, u64);
    struct UnalignedBlock(u64, u64, u64, u64);

    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t = mem::MaybeUninit::<Block>::uninit();
        let t = t.as_mut_ptr() as *mut u8;

        // SAFETY: As `i < len`, and as the caller must guarantee that `x` and `y` are valid
        // for `len` bytes, `x + i` and `y + i` must be valid addresses, which fulfills the
        // safety contract for `add`.
        //
        // Also, the caller must guarantee that `x` and `y` are valid for writes, properly aligned,
        // and non-overlapping, which fulfills the safety contract for `copy_nonoverlapping`.
        unsafe {
            let x = x.add(i);
            let y = y.add(i);

            // Swap a block of bytes of x & y, using t as a temporary buffer
            // This should be optimized into efficient SIMD operations where available
            copy_nonoverlapping(x, t, block_size);
            copy_nonoverlapping(y, x, block_size);
            copy_nonoverlapping(t, y, block_size);
        }
        i += block_size;
    }

    if i < len {
        // Swap any remaining bytes
        let mut t = mem::MaybeUninit::<UnalignedBlock>::uninit();
        let rem = len - i;

        let t = t.as_mut_ptr() as *mut u8;

        // SAFETY: see previous safety comment.
        unsafe {
            let x = x.add(i);
            let y = y.add(i);

            copy_nonoverlapping(x, t, rem);
            copy_nonoverlapping(y, x, rem);
            copy_nonoverlapping(t, y, rem);
        }
    }
}

/// Moves `src` into the pointed `dst`, returning the previous `dst` value.
///
/// Neither value is dropped.
///
/// This function is semantically equivalent to [`mem::replace`] except that it
/// operates on raw pointers instead of references. When references are
/// available, [`mem::replace`] should be preferred.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for both reads and writes.
///
/// * `dst` must be properly aligned.
///
/// * `dst` must point to a properly initialized value of type `T`.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let mut rust = vec!['b', 'u', 's', 't'];
///
/// // `mem::replace` would have the same effect without requiring the unsafe
/// // block.
/// let b = unsafe {
///     ptr::replace(&mut rust[0], 'r')
/// };
///
/// assert_eq!(b, 'b');
/// assert_eq!(rust, &['r', 'u', 's', 't']);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_replace", issue = "83164")]
pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
    // SAFETY: the caller must guarantee that `dst` is valid to be
    // cast to a mutable reference (valid for writes, aligned, initialized),
    // and cannot overlap `src` since `dst` must point to a distinct
    // allocated object.
    unsafe {
        mem::swap(&mut *dst, &mut src); // cannot overlap
    }
    src
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
///   case.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
///
/// ## Ownership of the Returned Value
///
/// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
/// If `T` is not [`Copy`], using both the returned value and the value at
/// `*src` can violate memory safety. Note that assigning to `*src` counts as a
/// use because it will attempt to drop the value at `*src`.
///
/// [`write()`] can be used to overwrite data without causing it to be dropped.
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("foo");
/// unsafe {
///     // `s2` now points to the same underlying memory as `s`.
///     let mut s2: String = ptr::read(&s);
///
///     assert_eq!(s2, "foo");
///
///     // Assigning to `s2` causes its original value to be dropped. Beyond
///     // this point, `s` must no longer be used, as the underlying memory has
///     // been freed.
///     s2 = String::default();
///     assert_eq!(s2, "");
///
///     // Assigning to `s` would cause the old value to be dropped again,
///     // resulting in undefined behavior.
///     // s = String::from("bar"); // ERROR
///
///     // `ptr::write` can be used to overwrite a value without dropping it.
///     ptr::write(&mut s, String::from("bar"));
/// }
///
/// assert_eq!(s, "bar");
/// ```
///
/// [valid]: self#safety
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
pub const unsafe fn read<T>(src: *const T) -> T {
    let mut tmp = MaybeUninit::<T>::uninit();
    // SAFETY: the caller must guarantee that `src` is valid for reads.
    // `src` cannot overlap `tmp` because `tmp` was just allocated on
    // the stack as a separate allocated object.
    //
    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
    // to be properly initialized.
    unsafe {
        copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
        tmp.assume_init()
    }
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// Unlike [`read`], `read_unaligned` works with unaligned pointers.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
///
/// [read-ownership]: read#ownership-of-the-returned-value
/// [valid]: self#safety
///
/// ## On `packed` structs
///
/// Raw pointers to unaligned fields of a packed struct cannot be soundly
/// created with a plain `&` expression; the [`addr_of!`](addr_of) macro exists for this purpose.
///
/// Attempting to create a raw pointer to an `unaligned` struct field with
/// an expression such as `&packed.unaligned as *const FieldType` creates an
/// intermediate unaligned reference before converting that to a raw pointer.
/// That this reference is temporary and immediately cast is inconsequential
/// as the compiler always expects references to be properly aligned.
/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
/// *undefined behavior* in your program.
///
/// An example of what not to do and how this relates to `read_unaligned` is:
///
/// ```no_run
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let packed = Packed {
///     _padding: 0x00,
///     unaligned: 0x01020304,
/// };
///
/// let v = unsafe {
///     // Here we attempt to take the address of a 32-bit integer which is not aligned.
///     let unaligned =
///         // A temporary unaligned reference is created here which results in
///         // undefined behavior regardless of whether the reference is used or not.
///         &packed.unaligned
///         // Casting to a raw pointer doesn't help; the mistake already happened.
///         as *const u32;
///
///     let v = std::ptr::read_unaligned(unaligned);
///
///     v
/// };
/// ```
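///
/// Instead, a raw pointer to the unaligned field can be created with the
/// [`addr_of!`](addr_of) macro, which avoids the intermediate reference
/// entirely. A minimal sketch of the sound pattern:
///
/// ```
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let packed = Packed { _padding: 0x00, unaligned: 0x01020304 };
///
/// // `addr_of!` creates the raw pointer directly, without a reference.
/// let unaligned = std::ptr::addr_of!(packed.unaligned);
/// let v = unsafe { std::ptr::read_unaligned(unaligned) };
/// assert_eq!(v, 0x01020304);
/// ```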
///
/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
// FIXME: Update docs based on outcome of RFC #2582 and friends.
///
/// # Examples
///
/// Read a usize value from a byte buffer:
///
/// ```
/// use std::mem;
///
/// fn read_usize(x: &[u8]) -> usize {
///     assert!(x.len() >= mem::size_of::<usize>());
///
///     let ptr = x.as_ptr() as *const usize;
///
///     unsafe { ptr.read_unaligned() }
/// }
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
    let mut tmp = MaybeUninit::<T>::uninit();
    // SAFETY: the caller must guarantee that `src` is valid for reads.
    // `src` cannot overlap `tmp` because `tmp` was just allocated on
    // the stack as a separate allocated object.
    //
    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
    // to be properly initialized.
    unsafe {
        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
        tmp.assume_init()
    }
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// `write` does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care should be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been [`read`] from.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
///   case.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "none")]
pub const unsafe fn write<T>(dst: *mut T, src: T) {
    // SAFETY: the caller must guarantee that `dst` is valid for writes.
    // `dst` cannot overlap `src` because the caller has mutable access
    // to `dst` while `src` is owned by this function.
    unsafe {
        copy_nonoverlapping(&src as *const T, dst, 1);
        // We are calling the intrinsic directly to avoid function calls in the generated code.
        intrinsics::forget(src);
    }
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike [`write()`], the pointer may be unaligned.
///
/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been read with [`read_unaligned`].
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
///
/// [valid]: self#safety
///
/// ## On `packed` structs
///
/// Raw pointers to unaligned fields of a packed struct cannot be soundly
/// created with a plain `&mut` expression; the [`addr_of_mut!`](addr_of_mut) macro exists for this purpose.
///
/// Attempting to create a raw pointer to an `unaligned` struct field with
/// an expression such as `&packed.unaligned as *const FieldType` creates an
/// intermediate unaligned reference before converting that to a raw pointer.
/// That this reference is temporary and immediately cast is inconsequential
/// as the compiler always expects references to be properly aligned.
/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
/// *undefined behavior* in your program.
///
/// An example of what not to do and how this relates to `write_unaligned` is:
///
/// ```no_run
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let v = 0x01020304;
/// let mut packed: Packed = unsafe { std::mem::zeroed() };
///
/// let v = unsafe {
///     // Here we attempt to take the address of a 32-bit integer which is not aligned.
///     let unaligned =
///         // A temporary unaligned reference is created here which results in
///         // undefined behavior regardless of whether the reference is used or not.
///         &mut packed.unaligned
///         // Casting to a raw pointer doesn't help; the mistake already happened.
///         as *mut u32;
///
///     std::ptr::write_unaligned(unaligned, v);
///
///     v
/// };
/// ```
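///
/// Instead, a raw pointer to the unaligned field can be created with the
/// [`addr_of_mut!`](addr_of_mut) macro, which avoids the intermediate
/// reference entirely. A minimal sketch of the sound pattern:
///
/// ```
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let mut packed: Packed = unsafe { std::mem::zeroed() };
///
/// // `addr_of_mut!` creates the raw pointer directly, without a reference.
/// let unaligned = std::ptr::addr_of_mut!(packed.unaligned);
/// unsafe { std::ptr::write_unaligned(unaligned, 0x01020304) };
///
/// assert_eq!({ packed.unaligned }, 0x01020304); // `{...}` copies the field out.
/// ```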
///
/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
// FIXME: Update docs based on outcome of RFC #2582 and friends.
///
/// # Examples
///
/// Write a usize value to a byte buffer:
///
/// ```
/// use std::mem;
///
/// fn write_usize(x: &mut [u8], val: usize) {
///     assert!(x.len() >= mem::size_of::<usize>());
///
///     let ptr = x.as_mut_ptr() as *mut usize;
///
///     unsafe { ptr.write_unaligned(val) }
/// }
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "none")]
pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
    // SAFETY: the caller must guarantee that `dst` is valid for writes.
    // `dst` cannot overlap `src` because the caller has mutable access
    // to `dst` while `src` is owned by this function.
    unsafe {
        copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
        // We are calling the intrinsic directly to avoid function calls in the generated code.
        intrinsics::forget(src);
    }
}

/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `read_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
/// [read-ownership]: read#ownership-of-the-returned-value
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `read_volatile` and any write operation to the same location
/// is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    if cfg!(debug_assertions) && !is_aligned_and_not_null(src) {
        // Not panicking to keep codegen impact smaller.
        abort();
    }
    // SAFETY: the caller must uphold the safety contract for `volatile_load`.
    unsafe { intrinsics::volatile_load(src) }
}

/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `write_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: self#safety
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
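///
/// A sketch of the typical memory-mapped I/O pattern (the register address is
/// hypothetical; such code is only meaningful on hardware where that address
/// is actually mapped to a device):
///
/// ```no_run
/// // Hypothetical address of a write-only UART transmit register.
/// const UART_TX: *mut u8 = 0x1000_0000 as *mut u8;
///
/// for &byte in b"hi" {
///     // One volatile write per byte; the compiler will not elide or merge them.
///     unsafe { std::ptr::write_volatile(UART_TX, byte) };
/// }
/// ```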
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    if cfg!(debug_assertions) && !is_aligned_and_not_null(dst) {
        // Not panicking to keep codegen impact smaller.
        abort();
    }
    // SAFETY: the caller must uphold the safety contract for `volatile_store`.
    unsafe {
        intrinsics::volatile_store(dst, src);
    }
}

/// Align pointer `p`.
///
/// Calculate the offset (in terms of elements, each of size `stride`) that has
/// to be applied to pointer `p` so that pointer `p` would get aligned to `a`.
///
/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated
/// constants.
///
/// If we ever decide to make it possible to call the intrinsic with `a` that is not a
/// power-of-two, it will probably be more prudent to just change to a naive implementation rather
/// than trying to adapt this to accommodate that change.
///
/// Any questions go to @nagisa.
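///
/// A sketch of the behavior as observable through the public `align_offset`
/// method on raw pointers (the returned offset is in elements, and
/// `usize::MAX` means the pointer can never be aligned that way):
///
/// ```ignore (illustrates the public wrapper, not this private helper)
/// let x = [0u8; 16];
/// let ptr = x.as_ptr();
/// let off = ptr.align_offset(4);
/// // For a `u8` pointer (stride 1) some offset below 4 always works.
/// assert!(off < 4);
/// assert_eq!((ptr as usize + off) % 4, 0);
/// ```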
#[lang = "align_offset"]
pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
    // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
    // 1, where the method versions of these operations are not inlined.
    use intrinsics::{
        unchecked_shl, unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
    };

    /// Calculate multiplicative modular inverse of `x` modulo `m`.
    ///
    /// This implementation is tailored for `align_offset` and has following preconditions:
    ///
    /// * `m` is a power-of-two;
    /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
    ///
    /// Implementation of this function shall not panic. Ever.
    #[inline]
    unsafe fn mod_inv(x: usize, m: usize) -> usize {
        /// Multiplicative modular inverse table modulo 2⁴ = 16.
        ///
        /// Note, that this table does not contain values where inverse does not exist (i.e., for
        /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
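        ///
        /// For example, `3⁻¹ mod 16 = 11`, stored at index `(3 & 15) >> 1 = 1`,
        /// since `3 · 11 = 33 ≡ 1 (mod 16)`.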
        const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
        /// Modulo for which the `INV_TABLE_MOD_16` is intended.
        const INV_TABLE_MOD: usize = 16;
        /// INV_TABLE_MOD²
        const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;

        let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
        // SAFETY: `m` is required to be a power-of-two, hence non-zero.
        let m_minus_one = unsafe { unchecked_sub(m, 1) };
        if m <= INV_TABLE_MOD {
            table_inverse & m_minus_one
        } else {
            // We iterate "up" using the following formula:
            //
            // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
            //
            // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
            let mut inverse = table_inverse;
            let mut going_mod = INV_TABLE_MOD_SQUARED;
            loop {
                // y = y * (2 - xy) mod n
                //
                // Note, that we use wrapping operations here intentionally – the original formula
                // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
                // usize::MAX` instead, because we take the result `mod n` at the end
                // anyway.
                inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
                if going_mod >= m {
                    return inverse & m_minus_one;
                }
                going_mod = wrapping_mul(going_mod, going_mod);
            }
        }
    }

    let stride = mem::size_of::<T>();
    // SAFETY: `a` is a power-of-two, therefore non-zero.
    let a_minus_one = unsafe { unchecked_sub(a, 1) };
    if stride == 1 {
        // `stride == 1` case can be computed more simply through `-p (mod a)`, but doing so
        // inhibits LLVM's ability to select instructions like `lea`. Instead we compute
        //
        //    round_up_to_next_alignment(p, a) - p
        //
        // which distributes operations around the load-bearing, but pessimizing `and` sufficiently
        // for LLVM to be able to utilize the various optimizations it knows about.
        return wrapping_sub(
            wrapping_add(p as usize, a_minus_one) & wrapping_sub(0, a),
            p as usize,
        );
    }

    let pmoda = p as usize & a_minus_one;
    if pmoda == 0 {
        // Already aligned. Yay!
        return 0;
    } else if stride == 0 {
        // If the pointer is not aligned, and the element is zero-sized, then no amount of
        // elements will ever align the pointer.
        return usize::MAX;
    }

    let smoda = stride & a_minus_one;
    // SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
    let gcdpow = unsafe { intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)) };
    // SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a usize.
    let gcd = unsafe { unchecked_shl(1usize, gcdpow) };

    // SAFETY: gcd is always greater or equal to 1.
    if p as usize & unsafe { unchecked_sub(gcd, 1) } == 0 {
        // This branch solves for the following linear congruence equation:
        //
        // ` p + so = 0 mod a `
        //
        // `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the
        // requested alignment.
        //
        // With `g = gcd(a, s)`, and the above condition asserting that `p` is also divisible by
        // `g`, we can denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to:
        //
        // ` p' + s'o = 0 mod a' `
        // ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
        //
        // The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the second
        // term is "how does incrementing `p` by `s` bytes change the relative alignment of `p`" (again
        // divided by `g`).
        // Division by `g` is necessary to make the inverse well formed if `a` and `s` are not
        // co-prime.
        //
        // Furthermore, the result produced by this solution is not "minimal", so it is necessary
        // to take the result `o mod lcm(s, a)`. We can replace `lcm(s, a)` with just a `a'`.

        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
        // `a`.
        let a2 = unsafe { unchecked_shr(a, gcdpow) };
        // SAFETY: `a2` is non-zero. Shifting `a` by `gcdpow` cannot shift out any of the set bits
        // in `a` (of which it has exactly one).
        let a2minus1 = unsafe { unchecked_sub(a2, 1) };
        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
        // `a`.
        let s2 = unsafe { unchecked_shr(smoda, gcdpow) };
        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
        // `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will
        // always be strictly greater than `(p % a) >> gcdpow`.
        let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(pmoda, gcdpow)) };
        // SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2`
        // because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`.
        return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1;
    }

    // Cannot be aligned at all.
    usize::MAX
}

/// Compares raw pointers for equality.
///
/// This is the same as using the `==` operator, but less generic:
/// the arguments have to be `*const T` raw pointers,
/// not anything that implements `PartialEq`.
///
/// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
/// by their address rather than comparing the values they point to
/// (which is what the `PartialEq for &T` implementation does).
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let five = 5;
/// let other_five = 5;
/// let five_ref = &five;
/// let same_five_ref = &five;
/// let other_five_ref = &other_five;
///
/// assert!(five_ref == same_five_ref);
/// assert!(ptr::eq(five_ref, same_five_ref));
///
/// assert!(five_ref == other_five_ref);
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
///
/// Slices are also compared by their length (fat pointers):
///
/// ```
/// let a = [1, 2, 3];
/// assert!(std::ptr::eq(&a[..3], &a[..3]));
/// assert!(!std::ptr::eq(&a[..2], &a[..3]));
/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
/// ```
///
/// Traits are also compared by their implementation:
///
/// ```
/// #[repr(transparent)]
/// struct Wrapper { member: i32 }
///
/// trait Trait {}
/// impl Trait for Wrapper {}
/// impl Trait for i32 {}
///
/// let wrapper = Wrapper { member: 10 };
///
/// // Pointers have equal addresses.
/// assert!(std::ptr::eq(
///     &wrapper as *const Wrapper as *const u8,
///     &wrapper.member as *const i32 as *const u8
/// ));
///
/// // Objects have equal addresses, but `Trait` has different implementations.
/// assert!(!std::ptr::eq(
///     &wrapper as &dyn Trait,
///     &wrapper.member as &dyn Trait,
/// ));
/// assert!(!std::ptr::eq(
///     &wrapper as &dyn Trait as *const dyn Trait,
///     &wrapper.member as &dyn Trait as *const dyn Trait,
/// ));
///
/// // Converting the reference to a `*const u8` compares by address.
/// assert!(std::ptr::eq(
///     &wrapper as &dyn Trait as *const dyn Trait as *const u8,
///     &wrapper.member as &dyn Trait as *const dyn Trait as *const u8,
/// ));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
    a == b
}

/// Hash a raw pointer.
///
/// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly)
/// by its address rather than the value it points to
/// (which is what the `Hash for &T` implementation does).
///
/// # Examples
///
/// ```
/// use std::collections::hash_map::DefaultHasher;
/// use std::hash::{Hash, Hasher};
/// use std::ptr;
///
/// let five = 5;
/// let five_ref = &five;
///
/// let mut hasher = DefaultHasher::new();
/// ptr::hash(five_ref, &mut hasher);
/// let actual = hasher.finish();
///
/// let mut hasher = DefaultHasher::new();
/// (five_ref as *const i32).hash(&mut hasher);
/// let expected = hasher.finish();
///
/// assert_eq!(actual, expected);
/// ```
#[stable(feature = "ptr_hash", since = "1.35.0")]
pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
    use crate::hash::Hash;
    hashee.hash(into);
}

// Impls for function pointers
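// For example, `fnptr_impls_args! { A }` expands (through
// `fnptr_impls_safety_abi!`) into `PartialEq`, `Eq`, `PartialOrd`, `Ord`,
// `Hash`, `fmt::Pointer` and `fmt::Debug` impls for each of
// `extern "Rust" fn(A) -> Ret`, `extern "C" fn(A) -> Ret`,
// `extern "C" fn(A, ...) -> Ret`, and their `unsafe` counterparts.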
macro_rules! fnptr_impls_safety_abi {
    ($FnTy: ty, $($Arg: ident),*) => {
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialEq for $FnTy {
            #[inline]
            fn eq(&self, other: &Self) -> bool {
                *self as usize == *other as usize
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Eq for $FnTy {}

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialOrd for $FnTy {
            #[inline]
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                (*self as usize).partial_cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Ord for $FnTy {
            #[inline]
            fn cmp(&self, other: &Self) -> Ordering {
                (*self as usize).cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> hash::Hash for $FnTy {
            fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
                state.write_usize(*self as usize)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // HACK: The intermediate cast as usize is required for AVR
                // so that the address space of the source function pointer
                // is preserved in the final function pointer.
                //
                // https://github.com/avr-rust/rust/issues/143
                fmt::Pointer::fmt(&(*self as usize as *const ()), f)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // HACK: The intermediate cast as usize is required for AVR
                // so that the address space of the source function pointer
                // is preserved in the final function pointer.
                //
                // https://github.com/avr-rust/rust/issues/143
                fmt::Pointer::fmt(&(*self as usize as *const ()), f)
            }
        }
    }
}

macro_rules! fnptr_impls_args {
    ($($Arg: ident),+) => {
        fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
    };
    () => {
        // No variadic functions with 0 parameters
        fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
    };
}

fnptr_impls_args! {}
fnptr_impls_args! { A }
fnptr_impls_args! { A, B }
fnptr_impls_args! { A, B, C }
fnptr_impls_args! { A, B, C, D }
fnptr_impls_args! { A, B, C, D, E }
fnptr_impls_args! { A, B, C, D, E, F }
fnptr_impls_args! { A, B, C, D, E, F, G }
fnptr_impls_args! { A, B, C, D, E, F, G, H }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }

/// Create a `const` raw pointer to a place, without creating an intermediate reference.
///
/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
/// and points to initialized data. For cases where those requirements do not hold,
/// raw pointers should be used instead. However, `&expr as *const _` creates a reference
/// before casting it to a raw pointer, and that reference is subject to the same rules
/// as all other references. This macro can create a raw pointer *without* creating
/// a reference first.
///
/// # Example
///
/// ```
/// use std::ptr;
///
/// #[repr(packed)]
/// struct Packed {
///     f1: u8,
///     f2: u16,
/// }
///
/// let packed = Packed { f1: 1, f2: 2 };
/// // `&packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
/// let raw_f2 = ptr::addr_of!(packed.f2);
/// assert_eq!(unsafe { raw_f2.read_unaligned() }, 2);
/// ```
#[stable(feature = "raw_ref_macros", since = "1.51.0")]
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(raw_ref_op)]
pub macro addr_of($place:expr) {
    &raw const $place
}

/// Create a `mut` raw pointer to a place, without creating an intermediate reference.
///
/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
/// and points to initialized data. For cases where those requirements do not hold,
/// raw pointers should be used instead. However, `&mut expr as *mut _` creates a reference
/// before casting it to a raw pointer, and that reference is subject to the same rules
/// as all other references. This macro can create a raw pointer *without* creating
/// a reference first.
///
/// # Example
///
/// ```
/// use std::ptr;
///
/// #[repr(packed)]
/// struct Packed {
///     f1: u8,
///     f2: u16,
/// }
///
/// let mut packed = Packed { f1: 1, f2: 2 };
/// // `&mut packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
/// let raw_f2 = ptr::addr_of_mut!(packed.f2);
/// unsafe { raw_f2.write_unaligned(42); }
/// assert_eq!({packed.f2}, 42); // `{...}` forces copying the field instead of creating a reference.
/// ```
#[stable(feature = "raw_ref_macros", since = "1.51.0")]
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(raw_ref_op)]
pub macro addr_of_mut($place:expr) {
    &raw mut $place
}