use super::*;
use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::intrinsics::{self, const_eval_select};
use crate::mem::{self, SizedTypeProperties};
use crate::slice::{self, SliceIndex};

impl<T: ?Sized> *const T {
    /// Returns `true` if the pointer is null.
    ///
    /// Note that unsized types have many possible null pointers, as only the
    /// raw data pointer is considered, not their length, vtable, etc.
    /// Therefore, two pointers that are null may still not compare equal to
    /// each other.
    ///
    /// ## Behavior during const evaluation
    ///
    /// When this function is used during const evaluation, it may return `false` for pointers
    /// that turn out to be null at runtime. Specifically, when a pointer to some memory
    /// is offset beyond its bounds in such a way that the resulting pointer is null,
    /// the function will still return `false`. There is no way for CTFE to know
    /// the absolute position of that memory, so we cannot tell if the pointer is
    /// null or not.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "Follow the rabbit";
    /// let ptr: *const u8 = s.as_ptr();
    /// assert!(!ptr.is_null());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
    #[rustc_diagnostic_item = "ptr_const_is_null"]
    #[inline]
    pub const fn is_null(self) -> bool {
        #[inline]
        fn runtime_impl(ptr: *const u8) -> bool {
            ptr.addr() == 0
        }

        #[inline]
        const fn const_impl(ptr: *const u8) -> bool {
            // Compare via a cast to a thin pointer, so fat pointers only
            // consider their "data" part for null-ness.
            match (ptr).guaranteed_eq(null_mut()) {
                None => false,
                Some(res) => res,
            }
        }

        // SAFETY: The two versions are equivalent at runtime.
        unsafe { const_eval_select((self as *const u8,), const_impl, runtime_impl) }
    }

    /// Casts to a pointer of another type.
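    ///
    /// # Examples
    ///
    /// A minimal sketch; only the pointee type changes, the address stays
    /// the same:
    ///
    /// ```
    /// let v = 0x01010101u32;
    /// let p: *const u32 = &v;
    /// let bytes: *const u8 = p.cast();
    /// assert_eq!(unsafe { *bytes }, 1); // every byte of `v` is `1`
    /// ```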
    #[stable(feature = "ptr_cast", since = "1.38.0")]
    #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
    #[rustc_diagnostic_item = "const_ptr_cast"]
    #[inline(always)]
    pub const fn cast<U>(self) -> *const U {
        self as _
    }

    /// Use the pointer value in a new pointer of another type.
    ///
    /// In case `meta` is a (fat) pointer to an unsized type, this operation
    /// will ignore the pointer part, whereas for (thin) pointers to sized
    /// types, this has the same effect as a simple cast.
    ///
    /// The resulting pointer will have provenance of `self`, i.e., for a fat
    /// pointer, this operation is semantically the same as creating a new
    /// fat pointer with the data pointer value of `self` but the metadata of
    /// `meta`.
    ///
    /// # Examples
    ///
    /// This function is primarily useful for allowing byte-wise pointer
    /// arithmetic on potentially fat pointers:
    ///
    /// ```
    /// #![feature(set_ptr_value)]
    /// # use core::fmt::Debug;
    /// let arr: [i32; 3] = [1, 2, 3];
    /// let mut ptr = arr.as_ptr() as *const dyn Debug;
    /// let thin = ptr as *const u8;
    /// unsafe {
    ///     ptr = thin.add(8).with_metadata_of(ptr);
    ///     # assert_eq!(*(ptr as *const i32), 3);
    ///     println!("{:?}", &*ptr); // will print "3"
    /// }
    /// ```
    #[unstable(feature = "set_ptr_value", issue = "75091")]
    #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[inline]
    pub const fn with_metadata_of<U>(self, meta: *const U) -> *const U
    where
        U: ?Sized,
    {
        from_raw_parts::<U>(self as *const (), metadata(meta))
    }

    /// Changes constness without changing the type.
    ///
    /// This is a bit safer than `as` because it wouldn't silently change the type if the code is
    /// refactored.
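    ///
    /// # Examples
    ///
    /// A small illustrative sketch; only the type changes, never the address:
    ///
    /// ```
    /// let mut x = 5u32;
    /// let p: *mut u32 = &mut x;
    /// let q: *const u32 = p; // implicit `*mut -> *const` coercion
    /// assert_eq!(q.cast_mut(), p); // `cast_mut` recovers the `*mut` type
    /// ```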
    #[stable(feature = "ptr_const_cast", since = "1.65.0")]
    #[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
    #[rustc_diagnostic_item = "ptr_cast_mut"]
    #[inline(always)]
    pub const fn cast_mut(self) -> *mut T {
        self as _
    }

    /// Casts a pointer to its raw bits.
    ///
    /// This is equivalent to `as usize`, but is more specific to enhance readability.
    /// The inverse method is [`from_bits`](#method.from_bits).
    ///
    /// In particular, `*p as usize` and `p as usize` will both compile for
    /// pointers to numeric types but do very different things, so using this
    /// helps emphasize that reading the bits was intentional.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ptr_to_from_bits)]
    /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
    /// let array = [13, 42];
    /// let p0: *const i32 = &array[0];
    /// assert_eq!(<*const _>::from_bits(p0.to_bits()), p0);
    /// let p1: *const i32 = &array[1];
    /// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
    /// # }
    /// ```
    #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
    #[deprecated(
        since = "1.67.0",
        note = "replaced by the `expose_addr` method, or update your code \
            to follow the strict provenance rules using its APIs"
    )]
    #[inline(always)]
    pub fn to_bits(self) -> usize
    where
        T: Sized,
    {
        self as usize
    }

    /// Creates a pointer from its raw bits.
    ///
    /// This is equivalent to `as *const T`, but is more specific to enhance readability.
    /// The inverse method is [`to_bits`](#method.to_bits).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ptr_to_from_bits)]
    /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
    /// use std::ptr::NonNull;
    /// let dangling: *const u8 = NonNull::dangling().as_ptr();
    /// assert_eq!(<*const u8>::from_bits(1), dangling);
    /// # }
    /// ```
    #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
    #[deprecated(
        since = "1.67.0",
        note = "replaced by the `ptr::from_exposed_addr` function, or update \
            your code to follow the strict provenance rules using its APIs"
    )]
    #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
    #[inline(always)]
    pub fn from_bits(bits: usize) -> Self
    where
        T: Sized,
    {
        bits as Self
    }

    /// Gets the "address" portion of the pointer.
    ///
    /// This is similar to `self as usize`, which semantically discards *provenance* and
    /// *address-space* information. However, unlike `self as usize`, casting the returned address
    /// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
    /// properly restore the lost information and obtain a dereferenceable pointer, use
    /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
    ///
    /// If using those APIs is not possible because there is no way to preserve a pointer with the
    /// required provenance, use [`expose_addr`][pointer::expose_addr] and
    /// [`from_exposed_addr`][from_exposed_addr] instead. However, note that this makes
    /// your code less portable and less amenable to tools that check for compliance with the Rust
    /// memory model.
    ///
    /// On most platforms this will produce a value with the same bytes as the original
    /// pointer, because all the bytes are dedicated to describing the address.
    /// Platforms which need to store additional information in the pointer may
    /// perform a change of representation to produce a value containing only the address
    /// portion of the pointer. What that means is up to the platform to define.
    ///
    /// This API and its claimed semantics are part of the Strict Provenance experiment, and as such
    /// might change in the future (including possibly weakening this so it becomes wholly
    /// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
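    ///
    /// # Examples
    ///
    /// A minimal sketch (nightly-only, since this API is part of the unstable
    /// `strict_provenance` feature):
    ///
    /// ```
    /// #![feature(strict_provenance)]
    /// let x = 42u8;
    /// let p: *const u8 = &x;
    /// // The bare address carries no provenance; pair it with `with_addr`
    /// // to recover a usable pointer.
    /// assert_eq!(p.with_addr(p.addr()), p);
    /// ```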
    #[must_use]
    #[inline(always)]
    #[unstable(feature = "strict_provenance", issue = "95228")]
    pub fn addr(self) -> usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        unsafe { mem::transmute(self.cast::<()>()) }
    }

    /// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
    /// use in [`from_exposed_addr`][].
    ///
    /// This is equivalent to `self as usize`, which semantically discards *provenance* and
    /// *address-space* information. Furthermore, this (like the `as` cast) has the implicit
    /// side-effect of marking the provenance as 'exposed', so on platforms that support it you can
    /// later call [`from_exposed_addr`][] to reconstitute the original pointer including its
    /// provenance. (Reconstructing address space information, if required, is your responsibility.)
    ///
    /// Using this method means that code is *not* following Strict Provenance rules. Supporting
    /// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by
    /// tools that help you to stay conformant with the Rust memory model, so it is recommended to
    /// use [`addr`][pointer::addr] wherever possible.
    ///
    /// On most platforms this will produce a value with the same bytes as the original pointer,
    /// because all the bytes are dedicated to describing the address. Platforms which need to store
    /// additional information in the pointer may not support this operation, since the 'expose'
    /// side-effect which is required for [`from_exposed_addr`][] to work is typically not
    /// available.
    ///
    /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
    /// [module documentation][crate::ptr] for details.
    ///
    /// [`from_exposed_addr`]: from_exposed_addr
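    ///
    /// # Examples
    ///
    /// A minimal sketch of the expose/reconstitute round trip (nightly-only):
    ///
    /// ```
    /// #![feature(strict_provenance)]
    /// use std::ptr;
    ///
    /// let x = 3u8;
    /// let p: *const u8 = &x;
    /// let addr = p.expose_addr(); // provenance is marked as exposed
    /// let q = ptr::from_exposed_addr::<u8>(addr);
    /// assert_eq!(unsafe { *q }, 3);
    /// ```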
    #[must_use]
    #[inline(always)]
    #[unstable(feature = "strict_provenance", issue = "95228")]
    pub fn expose_addr(self) -> usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        self.cast::<()>() as usize
    }

    /// Creates a new pointer with the given address.
    ///
    /// This performs the same operation as an `addr as ptr` cast, but copies
    /// the *address-space* and *provenance* of `self` to the new pointer.
    /// This allows us to dynamically preserve and propagate this important
    /// information in a way that is otherwise impossible with a unary cast.
    ///
    /// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
    /// `self` to the given address, and therefore has all the same capabilities and restrictions.
    ///
    /// This API and its claimed semantics are part of the Strict Provenance experiment,
    /// see the [module documentation][crate::ptr] for details.
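    ///
    /// # Examples
    ///
    /// A minimal sketch (nightly-only); the new address stays inside the same
    /// allocation, so the result is dereferenceable:
    ///
    /// ```
    /// #![feature(strict_provenance)]
    /// let data = [1u8, 2, 3];
    /// let p = data.as_ptr();
    /// let second = p.with_addr(p.addr() + 1);
    /// assert_eq!(unsafe { *second }, 2);
    /// ```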
    #[must_use]
    #[inline]
    #[unstable(feature = "strict_provenance", issue = "95228")]
    pub fn with_addr(self, addr: usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the mean-time, this operation is defined to be "as if" it was
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        let self_addr = self.addr() as isize;
        let dest_addr = addr as isize;
        let offset = dest_addr.wrapping_sub(self_addr);

        // This is the canonical desugaring of this operation
        self.wrapping_byte_offset(offset)
    }

    /// Creates a new pointer by mapping `self`'s address to a new one.
    ///
    /// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details.
    ///
    /// This API and its claimed semantics are part of the Strict Provenance experiment,
    /// see the [module documentation][crate::ptr] for details.
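    ///
    /// # Examples
    ///
    /// A minimal sketch (nightly-only):
    ///
    /// ```
    /// #![feature(strict_provenance)]
    /// let arr = [1u8, 2, 3];
    /// let p: *const u8 = &arr[2];
    /// // Step back two elements by transforming the address directly.
    /// let q = p.map_addr(|a| a - 2);
    /// assert_eq!(unsafe { *q }, 1);
    /// ```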
    #[must_use]
    #[inline]
    #[unstable(feature = "strict_provenance", issue = "95228")]
    pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
        self.with_addr(f(self.addr()))
    }

    /// Decompose a (possibly wide) pointer into its data pointer and metadata components.
    ///
    /// The pointer can be later reconstructed with [`from_raw_parts`].
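    ///
    /// # Examples
    ///
    /// A minimal sketch for a slice pointer, whose metadata is its length
    /// (nightly-only):
    ///
    /// ```
    /// #![feature(ptr_metadata)]
    /// let arr = [1u16, 2, 3];
    /// let slice_ptr: *const [u16] = &arr[..];
    /// let (data, len) = slice_ptr.to_raw_parts();
    /// assert_eq!(len, 3);
    /// assert_eq!(data, arr.as_ptr().cast::<()>());
    /// ```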
    #[unstable(feature = "ptr_metadata", issue = "81513")]
    #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
    #[inline]
    pub const fn to_raw_parts(self) -> (*const (), <T as super::Pointee>::Metadata) {
        (self.cast(), metadata(self))
    }

    /// Returns `None` if the pointer is null, or else returns a shared reference to
    /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`]
    /// must be used instead.
    ///
    /// [`as_uninit_ref`]: #method.as_uninit_ref
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following is true:
    ///
    /// * The pointer must be properly aligned.
    ///
    /// * It must be "dereferenceable" in the sense defined in [the module documentation].
    ///
    /// * The pointer must point to an initialized instance of `T`.
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get mutated (except inside `UnsafeCell`).
    ///
    /// This applies even if the result of this method is unused!
    /// (The part about being initialized is not yet fully decided, but until
    /// it is, the only safe approach is to ensure that they are indeed initialized.)
    ///
    /// [the module documentation]: crate::ptr#safety
    ///
    /// # Examples
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_ref() {
    ///         println!("We got back the value: {val_back}!");
    ///     }
    /// }
    /// ```
    ///
    /// # Null-unchecked version
    ///
    /// If you are sure the pointer can never be null and are looking for some kind of
    /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
    /// dereference the pointer directly.
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     let val_back = &*ptr;
    ///     println!("We got back the value: {val_back}!");
    /// }
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    #[inline]
    pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> {
        // SAFETY: the caller must guarantee that `self` is valid
        // for a reference if it isn't null.
        if self.is_null() { None } else { unsafe { Some(&*self) } }
    }

    /// Returns `None` if the pointer is null, or else returns a shared reference to
    /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
    /// that the value has to be initialized.
    ///
    /// [`as_ref`]: #method.as_ref
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following is true:
    ///
    /// * The pointer must be properly aligned.
    ///
    /// * It must be "dereferenceable" in the sense defined in [the module documentation].
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get mutated (except inside `UnsafeCell`).
    ///
    /// This applies even if the result of this method is unused!
    ///
    /// [the module documentation]: crate::ptr#safety
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ptr_as_uninit)]
    ///
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_uninit_ref() {
    ///         println!("We got back the value: {}!", val_back.assume_init());
    ///     }
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
    where
        T: Sized,
    {
        // SAFETY: the caller must guarantee that `self` meets all the
        // requirements for a reference.
        if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) }
    }

    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_offset`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// [`wrapping_offset`]: #method.wrapping_offset
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1) as char);
    ///     println!("{}", *ptr.offset(2) as char);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn offset(self, count: isize) -> *const T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `offset`.
        unsafe { intrinsics::offset(self, count) }
    }

    /// Calculates the offset from a pointer in bytes.
    ///
    /// `count` is in units of **bytes**.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [offset][pointer::offset] on it. See that method for documentation
    /// and safety requirements.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
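    ///
    /// # Examples
    ///
    /// A minimal sketch; one `u16` element is two bytes:
    ///
    /// ```
    /// let arr = [5u16, 6, 7];
    /// let p: *const u16 = arr.as_ptr();
    /// unsafe {
    ///     assert_eq!(*p.byte_offset(2), 6);
    /// }
    /// ```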
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_offset(self, count: isize) -> Self {
        // SAFETY: the caller must uphold the safety contract for `offset`.
        unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// This operation itself is always safe, but using the resulting pointer is not.
    ///
    /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
    /// be used to read or write other allocated objects.
    ///
    /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z`
    /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
    /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
    /// `x` and `y` point into the same allocated object.
    ///
    /// Compared to [`offset`], this method basically delays the requirement of staying within the
    /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object
    /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a
    /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`]
    /// can be optimized better and is thus preferable in performance-sensitive code.
    ///
    /// The delayed check only considers the value of the pointer that was dereferenced, not the
    /// intermediate values used during the computation of the final result. For example,
    /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other
    /// words, leaving the allocated object and then re-entering it later is permitted.
    ///
    /// [`offset`]: #method.offset
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_offset(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_offset(step);
    /// }
    /// ```
    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    pub const fn wrapping_offset(self, count: isize) -> *const T
    where
        T: Sized,
    {
        // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
        unsafe { intrinsics::arith_offset(self, count) }
    }

    /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
    ///
    /// `count` is in units of **bytes**.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [wrapping_offset][pointer::wrapping_offset] on it. See that method
    /// for documentation.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
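    ///
    /// # Examples
    ///
    /// A minimal sketch; the offset stays within the allocation, so the
    /// result may be dereferenced:
    ///
    /// ```
    /// let v = [0x12u16, 0x34];
    /// let p: *const u16 = v.as_ptr();
    /// let q = p.wrapping_byte_offset(2);
    /// assert_eq!(unsafe { *q }, 0x34);
    /// ```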
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    pub const fn wrapping_byte_offset(self, count: isize) -> Self {
        self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
    }

    /// Masks out bits of the pointer according to a mask.
    ///
    /// This is a convenience for `ptr.map_addr(|a| a & mask)`.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    ///
    /// ## Examples
    ///
    /// ```
    /// #![feature(ptr_mask, strict_provenance)]
    /// let v = 17_u32;
    /// let ptr: *const u32 = &v;
    ///
    /// // `u32` is 4-byte aligned,
    /// // which means that the lower 2 bits are always 0.
    /// let tag_mask = 0b11;
    /// let ptr_mask = !tag_mask;
    ///
    /// // We can store something in these lower bits
    /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
    ///
    /// // Get the "tag" back
    /// let tag = tagged_ptr.addr() & tag_mask;
    /// assert_eq!(tag, 0b10);
    ///
    /// // Note that `tagged_ptr` is unaligned; it's UB to read from it.
    /// // To get the original pointer back, `mask` can be used:
    /// let masked_ptr = tagged_ptr.mask(ptr_mask);
    /// assert_eq!(unsafe { *masked_ptr }, 17);
    /// ```
    #[unstable(feature = "ptr_mask", issue = "98290")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[inline(always)]
    pub fn mask(self, mask: usize) -> *const T {
        intrinsics::ptr_mask(self.cast::<()>(), mask).with_metadata_of(self)
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
    ///
    /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
    /// except that it has a lot more opportunities for UB, in exchange for the compiler
    /// better understanding what you are doing.
    ///
    /// The primary motivation of this method is for computing the `len` of an array/slice
    /// of `T` that you are currently representing as a "start" and "end" pointer
    /// (and "end" is "one past the end" of the array).
    /// In that case, `end.offset_from(start)` gets you the length of the array.
    ///
    /// All of the following safety requirements are trivially satisfied for this use case.
    ///
    /// [`offset`]: #method.offset
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both `self` and `origin` must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * Both pointers must be *derived from* a pointer to the same object.
    ///   (See below for an example.)
    ///
    /// * The distance between the pointers, in bytes, must be an exact multiple
    ///   of the size of `T`.
    ///
    /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The distance being in bounds cannot rely on "wrapping around" the address space.
    ///
    /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
    /// address space, so two pointers within some value of any Rust type `T` will always satisfy
    /// the last two conditions. The standard library also generally ensures that allocations
    /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
    /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
    /// always satisfies the last two conditions.
    ///
    /// Most platforms fundamentally can't even construct such a large allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
    /// such large allocations either.)
    ///
    /// The requirement for pointers to be derived from the same allocated object is primarily
    /// needed for `const`-compatibility: the distance between pointers into *different* allocated
    /// objects is not known at compile-time. However, the requirement also exists at
    /// runtime and may be exploited by optimizations. If you wish to compute the difference between
    /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
    /// origin as isize) / mem::size_of::<T>()`.
    // FIXME: recommend `addr()` instead of `as usize` once that is stable.
    ///
    /// [`add`]: #method.add
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.offset_from(ptr1), 2);
    ///     assert_eq!(ptr1.offset_from(ptr2), -2);
    ///     assert_eq!(ptr1.offset(2), ptr2);
    ///     assert_eq!(ptr2.offset(-2), ptr1);
    /// }
    /// ```
    ///
    /// *Incorrect* usage:
    ///
    /// ```rust,no_run
    /// let ptr1 = Box::into_raw(Box::new(0u8)) as *const u8;
    /// let ptr2 = Box::into_raw(Box::new(1u8)) as *const u8;
    /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize);
    /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
    /// let ptr2_other = (ptr1 as *const u8).wrapping_offset(diff);
    /// assert_eq!(ptr2 as usize, ptr2_other as usize);
    /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
    /// // computing their offset is undefined behavior, even though
    /// // they point to the same address!
    /// unsafe {
    ///     let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
    /// }
    /// ```
    #[stable(feature = "ptr_offset_from", since = "1.47.0")]
    #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn offset_from(self, origin: *const T) -> isize
    where
        T: Sized,
    {
        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
        // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
        unsafe { intrinsics::ptr_offset_from(self, origin) }
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of **bytes**.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [`offset_from`][pointer::offset_from] on it. See that method for
    /// documentation and safety requirements.
    ///
    /// For non-`Sized` pointees this operation considers only the data pointers,
    /// ignoring the metadata.
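    ///
    /// # Examples
    ///
    /// A minimal sketch; two `u32` elements apart is eight bytes:
    ///
    /// ```
    /// let arr = [0u32; 4];
    /// let first: *const u32 = &arr[0];
    /// let third: *const u32 = &arr[2];
    /// unsafe {
    ///     assert_eq!(third.byte_offset_from(first), 8);
    /// }
    /// ```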
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
        // SAFETY: the caller must uphold the safety contract for `offset_from`.
        unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
    }

    /// Calculates the distance between two pointers, *where it's known that
    /// `self` is equal to or greater than `origin`*. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// This computes the same value that [`offset_from`](#method.offset_from)
    /// would compute, but with the added precondition that the offset is
    /// guaranteed to be non-negative. This method is equivalent to
    /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
    /// but it provides slightly more information to the optimizer, which can
    /// sometimes allow it to optimize slightly better with some backends.
    ///
    /// This method can be thought of as recovering the `count` that was passed
    /// to [`add`](#method.add) (or, with the parameters in the other order,
    /// to [`sub`](#method.sub)). The following are all equivalent, assuming
    /// that their safety preconditions are met:
    /// ```rust
    /// # #![feature(ptr_sub_ptr)]
    /// # unsafe fn blah(ptr: *const i32, origin: *const i32, count: usize) -> bool {
    /// ptr.sub_ptr(origin) == count
    /// # &&
    /// origin.add(count) == ptr
    /// # &&
    /// ptr.sub(count) == origin
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// - The distance between the pointers must be non-negative (`self >= origin`)
    ///
    /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
    ///   apply to this method as well; see it for the full details.
    ///
    /// Importantly, despite the return type of this method being able to represent
    /// a larger offset, it's still *not permitted* to pass pointers which differ
    /// by more than `isize::MAX` *bytes*. As such, the result of this method will
    /// always be less than or equal to `isize::MAX as usize`.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ptr_sub_ptr)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.sub_ptr(ptr1), 2);
    ///     assert_eq!(ptr1.add(2), ptr2);
    ///     assert_eq!(ptr2.sub(2), ptr1);
    ///     assert_eq!(ptr2.sub_ptr(ptr2), 0);
    /// }
    ///
    /// // This would be incorrect, as the pointers are not correctly ordered:
    /// // ptr1.sub_ptr(ptr2)
    /// ```
    #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
    #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn sub_ptr(self, origin: *const T) -> usize
    where
        T: Sized,
    {
        let this = self;
        // SAFETY: The comparison has no side-effects, and the intrinsic
        // does this check internally in the CTFE implementation.
        unsafe {
            assert_unsafe_precondition!(
                "ptr::sub_ptr requires `this >= origin`",
                [T](this: *const T, origin: *const T) => this >= origin
            )
        };

        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
        // SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`.
        unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) }
    }

    /// Returns whether two pointers are guaranteed to be equal.
    ///
    /// At runtime this function behaves like `Some(self == other)`.
    /// However, in some contexts (e.g., compile-time evaluation),
    /// it is not always possible to determine equality of two pointers, so this function may
    /// spuriously return `None` for pointers that later actually turn out to have their equality
    /// known. But when it returns `Some`, the pointers' equality is guaranteed to be known.
    ///
    /// The return value may change from `Some` to `None` and vice versa depending on the compiler
    /// version, and unsafe code must not
    /// rely on the result of this function for soundness. It is suggested to only use this function
    /// for performance optimizations where spurious `None` return values by this function do not
    /// affect the outcome, but just the performance.
    /// The consequences of using this method to make runtime and compile-time code behave
    /// differently have not been explored. This method should not be used to introduce such
    /// differences, and it should also not be stabilized before we have a better understanding
    /// of this issue.
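    ///
    /// # Examples
    ///
    /// A minimal sketch (nightly-only); at runtime the answer is always known:
    ///
    /// ```
    /// #![feature(const_raw_ptr_comparison)]
    /// let x = 1u8;
    /// let p: *const u8 = &x;
    /// assert_eq!(p.guaranteed_eq(p), Some(true));
    /// ```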
    #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[inline]
    pub const fn guaranteed_eq(self, other: *const T) -> Option<bool>
    where
        T: Sized,
    {
        match intrinsics::ptr_guaranteed_cmp(self, other) {
            2 => None,
            other => Some(other == 1),
        }
    }

    /// Returns whether two pointers are guaranteed to be unequal.
    ///
    /// At runtime this function behaves like `Some(self != other)`.
    /// However, in some contexts (e.g., compile-time evaluation),
    /// it is not always possible to determine inequality of two pointers, so this function may
    /// spuriously return `None` for pointers that later actually turn out to have their inequality
    /// known. But when it returns `Some`, the pointers' inequality is guaranteed to be known.
    ///
    /// The return value may change from `Some` to `None` and vice versa depending on the compiler
    /// version, and unsafe code must not
    /// rely on the result of this function for soundness. It is suggested to only use this function
    /// for performance optimizations where spurious `None` return values by this function do not
    /// affect the outcome, but just the performance.
    /// The consequences of using this method to make runtime and compile-time code behave
    /// differently have not been explored. This method should not be used to introduce such
    /// differences, and it should also not be stabilized before we have a better understanding
    /// of this issue.
    #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[inline]
    pub const fn guaranteed_ne(self, other: *const T) -> Option<bool>
    where
        T: Sized,
    {
        match self.guaranteed_eq(other) {
            None => None,
            Some(eq) => Some(!eq),
        }
    }

    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_add`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// [`wrapping_add`]: #method.wrapping_add
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.add(1) as char);
    ///     println!("{}", *ptr.add(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn add(self, count: usize) -> Self
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `offset`.
        unsafe { intrinsics::offset(self, count) }
    }

    /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [add][pointer::add] on it. See that method for documentation
    /// and safety requirements.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
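    ///
    /// # Examples
    ///
    /// A minimal sketch; four bytes is two `u16` elements:
    ///
    /// ```
    /// let arr = [1u16, 2, 3];
    /// let p: *const u16 = arr.as_ptr();
    /// unsafe {
    ///     assert_eq!(*p.byte_add(4), 3);
    /// }
    /// ```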
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_add(self, count: usize) -> Self {
        // SAFETY: the caller must uphold the safety contract for `add`.
        unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
    }

    /// Calculates the offset from a pointer (convenience for
    /// `.offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * The computed offset cannot exceed `isize::MAX` **bytes**.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_sub`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// [`wrapping_sub`]: #method.wrapping_sub
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "123";
    ///
    /// unsafe {
    ///     let end: *const u8 = s.as_ptr().add(3);
    ///     println!("{}", *end.sub(1) as char);
    ///     println!("{}", *end.sub(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    // We could always go back to wrapping if unchecked becomes unacceptable
    #[rustc_allow_const_fn_unstable(const_int_unchecked_arith)]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn sub(self, count: usize) -> Self
    where
        T: Sized,
    {
        if T::IS_ZST {
            // Pointer arithmetic does nothing when the pointee is a ZST.
            self
        } else {
            // SAFETY: the caller must uphold the safety contract for `offset`.
            // Because the pointee is *not* a ZST, that means that `count` is
            // at most `isize::MAX`, and thus the negation cannot overflow.
            unsafe { self.offset(intrinsics::unchecked_sub(0, count as isize)) }
        }
    }

    /// Calculates the offset from a pointer in bytes (convenience for
    /// `.byte_offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [sub][pointer::sub] on it. See that method for documentation
    /// and safety requirements.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
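    ///
    /// # Examples
    ///
    /// A minimal sketch; stepping back two bytes from the one-past-the-end
    /// pointer lands on the last `u16` element:
    ///
    /// ```
    /// let arr = [1u16, 2, 3];
    /// unsafe {
    ///     let end: *const u16 = arr.as_ptr().add(3);
    ///     assert_eq!(*end.byte_sub(2), 3);
    /// }
    /// ```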
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_sub(self, count: usize) -> Self {
        // SAFETY: the caller must uphold the safety contract for `sub`.
        unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset(count as isize)`)
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// This operation itself is always safe, but using the resulting pointer is not.
    ///
    /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
    /// be used to read or write other allocated objects.
    ///
    /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z`
    /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
    /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
    /// `x` and `y` point into the same allocated object.
    ///
    /// Compared to [`add`], this method basically delays the requirement of staying within the
    /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object
    /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a
    /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`]
    /// can be optimized better and is thus preferable in performance-sensitive code.
    ///
    /// The delayed check only considers the value of the pointer that was dereferenced, not the
    /// intermediate values used during the computation of the final result. For example,
    /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
    /// allocated object and then re-entering it later is permitted.
    ///
    /// [`add`]: #method.add
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_add(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_add(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    pub const fn wrapping_add(self, count: usize) -> Self
    where
        T: Sized,
    {
        self.wrapping_offset(count as isize)
    }

    /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
    /// (convenience for `.wrapping_byte_offset(count as isize)`)
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [wrapping_add][pointer::wrapping_add] on it. See that method for documentation.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
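    ///
    /// # Examples
    ///
    /// A minimal sketch; the result stays within the allocation, so it may be
    /// dereferenced:
    ///
    /// ```
    /// let v = [7u8, 8, 9];
    /// let p: *const u8 = v.as_ptr();
    /// let q = p.wrapping_byte_add(2);
    /// assert_eq!(unsafe { *q }, 9);
    /// ```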
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    pub const fn wrapping_byte_add(self, count: usize) -> Self {
        self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// This operation itself is always safe, but using the resulting pointer is not.
    ///
    /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
    /// be used to read or write other allocated objects.
    ///
    /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z`
    /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
    /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
    /// `x` and `y` point into the same allocated object.
    ///
    /// Compared to [`sub`], this method basically delays the requirement of staying within the
    /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object
    /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a
    /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`]
    /// can be optimized better and is thus preferable in performance-sensitive code.
    ///
    /// The delayed check only considers the value of the pointer that was dereferenced, not the
    /// intermediate values used during the computation of the final result. For example,
    /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
    /// allocated object and then re-entering it later is permitted.
    ///
    /// [`sub`]: #method.sub
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements (backwards)
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let start_rounded_down = ptr.wrapping_sub(2);
    /// ptr = ptr.wrapping_add(4);
    /// let step = 2;
    /// // This loop prints "5, 3, 1, "
    /// while ptr != start_rounded_down {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_sub(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    pub const fn wrapping_sub(self, count: usize) -> Self
    where
        T: Sized,
    {
        self.wrapping_offset((count as isize).wrapping_neg())
    }

    /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
    /// (convenience for `.wrapping_byte_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [wrapping_sub][pointer::wrapping_sub] on it. See that method for documentation.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
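    ///
    /// # Examples
    ///
    /// A minimal sketch; the intermediate and final pointers stay in bounds
    /// here, so the result may be dereferenced:
    ///
    /// ```
    /// let v = [7u8, 8, 9];
    /// let p: *const u8 = v.as_ptr().wrapping_byte_add(2);
    /// let q = p.wrapping_byte_sub(2);
    /// assert_eq!(unsafe { *q }, 7);
    /// ```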
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    pub const fn wrapping_byte_sub(self, count: usize) -> Self {
        self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// See [`ptr::read`] for safety concerns and examples.
    ///
    /// [`ptr::read`]: crate::ptr::read()
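    ///
    /// # Examples
    ///
    /// A minimal sketch; see [`ptr::read`] for the full safety discussion:
    ///
    /// ```
    /// let x = 12u8;
    /// let p: *const u8 = &x;
    /// unsafe {
    ///     assert_eq!(p.read(), 12);
    /// }
    /// ```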
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn read(self) -> T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `read`.
        unsafe { read(self) }
    }

    /// Performs a volatile read of the value from `self` without moving it. This
    /// leaves the memory in `self` unchanged.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// See [`ptr::read_volatile`] for safety concerns and examples.
    ///
    /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn read_volatile(self) -> T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `read_volatile`.
        unsafe { read_volatile(self) }
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// Unlike `read`, the pointer may be unaligned.
    ///
    /// See [`ptr::read_unaligned`] for safety concerns and examples.
    ///
    /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
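    ///
    /// # Examples
    ///
    /// A minimal sketch; fields of a packed struct may be misaligned, so they
    /// must be read with `read_unaligned`:
    ///
    /// ```
    /// #[repr(packed)]
    /// struct Packed {
    ///     _pad: u8,
    ///     x: u32,
    /// }
    ///
    /// let v = Packed { _pad: 0, x: 0xdead_beef };
    /// // Taking a raw pointer to the field is fine; taking a reference would not be.
    /// let p: *const u32 = std::ptr::addr_of!(v.x);
    /// unsafe {
    ///     assert_eq!(p.read_unaligned(), 0xdead_beef);
    /// }
    /// ```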
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn read_unaligned(self) -> T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
        unsafe { read_unaligned(self) }
    }

    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
    /// and destination may overlap.
    ///
    /// NOTE: this has the *same* argument order as [`ptr::copy`].
    ///
    /// See [`ptr::copy`] for safety concerns and examples.
    ///
    /// [`ptr::copy`]: crate::ptr::copy()
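    ///
    /// # Examples
    ///
    /// A minimal sketch; see [`ptr::copy`] for the full safety discussion:
    ///
    /// ```
    /// let src = [1, 2, 3];
    /// let mut dst = [0; 3];
    /// unsafe {
    ///     src.as_ptr().copy_to(dst.as_mut_ptr(), 3);
    /// }
    /// assert_eq!(dst, [1, 2, 3]);
    /// ```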
1286 #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
1287 #[stable(feature = "pointer_methods", since = "1.26.0")]
1288 #[inline]
1289 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1290 pub const unsafe fn copy_to(self, dest: *mut T, count: usize)
1291 where
1292 T: Sized,
1293 {
1294 // SAFETY: the caller must uphold the safety contract for `copy`.
1295 unsafe { copy(self, dest, count) }
1296 }
1297
1298 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1299 /// and destination may *not* overlap.
1300 ///
1301 /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
1302 ///
1303 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
1304 ///
1305 /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
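///
/// # Examples
///
/// A minimal sketch with two distinct buffers, which therefore cannot overlap
/// (an illustrative addition, not part of the upstream docs):
///
/// ```
/// let src = [1u8, 2, 3];
/// let mut dst = [0u8; 3];
/// // SAFETY: the buffers are separate local arrays, so the regions are
/// // disjoint, and both pointers are valid for 3 elements.
/// unsafe { src.as_ptr().copy_to_nonoverlapping(dst.as_mut_ptr(), 3) };
/// assert_eq!(dst, [1, 2, 3]);
/// ```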
1306 #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
1307 #[stable(feature = "pointer_methods", since = "1.26.0")]
1308 #[inline]
1309 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1310 pub const unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1311 where
1312 T: Sized,
1313 {
1314 // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
1315 unsafe { copy_nonoverlapping(self, dest, count) }
1316 }
1317
1318 /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
1319 /// `align`.
1320 ///
1321 /// If it is not possible to align the pointer, the implementation returns
1322 /// `usize::MAX`. It is permissible for the implementation to *always*
1323 /// return `usize::MAX`. Only your algorithm's performance can depend
1324 /// on getting a usable offset here, not its correctness.
1325 ///
1326 /// The offset is expressed as a number of `T` elements, not bytes. The value returned can be
1327 /// used with the `wrapping_add` method.
1328 ///
1329 /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
1330 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
1331 /// the returned offset is correct in all terms other than alignment.
1332 ///
1333 /// # Panics
1334 ///
1335 /// The function panics if `align` is not a power of two.
1336 ///
1337 /// # Examples
1338 ///
1339 /// Accessing adjacent `u8` as `u16`
1340 ///
1341 /// ```
1342 /// use std::mem::align_of;
1343 ///
1344 /// # unsafe {
1345 /// let x = [5_u8, 6, 7, 8, 9];
1346 /// let ptr = x.as_ptr();
1347 /// let offset = ptr.align_offset(align_of::<u16>());
1348 ///
1349 /// if offset < x.len() - 1 {
1350 /// let u16_ptr = ptr.add(offset).cast::<u16>();
1351 /// assert!(*u16_ptr == u16::from_ne_bytes([5, 6]) || *u16_ptr == u16::from_ne_bytes([6, 7]));
1352 /// } else {
1353 /// // while the pointer can be aligned via `offset`, it would point
1354 /// // outside the allocation
1355 /// }
1356 /// # }
1357 /// ```
1358 #[must_use]
1359 #[inline]
1360 #[stable(feature = "align_offset", since = "1.36.0")]
1361 #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
1362 pub const fn align_offset(self, align: usize) -> usize
1363 where
1364 T: Sized,
1365 {
1366 if !align.is_power_of_two() {
1367 panic!("align_offset: align is not a power-of-two");
1368 }
1369
1370 // SAFETY: `align` has been checked to be a power of 2 above
1371 unsafe { align_offset(self, align) }
1374 }
1375
1376 /// Returns whether the pointer is properly aligned for `T`.
1377 ///
1378 /// # Examples
1379 ///
1380 /// ```
1381 /// #![feature(pointer_is_aligned)]
1382 ///
1383 /// // On some platforms, the alignment of i32 is less than 4.
1384 /// #[repr(align(4))]
1385 /// struct AlignedI32(i32);
1386 ///
1387 /// let data = AlignedI32(42);
1388 /// let ptr = &data as *const AlignedI32;
1389 ///
1390 /// assert!(ptr.is_aligned());
1391 /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
1392 /// ```
1393 ///
1394 /// # At compiletime
1395 /// **Note: Alignment at compiletime is experimental and subject to change. See the
1396 /// [tracking issue] for details.**
1397 ///
1398 /// At compiletime, the compiler may not know where a value will end up in memory.
1399 /// Calling this function on a pointer created from a reference at compiletime will only
1400 /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
1401 /// is never aligned if cast to a type with a stricter alignment than the reference's
1402 /// underlying allocation.
1403 ///
1404 /// ```
1405 /// #![feature(pointer_is_aligned)]
1406 /// #![feature(const_pointer_is_aligned)]
1407 ///
1408 /// // On some platforms, the alignment of primitives is less than their size.
1409 /// #[repr(align(4))]
1410 /// struct AlignedI32(i32);
1411 /// #[repr(align(8))]
1412 /// struct AlignedI64(i64);
1413 ///
1414 /// const _: () = {
1415 /// let data = AlignedI32(42);
1416 /// let ptr = &data as *const AlignedI32;
1417 /// assert!(ptr.is_aligned());
1418 ///
1419 /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
1420 /// let ptr1 = ptr.cast::<AlignedI64>();
1421 /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
1422 /// assert!(!ptr1.is_aligned());
1423 /// assert!(!ptr2.is_aligned());
1424 /// };
1425 /// ```
1426 ///
1427 /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
1428 /// pointer is aligned, even if the compiletime pointer wasn't aligned.
1429 ///
1430 /// ```
1431 /// #![feature(pointer_is_aligned)]
1432 /// #![feature(const_pointer_is_aligned)]
1433 ///
1434 /// // On some platforms, the alignment of primitives is less than their size.
1435 /// #[repr(align(4))]
1436 /// struct AlignedI32(i32);
1437 /// #[repr(align(8))]
1438 /// struct AlignedI64(i64);
1439 ///
1440 /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
1441 /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
1442 /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
1443 /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
1444 ///
1445 /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
1446 /// let runtime_ptr = COMPTIME_PTR;
1447 /// assert_ne!(
1448 /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
1449 /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
1450 /// );
1451 /// ```
1452 ///
1453 /// If a pointer is created from a fixed address, this function behaves the same during
1454 /// runtime and compiletime.
1455 ///
1456 /// ```
1457 /// #![feature(pointer_is_aligned)]
1458 /// #![feature(const_pointer_is_aligned)]
1459 ///
1460 /// // On some platforms, the alignment of primitives is less than their size.
1461 /// #[repr(align(4))]
1462 /// struct AlignedI32(i32);
1463 /// #[repr(align(8))]
1464 /// struct AlignedI64(i64);
1465 ///
1466 /// const _: () = {
1467 /// let ptr = 40 as *const AlignedI32;
1468 /// assert!(ptr.is_aligned());
1469 ///
1470 /// // For pointers with a known address, runtime and compiletime behavior are identical.
1471 /// let ptr1 = ptr.cast::<AlignedI64>();
1472 /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
1473 /// assert!(ptr1.is_aligned());
1474 /// assert!(!ptr2.is_aligned());
1475 /// };
1476 /// ```
1477 ///
1478 /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
1479 #[must_use]
1480 #[inline]
1481 #[unstable(feature = "pointer_is_aligned", issue = "96284")]
1482 #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
1483 pub const fn is_aligned(self) -> bool
1484 where
1485 T: Sized,
1486 {
1487 self.is_aligned_to(mem::align_of::<T>())
1488 }
1489
1490 /// Returns whether the pointer is aligned to `align`.
1491 ///
1492 /// For non-`Sized` pointees this operation considers only the data pointer,
1493 /// ignoring the metadata.
1494 ///
1495 /// # Panics
1496 ///
1497 /// The function panics if `align` is not a power of two (this includes 0).
1498 ///
1499 /// # Examples
1500 ///
1501 /// ```
1502 /// #![feature(pointer_is_aligned)]
1503 ///
1504 /// // On some platforms, the alignment of i32 is less than 4.
1505 /// #[repr(align(4))]
1506 /// struct AlignedI32(i32);
1507 ///
1508 /// let data = AlignedI32(42);
1509 /// let ptr = &data as *const AlignedI32;
1510 ///
1511 /// assert!(ptr.is_aligned_to(1));
1512 /// assert!(ptr.is_aligned_to(2));
1513 /// assert!(ptr.is_aligned_to(4));
1514 ///
1515 /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
1516 /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
1517 ///
1518 /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
1519 /// ```
1520 ///
1521 /// # At compiletime
1522 /// **Note: Alignment at compiletime is experimental and subject to change. See the
1523 /// [tracking issue] for details.**
1524 ///
1525 /// At compiletime, the compiler may not know where a value will end up in memory.
1526 /// Calling this function on a pointer created from a reference at compiletime will only
1527 /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
1528 /// cannot be aligned more strictly than the reference's underlying allocation.
1529 ///
1530 /// ```
1531 /// #![feature(pointer_is_aligned)]
1532 /// #![feature(const_pointer_is_aligned)]
1533 ///
1534 /// // On some platforms, the alignment of i32 is less than 4.
1535 /// #[repr(align(4))]
1536 /// struct AlignedI32(i32);
1537 ///
1538 /// const _: () = {
1539 /// let data = AlignedI32(42);
1540 /// let ptr = &data as *const AlignedI32;
1541 ///
1542 /// assert!(ptr.is_aligned_to(1));
1543 /// assert!(ptr.is_aligned_to(2));
1544 /// assert!(ptr.is_aligned_to(4));
1545 ///
1546 /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
1547 /// assert!(!ptr.is_aligned_to(8));
1548 /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
1549 /// };
1550 /// ```
1551 ///
1552 /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
1553 /// pointer is aligned, even if the compiletime pointer wasn't aligned.
1554 ///
1555 /// ```
1556 /// #![feature(pointer_is_aligned)]
1557 /// #![feature(const_pointer_is_aligned)]
1558 ///
1559 /// // On some platforms, the alignment of i32 is less than 4.
1560 /// #[repr(align(4))]
1561 /// struct AlignedI32(i32);
1562 ///
1563 /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
1564 /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
1565 /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
1566 /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
1567 ///
1568 /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
1569 /// let runtime_ptr = COMPTIME_PTR;
1570 /// assert_ne!(
1571 /// runtime_ptr.is_aligned_to(8),
1572 /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
1573 /// );
1574 /// ```
1575 ///
1576 /// If a pointer is created from a fixed address, this function behaves the same during
1577 /// runtime and compiletime.
1578 ///
1579 /// ```
1580 /// #![feature(pointer_is_aligned)]
1581 /// #![feature(const_pointer_is_aligned)]
1582 ///
1583 /// const _: () = {
1584 /// let ptr = 40 as *const u8;
1585 /// assert!(ptr.is_aligned_to(1));
1586 /// assert!(ptr.is_aligned_to(2));
1587 /// assert!(ptr.is_aligned_to(4));
1588 /// assert!(ptr.is_aligned_to(8));
1589 /// assert!(!ptr.is_aligned_to(16));
1590 /// };
1591 /// ```
1592 ///
1593 /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
1594 #[must_use]
1595 #[inline]
1596 #[unstable(feature = "pointer_is_aligned", issue = "96284")]
1597 #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
1598 pub const fn is_aligned_to(self, align: usize) -> bool {
1599 if !align.is_power_of_two() {
1600 panic!("is_aligned_to: align is not a power-of-two");
1601 }
1602
1603 #[inline]
1604 fn runtime_impl(ptr: *const (), align: usize) -> bool {
1605 ptr.addr() & (align - 1) == 0
1606 }
1607
1608 #[inline]
1609 const fn const_impl(ptr: *const (), align: usize) -> bool {
1610 // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
1611 // The cast to `()` is used to
1612 // 1. deal with fat pointers; and
1613 // 2. ensure that `align_offset` doesn't actually try to compute an offset.
1614 ptr.align_offset(align) == 0
1615 }
1616
1617 // SAFETY: The two versions are equivalent at runtime.
1618 unsafe { const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl) }
1619 }
1620 }
1621
1622 impl<T> *const [T] {
1623 /// Returns the length of a raw slice.
1624 ///
1625 /// The returned value is the number of **elements**, not the number of bytes.
1626 ///
1627 /// This function is safe, even when the raw slice cannot be cast to a slice
1628 /// reference because the pointer is null or unaligned.
1629 ///
1630 /// # Examples
1631 ///
1632 /// ```rust
1633 /// #![feature(slice_ptr_len)]
1634 ///
1635 /// use std::ptr;
1636 ///
1637 /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
1638 /// assert_eq!(slice.len(), 3);
1639 /// ```
1640 #[inline]
1641 #[unstable(feature = "slice_ptr_len", issue = "71146")]
1642 #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
1643 pub const fn len(self) -> usize {
1644 metadata(self)
1645 }
1646
1647 /// Returns a raw pointer to the slice's buffer.
1648 ///
1649 /// This is equivalent to casting `self` to `*const T`, but more type-safe.
1650 ///
1651 /// # Examples
1652 ///
1653 /// ```rust
1654 /// #![feature(slice_ptr_get)]
1655 /// use std::ptr;
1656 ///
1657 /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
1658 /// assert_eq!(slice.as_ptr(), ptr::null());
1659 /// ```
1660 #[inline]
1661 #[unstable(feature = "slice_ptr_get", issue = "74265")]
1662 #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
1663 pub const fn as_ptr(self) -> *const T {
1664 self as *const T
1665 }
1666
1667 /// Returns a raw pointer to an element or subslice, without doing bounds
1668 /// checking.
1669 ///
1670 /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable
1671 /// is *[undefined behavior]* even if the resulting pointer is not used.
1672 ///
1673 /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1674 ///
1675 /// # Examples
1676 ///
1677 /// ```
1678 /// #![feature(slice_ptr_get)]
1679 ///
1680 /// let x = &[1, 2, 4] as *const [i32];
1681 ///
1682 /// unsafe {
1683 /// assert_eq!(x.get_unchecked(1), x.as_ptr().add(1));
1684 /// }
1685 /// ```
1686 #[unstable(feature = "slice_ptr_get", issue = "74265")]
1687 #[inline]
1688 pub unsafe fn get_unchecked<I>(self, index: I) -> *const I::Output
1689 where
1690 I: SliceIndex<[T]>,
1691 {
1692 // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
1693 unsafe { index.get_unchecked(self) }
1694 }
1695
1696 /// Returns `None` if the pointer is null, or else returns a shared slice to
1697 /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
1698 /// the value to be initialized.
1699 ///
1700 /// [`as_ref`]: #method.as_ref
1701 ///
1702 /// # Safety
1703 ///
1704 /// When calling this method, you have to ensure that *either* the pointer is null *or*
1705 /// all of the following is true:
1706 ///
1707 /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
1708 /// and it must be properly aligned. This means in particular:
1709 ///
1710 /// * The entire memory range of this slice must be contained within a single [allocated object]!
1711 /// Slices can never span across multiple allocated objects.
1712 ///
1713 /// * The pointer must be aligned even for zero-length slices. One
1714 /// reason for this is that enum layout optimizations may rely on references
1715 /// (including slices of any length) being aligned and non-null to distinguish
1716 /// them from other data. You can obtain a pointer that is usable as `data`
1717 /// for zero-length slices using [`NonNull::dangling()`].
1718 ///
1719 /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
1720 /// See the safety documentation of [`pointer::offset`].
1721 ///
1722 /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
1723 /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
1724 /// In particular, while this reference exists, the memory the pointer points to must
1725 /// not get mutated (except inside `UnsafeCell`).
1726 ///
1727 /// This applies even if the result of this method is unused!
1728 ///
1729 /// See also [`slice::from_raw_parts`][].
1730 ///
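/// # Examples
///
/// A minimal sketch (an illustrative addition, not part of the upstream docs):
///
/// ```
/// #![feature(ptr_as_uninit)]
/// use std::ptr;
///
/// let data = [1u8, 2, 3];
/// let raw: *const [u8] = &data[..];
/// // SAFETY: `raw` comes from a live shared reference, so it is valid,
/// // aligned, and the memory is not mutated while the slice is in use.
/// let uninit = unsafe { raw.as_uninit_slice() }.unwrap();
/// assert_eq!(uninit.len(), 3);
///
/// // A null slice pointer yields `None`.
/// let null: *const [u8] = ptr::slice_from_raw_parts(ptr::null(), 0);
/// assert!(unsafe { null.as_uninit_slice() }.is_none());
/// ```
///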
1731 /// [valid]: crate::ptr#safety
1732 /// [allocated object]: crate::ptr#allocated-object
1733 #[inline]
1734 #[unstable(feature = "ptr_as_uninit", issue = "75402")]
1735 #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
1736 pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
1737 if self.is_null() {
1738 None
1739 } else {
1740 // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
1741 Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
1742 }
1743 }
1744 }
1745
1746 // Equality for pointers
1747 #[stable(feature = "rust1", since = "1.0.0")]
1748 impl<T: ?Sized> PartialEq for *const T {
1749 #[inline]
1750 fn eq(&self, other: &*const T) -> bool {
1751 *self == *other
1752 }
1753 }
1754
1755 #[stable(feature = "rust1", since = "1.0.0")]
1756 impl<T: ?Sized> Eq for *const T {}
1757
1758 // Comparison for pointers
1759 #[stable(feature = "rust1", since = "1.0.0")]
1760 impl<T: ?Sized> Ord for *const T {
1761 #[inline]
1762 fn cmp(&self, other: &*const T) -> Ordering {
1763 if self < other {
1764 Less
1765 } else if self == other {
1766 Equal
1767 } else {
1768 Greater
1769 }
1770 }
1771 }
1772
1773 #[stable(feature = "rust1", since = "1.0.0")]
1774 impl<T: ?Sized> PartialOrd for *const T {
1775 #[inline]
1776 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
1777 Some(self.cmp(other))
1778 }
1779
1780 #[inline]
1781 fn lt(&self, other: &*const T) -> bool {
1782 *self < *other
1783 }
1784
1785 #[inline]
1786 fn le(&self, other: &*const T) -> bool {
1787 *self <= *other
1788 }
1789
1790 #[inline]
1791 fn gt(&self, other: &*const T) -> bool {
1792 *self > *other
1793 }
1794
1795 #[inline]
1796 fn ge(&self, other: &*const T) -> bool {
1797 *self >= *other
1798 }
1799 }