]> git.proxmox.com Git - rustc.git/blame - library/core/src/ptr/mut_ptr.rs
New upstream version 1.70.0+dfsg1
[rustc.git] / library / core / src / ptr / mut_ptr.rs
CommitLineData
dfeec247
XL
1use super::*;
2use crate::cmp::Ordering::{self, Equal, Greater, Less};
9c376795 3use crate::intrinsics::{self, const_eval_select};
3dfed10e 4use crate::slice::{self, SliceIndex};
dfeec247 5
dfeec247
XL
6impl<T: ?Sized> *mut T {
    /// Returns `true` if the pointer is null.
    ///
    /// Note that unsized types have many possible null pointers, as only the
    /// raw data pointer is considered, not their length, vtable, etc.
    /// Therefore, two pointers that are null may still not compare equal to
    /// each other.
    ///
    /// ## Behavior during const evaluation
    ///
    /// When this function is used during const evaluation, it may return `false` for pointers
    /// that turn out to be null at runtime. Specifically, when a pointer to some memory
    /// is offset beyond its bounds in such a way that the resulting pointer is null,
    /// the function will still return `false`. There is no way for CTFE to know
    /// the absolute position of that memory, so we cannot tell if the pointer is
    /// null or not.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut s = [1, 2, 3];
    /// let ptr: *mut u32 = s.as_mut_ptr();
    /// assert!(!ptr.is_null());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
    #[inline]
    pub const fn is_null(self) -> bool {
        // At runtime a pointer is null exactly when its address is 0.
        #[inline]
        fn runtime_impl(ptr: *mut u8) -> bool {
            ptr.addr() == 0
        }

        // During CTFE the absolute address of an allocation is unknowable, so
        // we can only ask whether equality with null is *guaranteed*. When it
        // cannot be determined, conservatively answer `false` (see the
        // "Behavior during const evaluation" section above).
        #[inline]
        const fn const_impl(ptr: *mut u8) -> bool {
            // Compare via a cast to a thin pointer, so fat pointers are only
            // considering their "data" part for null-ness.
            match (ptr).guaranteed_eq(null_mut()) {
                None => false,
                Some(res) => res,
            }
        }

        // SAFETY: The two versions are equivalent at runtime.
        unsafe { const_eval_select((self as *mut u8,), const_impl, runtime_impl) }
    }
52
53 /// Casts to a pointer of another type.
54 #[stable(feature = "ptr_cast", since = "1.38.0")]
55 #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
17df50a5 56 #[inline(always)]
dfeec247
XL
57 pub const fn cast<U>(self) -> *mut U {
58 self as _
59 }
60
5e7ed085
FG
61 /// Use the pointer value in a new pointer of another type.
62 ///
353b0b11 63 /// In case `meta` is a (fat) pointer to an unsized type, this operation
5e7ed085
FG
64 /// will ignore the pointer part, whereas for (thin) pointers to sized
65 /// types, this has the same effect as a simple cast.
66 ///
67 /// The resulting pointer will have provenance of `self`, i.e., for a fat
68 /// pointer, this operation is semantically the same as creating a new
69 /// fat pointer with the data pointer value of `self` but the metadata of
353b0b11 70 /// `meta`.
5e7ed085
FG
71 ///
72 /// # Examples
73 ///
74 /// This function is primarily useful for allowing byte-wise pointer
75 /// arithmetic on potentially fat pointers:
76 ///
77 /// ```
78 /// #![feature(set_ptr_value)]
79 /// # use core::fmt::Debug;
80 /// let mut arr: [i32; 3] = [1, 2, 3];
81 /// let mut ptr = arr.as_mut_ptr() as *mut dyn Debug;
82 /// let thin = ptr as *mut u8;
83 /// unsafe {
84 /// ptr = thin.add(8).with_metadata_of(ptr);
85 /// # assert_eq!(*(ptr as *mut i32), 3);
86 /// println!("{:?}", &*ptr); // will print "3"
87 /// }
88 /// ```
89 #[unstable(feature = "set_ptr_value", issue = "75091")]
487cf647 90 #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
5e7ed085
FG
91 #[must_use = "returns a new pointer rather than modifying its argument"]
92 #[inline]
487cf647 93 pub const fn with_metadata_of<U>(self, meta: *const U) -> *mut U
5e7ed085
FG
94 where
95 U: ?Sized,
96 {
487cf647 97 from_raw_parts_mut::<U>(self as *mut (), metadata(meta))
5e7ed085
FG
98 }
99
5099ac24
FG
100 /// Changes constness without changing the type.
101 ///
102 /// This is a bit safer than `as` because it wouldn't silently change the type if the code is
103 /// refactored.
104 ///
105 /// While not strictly required (`*mut T` coerces to `*const T`), this is provided for symmetry
064997fb 106 /// with [`cast_mut`] on `*const T` and may have documentation value if used instead of implicit
5099ac24 107 /// coercion.
064997fb
FG
108 ///
109 /// [`cast_mut`]: #method.cast_mut
f2b60f7d
FG
110 #[stable(feature = "ptr_const_cast", since = "1.65.0")]
111 #[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
487cf647 112 #[inline(always)]
064997fb 113 pub const fn cast_const(self) -> *const T {
5099ac24
FG
114 self as _
115 }
116
a2a8927a
XL
117 /// Casts a pointer to its raw bits.
118 ///
119 /// This is equivalent to `as usize`, but is more specific to enhance readability.
120 /// The inverse method is [`from_bits`](#method.from_bits-1).
121 ///
122 /// In particular, `*p as usize` and `p as usize` will both compile for
123 /// pointers to numeric types but do very different things, so using this
124 /// helps emphasize that reading the bits was intentional.
125 ///
126 /// # Examples
127 ///
128 /// ```
129 /// #![feature(ptr_to_from_bits)]
487cf647 130 /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
a2a8927a
XL
131 /// let mut array = [13, 42];
132 /// let mut it = array.iter_mut();
133 /// let p0: *mut i32 = it.next().unwrap();
134 /// assert_eq!(<*mut _>::from_bits(p0.to_bits()), p0);
135 /// let p1: *mut i32 = it.next().unwrap();
136 /// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
487cf647 137 /// }
a2a8927a
XL
138 /// ```
139 #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
487cf647
FG
140 #[deprecated(
141 since = "1.67",
142 note = "replaced by the `exposed_addr` method, or update your code \
143 to follow the strict provenance rules using its APIs"
144 )]
145 #[inline(always)]
a2a8927a
XL
146 pub fn to_bits(self) -> usize
147 where
148 T: Sized,
149 {
150 self as usize
151 }
152
153 /// Creates a pointer from its raw bits.
154 ///
155 /// This is equivalent to `as *mut T`, but is more specific to enhance readability.
156 /// The inverse method is [`to_bits`](#method.to_bits-1).
157 ///
158 /// # Examples
159 ///
160 /// ```
161 /// #![feature(ptr_to_from_bits)]
487cf647 162 /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
a2a8927a
XL
163 /// use std::ptr::NonNull;
164 /// let dangling: *mut u8 = NonNull::dangling().as_ptr();
165 /// assert_eq!(<*mut u8>::from_bits(1), dangling);
487cf647 166 /// }
a2a8927a
XL
167 /// ```
168 #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
487cf647
FG
169 #[deprecated(
170 since = "1.67",
171 note = "replaced by the `ptr::from_exposed_addr_mut` function, or \
172 update your code to follow the strict provenance rules using its APIs"
173 )]
174 #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
175 #[inline(always)]
a2a8927a
XL
176 pub fn from_bits(bits: usize) -> Self
177 where
178 T: Sized,
179 {
180 bits as Self
181 }
182
5e7ed085
FG
    /// Gets the "address" portion of the pointer.
    ///
    /// This is similar to `self as usize`, which semantically discards *provenance* and
    /// *address-space* information. However, unlike `self as usize`, casting the returned address
    /// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
    /// properly restore the lost information and obtain a dereferenceable pointer, use
    /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
    ///
    /// If using those APIs is not possible because there is no way to preserve a pointer with the
    /// required provenance, use [`expose_addr`][pointer::expose_addr] and
    /// [`from_exposed_addr_mut`][from_exposed_addr_mut] instead. However, note that this makes
    /// your code less portable and less amenable to tools that check for compliance with the Rust
    /// memory model.
    ///
    /// On most platforms this will produce a value with the same bytes as the original
    /// pointer, because all the bytes are dedicated to describing the address.
    /// Platforms which need to store additional information in the pointer may
    /// perform a change of representation to produce a value containing only the address
    /// portion of the pointer. What that means is up to the platform to define.
    ///
    /// This API and its claimed semantics are part of the Strict Provenance experiment, and as such
    /// might change in the future (including possibly weakening this so it becomes wholly
    /// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
    #[must_use]
    #[inline(always)]
    #[unstable(feature = "strict_provenance", issue = "95228")]
    pub fn addr(self) -> usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // The cast to a thin `*mut ()` first ensures the transmute source is
        // always exactly pointer-sized, even when `T` is unsized (fat pointer).
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        unsafe { mem::transmute(self.cast::<()>()) }
    }
215
04454e1e
FG
216 /// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
217 /// use in [`from_exposed_addr`][].
218 ///
219 /// This is equivalent to `self as usize`, which semantically discards *provenance* and
220 /// *address-space* information. Furthermore, this (like the `as` cast) has the implicit
221 /// side-effect of marking the provenance as 'exposed', so on platforms that support it you can
222 /// later call [`from_exposed_addr_mut`][] to reconstitute the original pointer including its
223 /// provenance. (Reconstructing address space information, if required, is your responsibility.)
224 ///
225 /// Using this method means that code is *not* following Strict Provenance rules. Supporting
226 /// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported
227 /// by tools that help you to stay conformant with the Rust memory model, so it is recommended
228 /// to use [`addr`][pointer::addr] wherever possible.
229 ///
230 /// On most platforms this will produce a value with the same bytes as the original pointer,
231 /// because all the bytes are dedicated to describing the address. Platforms which need to store
232 /// additional information in the pointer may not support this operation, since the 'expose'
233 /// side-effect which is required for [`from_exposed_addr_mut`][] to work is typically not
234 /// available.
235 ///
236 /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
237 /// [module documentation][crate::ptr] for details.
238 ///
239 /// [`from_exposed_addr_mut`]: from_exposed_addr_mut
240 #[must_use]
487cf647 241 #[inline(always)]
04454e1e 242 #[unstable(feature = "strict_provenance", issue = "95228")]
9c376795 243 pub fn expose_addr(self) -> usize {
04454e1e 244 // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
9c376795 245 self.cast::<()>() as usize
04454e1e
FG
246 }
247
5e7ed085
FG
248 /// Creates a new pointer with the given address.
249 ///
250 /// This performs the same operation as an `addr as ptr` cast, but copies
251 /// the *address-space* and *provenance* of `self` to the new pointer.
252 /// This allows us to dynamically preserve and propagate this important
253 /// information in a way that is otherwise impossible with a unary cast.
254 ///
255 /// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
256 /// `self` to the given address, and therefore has all the same capabilities and restrictions.
257 ///
258 /// This API and its claimed semantics are part of the Strict Provenance experiment,
259 /// see the [module documentation][crate::ptr] for details.
260 #[must_use]
261 #[inline]
262 #[unstable(feature = "strict_provenance", issue = "95228")]
9c376795 263 pub fn with_addr(self, addr: usize) -> Self {
5e7ed085
FG
264 // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
265 //
266 // In the mean-time, this operation is defined to be "as if" it was
267 // a wrapping_offset, so we can emulate it as such. This should properly
268 // restore pointer provenance even under today's compiler.
269 let self_addr = self.addr() as isize;
270 let dest_addr = addr as isize;
271 let offset = dest_addr.wrapping_sub(self_addr);
272
273 // This is the canonical desugarring of this operation
f2b60f7d 274 self.wrapping_byte_offset(offset)
5e7ed085
FG
275 }
276
277 /// Creates a new pointer by mapping `self`'s address to a new one.
278 ///
279 /// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details.
280 ///
281 /// This API and its claimed semantics are part of the Strict Provenance experiment,
282 /// see the [module documentation][crate::ptr] for details.
283 #[must_use]
284 #[inline]
285 #[unstable(feature = "strict_provenance", issue = "95228")]
9c376795 286 pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
5e7ed085
FG
287 self.with_addr(f(self.addr()))
288 }
289
94222f64 290 /// Decompose a (possibly wide) pointer into its address and metadata components.
6a06907d
XL
291 ///
292 /// The pointer can be later reconstructed with [`from_raw_parts_mut`].
6a06907d
XL
293 #[unstable(feature = "ptr_metadata", issue = "81513")]
294 #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
295 #[inline]
296 pub const fn to_raw_parts(self) -> (*mut (), <T as super::Pointee>::Metadata) {
297 (self.cast(), super::metadata(self))
298 }
299
3dfed10e
XL
300 /// Returns `None` if the pointer is null, or else returns a shared reference to
301 /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`]
302 /// must be used instead.
303 ///
304 /// For the mutable counterpart see [`as_mut`].
305 ///
306 /// [`as_uninit_ref`]: #method.as_uninit_ref-1
064997fb 307 /// [`as_mut`]: #method.as_mut
dfeec247
XL
308 ///
309 /// # Safety
310 ///
17df50a5 311 /// When calling this method, you have to ensure that *either* the pointer is null *or*
3dfed10e
XL
312 /// all of the following is true:
313 ///
314 /// * The pointer must be properly aligned.
315 ///
a2a8927a 316 /// * It must be "dereferenceable" in the sense defined in [the module documentation].
3dfed10e
XL
317 ///
318 /// * The pointer must point to an initialized instance of `T`.
dfeec247 319 ///
3dfed10e
XL
320 /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
321 /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
04454e1e 322 /// In particular, while this reference exists, the memory the pointer points to must
3dfed10e
XL
323 /// not get mutated (except inside `UnsafeCell`).
324 ///
325 /// This applies even if the result of this method is unused!
dfeec247
XL
326 /// (The part about being initialized is not yet fully decided, but until
327 /// it is, the only safe approach is to ensure that they are indeed initialized.)
328 ///
3dfed10e 329 /// [the module documentation]: crate::ptr#safety
dfeec247
XL
330 ///
331 /// # Examples
332 ///
dfeec247
XL
333 /// ```
334 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
335 ///
336 /// unsafe {
337 /// if let Some(val_back) = ptr.as_ref() {
5e7ed085 338 /// println!("We got back the value: {val_back}!");
dfeec247
XL
339 /// }
340 /// }
341 /// ```
342 ///
343 /// # Null-unchecked version
344 ///
345 /// If you are sure the pointer can never be null and are looking for some kind of
346 /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
347 /// dereference the pointer directly.
348 ///
349 /// ```
350 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
351 ///
352 /// unsafe {
353 /// let val_back = &*ptr;
5e7ed085 354 /// println!("We got back the value: {val_back}!");
dfeec247
XL
355 /// }
356 /// ```
357 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
a2a8927a 358 #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
dfeec247 359 #[inline]
a2a8927a 360 pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> {
f035d41b
XL
361 // SAFETY: the caller must guarantee that `self` is valid for a
362 // reference if it isn't null.
363 if self.is_null() { None } else { unsafe { Some(&*self) } }
dfeec247
XL
364 }
365
3dfed10e
XL
366 /// Returns `None` if the pointer is null, or else returns a shared reference to
367 /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
368 /// that the value has to be initialized.
369 ///
370 /// For the mutable counterpart see [`as_uninit_mut`].
371 ///
372 /// [`as_ref`]: #method.as_ref-1
373 /// [`as_uninit_mut`]: #method.as_uninit_mut
374 ///
375 /// # Safety
376 ///
17df50a5 377 /// When calling this method, you have to ensure that *either* the pointer is null *or*
3dfed10e
XL
378 /// all of the following is true:
379 ///
380 /// * The pointer must be properly aligned.
381 ///
a2a8927a 382 /// * It must be "dereferenceable" in the sense defined in [the module documentation].
3dfed10e
XL
383 ///
384 /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
385 /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
04454e1e 386 /// In particular, while this reference exists, the memory the pointer points to must
3dfed10e
XL
387 /// not get mutated (except inside `UnsafeCell`).
388 ///
389 /// This applies even if the result of this method is unused!
390 ///
391 /// [the module documentation]: crate::ptr#safety
392 ///
393 /// # Examples
394 ///
3dfed10e
XL
395 /// ```
396 /// #![feature(ptr_as_uninit)]
397 ///
398 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
399 ///
400 /// unsafe {
401 /// if let Some(val_back) = ptr.as_uninit_ref() {
402 /// println!("We got back the value: {}!", val_back.assume_init());
403 /// }
404 /// }
405 /// ```
406 #[inline]
407 #[unstable(feature = "ptr_as_uninit", issue = "75402")]
a2a8927a
XL
408 #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
409 pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
3dfed10e
XL
410 where
411 T: Sized,
412 {
413 // SAFETY: the caller must guarantee that `self` meets all the
414 // requirements for a reference.
415 if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) }
416 }
417
dfeec247
XL
    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_offset`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// [`wrapping_offset`]: #method.wrapping_offset
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// let mut s = [1, 2, 3];
    /// let ptr: *mut u32 = s.as_mut_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1));
    ///     println!("{}", *ptr.offset(2));
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn offset(self, count: isize) -> *mut T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `offset`.
        // The obtained pointer is valid for writes since the caller must
        // guarantee that it points to the same allocated object as `self`.
        // Note: the intrinsic yields a const pointer, hence the `as *mut T`
        // cast back to the mutable pointer type.
        unsafe { intrinsics::offset(self, count) as *mut T }
    }
481
923072b8
FG
482 /// Calculates the offset from a pointer in bytes.
483 ///
484 /// `count` is in units of **bytes**.
485 ///
486 /// This is purely a convenience for casting to a `u8` pointer and
487 /// using [offset][pointer::offset] on it. See that method for documentation
488 /// and safety requirements.
489 ///
490 /// For non-`Sized` pointees this operation changes only the data pointer,
491 /// leaving the metadata untouched.
492 #[must_use]
493 #[inline(always)]
494 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
495 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
064997fb 496 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
923072b8
FG
497 pub const unsafe fn byte_offset(self, count: isize) -> Self {
498 // SAFETY: the caller must uphold the safety contract for `offset`.
487cf647 499 unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
923072b8
FG
500 }
501
dfeec247
XL
    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// This operation itself is always safe, but using the resulting pointer is not.
    ///
    /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
    /// be used to read or write other allocated objects.
    ///
    /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z`
    /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
    /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
    /// `x` and `y` point into the same allocated object.
    ///
    /// Compared to [`offset`], this method basically delays the requirement of staying within the
    /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object
    /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a
    /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`]
    /// can be optimized better and is thus preferable in performance-sensitive code.
    ///
    /// The delayed check only considers the value of the pointer that was dereferenced, not the
    /// intermediate values used during the computation of the final result. For example,
    /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other
    /// words, leaving the allocated object and then re-entering it later is permitted.
    ///
    /// [`offset`]: #method.offset
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let mut data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *mut u8 = data.as_mut_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_offset(6);
    ///
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         *ptr = 0;
    ///     }
    ///     ptr = ptr.wrapping_offset(step);
    /// }
    /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
    /// ```
    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    pub const fn wrapping_offset(self, count: isize) -> *mut T
    where
        T: Sized,
    {
        // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
        // The intrinsic returns a const pointer, so cast back to `*mut T`.
        unsafe { intrinsics::arith_offset(self, count) as *mut T }
    }
560
923072b8
FG
561 /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
562 ///
563 /// `count` is in units of **bytes**.
564 ///
565 /// This is purely a convenience for casting to a `u8` pointer and
566 /// using [wrapping_offset][pointer::wrapping_offset] on it. See that method
567 /// for documentation.
568 ///
569 /// For non-`Sized` pointees this operation changes only the data pointer,
570 /// leaving the metadata untouched.
571 #[must_use]
572 #[inline(always)]
573 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
574 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
575 pub const fn wrapping_byte_offset(self, count: isize) -> Self {
487cf647 576 self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
923072b8
FG
577 }
578
f2b60f7d
FG
579 /// Masks out bits of the pointer according to a mask.
580 ///
581 /// This is convenience for `ptr.map_addr(|a| a & mask)`.
582 ///
583 /// For non-`Sized` pointees this operation changes only the data pointer,
584 /// leaving the metadata untouched.
487cf647
FG
585 ///
586 /// ## Examples
587 ///
588 /// ```
589 /// #![feature(ptr_mask, strict_provenance)]
590 /// let mut v = 17_u32;
591 /// let ptr: *mut u32 = &mut v;
592 ///
593 /// // `u32` is 4 bytes aligned,
594 /// // which means that lower 2 bits are always 0.
595 /// let tag_mask = 0b11;
596 /// let ptr_mask = !tag_mask;
597 ///
598 /// // We can store something in these lower bits
599 /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
600 ///
601 /// // Get the "tag" back
602 /// let tag = tagged_ptr.addr() & tag_mask;
603 /// assert_eq!(tag, 0b10);
604 ///
605 /// // Note that `tagged_ptr` is unaligned, it's UB to read from/write to it.
606 /// // To get original pointer `mask` can be used:
607 /// let masked_ptr = tagged_ptr.mask(ptr_mask);
608 /// assert_eq!(unsafe { *masked_ptr }, 17);
609 ///
610 /// unsafe { *masked_ptr = 0 };
611 /// assert_eq!(v, 0);
612 /// ```
f2b60f7d
FG
613 #[unstable(feature = "ptr_mask", issue = "98290")]
614 #[must_use = "returns a new pointer rather than modifying its argument"]
615 #[inline(always)]
616 pub fn mask(self, mask: usize) -> *mut T {
487cf647 617 intrinsics::ptr_mask(self.cast::<()>(), mask).cast_mut().with_metadata_of(self)
f2b60f7d
FG
618 }
619
3dfed10e
XL
    /// Returns `None` if the pointer is null, or else returns a unique reference to
    /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_mut`]
    /// must be used instead.
    ///
    /// For the shared counterpart see [`as_ref`].
    ///
    /// [`as_uninit_mut`]: #method.as_uninit_mut
    /// [`as_ref`]: #method.as_ref-1
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following is true:
    ///
    /// * The pointer must be properly aligned.
    ///
    /// * It must be "dereferenceable" in the sense defined in [the module documentation].
    ///
    /// * The pointer must point to an initialized instance of `T`.
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get accessed (read or written) through any other pointer.
    ///
    /// This applies even if the result of this method is unused!
    /// (The part about being initialized is not yet fully decided, but until
    /// it is, the only safe approach is to ensure that they are indeed initialized.)
    ///
    /// [the module documentation]: crate::ptr#safety
    ///
    /// # Examples
    ///
    /// ```
    /// let mut s = [1, 2, 3];
    /// let ptr: *mut u32 = s.as_mut_ptr();
    /// let first_value = unsafe { ptr.as_mut().unwrap() };
    /// *first_value = 4;
    /// # assert_eq!(s, [4, 2, 3]);
    /// println!("{s:?}"); // It'll print: "[4, 2, 3]".
    /// ```
    ///
    /// # Null-unchecked version
    ///
    /// If you are sure the pointer can never be null and are looking for some kind of
    /// `as_mut_unchecked` that returns the `&mut T` instead of `Option<&mut T>`, know that
    /// you can dereference the pointer directly.
    ///
    /// ```
    /// let mut s = [1, 2, 3];
    /// let ptr: *mut u32 = s.as_mut_ptr();
    /// let first_value = unsafe { &mut *ptr };
    /// *first_value = 4;
    /// # assert_eq!(s, [4, 2, 3]);
    /// println!("{s:?}"); // It'll print: "[4, 2, 3]".
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    #[inline]
    pub const unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
        // SAFETY: the caller must guarantee that `self` is valid for
        // a mutable reference if it isn't null.
        if self.is_null() { None } else { unsafe { Some(&mut *self) } }
    }
684
3dfed10e
XL
    /// Returns `None` if the pointer is null, or else returns a unique reference to
    /// the value wrapped in `Some`. In contrast to [`as_mut`], this does not require
    /// that the value has to be initialized.
    ///
    /// For the shared counterpart see [`as_uninit_ref`].
    ///
    /// [`as_mut`]: #method.as_mut
    /// [`as_uninit_ref`]: #method.as_uninit_ref-1
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following is true:
    ///
    /// * The pointer must be properly aligned.
    ///
    /// * It must be "dereferenceable" in the sense defined in [the module documentation].
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get accessed (read or written) through any other pointer.
    ///
    /// This applies even if the result of this method is unused!
    ///
    /// [the module documentation]: crate::ptr#safety
    #[inline]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    pub const unsafe fn as_uninit_mut<'a>(self) -> Option<&'a mut MaybeUninit<T>>
    where
        T: Sized,
    {
        // SAFETY: the caller must guarantee that `self` meets all the
        // requirements for a reference.
        if self.is_null() { None } else { Some(unsafe { &mut *(self as *mut MaybeUninit<T>) }) }
    }
722
f035d41b
XL
    /// Returns whether two pointers are guaranteed to be equal.
    ///
    /// At runtime this function behaves like `Some(self == other)`.
    /// However, in some contexts (e.g., compile-time evaluation),
    /// it is not always possible to determine equality of two pointers, so this function may
    /// spuriously return `None` for pointers that later actually turn out to have their equality known.
    /// But when it returns `Some`, the pointers' equality is guaranteed to be known.
    ///
    /// The return value may change from `Some` to `None` and vice versa depending on the compiler
    /// version and unsafe code must not
    /// rely on the result of this function for soundness. It is suggested to only use this function
    /// for performance optimizations where spurious `None` return values by this function do not
    /// affect the outcome, but just the performance.
    /// The consequences of using this method to make runtime and compile-time code behave
    /// differently have not been explored. This method should not be used to introduce such
    /// differences, and it should also not be stabilized before we have a better understanding
    /// of this issue.
    #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[inline]
    pub const fn guaranteed_eq(self, other: *mut T) -> Option<bool>
    where
        T: Sized,
    {
        // Delegate to the `*const T` implementation; equality ignores mutability.
        (self as *const T).guaranteed_eq(other as _)
    }
749
f2b60f7d 750 /// Returns whether two pointers are guaranteed to be inequal.
f035d41b 751 ///
2b03887a 752 /// At runtime this function behaves like `Some(self != other)`.
f035d41b 753 /// However, in some contexts (e.g., compile-time evaluation),
f2b60f7d
FG
754 /// it is not always possible to determine inequality of two pointers, so this function may
755 /// spuriously return `None` for pointers that later actually turn out to have its inequality known.
756 /// But when it returns `Some`, the pointers' inequality is guaranteed to be known.
f035d41b 757 ///
f2b60f7d
FG
758 /// The return value may change from `Some` to `None` and vice versa depending on the compiler
759 /// version and unsafe code must not
f035d41b 760 /// rely on the result of this function for soundness. It is suggested to only use this function
f2b60f7d 761 /// for performance optimizations where spurious `None` return values by this function do not
f035d41b
XL
762 /// affect the outcome, but just the performance.
763 /// The consequences of using this method to make runtime and compile-time code behave
764 /// differently have not been explored. This method should not be used to introduce such
765 /// differences, and it should also not be stabilized before we have a better understanding
766 /// of this issue.
767 #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
768 #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
769 #[inline]
f2b60f7d 770 pub const fn guaranteed_ne(self, other: *mut T) -> Option<bool>
f035d41b
XL
771 where
772 T: Sized,
773 {
f2b60f7d 774 (self as *const T).guaranteed_ne(other as _)
dfeec247
XL
775 }
776
    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
    ///
    /// This function is the inverse of [`offset`].
    ///
    /// [`offset`]: #method.offset-1
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and other pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * Both pointers must be *derived from* a pointer to the same object.
    ///   (See below for an example.)
    ///
    /// * The distance between the pointers, in bytes, must be an exact multiple
    ///   of the size of `T`.
    ///
    /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The distance being in bounds cannot rely on "wrapping around" the address space.
    ///
    /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
    /// address space, so two pointers within some value of any Rust type `T` will always satisfy
    /// the last two conditions. The standard library also generally ensures that allocations
    /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
    /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
    /// always satisfies the last two conditions.
    ///
    /// Most platforms fundamentally can't even construct such a large allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
    /// such large allocations either.)
    ///
    /// [`add`]: #method.add
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut a = [0; 5];
    /// let ptr1: *mut i32 = &mut a[1];
    /// let ptr2: *mut i32 = &mut a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.offset_from(ptr1), 2);
    ///     assert_eq!(ptr1.offset_from(ptr2), -2);
    ///     assert_eq!(ptr1.offset(2), ptr2);
    ///     assert_eq!(ptr2.offset(-2), ptr1);
    /// }
    /// ```
    ///
    /// *Incorrect* usage:
    ///
    /// ```rust,no_run
    /// let ptr1 = Box::into_raw(Box::new(0u8));
    /// let ptr2 = Box::into_raw(Box::new(1u8));
    /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize);
    /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
    /// let ptr2_other = (ptr1 as *mut u8).wrapping_offset(diff);
    /// assert_eq!(ptr2 as usize, ptr2_other as usize);
    /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
    /// // computing their offset is undefined behavior, even though
    /// // they point to the same address!
    /// unsafe {
    ///     let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
    /// }
    /// ```
    #[stable(feature = "ptr_offset_from", since = "1.47.0")]
    #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn offset_from(self, origin: *const T) -> isize
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `offset_from`.
        unsafe { (self as *const T).offset_from(origin) }
    }
869
923072b8
FG
    /// Calculates the distance between two pointers. The returned value is in
    /// units of **bytes**.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [offset_from][pointer::offset_from] on it. See that method for
    /// documentation and safety requirements.
    ///
    /// For non-`Sized` pointees this operation considers only the data pointers,
    /// ignoring the metadata.
    #[inline(always)]
    #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
    #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
        // SAFETY: the caller must uphold the safety contract for `offset_from`.
        unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
    }
887
04454e1e
FG
    /// Calculates the distance between two pointers, *where it's known that
    /// `self` is equal to or greater than `origin`*. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// This computes the same value that [`offset_from`](#method.offset_from)
    /// would compute, but with the added precondition that the offset is
    /// guaranteed to be non-negative. This method is equivalent to
    /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
    /// but it provides slightly more information to the optimizer, which can
    /// sometimes allow it to optimize slightly better with some backends.
    ///
    /// This method can be thought of as recovering the `count` that was passed
    /// to [`add`](#method.add) (or, with the parameters in the other order,
    /// to [`sub`](#method.sub)). The following are all equivalent, assuming
    /// that their safety preconditions are met:
    /// ```rust
    /// # #![feature(ptr_sub_ptr)]
    /// # unsafe fn blah(ptr: *mut i32, origin: *mut i32, count: usize) -> bool {
    /// ptr.sub_ptr(origin) == count
    /// # &&
    /// origin.add(count) == ptr
    /// # &&
    /// ptr.sub(count) == origin
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// - The distance between the pointers must be non-negative (`self >= origin`)
    ///
    /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
    ///   apply to this method as well; see it for the full details.
    ///
    /// Importantly, despite the return type of this method being able to represent
    /// a larger offset, it's still *not permitted* to pass pointers which differ
    /// by more than `isize::MAX` *bytes*. As such, the result of this method will
    /// always be less than or equal to `isize::MAX as usize`.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ptr_sub_ptr)]
    ///
    /// let mut a = [0; 5];
    /// let p: *mut i32 = a.as_mut_ptr();
    /// unsafe {
    ///     let ptr1: *mut i32 = p.add(1);
    ///     let ptr2: *mut i32 = p.add(3);
    ///
    ///     assert_eq!(ptr2.sub_ptr(ptr1), 2);
    ///     assert_eq!(ptr1.add(2), ptr2);
    ///     assert_eq!(ptr2.sub(2), ptr1);
    ///     assert_eq!(ptr2.sub_ptr(ptr2), 0);
    /// }
    ///
    /// // This would be incorrect, as the pointers are not correctly ordered:
    /// // ptr1.offset_from(ptr2)
    /// ```
    #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
    #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn sub_ptr(self, origin: *const T) -> usize
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `sub_ptr`.
        unsafe { (self as *const T).sub_ptr(origin) }
    }
960
dfeec247
XL
    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_add`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// [`wrapping_add`]: #method.wrapping_add
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.add(1) as char);
    ///     println!("{}", *ptr.add(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn add(self, count: usize) -> Self
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `offset`.
        unsafe { self.offset(count as isize) }
    }
1022
923072b8
FG
    /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [add][pointer::add] on it. See that method for documentation
    /// and safety requirements.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    #[must_use]
    #[inline(always)]
    #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
    #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_add(self, count: usize) -> Self {
        // SAFETY: the caller must uphold the safety contract for `add`.
        unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
    }
1042
dfeec247
XL
    /// Calculates the offset from a pointer (convenience for
    /// `.offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * The computed offset cannot exceed `isize::MAX` **bytes**.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a usize.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_sub`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// [`wrapping_sub`]: #method.wrapping_sub
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "123";
    ///
    /// unsafe {
    ///     let end: *const u8 = s.as_ptr().add(3);
    ///     println!("{}", *end.sub(1) as char);
    ///     println!("{}", *end.sub(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn sub(self, count: usize) -> Self
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `offset`.
        unsafe { self.offset((count as isize).wrapping_neg()) }
    }
1105
923072b8
FG
    /// Calculates the offset from a pointer in bytes (convenience for
    /// `.byte_offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [sub][pointer::sub] on it. See that method for documentation
    /// and safety requirements.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    #[must_use]
    #[inline(always)]
    #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
    #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_sub(self, count: usize) -> Self {
        // SAFETY: the caller must uphold the safety contract for `sub`.
        unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
    }
1126
dfeec247
XL
    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset(count as isize)`)
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// This operation itself is always safe, but using the resulting pointer is not.
    ///
    /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
    /// be used to read or write other allocated objects.
    ///
    /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z`
    /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
    /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
    /// `x` and `y` point into the same allocated object.
    ///
    /// Compared to [`add`], this method basically delays the requirement of staying within the
    /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object
    /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a
    /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`]
    /// can be optimized better and is thus preferable in performance-sensitive code.
    ///
    /// The delayed check only considers the value of the pointer that was dereferenced, not the
    /// intermediate values used during the computation of the final result. For example,
    /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
    /// allocated object and then re-entering it later is permitted.
    ///
    /// [`add`]: #method.add
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_add(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_add(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    pub const fn wrapping_add(self, count: usize) -> Self
    where
        T: Sized,
    {
        self.wrapping_offset(count as isize)
    }
1186
923072b8
FG
    /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
    /// (convenience for `.wrapping_byte_offset(count as isize)`)
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [wrapping_add][pointer::wrapping_add] on it. See that method for documentation.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    #[must_use]
    #[inline(always)]
    #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
    #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
    pub const fn wrapping_byte_add(self, count: usize) -> Self {
        self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
    }
1204
dfeec247 1205 /// Calculates the offset from a pointer using wrapping arithmetic.
5869c6ff 1206 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
dfeec247
XL
1207 ///
1208 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1209 /// offset of `3 * size_of::<T>()` bytes.
1210 ///
1211 /// # Safety
1212 ///
5869c6ff
XL
1213 /// This operation itself is always safe, but using the resulting pointer is not.
1214 ///
94222f64 1215 /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
cdc7bbd5 1216 /// be used to read or write other allocated objects.
5869c6ff
XL
1217 ///
1218 /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z`
1219 /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
1220 /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
1221 /// `x` and `y` point into the same allocated object.
dfeec247 1222 ///
5869c6ff
XL
1223 /// Compared to [`sub`], this method basically delays the requirement of staying within the
1224 /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object
1225 /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a
1226 /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`]
1227 /// can be optimized better and is thus preferable in performance-sensitive code.
dfeec247 1228 ///
5869c6ff
XL
1229 /// The delayed check only considers the value of the pointer that was dereferenced, not the
1230 /// intermediate values used during the computation of the final result. For example,
1231 /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
1232 /// allocated object and then re-entering it later is permitted.
dfeec247 1233 ///
dfeec247 1234 /// [`sub`]: #method.sub
cdc7bbd5 1235 /// [allocated object]: crate::ptr#allocated-object
dfeec247
XL
1236 ///
1237 /// # Examples
1238 ///
dfeec247
XL
1239 /// ```
1240 /// // Iterate using a raw pointer in increments of two elements (backwards)
1241 /// let data = [1u8, 2, 3, 4, 5];
1242 /// let mut ptr: *const u8 = data.as_ptr();
1243 /// let start_rounded_down = ptr.wrapping_sub(2);
1244 /// ptr = ptr.wrapping_add(4);
1245 /// let step = 2;
1246 /// // This loop prints "5, 3, 1, "
1247 /// while ptr != start_rounded_down {
1248 /// unsafe {
1249 /// print!("{}, ", *ptr);
1250 /// }
1251 /// ptr = ptr.wrapping_sub(step);
1252 /// }
1253 /// ```
1254 #[stable(feature = "pointer_methods", since = "1.26.0")]
f9f354fc 1255 #[must_use = "returns a new pointer rather than modifying its argument"]
5e7ed085 1256 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
9c376795 1257 #[inline(always)]
f9f354fc 1258 pub const fn wrapping_sub(self, count: usize) -> Self
dfeec247
XL
1259 where
1260 T: Sized,
1261 {
1262 self.wrapping_offset((count as isize).wrapping_neg())
1263 }
1264
923072b8
FG
    /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
    /// (convenience for `.wrapping_byte_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [wrapping_sub][pointer::wrapping_sub] on it. See that method for documentation.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    #[must_use]
    #[inline(always)]
    #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
    #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
    pub const fn wrapping_byte_sub(self, count: usize) -> Self {
        self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
    }
1282
dfeec247
XL
    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// See [`ptr::read`] for safety concerns and examples.
    ///
    /// [`ptr::read`]: crate::ptr::read()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn read(self) -> T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `read`.
        unsafe { read(self) }
    }
1300
    /// Performs a volatile read of the value from `self` without moving it. This
    /// leaves the memory in `self` unchanged.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// See [`ptr::read_volatile`] for safety concerns and examples.
    ///
    /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn read_volatile(self) -> T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `read_volatile`.
        unsafe { read_volatile(self) }
    }
1321
    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// Unlike `read`, the pointer may be unaligned.
    ///
    /// See [`ptr::read_unaligned`] for safety concerns and examples.
    ///
    /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn read_unaligned(self) -> T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
        unsafe { read_unaligned(self) }
    }
1341
    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
    /// and destination may overlap.
    ///
    /// NOTE: this has the *same* argument order as [`ptr::copy`].
    ///
    /// See [`ptr::copy`] for safety concerns and examples.
    ///
    /// [`ptr::copy`]: crate::ptr::copy()
    #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn copy_to(self, dest: *mut T, count: usize)
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `copy`.
        unsafe { copy(self, dest, count) }
    }
1361
    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
    /// and destination may *not* overlap.
    ///
    /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
    ///
    /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
    ///
    /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
    #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
        unsafe { copy_nonoverlapping(self, dest, count) }
    }
1381
    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
    /// and destination may overlap.
    ///
    /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
    ///
    /// See [`ptr::copy`] for safety concerns and examples.
    ///
    /// [`ptr::copy`]: crate::ptr::copy()
    #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn copy_from(self, src: *const T, count: usize)
    where
        T: Sized,
    {
        // Note the swapped argument order: `self` is the *destination*.
        // SAFETY: the caller must uphold the safety contract for `copy`.
        unsafe { copy(src, self, count) }
    }
1401
    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
    /// and destination may *not* overlap.
    ///
    /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
    ///
    /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
    ///
    /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
    #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
    where
        T: Sized,
    {
        // Note the swapped argument order: `self` is the *destination*.
        // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
        unsafe { copy_nonoverlapping(src, self, count) }
    }
1421
    /// Executes the destructor (if any) of the pointed-to value.
    ///
    /// See [`ptr::drop_in_place`] for safety concerns and examples.
    ///
    /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline(always)]
    pub unsafe fn drop_in_place(self) {
        // Method-call convenience for the free function of the same name.
        // SAFETY: the caller must uphold the safety contract for `drop_in_place`.
        unsafe { drop_in_place(self) }
    }
1433
    /// Overwrites a memory location with the given value without reading or
    /// dropping the old value.
    ///
    /// See [`ptr::write`] for safety concerns and examples.
    ///
    /// [`ptr::write`]: crate::ptr::write()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn write(self, val: T)
    where
        T: Sized,
    {
        // `val` is moved into the pointed-to location; the previous contents
        // are overwritten without being dropped.
        // SAFETY: the caller must uphold the safety contract for `write`.
        unsafe { write(self, val) }
    }
1451
    /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
    /// bytes of memory starting at `self` to `val`.
    ///
    /// See [`ptr::write_bytes`] for safety concerns and examples.
    ///
    /// [`ptr::write_bytes`]: crate::ptr::write_bytes()
    #[doc(alias = "memset")]
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn write_bytes(self, val: u8, count: usize)
    where
        T: Sized,
    {
        // Byte-level fill: `count` is in units of `T`, but `val` fills raw bytes.
        // SAFETY: the caller must uphold the safety contract for `write_bytes`.
        unsafe { write_bytes(self, val, count) }
    }
1470
    /// Performs a volatile write of a memory location with the given value without
    /// reading or dropping the old value.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// See [`ptr::write_volatile`] for safety concerns and examples.
    ///
    /// [`ptr::write_volatile`]: crate::ptr::write_volatile()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn write_volatile(self, val: T)
    where
        T: Sized,
    {
        // Not `const`: volatile semantics have no meaning during CTFE.
        // SAFETY: the caller must uphold the safety contract for `write_volatile`.
        unsafe { write_volatile(self, val) }
    }
1491
    /// Overwrites a memory location with the given value without reading or
    /// dropping the old value.
    ///
    /// Unlike `write`, the pointer may be unaligned.
    ///
    /// See [`ptr::write_unaligned`] for safety concerns and examples.
    ///
    /// [`ptr::write_unaligned`]: crate::ptr::write_unaligned()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn write_unaligned(self, val: T)
    where
        T: Sized,
    {
        // Alignment of `self` is *not* required here, unlike `write`.
        // SAFETY: the caller must uphold the safety contract for `write_unaligned`.
        unsafe { write_unaligned(self, val) }
    }
1511
    /// Replaces the value at `self` with `src`, returning the old
    /// value, without dropping either.
    ///
    /// See [`ptr::replace`] for safety concerns and examples.
    ///
    /// [`ptr::replace`]: crate::ptr::replace()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline(always)]
    pub unsafe fn replace(self, src: T) -> T
    where
        T: Sized,
    {
        // Ownership of the old value transfers to the caller; no drop runs here.
        // SAFETY: the caller must uphold the safety contract for `replace`.
        unsafe { replace(self, src) }
    }
1527
    /// Swaps the values at two mutable locations of the same type, without
    /// deinitializing either. They may overlap, unlike `mem::swap` which is
    /// otherwise equivalent.
    ///
    /// See [`ptr::swap`] for safety concerns and examples.
    ///
    /// [`ptr::swap`]: crate::ptr::swap()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_unstable(feature = "const_swap", issue = "83163")]
    #[inline(always)]
    pub const unsafe fn swap(self, with: *mut T)
    where
        T: Sized,
    {
        // Overlap between `self` and `with` is permitted (see `ptr::swap`).
        // SAFETY: the caller must uphold the safety contract for `swap`.
        unsafe { swap(self, with) }
    }
1545
    /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
    /// `align`.
    ///
    /// If it is not possible to align the pointer, the implementation returns
    /// `usize::MAX`. It is permissible for the implementation to *always*
    /// return `usize::MAX`. Only your algorithm's performance can depend
    /// on getting a usable offset here, not its correctness.
    ///
    /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
    /// used with the `wrapping_add` method.
    ///
    /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
    /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
    /// the returned offset is correct in all terms other than alignment.
    ///
    /// # Panics
    ///
    /// The function panics if `align` is not a power-of-two.
    ///
    /// # Examples
    ///
    /// Accessing adjacent `u8` as `u16`
    ///
    /// ```
    /// use std::mem::align_of;
    ///
    /// # unsafe {
    /// let mut x = [5_u8, 6, 7, 8, 9];
    /// let ptr = x.as_mut_ptr();
    /// let offset = ptr.align_offset(align_of::<u16>());
    ///
    /// if offset < x.len() - 1 {
    ///     let u16_ptr = ptr.add(offset).cast::<u16>();
    ///     *u16_ptr = 0;
    ///
    ///     assert!(x == [0, 0, 7, 8, 9] || x == [5, 0, 0, 8, 9]);
    /// } else {
    ///     // while the pointer can be aligned via `offset`, it would point
    ///     // outside the allocation
    /// }
    /// # }
    /// ```
    #[must_use]
    #[inline]
    #[stable(feature = "align_offset", since = "1.36.0")]
    #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
    pub const fn align_offset(self, align: usize) -> usize
    where
        T: Sized,
    {
        // Validate the precondition eagerly so the helper below can assume it.
        if !align.is_power_of_two() {
            panic!("align_offset: align is not a power-of-two");
        }

        {
            // Delegates to the module-private free function `align_offset`.
            // SAFETY: `align` has been checked to be a power of 2 above
            unsafe { align_offset(self, align) }
        }
    }
923072b8
FG
1605
    /// Returns whether the pointer is properly aligned for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(pointer_is_aligned)]
    /// #![feature(pointer_byte_offsets)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// let mut data = AlignedI32(42);
    /// let ptr = &mut data as *mut AlignedI32;
    ///
    /// assert!(ptr.is_aligned());
    /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
    /// ```
    ///
    /// # At compiletime
    /// **Note: Alignment at compiletime is experimental and subject to change. See the
    /// [tracking issue] for details.**
    ///
    /// At compiletime, the compiler may not know where a value will end up in memory.
    /// Calling this function on a pointer created from a reference at compiletime will only
    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
    /// is never aligned if cast to a type with a stricter alignment than the reference's
    /// underlying allocation.
    ///
    /// ```
    /// #![feature(pointer_is_aligned)]
    /// #![feature(const_pointer_is_aligned)]
    /// #![feature(const_mut_refs)]
    ///
    /// // On some platforms, the alignment of primitives is less than their size.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    /// #[repr(align(8))]
    /// struct AlignedI64(i64);
    ///
    /// const _: () = {
    ///     let mut data = AlignedI32(42);
    ///     let ptr = &mut data as *mut AlignedI32;
    ///     assert!(ptr.is_aligned());
    ///
    ///     // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
    ///     let ptr1 = ptr.cast::<AlignedI64>();
    ///     let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
    ///     assert!(!ptr1.is_aligned());
    ///     assert!(!ptr2.is_aligned());
    /// };
    /// ```
    ///
    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
    ///
    /// ```
    /// #![feature(pointer_is_aligned)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of primitives is less than their size.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    /// #[repr(align(8))]
    /// struct AlignedI64(i64);
    ///
    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
    /// // Also, note that mutable references are not allowed in the final value of constants.
    /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
    /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
    ///
    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
    /// let runtime_ptr = COMPTIME_PTR;
    /// assert_ne!(
    ///     runtime_ptr.cast::<AlignedI64>().is_aligned(),
    ///     runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
    /// );
    /// ```
    ///
    /// If a pointer is created from a fixed address, this function behaves the same during
    /// runtime and compiletime.
    ///
    /// ```
    /// #![feature(pointer_is_aligned)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of primitives is less than their size.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    /// #[repr(align(8))]
    /// struct AlignedI64(i64);
    ///
    /// const _: () = {
    ///     let ptr = 40 as *mut AlignedI32;
    ///     assert!(ptr.is_aligned());
    ///
    ///     // For pointers with a known address, runtime and compiletime behavior are identical.
    ///     let ptr1 = ptr.cast::<AlignedI64>();
    ///     let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
    ///     assert!(ptr1.is_aligned());
    ///     assert!(!ptr2.is_aligned());
    /// };
    /// ```
    ///
    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
    #[must_use]
    #[inline]
    #[unstable(feature = "pointer_is_aligned", issue = "96284")]
    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
    pub const fn is_aligned(self) -> bool
    where
        T: Sized,
    {
        // Forwards to the general check using `T`'s ABI-required alignment.
        self.is_aligned_to(mem::align_of::<T>())
    }
1722
    /// Returns whether the pointer is aligned to `align`.
    ///
    /// For non-`Sized` pointees this operation considers only the data pointer,
    /// ignoring the metadata.
    ///
    /// # Panics
    ///
    /// The function panics if `align` is not a power-of-two (this includes 0).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(pointer_is_aligned)]
    /// #![feature(pointer_byte_offsets)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// let mut data = AlignedI32(42);
    /// let ptr = &mut data as *mut AlignedI32;
    ///
    /// assert!(ptr.is_aligned_to(1));
    /// assert!(ptr.is_aligned_to(2));
    /// assert!(ptr.is_aligned_to(4));
    ///
    /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
    /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
    ///
    /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
    /// ```
    ///
    /// # At compiletime
    /// **Note: Alignment at compiletime is experimental and subject to change. See the
    /// [tracking issue] for details.**
    ///
    /// At compiletime, the compiler may not know where a value will end up in memory.
    /// Calling this function on a pointer created from a reference at compiletime will only
    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
    /// cannot be stricter aligned than the reference's underlying allocation.
    ///
    /// ```
    /// #![feature(pointer_is_aligned)]
    /// #![feature(const_pointer_is_aligned)]
    /// #![feature(const_mut_refs)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// const _: () = {
    ///     let mut data = AlignedI32(42);
    ///     let ptr = &mut data as *mut AlignedI32;
    ///
    ///     assert!(ptr.is_aligned_to(1));
    ///     assert!(ptr.is_aligned_to(2));
    ///     assert!(ptr.is_aligned_to(4));
    ///
    ///     // At compiletime, we know for sure that the pointer isn't aligned to 8.
    ///     assert!(!ptr.is_aligned_to(8));
    ///     assert!(!ptr.wrapping_add(1).is_aligned_to(8));
    /// };
    /// ```
    ///
    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
    ///
    /// ```
    /// #![feature(pointer_is_aligned)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
    /// // Also, note that mutable references are not allowed in the final value of constants.
    /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
    /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
    ///
    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
    /// let runtime_ptr = COMPTIME_PTR;
    /// assert_ne!(
    ///     runtime_ptr.is_aligned_to(8),
    ///     runtime_ptr.wrapping_add(1).is_aligned_to(8),
    /// );
    /// ```
    ///
    /// If a pointer is created from a fixed address, this function behaves the same during
    /// runtime and compiletime.
    ///
    /// ```
    /// #![feature(pointer_is_aligned)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// const _: () = {
    ///     let ptr = 40 as *mut u8;
    ///     assert!(ptr.is_aligned_to(1));
    ///     assert!(ptr.is_aligned_to(2));
    ///     assert!(ptr.is_aligned_to(4));
    ///     assert!(ptr.is_aligned_to(8));
    ///     assert!(!ptr.is_aligned_to(16));
    /// };
    /// ```
    ///
    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
    #[must_use]
    #[inline]
    #[unstable(feature = "pointer_is_aligned", issue = "96284")]
    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
    pub const fn is_aligned_to(self, align: usize) -> bool {
        if !align.is_power_of_two() {
            panic!("is_aligned_to: align is not a power-of-two");
        }

        // Runtime path: a plain address mask. `align` is a power of two, so
        // `align - 1` is the mask of the low bits that must be zero.
        #[inline]
        fn runtime_impl(ptr: *mut (), align: usize) -> bool {
            ptr.addr() & (align - 1) == 0
        }

        #[inline]
        const fn const_impl(ptr: *mut (), align: usize) -> bool {
            // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
            // The cast to `()` is used to
            // 1. deal with fat pointers; and
            // 2. ensure that `align_offset` doesn't actually try to compute an offset.
            ptr.align_offset(align) == 0
        }

        // SAFETY: The two versions are equivalent at runtime.
        unsafe { const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl) }
    }
dfeec247
XL
1856}
1857
ba9703b0
XL
impl<T> *mut [T] {
    /// Returns the length of a raw slice.
    ///
    /// The returned value is the number of **elements**, not the number of bytes.
    ///
    /// This function is safe, even when the raw slice cannot be cast to a slice
    /// reference because the pointer is null or unaligned.
    ///
    /// # Examples
    ///
    /// ```rust
    /// #![feature(slice_ptr_len)]
    /// use std::ptr;
    ///
    /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
    /// assert_eq!(slice.len(), 3);
    /// ```
    #[inline(always)]
    #[unstable(feature = "slice_ptr_len", issue = "71146")]
    #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
    pub const fn len(self) -> usize {
        // The length is the fat-pointer metadata; no memory is dereferenced.
        metadata(self)
    }

    /// Returns `true` if the raw slice has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(slice_ptr_len)]
    ///
    /// let mut a = [1, 2, 3];
    /// let ptr = &mut a as *mut [_];
    /// assert!(!ptr.is_empty());
    /// ```
    #[inline(always)]
    #[unstable(feature = "slice_ptr_len", issue = "71146")]
    #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
    pub const fn is_empty(self) -> bool {
        self.len() == 0
    }

    /// Divides one mutable raw slice into two at an index.
    ///
    /// The first will contain all indices from `[0, mid)` (excluding
    /// the index `mid` itself) and the second will contain all
    /// indices from `[mid, len)` (excluding the index `len` itself).
    ///
    /// # Panics
    ///
    /// Panics if `mid > len`.
    ///
    /// # Safety
    ///
    /// `mid` must be [in-bounds] of the underlying [allocated object].
    /// Which means `self` must be dereferenceable and span a single allocation
    /// that is at least `mid * size_of::<T>()` bytes long. Not upholding these
    /// requirements is *[undefined behavior]* even if the resulting pointers are not used.
    ///
    /// Since `len` being in-bounds it is not a safety invariant of `*mut [T]` the
    /// safety requirements of this method are the same as for [`split_at_mut_unchecked`].
    /// The explicit bounds check is only as useful as `len` is correct.
    ///
    /// [`split_at_mut_unchecked`]: #method.split_at_mut_unchecked
    /// [in-bounds]: #method.add
    /// [allocated object]: crate::ptr#allocated-object
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(raw_slice_split)]
    /// #![feature(slice_ptr_get)]
    ///
    /// let mut v = [1, 0, 3, 0, 5, 6];
    /// let ptr = &mut v as *mut [_];
    /// unsafe {
    ///     let (left, right) = ptr.split_at_mut(2);
    ///     assert_eq!(&*left, [1, 0]);
    ///     assert_eq!(&*right, [3, 0, 5, 6]);
    /// }
    /// ```
    #[inline(always)]
    #[track_caller]
    #[unstable(feature = "raw_slice_split", issue = "95595")]
    pub unsafe fn split_at_mut(self, mid: usize) -> (*mut [T], *mut [T]) {
        assert!(mid <= self.len());
        // SAFETY: The assert above is only a safety-net as long as `self.len()` is correct
        // The actual safety requirements of this function are the same as for `split_at_mut_unchecked`
        unsafe { self.split_at_mut_unchecked(mid) }
    }

    /// Divides one mutable raw slice into two at an index, without doing bounds checking.
    ///
    /// The first will contain all indices from `[0, mid)` (excluding
    /// the index `mid` itself) and the second will contain all
    /// indices from `[mid, len)` (excluding the index `len` itself).
    ///
    /// # Safety
    ///
    /// `mid` must be [in-bounds] of the underlying [allocated object].
    /// Which means `self` must be dereferenceable and span a single allocation
    /// that is at least `mid * size_of::<T>()` bytes long. Not upholding these
    /// requirements is *[undefined behavior]* even if the resulting pointers are not used.
    ///
    /// [in-bounds]: #method.add
    /// [out-of-bounds index]: #method.add
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(raw_slice_split)]
    ///
    /// let mut v = [1, 0, 3, 0, 5, 6];
    /// // scoped to restrict the lifetime of the borrows
    /// unsafe {
    ///     let ptr = &mut v as *mut [_];
    ///     let (left, right) = ptr.split_at_mut_unchecked(2);
    ///     assert_eq!(&*left, [1, 0]);
    ///     assert_eq!(&*right, [3, 0, 5, 6]);
    ///     (&mut *left)[1] = 2;
    ///     (&mut *right)[1] = 4;
    /// }
    /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
    /// ```
    #[inline(always)]
    #[unstable(feature = "raw_slice_split", issue = "95595")]
    pub unsafe fn split_at_mut_unchecked(self, mid: usize) -> (*mut [T], *mut [T]) {
        let len = self.len();
        let ptr = self.as_mut_ptr();

        // SAFETY: Caller must pass a valid pointer and an index that is in-bounds.
        let tail = unsafe { ptr.add(mid) };
        // Reassemble two fat pointers from the split data pointer and lengths.
        (
            crate::ptr::slice_from_raw_parts_mut(ptr, mid),
            crate::ptr::slice_from_raw_parts_mut(tail, len - mid),
        )
    }

    /// Returns a raw pointer to the slice's buffer.
    ///
    /// This is equivalent to casting `self` to `*mut T`, but more type-safe.
    ///
    /// # Examples
    ///
    /// ```rust
    /// #![feature(slice_ptr_get)]
    /// use std::ptr;
    ///
    /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
    /// assert_eq!(slice.as_mut_ptr(), ptr::null_mut());
    /// ```
    #[inline(always)]
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
    pub const fn as_mut_ptr(self) -> *mut T {
        // The cast discards the length metadata, keeping only the data pointer.
        self as *mut T
    }

    /// Returns a raw pointer to an element or subslice, without doing bounds
    /// checking.
    ///
    /// Calling this method with an [out-of-bounds index] or when `self` is not dereferenceable
    /// is *[undefined behavior]* even if the resulting pointer is not used.
    ///
    /// [out-of-bounds index]: #method.add
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(slice_ptr_get)]
    ///
    /// let x = &mut [1, 2, 4] as *mut [i32];
    ///
    /// unsafe {
    ///     assert_eq!(x.get_unchecked_mut(1), x.as_mut_ptr().add(1));
    /// }
    /// ```
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
    #[inline(always)]
    pub const unsafe fn get_unchecked_mut<I>(self, index: I) -> *mut I::Output
    where
        I: ~const SliceIndex<[T]>,
    {
        // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
        unsafe { index.get_unchecked_mut(self) }
    }

    /// Returns `None` if the pointer is null, or else returns a shared slice to
    /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
    /// that the value has to be initialized.
    ///
    /// For the mutable counterpart see [`as_uninit_slice_mut`].
    ///
    /// [`as_ref`]: #method.as_ref-1
    /// [`as_uninit_slice_mut`]: #method.as_uninit_slice_mut
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following is true:
    ///
    /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
    ///   and it must be properly aligned. This means in particular:
    ///
    ///     * The entire memory range of this slice must be contained within a single [allocated object]!
    ///       Slices can never span across multiple allocated objects.
    ///
    ///     * The pointer must be aligned even for zero-length slices. One
    ///       reason for this is that enum layout optimizations may rely on references
    ///       (including slices of any length) being aligned and non-null to distinguish
    ///       them from other data. You can obtain a pointer that is usable as `data`
    ///       for zero-length slices using [`NonNull::dangling()`].
    ///
    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
    ///   See the safety documentation of [`pointer::offset`].
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get mutated (except inside `UnsafeCell`).
    ///
    /// This applies even if the result of this method is unused!
    ///
    /// See also [`slice::from_raw_parts`][].
    ///
    /// [valid]: crate::ptr#safety
    /// [allocated object]: crate::ptr#allocated-object
    #[inline]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
        if self.is_null() {
            None
        } else {
            // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
            Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
        }
    }

    /// Returns `None` if the pointer is null, or else returns a unique slice to
    /// the value wrapped in `Some`. In contrast to [`as_mut`], this does not require
    /// that the value has to be initialized.
    ///
    /// For the shared counterpart see [`as_uninit_slice`].
    ///
    /// [`as_mut`]: #method.as_mut
    /// [`as_uninit_slice`]: #method.as_uninit_slice-1
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following is true:
    ///
    /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
    ///   many bytes, and it must be properly aligned. This means in particular:
    ///
    ///     * The entire memory range of this slice must be contained within a single [allocated object]!
    ///       Slices can never span across multiple allocated objects.
    ///
    ///     * The pointer must be aligned even for zero-length slices. One
    ///       reason for this is that enum layout optimizations may rely on references
    ///       (including slices of any length) being aligned and non-null to distinguish
    ///       them from other data. You can obtain a pointer that is usable as `data`
    ///       for zero-length slices using [`NonNull::dangling()`].
    ///
    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
    ///   See the safety documentation of [`pointer::offset`].
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get accessed (read or written) through any other pointer.
    ///
    /// This applies even if the result of this method is unused!
    ///
    /// See also [`slice::from_raw_parts_mut`][].
    ///
    /// [valid]: crate::ptr#safety
    /// [allocated object]: crate::ptr#allocated-object
    #[inline]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    pub const unsafe fn as_uninit_slice_mut<'a>(self) -> Option<&'a mut [MaybeUninit<T>]> {
        if self.is_null() {
            None
        } else {
            // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`.
            Some(unsafe { slice::from_raw_parts_mut(self as *mut MaybeUninit<T>, self.len()) })
        }
    }
}
2153
dfeec247
XL
// Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *mut T {
    #[inline(always)]
    fn eq(&self, other: &*mut T) -> bool {
        // Uses the compiler's built-in raw-pointer comparison; no memory
        // behind the pointers is read.
        *self == *other
    }
}
2162
#[stable(feature = "rust1", since = "1.0.0")]
// Raw-pointer equality is a full equivalence relation (there is no NaN-like
// value), so the marker impl has no methods to provide.
impl<T: ?Sized> Eq for *mut T {}
2165
2166#[stable(feature = "rust1", since = "1.0.0")]
2167impl<T: ?Sized> Ord for *mut T {
2168 #[inline]
2169 fn cmp(&self, other: &*mut T) -> Ordering {
2170 if self < other {
2171 Less
2172 } else if self == other {
2173 Equal
2174 } else {
2175 Greater
2176 }
2177 }
2178}
2179
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *mut T {
    #[inline(always)]
    fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
        // Raw-pointer ordering is total, so this is always `Some`.
        Some(self.cmp(other))
    }

    // The four comparison methods are overridden to use the primitive
    // pointer comparisons directly rather than the default route through
    // `partial_cmp`.
    #[inline(always)]
    fn lt(&self, other: &*mut T) -> bool {
        *self < *other
    }

    #[inline(always)]
    fn le(&self, other: &*mut T) -> bool {
        *self <= *other
    }

    #[inline(always)]
    fn gt(&self, other: &*mut T) -> bool {
        *self > *other
    }

    #[inline(always)]
    fn ge(&self, other: &*mut T) -> bool {
        *self >= *other
    }
}