]> git.proxmox.com Git - rustc.git/blame - src/libcore/ptr.rs
New upstream version 1.25.0+dfsg1
[rustc.git] / src / libcore / ptr.rs
CommitLineData
1a4d82fc
JJ
1// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2// file at the top-level directory of this distribution and at
3// http://rust-lang.org/COPYRIGHT.
4//
5// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8// option. This file may not be copied, modified, or distributed
9// except according to those terms.
10
11// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
12
54a0048b 13//! Raw, unsafe pointers, `*const T`, and `*mut T`.
1a4d82fc 14//!
54a0048b 15//! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
1a4d82fc 16
85aaf69f 17#![stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 18
3b2f2976 19use convert::From;
1a4d82fc 20use intrinsics;
7cac9316 21use ops::CoerceUnsized;
c1a9b12d 22use fmt;
e9174d1e 23use hash;
9e0c209e 24use marker::{PhantomData, Unsize};
e9174d1e 25use mem;
85aaf69f 26use nonzero::NonZero;
1a4d82fc 27
1a4d82fc
JJ
28use cmp::Ordering::{self, Less, Equal, Greater};
29
c34b1796 30#[stable(feature = "rust1", since = "1.0.0")]
c34b1796 31pub use intrinsics::copy_nonoverlapping;
1a4d82fc 32
c34b1796 33#[stable(feature = "rust1", since = "1.0.0")]
c34b1796 34pub use intrinsics::copy;
1a4d82fc 35
c34b1796
AL
36#[stable(feature = "rust1", since = "1.0.0")]
37pub use intrinsics::write_bytes;
1a4d82fc 38
cc61c64b
XL
/// Executes the destructor (if any) of the pointed-to value.
///
/// This has two use cases:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
///   trait objects, because they can't be read out onto the stack and
///   dropped normally.
///
/// * It is friendlier to the optimizer to do this over `ptr::read` when
///   dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
///   as the compiler doesn't need to prove that it's sound to elide the
///   copy.
///
/// # Safety
///
/// This has all the same safety problems as `ptr::read` with respect to
/// invalid pointers, types, and double drops.
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
    // Code here does not matter - this is replaced by the
    // real drop glue by the compiler. The `#[lang]` attribute above is
    // what tells the compiler to perform that substitution, so the
    // apparently infinite recursion on the next line is never executed
    // as written (hence the `allow(unconditional_recursion)`).
    drop_in_place(to_drop);
}
64
1a4d82fc
JJ
65/// Creates a null raw pointer.
66///
67/// # Examples
68///
69/// ```
70/// use std::ptr;
71///
85aaf69f 72/// let p: *const i32 = ptr::null();
1a4d82fc
JJ
73/// assert!(p.is_null());
74/// ```
75#[inline]
85aaf69f 76#[stable(feature = "rust1", since = "1.0.0")]
e9174d1e 77pub const fn null<T>() -> *const T { 0 as *const T }
1a4d82fc
JJ
78
79/// Creates a null mutable raw pointer.
80///
81/// # Examples
82///
83/// ```
84/// use std::ptr;
85///
85aaf69f 86/// let p: *mut i32 = ptr::null_mut();
1a4d82fc
JJ
87/// assert!(p.is_null());
88/// ```
89#[inline]
85aaf69f 90#[stable(feature = "rust1", since = "1.0.0")]
e9174d1e 91pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
1a4d82fc 92
1a4d82fc 93/// Swaps the values at two mutable locations of the same type, without
ff7c6d11
XL
94/// deinitializing either.
95///
96/// The values pointed at by `x` and `y` may overlap, unlike `mem::swap` which
97/// is otherwise equivalent. If the values do overlap, then the overlapping
98/// region of memory from `x` will be used. This is demonstrated in the
99/// examples section below.
1a4d82fc
JJ
100///
101/// # Safety
102///
32a655c1
SL
103/// This function copies the memory through the raw pointers passed to it
104/// as arguments.
105///
106/// Ensure that these pointers are valid before calling `swap`.
ff7c6d11
XL
107///
108/// # Examples
109///
110/// Swapping two non-overlapping regions:
111///
112/// ```
113/// use std::ptr;
114///
115/// let mut array = [0, 1, 2, 3];
116///
117/// let x = array[0..].as_mut_ptr() as *mut [u32; 2];
118/// let y = array[2..].as_mut_ptr() as *mut [u32; 2];
119///
120/// unsafe {
121/// ptr::swap(x, y);
122/// assert_eq!([2, 3, 0, 1], array);
123/// }
124/// ```
125///
126/// Swapping two overlapping regions:
127///
128/// ```
129/// use std::ptr;
130///
131/// let mut array = [0, 1, 2, 3];
132///
133/// let x = array[0..].as_mut_ptr() as *mut [u32; 3];
134/// let y = array[1..].as_mut_ptr() as *mut [u32; 3];
135///
136/// unsafe {
137/// ptr::swap(x, y);
138/// assert_eq!([1, 0, 1, 2], array);
139/// }
140/// ```
1a4d82fc 141#[inline]
85aaf69f 142#[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc
JJ
143pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
144 // Give ourselves some scratch space to work with
145 let mut tmp: T = mem::uninitialized();
1a4d82fc
JJ
146
147 // Perform the swap
c34b1796
AL
148 copy_nonoverlapping(x, &mut tmp, 1);
149 copy(y, x, 1); // `x` and `y` may overlap
150 copy_nonoverlapping(&tmp, y, 1);
1a4d82fc
JJ
151
152 // y and t now point to the same thing, but we need to completely forget `tmp`
153 // because it's no longer relevant.
154 mem::forget(tmp);
155}
156
041b39d2
XL
157/// Swaps a sequence of values at two mutable locations of the same type.
158///
159/// # Safety
160///
161/// The two arguments must each point to the beginning of `count` locations
162/// of valid memory, and the two memory ranges must not overlap.
163///
164/// # Examples
165///
166/// Basic usage:
167///
168/// ```
169/// #![feature(swap_nonoverlapping)]
170///
171/// use std::ptr;
172///
173/// let mut x = [1, 2, 3, 4];
174/// let mut y = [7, 8, 9];
175///
176/// unsafe {
177/// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
178/// }
179///
180/// assert_eq!(x, [7, 8, 3, 4]);
181/// assert_eq!(y, [1, 2, 9]);
182/// ```
183#[inline]
184#[unstable(feature = "swap_nonoverlapping", issue = "42818")]
185pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
186 let x = x as *mut u8;
187 let y = y as *mut u8;
188 let len = mem::size_of::<T>() * count;
189 swap_nonoverlapping_bytes(x, y, len)
190}
191
#[inline]
// Swaps `len` bytes between the disjoint byte ranges starting at `x` and
// `y`, working a 32-byte `Block` at a time with a tail fix-up for the
// remainder.
unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    // It's also broken on big-endian powerpc64 and s390x. #42778
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                       target_endian = "big")),
               repr(simd))]
    struct Block(u64, u64, u64, u64);
    // Same size as `Block` but deliberately without the `repr(simd)`
    // attribute, used for the sub-block tail copy below.
    struct UnalignedBlock(u64, u64, u64, u64);

    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t: Block = mem::uninitialized();
        let t = &mut t as *mut _ as *mut u8;
        let x = x.offset(i as isize);
        let y = y.offset(i as isize);

        // Swap a block of bytes of x & y, using t as a temporary buffer
        // This should be optimized into efficient SIMD operations where available
        copy_nonoverlapping(x, t, block_size);
        copy_nonoverlapping(y, x, block_size);
        copy_nonoverlapping(t, y, block_size);
        i += block_size;
    }

    if i < len {
        // Swap any remaining bytes (fewer than `block_size`) through the
        // non-simd scratch struct, using the same three-copy rotation.
        let mut t: UnalignedBlock = mem::uninitialized();
        let rem = len - i;

        let t = &mut t as *mut _ as *mut u8;
        let x = x.offset(i as isize);
        let y = y.offset(i as isize);

        copy_nonoverlapping(x, t, rem);
        copy_nonoverlapping(y, x, rem);
        copy_nonoverlapping(t, y, rem);
    }
}
243
1a4d82fc
JJ
244/// Replaces the value at `dest` with `src`, returning the old
245/// value, without dropping either.
246///
247/// # Safety
248///
249/// This is only unsafe because it accepts a raw pointer.
250/// Otherwise, this operation is identical to `mem::replace`.
251#[inline]
85aaf69f 252#[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 253pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
e9174d1e 254 mem::swap(&mut *dest, &mut src); // cannot overlap
1a4d82fc
JJ
255 src
256}
257
85aaf69f 258/// Reads the value from `src` without moving it. This leaves the
1a4d82fc
JJ
259/// memory in `src` unchanged.
260///
261/// # Safety
262///
263/// Beyond accepting a raw pointer, this is unsafe because it semantically
264/// moves the value out of `src` without preventing further usage of `src`.
265/// If `T` is not `Copy`, then care must be taken to ensure that the value at
266/// `src` is not used before the data is overwritten again (e.g. with `write`,
abe05a73 267/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
1a4d82fc 268/// because it will attempt to drop the value previously at `*src`.
a7813a04 269///
476ff2be
SL
270/// The pointer must be aligned; use `read_unaligned` if that is not the case.
271///
a7813a04
XL
272/// # Examples
273///
274/// Basic usage:
275///
276/// ```
277/// let x = 12;
278/// let y = &x as *const i32;
279///
9e0c209e
SL
280/// unsafe {
281/// assert_eq!(std::ptr::read(y), 12);
282/// }
a7813a04 283/// ```
3b2f2976 284#[inline]
85aaf69f 285#[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc
JJ
286pub unsafe fn read<T>(src: *const T) -> T {
287 let mut tmp: T = mem::uninitialized();
c34b1796 288 copy_nonoverlapping(src, &mut tmp, 1);
1a4d82fc
JJ
289 tmp
290}
291
476ff2be
SL
292/// Reads the value from `src` without moving it. This leaves the
293/// memory in `src` unchanged.
294///
295/// Unlike `read`, the pointer may be unaligned.
296///
297/// # Safety
298///
299/// Beyond accepting a raw pointer, this is unsafe because it semantically
300/// moves the value out of `src` without preventing further usage of `src`.
301/// If `T` is not `Copy`, then care must be taken to ensure that the value at
302/// `src` is not used before the data is overwritten again (e.g. with `write`,
abe05a73 303/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
476ff2be
SL
304/// because it will attempt to drop the value previously at `*src`.
305///
306/// # Examples
307///
308/// Basic usage:
309///
310/// ```
476ff2be
SL
311/// let x = 12;
312/// let y = &x as *const i32;
313///
314/// unsafe {
315/// assert_eq!(std::ptr::read_unaligned(y), 12);
316/// }
317/// ```
3b2f2976 318#[inline]
8bb4bdeb 319#[stable(feature = "ptr_unaligned", since = "1.17.0")]
476ff2be
SL
320pub unsafe fn read_unaligned<T>(src: *const T) -> T {
321 let mut tmp: T = mem::uninitialized();
322 copy_nonoverlapping(src as *const u8,
323 &mut tmp as *mut T as *mut u8,
324 mem::size_of::<T>());
325 tmp
326}
327
1a4d82fc
JJ
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// The pointer must be aligned; use `write_unaligned` if that is not the case.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
    // The intrinsic moves `src` into `*dst` without reading or dropping
    // the old value, exactly as documented above.
    intrinsics::move_val_init(&mut *dst, src)
}
366
476ff2be
SL
367/// Overwrites a memory location with the given value without reading or
368/// dropping the old value.
369///
370/// Unlike `write`, the pointer may be unaligned.
371///
372/// # Safety
373///
374/// This operation is marked unsafe because it accepts a raw pointer.
375///
376/// It does not drop the contents of `dst`. This is safe, but it could leak
377/// allocations or resources, so care must be taken not to overwrite an object
378/// that should be dropped.
379///
cc61c64b
XL
380/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
381/// location pointed to by `dst`.
382///
476ff2be
SL
383/// This is appropriate for initializing uninitialized memory, or overwriting
384/// memory that has previously been `read` from.
385///
386/// # Examples
387///
388/// Basic usage:
389///
390/// ```
476ff2be
SL
391/// let mut x = 0;
392/// let y = &mut x as *mut i32;
393/// let z = 12;
394///
395/// unsafe {
396/// std::ptr::write_unaligned(y, z);
397/// assert_eq!(std::ptr::read_unaligned(y), 12);
398/// }
399/// ```
400#[inline]
8bb4bdeb 401#[stable(feature = "ptr_unaligned", since = "1.17.0")]
476ff2be
SL
402pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
403 copy_nonoverlapping(&src as *const T as *const u8,
404 dst as *mut u8,
405 mem::size_of::<T>());
406 mem::forget(src);
407}
408
7453a54e
SL
/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    // Thin wrapper around the volatile-load intrinsic; the guarantees
    // about elision/reordering documented above come from this intrinsic.
    intrinsics::volatile_load(src)
}
456
/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    // Thin wrapper around the volatile-store intrinsic; the guarantees
    // about elision/reordering documented above come from this intrinsic.
    intrinsics::volatile_store(dst, src);
}
508
c34b1796
AL
509#[lang = "const_ptr"]
510impl<T: ?Sized> *const T {
cc61c64b 511 /// Returns `true` if the pointer is null.
54a0048b 512 ///
ff7c6d11
XL
513 /// Note that unsized types have many possible null pointers, as only the
514 /// raw data pointer is considered, not their length, vtable, etc.
515 /// Therefore, two pointers that are null may still not compare equal to
516 /// each other.
517 ///
54a0048b
SL
518 /// # Examples
519 ///
520 /// Basic usage:
521 ///
522 /// ```
523 /// let s: &str = "Follow the rabbit";
524 /// let ptr: *const u8 = s.as_ptr();
525 /// assert!(!ptr.is_null());
526 /// ```
85aaf69f 527 #[stable(feature = "rust1", since = "1.0.0")]
c34b1796 528 #[inline]
ff7c6d11
XL
529 pub fn is_null(self) -> bool {
530 // Compare via a cast to a thin pointer, so fat pointers are only
531 // considering their "data" part for null-ness.
532 (self as *const u8) == null()
c34b1796 533 }
1a4d82fc
JJ
534
535 /// Returns `None` if the pointer is null, or else returns a reference to
536 /// the value wrapped in `Some`.
537 ///
538 /// # Safety
539 ///
540 /// While this method and its mutable counterpart are useful for
541 /// null-safety, it is important to note that this is still an unsafe
542 /// operation because the returned value could be pointing to invalid
543 /// memory.
54a0048b
SL
544 ///
545 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
546 /// not necessarily reflect the actual lifetime of the data.
547 ///
548 /// # Examples
549 ///
550 /// Basic usage:
551 ///
041b39d2
XL
552 /// ```
553 /// let ptr: *const u8 = &10u8 as *const u8;
54a0048b
SL
554 ///
555 /// unsafe {
041b39d2 556 /// if let Some(val_back) = ptr.as_ref() {
54a0048b
SL
557 /// println!("We got back the value: {}!", val_back);
558 /// }
559 /// }
560 /// ```
561 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
c34b1796 562 #[inline]
abe05a73 563 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
ff7c6d11 564 if self.is_null() {
c34b1796
AL
565 None
566 } else {
54a0048b 567 Some(&*self)
c34b1796
AL
568 }
569 }
1a4d82fc 570
ea8adc8c
XL
    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of an allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum, **in bytes**, must fit in a
    ///   `usize`.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1) as char);
    ///     println!("{}", *ptr.offset(2) as char);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
        // Delegates to the `offset` intrinsic; the aggressive optimizations
        // mentioned above come from the intrinsic being allowed to assume
        // the in-bounds conditions documented in # Safety.
        intrinsics::offset(self, count)
    }
c30ab7b3
SL
624
    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.offset(count)` instead when possible, because `offset`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_offset(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_offset(step);
    /// }
    /// ```
    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
    #[inline]
    pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
        // `arith_offset` computes the address with wrapping arithmetic and,
        // unlike the `offset` intrinsic, carries no in-bounds requirement —
        // which is why this method itself is safe to call.
        unsafe {
            intrinsics::arith_offset(self, count)
        }
    }
cc61c64b
XL
664
665 /// Calculates the distance between two pointers. The returned value is in
666 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
667 ///
668 /// If the address different between the two pointers ia not a multiple of
669 /// `mem::size_of::<T>()` then the result of the division is rounded towards
670 /// zero.
671 ///
672 /// This function returns `None` if `T` is a zero-sized typed.
673 ///
674 /// # Examples
675 ///
676 /// Basic usage:
677 ///
678 /// ```
679 /// #![feature(offset_to)]
680 ///
681 /// fn main() {
682 /// let a = [0; 5];
683 /// let ptr1: *const i32 = &a[1];
684 /// let ptr2: *const i32 = &a[3];
685 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
686 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
687 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
688 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
689 /// }
690 /// ```
691 #[unstable(feature = "offset_to", issue = "41079")]
692 #[inline]
693 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
694 let size = mem::size_of::<T>();
695 if size == 0 {
696 None
697 } else {
698 let diff = (other as isize).wrapping_sub(self as isize);
699 Some(diff / size as isize)
700 }
701 }
ea8adc8c
XL
702
703 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
704 ///
705 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
706 /// offset of `3 * size_of::<T>()` bytes.
707 ///
708 /// # Safety
709 ///
710 /// If any of the following conditions are violated, the result is Undefined
711 /// Behavior:
712 ///
713 /// * Both the starting and resulting pointer must be either in bounds or one
714 /// byte past the end of an allocated object.
715 ///
ff7c6d11 716 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
ea8adc8c
XL
717 ///
718 /// * The offset being in bounds cannot rely on "wrapping around" the address
719 /// space. That is, the infinite-precision sum must fit in a `usize`.
720 ///
721 /// The compiler and standard library generally tries to ensure allocations
722 /// never reach a size where an offset is a concern. For instance, `Vec`
723 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
724 /// `vec.as_ptr().add(vec.len())` is always safe.
725 ///
726 /// Most platforms fundamentally can't even construct such an allocation.
727 /// For instance, no known 64-bit platform can ever serve a request
abe05a73 728 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
ea8adc8c
XL
729 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
730 /// more than `isize::MAX` bytes with things like Physical Address
731 /// Extension. As such, memory acquired directly from allocators or memory
732 /// mapped files *may* be too large to handle with this function.
733 ///
734 /// Consider using `wrapping_offset` instead if these constraints are
735 /// difficult to satisfy. The only advantage of this method is that it
736 /// enables more aggressive compiler optimizations.
737 ///
738 /// # Examples
739 ///
740 /// Basic usage:
741 ///
742 /// ```
743 /// #![feature(pointer_methods)]
744 ///
745 /// let s: &str = "123";
746 /// let ptr: *const u8 = s.as_ptr();
747 ///
748 /// unsafe {
749 /// println!("{}", *ptr.add(1) as char);
750 /// println!("{}", *ptr.add(2) as char);
751 /// }
752 /// ```
753 #[unstable(feature = "pointer_methods", issue = "43941")]
754 #[inline]
755 pub unsafe fn add(self, count: usize) -> Self
756 where T: Sized,
757 {
758 self.offset(count as isize)
759 }
760
761 /// Calculates the offset from a pointer (convenience for
762 /// `.offset((count as isize).wrapping_neg())`).
763 ///
764 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
765 /// offset of `3 * size_of::<T>()` bytes.
766 ///
767 /// # Safety
768 ///
769 /// If any of the following conditions are violated, the result is Undefined
770 /// Behavior:
771 ///
772 /// * Both the starting and resulting pointer must be either in bounds or one
773 /// byte past the end of an allocated object.
774 ///
775 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
776 ///
777 /// * The offset being in bounds cannot rely on "wrapping around" the address
778 /// space. That is, the infinite-precision sum must fit in a usize.
779 ///
780 /// The compiler and standard library generally tries to ensure allocations
781 /// never reach a size where an offset is a concern. For instance, `Vec`
782 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
783 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
784 ///
785 /// Most platforms fundamentally can't even construct such an allocation.
786 /// For instance, no known 64-bit platform can ever serve a request
abe05a73 787 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
ea8adc8c
XL
788 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
789 /// more than `isize::MAX` bytes with things like Physical Address
790 /// Extension. As such, memory acquired directly from allocators or memory
791 /// mapped files *may* be too large to handle with this function.
792 ///
793 /// Consider using `wrapping_offset` instead if these constraints are
794 /// difficult to satisfy. The only advantage of this method is that it
795 /// enables more aggressive compiler optimizations.
796 ///
797 /// # Examples
798 ///
799 /// Basic usage:
800 ///
801 /// ```
802 /// #![feature(pointer_methods)]
803 ///
804 /// let s: &str = "123";
805 ///
806 /// unsafe {
807 /// let end: *const u8 = s.as_ptr().add(3);
808 /// println!("{}", *end.sub(1) as char);
809 /// println!("{}", *end.sub(2) as char);
810 /// }
811 /// ```
812 #[unstable(feature = "pointer_methods", issue = "43941")]
813 #[inline]
814 pub unsafe fn sub(self, count: usize) -> Self
815 where T: Sized,
816 {
817 self.offset((count as isize).wrapping_neg())
818 }
819
820 /// Calculates the offset from a pointer using wrapping arithmetic.
821 /// (convenience for `.wrapping_offset(count as isize)`)
822 ///
823 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
824 /// offset of `3 * size_of::<T>()` bytes.
825 ///
826 /// # Safety
827 ///
828 /// The resulting pointer does not need to be in bounds, but it is
829 /// potentially hazardous to dereference (which requires `unsafe`).
830 ///
831 /// Always use `.add(count)` instead when possible, because `add`
832 /// allows the compiler to optimize better.
833 ///
834 /// # Examples
835 ///
836 /// Basic usage:
837 ///
838 /// ```
839 /// #![feature(pointer_methods)]
840 ///
841 /// // Iterate using a raw pointer in increments of two elements
842 /// let data = [1u8, 2, 3, 4, 5];
843 /// let mut ptr: *const u8 = data.as_ptr();
844 /// let step = 2;
845 /// let end_rounded_up = ptr.wrapping_add(6);
846 ///
847 /// // This loop prints "1, 3, 5, "
848 /// while ptr != end_rounded_up {
849 /// unsafe {
850 /// print!("{}, ", *ptr);
851 /// }
852 /// ptr = ptr.wrapping_add(step);
853 /// }
854 /// ```
855 #[unstable(feature = "pointer_methods", issue = "43941")]
856 #[inline]
857 pub fn wrapping_add(self, count: usize) -> Self
858 where T: Sized,
859 {
860 self.wrapping_offset(count as isize)
861 }
862
863 /// Calculates the offset from a pointer using wrapping arithmetic.
864 /// (convenience for `.wrapping_offset((count as isize).wrapping_sub())`)
865 ///
866 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
867 /// offset of `3 * size_of::<T>()` bytes.
868 ///
869 /// # Safety
870 ///
871 /// The resulting pointer does not need to be in bounds, but it is
872 /// potentially hazardous to dereference (which requires `unsafe`).
873 ///
874 /// Always use `.sub(count)` instead when possible, because `sub`
875 /// allows the compiler to optimize better.
876 ///
877 /// # Examples
878 ///
879 /// Basic usage:
880 ///
881 /// ```
882 /// #![feature(pointer_methods)]
883 ///
884 /// // Iterate using a raw pointer in increments of two elements (backwards)
885 /// let data = [1u8, 2, 3, 4, 5];
886 /// let mut ptr: *const u8 = data.as_ptr();
887 /// let start_rounded_down = ptr.wrapping_sub(2);
888 /// ptr = ptr.wrapping_add(4);
889 /// let step = 2;
890 /// // This loop prints "5, 3, 1, "
891 /// while ptr != start_rounded_down {
892 /// unsafe {
893 /// print!("{}, ", *ptr);
894 /// }
895 /// ptr = ptr.wrapping_sub(step);
896 /// }
897 /// ```
898 #[unstable(feature = "pointer_methods", issue = "43941")]
899 #[inline]
900 pub fn wrapping_sub(self, count: usize) -> Self
901 where T: Sized,
902 {
903 self.wrapping_offset((count as isize).wrapping_neg())
904 }
905
906 /// Reads the value from `self` without moving it. This leaves the
907 /// memory in `self` unchanged.
908 ///
909 /// # Safety
910 ///
911 /// Beyond accepting a raw pointer, this is unsafe because it semantically
912 /// moves the value out of `self` without preventing further usage of `self`.
913 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
914 /// `self` is not used before the data is overwritten again (e.g. with `write`,
abe05a73 915 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
ea8adc8c
XL
916 /// because it will attempt to drop the value previously at `*self`.
917 ///
918 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
919 ///
920 /// # Examples
921 ///
922 /// Basic usage:
923 ///
924 /// ```
925 /// #![feature(pointer_methods)]
926 ///
927 /// let x = 12;
928 /// let y = &x as *const i32;
929 ///
930 /// unsafe {
931 /// assert_eq!(y.read(), 12);
932 /// }
933 /// ```
934 #[unstable(feature = "pointer_methods", issue = "43941")]
935 #[inline]
936 pub unsafe fn read(self) -> T
937 where T: Sized,
938 {
939 read(self)
940 }
941
942 /// Performs a volatile read of the value from `self` without moving it. This
943 /// leaves the memory in `self` unchanged.
944 ///
945 /// Volatile operations are intended to act on I/O memory, and are guaranteed
946 /// to not be elided or reordered by the compiler across other volatile
947 /// operations.
948 ///
949 /// # Notes
950 ///
951 /// Rust does not currently have a rigorously and formally defined memory model,
952 /// so the precise semantics of what "volatile" means here is subject to change
953 /// over time. That being said, the semantics will almost always end up pretty
954 /// similar to [C11's definition of volatile][c11].
955 ///
956 /// The compiler shouldn't change the relative order or number of volatile
957 /// memory operations. However, volatile memory operations on zero-sized types
958 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
959 /// and may be ignored.
960 ///
961 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
962 ///
963 /// # Safety
964 ///
965 /// Beyond accepting a raw pointer, this is unsafe because it semantically
966 /// moves the value out of `self` without preventing further usage of `self`.
967 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
968 /// `self` is not used before the data is overwritten again (e.g. with `write`,
abe05a73 969 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
ea8adc8c
XL
970 /// because it will attempt to drop the value previously at `*self`.
971 ///
972 /// # Examples
973 ///
974 /// Basic usage:
975 ///
976 /// ```
977 /// #![feature(pointer_methods)]
978 ///
979 /// let x = 12;
980 /// let y = &x as *const i32;
981 ///
982 /// unsafe {
983 /// assert_eq!(y.read_volatile(), 12);
984 /// }
985 /// ```
986 #[unstable(feature = "pointer_methods", issue = "43941")]
987 #[inline]
988 pub unsafe fn read_volatile(self) -> T
989 where T: Sized,
990 {
991 read_volatile(self)
992 }
993
994 /// Reads the value from `self` without moving it. This leaves the
995 /// memory in `self` unchanged.
996 ///
997 /// Unlike `read`, the pointer may be unaligned.
998 ///
999 /// # Safety
1000 ///
1001 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1002 /// moves the value out of `self` without preventing further usage of `self`.
1003 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1004 /// `self` is not used before the data is overwritten again (e.g. with `write`,
abe05a73 1005 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
ea8adc8c
XL
1006 /// because it will attempt to drop the value previously at `*self`.
1007 ///
1008 /// # Examples
1009 ///
1010 /// Basic usage:
1011 ///
1012 /// ```
1013 /// #![feature(pointer_methods)]
1014 ///
1015 /// let x = 12;
1016 /// let y = &x as *const i32;
1017 ///
1018 /// unsafe {
1019 /// assert_eq!(y.read_unaligned(), 12);
1020 /// }
1021 /// ```
1022 #[unstable(feature = "pointer_methods", issue = "43941")]
1023 #[inline]
1024 pub unsafe fn read_unaligned(self) -> T
1025 where T: Sized,
1026 {
1027 read_unaligned(self)
1028 }
1029
1030 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1031 /// and destination may overlap.
1032 ///
1033 /// NOTE: this has the *same* argument order as `ptr::copy`.
1034 ///
1035 /// This is semantically equivalent to C's `memmove`.
1036 ///
1037 /// # Safety
1038 ///
1039 /// Care must be taken with the ownership of `self` and `dest`.
1040 /// This method semantically moves the values of `self` into `dest`.
1041 /// However it does not drop the contents of `self`, or prevent the contents
1042 /// of `dest` from being dropped or used.
1043 ///
1044 /// # Examples
1045 ///
1046 /// Efficiently create a Rust vector from an unsafe buffer:
1047 ///
1048 /// ```
1049 /// #![feature(pointer_methods)]
1050 ///
1051 /// # #[allow(dead_code)]
1052 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1053 /// let mut dst = Vec::with_capacity(elts);
1054 /// dst.set_len(elts);
1055 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1056 /// dst
1057 /// }
1058 /// ```
1059 #[unstable(feature = "pointer_methods", issue = "43941")]
1060 #[inline]
1061 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1062 where T: Sized,
1063 {
1064 copy(self, dest, count)
1065 }
1066
1067 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1068 /// and destination may *not* overlap.
1069 ///
1070 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1071 ///
1072 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1073 ///
1074 /// # Safety
1075 ///
1076 /// Beyond requiring that the program must be allowed to access both regions
1077 /// of memory, it is Undefined Behavior for source and destination to
1078 /// overlap. Care must also be taken with the ownership of `self` and
1079 /// `dest`. This method semantically moves the values of `self` into `dest`.
1080 /// However it does not drop the contents of `dest`, or prevent the contents
1081 /// of `self` from being dropped or used.
1082 ///
1083 /// # Examples
1084 ///
1085 /// Efficiently create a Rust vector from an unsafe buffer:
1086 ///
1087 /// ```
1088 /// #![feature(pointer_methods)]
1089 ///
1090 /// # #[allow(dead_code)]
1091 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1092 /// let mut dst = Vec::with_capacity(elts);
1093 /// dst.set_len(elts);
1094 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1095 /// dst
1096 /// }
1097 /// ```
1098 #[unstable(feature = "pointer_methods", issue = "43941")]
1099 #[inline]
1100 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1101 where T: Sized,
1102 {
1103 copy_nonoverlapping(self, dest, count)
1104 }
1105
1106 /// Computes the byte offset that needs to be applied in order to
1107 /// make the pointer aligned to `align`.
1108 /// If it is not possible to align the pointer, the implementation returns
1109 /// `usize::max_value()`.
1110 ///
1111 /// There are no guarantees whatsoever that offsetting the pointer will not
1112 /// overflow or go beyond the allocation that the pointer points into.
1113 /// It is up to the caller to ensure that the returned offset is correct
1114 /// in all terms other than alignment.
1115 ///
1116 /// # Examples
1117 ///
1118 /// Accessing adjacent `u8` as `u16`
1119 ///
1120 /// ```
1121 /// # #![feature(align_offset)]
1122 /// # fn foo(n: usize) {
1123 /// # use std::mem::align_of;
1124 /// # unsafe {
1125 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1126 /// let ptr = &x[n] as *const u8;
1127 /// let offset = ptr.align_offset(align_of::<u16>());
1128 /// if offset < x.len() - n - 1 {
1129 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1130 /// assert_ne!(*u16_ptr, 500);
1131 /// } else {
1132 /// // while the pointer can be aligned via `offset`, it would point
1133 /// // outside the allocation
1134 /// }
1135 /// # } }
1136 /// ```
1137 #[unstable(feature = "align_offset", issue = "44488")]
1138 pub fn align_offset(self, align: usize) -> usize {
1139 unsafe {
1140 intrinsics::align_offset(self as *const _, align)
1141 }
1142 }
1a4d82fc
JJ
1143}
1144
c34b1796
AL
1145#[lang = "mut_ptr"]
1146impl<T: ?Sized> *mut T {
cc61c64b 1147 /// Returns `true` if the pointer is null.
54a0048b 1148 ///
ff7c6d11
XL
1149 /// Note that unsized types have many possible null pointers, as only the
1150 /// raw data pointer is considered, not their length, vtable, etc.
1151 /// Therefore, two pointers that are null may still not compare equal to
1152 /// each other.
1153 ///
54a0048b
SL
1154 /// # Examples
1155 ///
1156 /// Basic usage:
1157 ///
1158 /// ```
1159 /// let mut s = [1, 2, 3];
1160 /// let ptr: *mut u32 = s.as_mut_ptr();
1161 /// assert!(!ptr.is_null());
1162 /// ```
c34b1796
AL
1163 #[stable(feature = "rust1", since = "1.0.0")]
1164 #[inline]
ff7c6d11
XL
1165 pub fn is_null(self) -> bool {
1166 // Compare via a cast to a thin pointer, so fat pointers are only
1167 // considering their "data" part for null-ness.
1168 (self as *mut u8) == null_mut()
c34b1796 1169 }
1a4d82fc 1170
c34b1796
AL
1171 /// Returns `None` if the pointer is null, or else returns a reference to
1172 /// the value wrapped in `Some`.
1a4d82fc
JJ
1173 ///
1174 /// # Safety
1175 ///
c34b1796
AL
1176 /// While this method and its mutable counterpart are useful for
1177 /// null-safety, it is important to note that this is still an unsafe
1178 /// operation because the returned value could be pointing to invalid
1179 /// memory.
54a0048b
SL
1180 ///
1181 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1182 /// not necessarily reflect the actual lifetime of the data.
1183 ///
1184 /// # Examples
1185 ///
1186 /// Basic usage:
1187 ///
041b39d2
XL
1188 /// ```
1189 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
54a0048b
SL
1190 ///
1191 /// unsafe {
041b39d2 1192 /// if let Some(val_back) = ptr.as_ref() {
54a0048b
SL
1193 /// println!("We got back the value: {}!", val_back);
1194 /// }
1195 /// }
1196 /// ```
1197 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1a4d82fc 1198 #[inline]
abe05a73 1199 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
ff7c6d11 1200 if self.is_null() {
1a4d82fc
JJ
1201 None
1202 } else {
54a0048b 1203 Some(&*self)
1a4d82fc
JJ
1204 }
1205 }
1a4d82fc 1206
ea8adc8c
XL
1207 /// Calculates the offset from a pointer.
1208 ///
1209 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1210 /// offset of `3 * size_of::<T>()` bytes.
c34b1796
AL
1211 ///
1212 /// # Safety
1213 ///
ea8adc8c
XL
1214 /// If any of the following conditions are violated, the result is Undefined
1215 /// Behavior:
1216 ///
1217 /// * Both the starting and resulting pointer must be either in bounds or one
1218 /// byte past the end of an allocated object.
1219 ///
ff7c6d11 1220 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
ea8adc8c
XL
1221 ///
1222 /// * The offset being in bounds cannot rely on "wrapping around" the address
1223 /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
1224 ///
1225 /// The compiler and standard library generally tries to ensure allocations
1226 /// never reach a size where an offset is a concern. For instance, `Vec`
1227 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1228 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
1229 ///
1230 /// Most platforms fundamentally can't even construct such an allocation.
1231 /// For instance, no known 64-bit platform can ever serve a request
abe05a73 1232 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
ea8adc8c
XL
1233 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1234 /// more than `isize::MAX` bytes with things like Physical Address
1235 /// Extension. As such, memory acquired directly from allocators or memory
1236 /// mapped files *may* be too large to handle with this function.
1237 ///
1238 /// Consider using `wrapping_offset` instead if these constraints are
1239 /// difficult to satisfy. The only advantage of this method is that it
1240 /// enables more aggressive compiler optimizations.
54a0048b
SL
1241 ///
1242 /// # Examples
1243 ///
1244 /// Basic usage:
1245 ///
1246 /// ```
1247 /// let mut s = [1, 2, 3];
1248 /// let ptr: *mut u32 = s.as_mut_ptr();
1249 ///
1250 /// unsafe {
1251 /// println!("{}", *ptr.offset(1));
1252 /// println!("{}", *ptr.offset(2));
1253 /// }
1254 /// ```
85aaf69f 1255 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 1256 #[inline]
c34b1796 1257 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
85aaf69f 1258 intrinsics::offset(self, count) as *mut T
1a4d82fc
JJ
1259 }
1260
c30ab7b3
SL
1261 /// Calculates the offset from a pointer using wrapping arithmetic.
1262 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
cc61c64b 1263 /// offset of `3 * size_of::<T>()` bytes.
c30ab7b3
SL
1264 ///
1265 /// # Safety
1266 ///
1267 /// The resulting pointer does not need to be in bounds, but it is
1268 /// potentially hazardous to dereference (which requires `unsafe`).
1269 ///
1270 /// Always use `.offset(count)` instead when possible, because `offset`
1271 /// allows the compiler to optimize better.
1272 ///
1273 /// # Examples
1274 ///
1275 /// Basic usage:
1276 ///
1277 /// ```
c30ab7b3
SL
1278 /// // Iterate using a raw pointer in increments of two elements
1279 /// let mut data = [1u8, 2, 3, 4, 5];
1280 /// let mut ptr: *mut u8 = data.as_mut_ptr();
1281 /// let step = 2;
1282 /// let end_rounded_up = ptr.wrapping_offset(6);
1283 ///
1284 /// while ptr != end_rounded_up {
1285 /// unsafe {
1286 /// *ptr = 0;
1287 /// }
1288 /// ptr = ptr.wrapping_offset(step);
1289 /// }
1290 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1291 /// ```
32a655c1 1292 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
c30ab7b3
SL
1293 #[inline]
1294 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1295 unsafe {
1296 intrinsics::arith_offset(self, count) as *mut T
1297 }
1298 }
1299
c34b1796
AL
1300 /// Returns `None` if the pointer is null, or else returns a mutable
1301 /// reference to the value wrapped in `Some`.
1302 ///
1303 /// # Safety
1304 ///
1305 /// As with `as_ref`, this is unsafe because it cannot verify the validity
54a0048b
SL
1306 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1307 /// returned is indeed a valid lifetime for the contained data.
1308 ///
1309 /// # Examples
1310 ///
1311 /// Basic usage:
1312 ///
1313 /// ```
1314 /// let mut s = [1, 2, 3];
1315 /// let ptr: *mut u32 = s.as_mut_ptr();
a7813a04
XL
1316 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1317 /// *first_value = 4;
1318 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
54a0048b
SL
1319 /// ```
1320 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1a4d82fc 1321 #[inline]
abe05a73 1322 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
ff7c6d11 1323 if self.is_null() {
1a4d82fc
JJ
1324 None
1325 } else {
54a0048b 1326 Some(&mut *self)
1a4d82fc
JJ
1327 }
1328 }
cc61c64b
XL
1329
1330 /// Calculates the distance between two pointers. The returned value is in
1331 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1332 ///
1333 /// If the address difference between the two pointers is not a multiple of
1334 /// `mem::size_of::<T>()` then the result of the division is rounded towards
1335 /// zero.
1336 ///
1337 /// This function returns `None` if `T` is a zero-sized type.
1338 ///
1339 /// # Examples
1340 ///
1341 /// Basic usage:
1342 ///
1343 /// ```
1344 /// #![feature(offset_to)]
1345 ///
1346 /// fn main() {
1347 /// let mut a = [0; 5];
1348 /// let ptr1: *mut i32 = &mut a[1];
1349 /// let ptr2: *mut i32 = &mut a[3];
1350 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
1351 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
1352 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
1353 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
1354 /// }
1355 /// ```
1356 #[unstable(feature = "offset_to", issue = "41079")]
1357 #[inline]
1358 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
1359 let size = mem::size_of::<T>();
1360 if size == 0 {
1361 None
1362 } else {
1363 let diff = (other as isize).wrapping_sub(self as isize);
1364 Some(diff / size as isize)
1365 }
1366 }
1a4d82fc 1367
ea8adc8c
XL
1368 /// Computes the byte offset that needs to be applied in order to
1369 /// make the pointer aligned to `align`.
1370 /// If it is not possible to align the pointer, the implementation returns
1371 /// `usize::max_value()`.
1372 ///
1373 /// There are no guarantees whatsoever that offsetting the pointer will not
1374 /// overflow or go beyond the allocation that the pointer points into.
1375 /// It is up to the caller to ensure that the returned offset is correct
1376 /// in all terms other than alignment.
1377 ///
1378 /// # Examples
1379 ///
1380 /// Accessing adjacent `u8` as `u16`
1381 ///
1382 /// ```
1383 /// # #![feature(align_offset)]
1384 /// # fn foo(n: usize) {
1385 /// # use std::mem::align_of;
1386 /// # unsafe {
1387 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1388 /// let ptr = &x[n] as *const u8;
1389 /// let offset = ptr.align_offset(align_of::<u16>());
1390 /// if offset < x.len() - n - 1 {
1391 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1392 /// assert_ne!(*u16_ptr, 500);
1393 /// } else {
1394 /// // while the pointer can be aligned via `offset`, it would point
1395 /// // outside the allocation
1396 /// }
1397 /// # } }
1398 /// ```
1399 #[unstable(feature = "align_offset", issue = "44488")]
1400 pub fn align_offset(self, align: usize) -> usize {
1401 unsafe {
1402 intrinsics::align_offset(self as *const _, align)
1403 }
1404 }
1405
1406 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1407 ///
1408 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1409 /// offset of `3 * size_of::<T>()` bytes.
1410 ///
1411 /// # Safety
1412 ///
1413 /// If any of the following conditions are violated, the result is Undefined
1414 /// Behavior:
1415 ///
1416 /// * Both the starting and resulting pointer must be either in bounds or one
1417 /// byte past the end of an allocated object.
1418 ///
ff7c6d11 1419 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
ea8adc8c
XL
1420 ///
1421 /// * The offset being in bounds cannot rely on "wrapping around" the address
1422 /// space. That is, the infinite-precision sum must fit in a `usize`.
1423 ///
1424 /// The compiler and standard library generally tries to ensure allocations
1425 /// never reach a size where an offset is a concern. For instance, `Vec`
1426 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1427 /// `vec.as_ptr().add(vec.len())` is always safe.
1428 ///
1429 /// Most platforms fundamentally can't even construct such an allocation.
1430 /// For instance, no known 64-bit platform can ever serve a request
abe05a73 1431 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
ea8adc8c
XL
1432 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1433 /// more than `isize::MAX` bytes with things like Physical Address
1434 /// Extension. As such, memory acquired directly from allocators or memory
1435 /// mapped files *may* be too large to handle with this function.
1436 ///
1437 /// Consider using `wrapping_offset` instead if these constraints are
1438 /// difficult to satisfy. The only advantage of this method is that it
1439 /// enables more aggressive compiler optimizations.
1440 ///
1441 /// # Examples
1442 ///
1443 /// Basic usage:
1444 ///
1445 /// ```
1446 /// #![feature(pointer_methods)]
1447 ///
1448 /// let s: &str = "123";
1449 /// let ptr: *const u8 = s.as_ptr();
1450 ///
1451 /// unsafe {
1452 /// println!("{}", *ptr.add(1) as char);
1453 /// println!("{}", *ptr.add(2) as char);
1454 /// }
1455 /// ```
1456 #[unstable(feature = "pointer_methods", issue = "43941")]
1457 #[inline]
1458 pub unsafe fn add(self, count: usize) -> Self
1459 where T: Sized,
1460 {
1461 self.offset(count as isize)
1462 }
1463
1464 /// Calculates the offset from a pointer (convenience for
1465 /// `.offset((count as isize).wrapping_neg())`).
1466 ///
1467 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1468 /// offset of `3 * size_of::<T>()` bytes.
1469 ///
1470 /// # Safety
1471 ///
1472 /// If any of the following conditions are violated, the result is Undefined
1473 /// Behavior:
1474 ///
1475 /// * Both the starting and resulting pointer must be either in bounds or one
1476 /// byte past the end of an allocated object.
1477 ///
1478 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1479 ///
1480 /// * The offset being in bounds cannot rely on "wrapping around" the address
1481 /// space. That is, the infinite-precision sum must fit in a usize.
1482 ///
1483 /// The compiler and standard library generally tries to ensure allocations
1484 /// never reach a size where an offset is a concern. For instance, `Vec`
1485 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1486 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1487 ///
1488 /// Most platforms fundamentally can't even construct such an allocation.
1489 /// For instance, no known 64-bit platform can ever serve a request
abe05a73 1490 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
ea8adc8c
XL
1491 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1492 /// more than `isize::MAX` bytes with things like Physical Address
1493 /// Extension. As such, memory acquired directly from allocators or memory
1494 /// mapped files *may* be too large to handle with this function.
1495 ///
1496 /// Consider using `wrapping_offset` instead if these constraints are
1497 /// difficult to satisfy. The only advantage of this method is that it
1498 /// enables more aggressive compiler optimizations.
1499 ///
1500 /// # Examples
1501 ///
1502 /// Basic usage:
1503 ///
1504 /// ```
1505 /// #![feature(pointer_methods)]
1506 ///
1507 /// let s: &str = "123";
1508 ///
1509 /// unsafe {
1510 /// let end: *const u8 = s.as_ptr().add(3);
1511 /// println!("{}", *end.sub(1) as char);
1512 /// println!("{}", *end.sub(2) as char);
1513 /// }
1514 /// ```
1515 #[unstable(feature = "pointer_methods", issue = "43941")]
1516 #[inline]
1517 pub unsafe fn sub(self, count: usize) -> Self
1518 where T: Sized,
1519 {
1520 self.offset((count as isize).wrapping_neg())
1521 }
1522
1523 /// Calculates the offset from a pointer using wrapping arithmetic.
1524 /// (convenience for `.wrapping_offset(count as isize)`)
1525 ///
1526 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1527 /// offset of `3 * size_of::<T>()` bytes.
1528 ///
1529 /// # Safety
1530 ///
1531 /// The resulting pointer does not need to be in bounds, but it is
1532 /// potentially hazardous to dereference (which requires `unsafe`).
1533 ///
1534 /// Always use `.add(count)` instead when possible, because `add`
1535 /// allows the compiler to optimize better.
1536 ///
1537 /// # Examples
1538 ///
1539 /// Basic usage:
1540 ///
1541 /// ```
1542 /// #![feature(pointer_methods)]
1543 ///
1544 /// // Iterate using a raw pointer in increments of two elements
1545 /// let data = [1u8, 2, 3, 4, 5];
1546 /// let mut ptr: *const u8 = data.as_ptr();
1547 /// let step = 2;
1548 /// let end_rounded_up = ptr.wrapping_add(6);
1549 ///
1550 /// // This loop prints "1, 3, 5, "
1551 /// while ptr != end_rounded_up {
1552 /// unsafe {
1553 /// print!("{}, ", *ptr);
1554 /// }
1555 /// ptr = ptr.wrapping_add(step);
1556 /// }
1557 /// ```
1558 #[unstable(feature = "pointer_methods", issue = "43941")]
1559 #[inline]
1560 pub fn wrapping_add(self, count: usize) -> Self
1561 where T: Sized,
1562 {
1563 self.wrapping_offset(count as isize)
1564 }
1565
1566 /// Calculates the offset from a pointer using wrapping arithmetic.
1567 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1568 ///
1569 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1570 /// offset of `3 * size_of::<T>()` bytes.
1571 ///
1572 /// # Safety
1573 ///
1574 /// The resulting pointer does not need to be in bounds, but it is
1575 /// potentially hazardous to dereference (which requires `unsafe`).
1576 ///
1577 /// Always use `.sub(count)` instead when possible, because `sub`
1578 /// allows the compiler to optimize better.
1579 ///
1580 /// # Examples
1581 ///
1582 /// Basic usage:
1583 ///
1584 /// ```
1585 /// #![feature(pointer_methods)]
1586 ///
1587 /// // Iterate using a raw pointer in increments of two elements (backwards)
1588 /// let data = [1u8, 2, 3, 4, 5];
1589 /// let mut ptr: *const u8 = data.as_ptr();
1590 /// let start_rounded_down = ptr.wrapping_sub(2);
1591 /// ptr = ptr.wrapping_add(4);
1592 /// let step = 2;
1593 /// // This loop prints "5, 3, 1, "
1594 /// while ptr != start_rounded_down {
1595 /// unsafe {
1596 /// print!("{}, ", *ptr);
1597 /// }
1598 /// ptr = ptr.wrapping_sub(step);
1599 /// }
1600 /// ```
1601 #[unstable(feature = "pointer_methods", issue = "43941")]
1602 #[inline]
1603 pub fn wrapping_sub(self, count: usize) -> Self
1604 where T: Sized,
1605 {
1606 self.wrapping_offset((count as isize).wrapping_neg())
1607 }
1608
1609 /// Reads the value from `self` without moving it. This leaves the
1610 /// memory in `self` unchanged.
1611 ///
1612 /// # Safety
1613 ///
1614 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1615 /// moves the value out of `self` without preventing further usage of `self`.
1616 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1617 /// `self` is not used before the data is overwritten again (e.g. with `write`,
abe05a73 1618 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
ea8adc8c
XL
1619 /// because it will attempt to drop the value previously at `*self`.
1620 ///
1621 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
1622 ///
1623 /// # Examples
1624 ///
1625 /// Basic usage:
1626 ///
1627 /// ```
1628 /// #![feature(pointer_methods)]
1629 ///
1630 /// let x = 12;
1631 /// let y = &x as *const i32;
1632 ///
1633 /// unsafe {
1634 /// assert_eq!(y.read(), 12);
1635 /// }
1636 /// ```
1637 #[unstable(feature = "pointer_methods", issue = "43941")]
1638 #[inline]
1639 pub unsafe fn read(self) -> T
1640 where T: Sized,
1641 {
1642 read(self)
1643 }
1644
1645 /// Performs a volatile read of the value from `self` without moving it. This
1646 /// leaves the memory in `self` unchanged.
1647 ///
1648 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1649 /// to not be elided or reordered by the compiler across other volatile
1650 /// operations.
1651 ///
1652 /// # Notes
1653 ///
1654 /// Rust does not currently have a rigorously and formally defined memory model,
1655 /// so the precise semantics of what "volatile" means here is subject to change
1656 /// over time. That being said, the semantics will almost always end up pretty
1657 /// similar to [C11's definition of volatile][c11].
1658 ///
1659 /// The compiler shouldn't change the relative order or number of volatile
1660 /// memory operations. However, volatile memory operations on zero-sized types
1661 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1662 /// and may be ignored.
1663 ///
1664 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1665 ///
1666 /// # Safety
1667 ///
1668 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1669 /// moves the value out of `self` without preventing further usage of `self`.
1670 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1671 /// `self` is not used before the data is overwritten again (e.g. with `write`,
abe05a73 1672 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
ea8adc8c
XL
1673 /// because it will attempt to drop the value previously at `*self`.
1674 ///
1675 /// # Examples
1676 ///
1677 /// Basic usage:
1678 ///
1679 /// ```
1680 /// #![feature(pointer_methods)]
1681 ///
1682 /// let x = 12;
1683 /// let y = &x as *const i32;
1684 ///
1685 /// unsafe {
1686 /// assert_eq!(y.read_volatile(), 12);
1687 /// }
1688 /// ```
1689 #[unstable(feature = "pointer_methods", issue = "43941")]
1690 #[inline]
1691 pub unsafe fn read_volatile(self) -> T
1692 where T: Sized,
1693 {
1694 read_volatile(self)
1695 }
1696
1697 /// Reads the value from `self` without moving it. This leaves the
1698 /// memory in `self` unchanged.
1699 ///
1700 /// Unlike `read`, the pointer may be unaligned.
1701 ///
1702 /// # Safety
1703 ///
1704 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1705 /// moves the value out of `self` without preventing further usage of `self`.
1706 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1707 /// `self` is not used before the data is overwritten again (e.g. with `write`,
abe05a73 1708 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
ea8adc8c
XL
1709 /// because it will attempt to drop the value previously at `*self`.
1710 ///
1711 /// # Examples
1712 ///
1713 /// Basic usage:
1714 ///
1715 /// ```
1716 /// #![feature(pointer_methods)]
1717 ///
1718 /// let x = 12;
1719 /// let y = &x as *const i32;
1720 ///
1721 /// unsafe {
1722 /// assert_eq!(y.read_unaligned(), 12);
1723 /// }
1724 /// ```
1725 #[unstable(feature = "pointer_methods", issue = "43941")]
1726 #[inline]
1727 pub unsafe fn read_unaligned(self) -> T
1728 where T: Sized,
1729 {
1730 read_unaligned(self)
1731 }
1732
1733 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1734 /// and destination may overlap.
1735 ///
1736 /// NOTE: this has the *same* argument order as `ptr::copy`.
1737 ///
1738 /// This is semantically equivalent to C's `memmove`.
1739 ///
1740 /// # Safety
1741 ///
1742 /// Care must be taken with the ownership of `self` and `dest`.
1743 /// This method semantically moves the values of `self` into `dest`.
1744 /// However it does not drop the contents of `self`, or prevent the contents
1745 /// of `dest` from being dropped or used.
1746 ///
1747 /// # Examples
1748 ///
1749 /// Efficiently create a Rust vector from an unsafe buffer:
1750 ///
1751 /// ```
1752 /// #![feature(pointer_methods)]
1753 ///
1754 /// # #[allow(dead_code)]
1755 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1756 /// let mut dst = Vec::with_capacity(elts);
1757 /// dst.set_len(elts);
1758 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1759 /// dst
1760 /// }
1761 /// ```
1762 #[unstable(feature = "pointer_methods", issue = "43941")]
1763 #[inline]
1764 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1765 where T: Sized,
1766 {
1767 copy(self, dest, count)
1768 }
1769
    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
    /// and destination may *not* overlap.
    ///
    /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
    ///
    /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
    ///
    /// # Safety
    ///
    /// Beyond requiring that the program must be allowed to access both regions
    /// of memory, it is Undefined Behavior for source and destination to
    /// overlap. Care must also be taken with the ownership of `self` and
    /// `dest`. This method semantically moves the values of `self` into `dest`.
    /// However it does not drop the contents of `dest`, or prevent the contents
    /// of `self` from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
    ///     dst
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
        where T: Sized,
    {
        // Delegates to the free function `ptr::copy_nonoverlapping`.
        copy_nonoverlapping(self, dest, count)
    }
1808
    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
    /// and destination may overlap.
    ///
    /// NOTE: this has the *opposite* argument order of `ptr::copy`.
    ///
    /// This is semantically equivalent to C's `memmove`.
    ///
    /// # Safety
    ///
    /// Care must be taken with the ownership of `src` and `self`.
    /// This method semantically moves the values of `src` into `self`.
    /// However it does not drop the contents of `self`, or prevent the contents
    /// of `src` from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst: Vec<T> = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     dst.as_mut_ptr().copy_from(ptr, elts);
    ///     dst
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn copy_from(self, src: *const T, count: usize)
        where T: Sized,
    {
        // Same as `ptr::copy`, with `self` as the destination.
        copy(src, self, count)
    }
1845
    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
    /// and destination may *not* overlap.
    ///
    /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
    ///
    /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
    ///
    /// # Safety
    ///
    /// Beyond requiring that the program must be allowed to access both regions
    /// of memory, it is Undefined Behavior for source and destination to
    /// overlap. Care must also be taken with the ownership of `src` and
    /// `self`. This method semantically moves the values of `src` into `self`.
    /// However it does not drop the contents of `self`, or prevent the contents
    /// of `src` from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst: Vec<T> = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
    ///     dst
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
        where T: Sized,
    {
        // Same as `ptr::copy_nonoverlapping`, with `self` as the destination.
        copy_nonoverlapping(src, self, count)
    }
1884
    /// Executes the destructor (if any) of the pointed-to value.
    ///
    /// This has two use cases:
    ///
    /// * It is *required* to use `drop_in_place` to drop unsized types like
    ///   trait objects, because they can't be read out onto the stack and
    ///   dropped normally.
    ///
    /// * It is friendlier to the optimizer to do this over `ptr::read` when
    ///   dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
    ///   as the compiler doesn't need to prove that it's sound to elide the
    ///   copy.
    ///
    /// # Safety
    ///
    /// This has all the same safety problems as `ptr::read` with respect to
    /// invalid pointers, types, and double drops.
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn drop_in_place(self) {
        // Delegates to the free function `ptr::drop_in_place`; note there is
        // no `T: Sized` bound here, unlike the other methods.
        drop_in_place(self)
    }
1907
    /// Overwrites a memory location with the given value without reading or
    /// dropping the old value.
    ///
    /// # Safety
    ///
    /// This operation is marked unsafe because it writes through a raw pointer.
    ///
    /// It does not drop the contents of `self`. This is safe, but it could leak
    /// allocations or resources, so care must be taken not to overwrite an object
    /// that should be dropped.
    ///
    /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
    /// location pointed to by `self`.
    ///
    /// This is appropriate for initializing uninitialized memory, or overwriting
    /// memory that has previously been `read` from.
    ///
    /// The pointer must be aligned; use `write_unaligned` if that is not the case.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 0;
    /// let y = &mut x as *mut i32;
    /// let z = 12;
    ///
    /// unsafe {
    ///     y.write(z);
    ///     assert_eq!(y.read(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn write(self, val: T)
        where T: Sized,
    {
        // Delegates to the free function `ptr::write`.
        write(self, val)
    }
1950
    /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
    /// bytes of memory starting at `self` to `val`.
    ///
    /// # Safety
    ///
    /// This operation is unsafe because it writes raw memory through `self`;
    /// the pointer must be valid for writes of `count * size_of::<T>()` bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut vec = vec![0; 4];
    /// unsafe {
    ///     let vec_ptr = vec.as_mut_ptr();
    ///     vec_ptr.write_bytes(b'a', 2);
    /// }
    /// assert_eq!(vec, [b'a', b'a', 0, 0]);
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn write_bytes(self, val: u8, count: usize)
        where T: Sized,
    {
        // Delegates to the free function `ptr::write_bytes` (memset-like).
        write_bytes(self, val, count)
    }
1973
    /// Performs a volatile write of a memory location with the given value without
    /// reading or dropping the old value.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// # Notes
    ///
    /// Rust does not currently have a rigorously and formally defined memory model,
    /// so the precise semantics of what "volatile" means here is subject to change
    /// over time. That being said, the semantics will almost always end up pretty
    /// similar to [C11's definition of volatile][c11].
    ///
    /// The compiler shouldn't change the relative order or number of volatile
    /// memory operations. However, volatile memory operations on zero-sized types
    /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
    /// and may be ignored.
    ///
    /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
    ///
    /// # Safety
    ///
    /// This operation is marked unsafe because it accepts a raw pointer.
    ///
    /// It does not drop the contents of `self`. This is safe, but it could leak
    /// allocations or resources, so care must be taken not to overwrite an object
    /// that should be dropped.
    ///
    /// This is appropriate for initializing uninitialized memory, or overwriting
    /// memory that has previously been `read` from.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 0;
    /// let y = &mut x as *mut i32;
    /// let z = 12;
    ///
    /// unsafe {
    ///     y.write_volatile(z);
    ///     assert_eq!(y.read_volatile(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn write_volatile(self, val: T)
        where T: Sized,
    {
        // Delegates to the free function `ptr::write_volatile`.
        write_volatile(self, val)
    }
2029
    /// Overwrites a memory location with the given value without reading or
    /// dropping the old value.
    ///
    /// Unlike `write`, the pointer may be unaligned.
    ///
    /// # Safety
    ///
    /// This operation is marked unsafe because it writes through a raw pointer.
    ///
    /// It does not drop the contents of `self`. This is safe, but it could leak
    /// allocations or resources, so care must be taken not to overwrite an object
    /// that should be dropped.
    ///
    /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
    /// location pointed to by `self`.
    ///
    /// This is appropriate for initializing uninitialized memory, or overwriting
    /// memory that has previously been `read` from.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 0;
    /// let y = &mut x as *mut i32;
    /// let z = 12;
    ///
    /// unsafe {
    ///     y.write_unaligned(z);
    ///     assert_eq!(y.read_unaligned(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn write_unaligned(self, val: T)
        where T: Sized,
    {
        // Delegates to the free function `ptr::write_unaligned`.
        write_unaligned(self, val)
    }
2072
    /// Replaces the value at `self` with `src`, returning the old
    /// value, without dropping either.
    ///
    /// # Safety
    ///
    /// This is only unsafe because it accepts a raw pointer.
    /// Otherwise, this operation is identical to `mem::replace`.
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn replace(self, src: T) -> T
        where T: Sized,
    {
        // Delegates to the free function `ptr::replace`.
        replace(self, src)
    }
2087
    /// Swaps the values at two mutable locations of the same type, without
    /// deinitializing either. They may overlap, unlike `mem::swap` which is
    /// otherwise equivalent.
    ///
    /// # Safety
    ///
    /// This function copies the memory through the raw pointers passed to it
    /// as arguments.
    ///
    /// Ensure that these pointers are valid before calling `swap`.
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn swap(self, with: *mut T)
        where T: Sized,
    {
        // Delegates to the free function `ptr::swap` (overlap-tolerant).
        swap(self, with)
    }
2105}
2106
// Equality for pointers
//
// These impls compare the pointers themselves (the `==` on raw pointers is
// the primitive comparison); the pointed-to values are never inspected.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
    #[inline]
    fn eq(&self, other: &*const T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *const T {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *mut T {
    #[inline]
    fn eq(&self, other: &*mut T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *mut T {}
1a4d82fc 2125
9e0c209e
SL
/// Compare raw pointers for equality.
///
/// This is the same as using the `==` operator, but less generic:
/// the arguments have to be `*const T` raw pointers,
/// not anything that implements `PartialEq`.
///
/// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
/// by their address rather than comparing the values they point to
/// (which is what the `PartialEq for &T` implementation does).
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let five = 5;
/// let other_five = 5;
/// let five_ref = &five;
/// let same_five_ref = &five;
/// let other_five_ref = &other_five;
///
/// assert!(five_ref == same_five_ref);
/// assert!(five_ref == other_five_ref);
///
/// assert!(ptr::eq(five_ref, same_five_ref));
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
    a == b
}
2158
e9174d1e
SL
// Impls for function pointers
//
// `fnptr_impls_safety_abi!` expands to the comparison, hashing, and
// formatting impls for one concrete function-pointer type `$FnTy`.
// Every impl operates on the function's address (the pointer cast to
// `usize` or `*const ()`), never on the code it points to.
macro_rules! fnptr_impls_safety_abi {
    ($FnTy: ty, $($Arg: ident),*) => {
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialEq for $FnTy {
            #[inline]
            fn eq(&self, other: &Self) -> bool {
                *self as usize == *other as usize
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Eq for $FnTy {}

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialOrd for $FnTy {
            #[inline]
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                (*self as usize).partial_cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Ord for $FnTy {
            #[inline]
            fn cmp(&self, other: &Self) -> Ordering {
                (*self as usize).cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> hash::Hash for $FnTy {
            fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
                // Hash the address, consistent with the equality impl above.
                state.write_usize(*self as usize)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }

        // Debug formats identically to Pointer: there is nothing more
        // informative to print for a bare function pointer.
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }
    }
}
2211
// `fnptr_impls_args!` invokes the macro above for every safety/ABI
// combination of a function pointer with the given argument list:
// safe and `unsafe`, `extern "Rust"` and `extern "C"`. The non-empty
// case also covers C variadic signatures (`...`); the zero-argument
// case skips them because a variadic function needs at least one
// named parameter.
macro_rules! fnptr_impls_args {
    ($($Arg: ident),+) => {
        fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
    };
    () => {
        // No variadic functions with 0 parameters
        fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
    };
}
2229
// Generate the trait impls for function pointers taking 0 through 12
// arguments (the same arity limit used for tuples in this era of libcore).
fnptr_impls_args! { }
fnptr_impls_args! { A }
fnptr_impls_args! { A, B }
fnptr_impls_args! { A, B, C }
fnptr_impls_args! { A, B, C, D }
fnptr_impls_args! { A, B, C, D, E }
fnptr_impls_args! { A, B, C, D, E, F }
fnptr_impls_args! { A, B, C, D, E, F, G }
fnptr_impls_args! { A, B, C, D, E, F, G, H }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2243
// Comparison for pointers
//
// Ordering compares the pointers themselves; it says nothing about the
// pointed-to values.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *const T {
    #[inline]
    fn cmp(&self, other: &*const T) -> Ordering {
        if self < other {
            Less
        } else if self == other {
            Equal
        } else {
            Greater
        }
    }
}
2258
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *const T {
    #[inline]
    fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
        // Raw pointers are totally ordered, so this never returns `None`.
        Some(self.cmp(other))
    }

    // The individual comparison operators are overridden to use the
    // primitive pointer comparisons directly.
    #[inline]
    fn lt(&self, other: &*const T) -> bool { *self < *other }

    #[inline]
    fn le(&self, other: &*const T) -> bool { *self <= *other }

    #[inline]
    fn gt(&self, other: &*const T) -> bool { *self > *other }

    #[inline]
    fn ge(&self, other: &*const T) -> bool { *self >= *other }
}
2278
// Same address-based total order as the `*const T` impl above.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *mut T {
    #[inline]
    fn cmp(&self, other: &*mut T) -> Ordering {
        if self < other {
            Less
        } else if self == other {
            Equal
        } else {
            Greater
        }
    }
}
2292
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *mut T {
    #[inline]
    fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
        // Raw pointers are totally ordered, so this never returns `None`.
        Some(self.cmp(other))
    }

    // The individual comparison operators are overridden to use the
    // primitive pointer comparisons directly.
    #[inline]
    fn lt(&self, other: &*mut T) -> bool { *self < *other }

    #[inline]
    fn le(&self, other: &*mut T) -> bool { *self <= *other }

    #[inline]
    fn gt(&self, other: &*mut T) -> bool { *self > *other }

    #[inline]
    fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
2312
/// A wrapper around a raw non-null `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. Useful for building abstractions like
/// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
///
/// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
/// the kind of strong aliasing guarantees an instance of `T` can expect:
/// the referent of the pointer should not be modified without a unique path to
/// its owning Unique.
///
/// If you're uncertain of whether it's correct to use `Unique` for your purposes,
/// consider using `NonNull`, which has weaker semantics.
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
/// for any type which upholds Unique's aliasing requirements.
#[unstable(feature = "ptr_internals", issue = "0",
           reason = "use NonNull instead and consider PhantomData<T> \
                     (if you also use #[may_dangle]), Send, and/or Sync")]
pub struct Unique<T: ?Sized> {
    // Stored as `NonZero<*const T>` to get the niche optimization and
    // covariance over `T`.
    pointer: NonZero<*const T>,
    // NOTE: this marker has no consequences for variance, but is necessary
    // for dropck to understand that we logically own a `T`.
    //
    // For details, see:
    // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
    _marker: PhantomData<T>,
}
1a4d82fc 2345
2c00a5a8
XL
#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> fmt::Debug for Unique<T> {
    // Debug-formats the address via the `Pointer` formatter; the pointee
    // is never read.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}
2352
1a4d82fc
JJ
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "ptr_internals", issue = "0")]
unsafe impl<T: Send + ?Sized> Send for Unique<T> { }

/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "ptr_internals", issue = "0")]
unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2366
#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: Sized> Unique<T> {
    /// Creates a new `Unique` that is dangling, but well-aligned.
    ///
    /// This is useful for initializing types which lazily allocate, like
    /// `Vec::new` does.
    // FIXME: rename to dangling() to match NonNull?
    pub fn empty() -> Self {
        unsafe {
            // An alignment is always non-zero, so the resulting pointer
            // satisfies `new_unchecked`'s non-null requirement and is
            // well-aligned for `T` -- though it may dangle.
            let ptr = mem::align_of::<T>() as *mut T;
            Unique::new_unchecked(ptr)
        }
    }
}
2381
#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> Unique<T> {
    /// Creates a new `Unique`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        Unique { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
    }

    /// Creates a new `Unique` if `ptr` is non-null.
    pub fn new(ptr: *mut T) -> Option<Self> {
        // `NonZero::new` performs the null check; a null `ptr` yields `None`.
        NonZero::new(ptr as *const T).map(|nz| Unique { pointer: nz, _marker: PhantomData })
    }

    /// Acquires the underlying `*mut` pointer.
    pub fn as_ptr(self) -> *mut T {
        self.pointer.get() as *mut T
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    pub unsafe fn as_ref(&self) -> &T {
        &*self.as_ptr()
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    pub unsafe fn as_mut(&mut self) -> &mut T {
        &mut *self.as_ptr()
    }
}
1a4d82fc 2421
#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> Clone for Unique<T> {
    // `Unique` is `Copy`, so cloning is just a bitwise copy of the pointer.
    fn clone(&self) -> Self {
        *self
    }
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> Copy for Unique<T> { }

// Allow unsizing coercions of `Unique<T>` (e.g. sized type to trait
// object), mirroring the coercions of the underlying raw pointer.
#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> fmt::Pointer for Unique<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}
b039eaaf 2441
#[unstable(feature = "ptr_internals", issue = "0")]
impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
    // A `&mut T` is always non-null, so this conversion cannot fail.
    fn from(reference: &'a mut T) -> Self {
        Unique { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
    // A shared reference is likewise always non-null.
    fn from(reference: &'a T) -> Self {
        Unique { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
    // NOTE(review): this only carries over non-nullness; whoever performs
    // the conversion must ensure `Unique`'s aliasing expectations hold.
    fn from(p: NonNull<T>) -> Self {
        Unique { pointer: p.pointer, _marker: PhantomData }
    }
}
2462
/// Previous name of `NonNull`.
// Deprecated alias kept so code written against the old `shared` feature
// keeps compiling.
#[rustc_deprecated(since = "1.25.0", reason = "renamed to `NonNull`")]
#[unstable(feature = "shared", issue = "27730")]
pub type Shared<T> = NonNull<T>;
2467
/// `*mut T` but non-zero and covariant.
///
/// This is often the correct thing to use when building data structures using
/// raw pointers, but is ultimately more dangerous to use because of its additional
/// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<NonNull<T>>` has the same size as `NonNull<T>`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
/// for your use case, you should include some PhantomData in your type to
/// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
/// Usually this won't be necessary; covariance is correct for most safe abstractions,
/// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
/// provide a public API that follows the normal shared XOR mutable rules of Rust.
#[stable(feature = "nonnull", since = "1.25.0")]
pub struct NonNull<T: ?Sized> {
    // Stored as `NonZero<*const T>` to get the niche optimization and
    // covariance; the public API hands out `*mut T`.
    pointer: NonZero<*const T>,
}
2489
/// `NonNull` pointers are not `Send` because the data they reference may be aliased.
// NB: This impl is unnecessary, but should provide better error messages.
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> !Send for NonNull<T> { }

/// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
// NB: This impl is unnecessary, but should provide better error messages.
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> !Sync for NonNull<T> { }
b039eaaf 2499
2c00a5a8
XL
impl<T: Sized> NonNull<T> {
    /// Creates a new `NonNull` that is dangling, but well-aligned.
    ///
    /// This is useful for initializing types which lazily allocate, like
    /// `Vec::new` does.
    #[stable(feature = "nonnull", since = "1.25.0")]
    pub fn dangling() -> Self {
        unsafe {
            // An alignment is always non-zero, so the resulting pointer
            // satisfies `new_unchecked`'s non-null requirement and is
            // well-aligned for `T` -- though it may dangle.
            let ptr = mem::align_of::<T>() as *mut T;
            NonNull::new_unchecked(ptr)
        }
    }
}
2513
2c00a5a8
XL
impl<T: ?Sized> NonNull<T> {
    /// Creates a new `NonNull`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[stable(feature = "nonnull", since = "1.25.0")]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        NonNull { pointer: NonZero::new_unchecked(ptr) }
    }

    /// Creates a new `NonNull` if `ptr` is non-null.
    #[stable(feature = "nonnull", since = "1.25.0")]
    pub fn new(ptr: *mut T) -> Option<Self> {
        // `NonZero::new` performs the null check; a null `ptr` yields `None`.
        NonZero::new(ptr as *const T).map(|nz| NonNull { pointer: nz })
    }

    /// Acquires the underlying `*mut` pointer.
    #[stable(feature = "nonnull", since = "1.25.0")]
    pub fn as_ptr(self) -> *mut T {
        self.pointer.get() as *mut T
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    #[stable(feature = "nonnull", since = "1.25.0")]
    pub unsafe fn as_ref(&self) -> &T {
        &*self.as_ptr()
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    #[stable(feature = "nonnull", since = "1.25.0")]
    pub unsafe fn as_mut(&mut self) -> &mut T {
        &mut *self.as_ptr()
    }

    /// Cast to a pointer of another type
    #[unstable(feature = "nonnull_cast", issue = "47653")]
    pub fn cast<U>(self) -> NonNull<U> {
        unsafe {
            // SAFETY: `self` is non-null, so the cast pointer is too.
            NonNull::new_unchecked(self.as_ptr() as *mut U)
        }
    }
}
2565
2c00a5a8
XL
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Clone for NonNull<T> {
    // `NonNull` is `Copy`, so cloning is a bitwise copy of the pointer.
    fn clone(&self) -> Self {
        *self
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Copy for NonNull<T> { }

// Allow unsizing coercions of `NonNull<T>` (e.g. sized type to trait
// object), mirroring the coercions of the underlying raw pointer.
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }

// Debug formats identically to Pointer: the address is the only thing
// that can be shown without dereferencing.
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> fmt::Debug for NonNull<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> fmt::Pointer for NonNull<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}
2592
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Eq for NonNull<T> {}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> PartialEq for NonNull<T> {
    // Compares the pointers themselves; the pointees are never inspected.
    fn eq(&self, other: &Self) -> bool {
        self.as_ptr() == other.as_ptr()
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Ord for NonNull<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.as_ptr().cmp(&other.as_ptr())
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> PartialOrd for NonNull<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.as_ptr().partial_cmp(&other.as_ptr())
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> hash::Hash for NonNull<T> {
    // Hashes the pointer, consistent with the equality impl in this group.
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.as_ptr().hash(state)
    }
}
2623
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
    // A `Unique` is already non-null, so its pointer carries over directly.
    fn from(unique: Unique<T>) -> Self {
        NonNull { pointer: unique.pointer }
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> {
    // A `&mut T` is always non-null, so this conversion cannot fail.
    fn from(reference: &'a mut T) -> Self {
        NonNull { pointer: NonZero::from(reference) }
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<'a, T: ?Sized> From<&'a T> for NonNull<T> {
    // A shared reference is likewise always non-null.
    fn from(reference: &'a T) -> Self {
        NonNull { pointer: NonZero::from(reference) }
    }
}