// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
//! Raw, unsafe pointers, `*const T`, and `*mut T`.
//!
//! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
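//!
//! An illustrative sketch of obtaining raw pointers from references and
//! reading and writing through them:
//!
//! ```
//! let value: i32 = 7;
//! let p: *const i32 = &value;   // a shared reference coerces to *const
//! let mut slot: i32 = 0;
//! let q: *mut i32 = &mut slot;  // a mutable reference coerces to *mut
//! unsafe {
//!     // Both pointers are non-null, aligned, and point to live i32s.
//!     *q = *p;
//! }
//! assert_eq!(slot, 7);
//! ```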
#![stable(feature = "rust1", since = "1.0.0")]

use intrinsics;
use ops::CoerceUnsized;
use marker::{PhantomData, Unsize};
use mem;

use cmp::Ordering::{self, Less, Equal, Greater};

#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy_nonoverlapping;

#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy;

#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::write_bytes;

/// Executes the destructor (if any) of the pointed-to value.
///
/// This has two use cases:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
///   trait objects, because they can't be read out onto the stack and
///   dropped normally.
///
/// * It is friendlier to the optimizer to do this over `ptr::read` when
///   dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
///   as the compiler doesn't need to prove that it's sound to elide the
///   copy.
///
/// This has all the same safety problems as `ptr::read` with respect to
/// invalid pointers, types, and double drops.
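///
/// # Examples
///
/// An illustrative sketch: dropping the last element of a vector in place and
/// then shrinking the length so that it is never dropped a second time.
///
/// ```
/// use std::ptr;
///
/// let mut v = vec![String::from("Hello"), String::from("World")];
/// unsafe {
///     // Run the destructor of the last element in place...
///     ptr::drop_in_place(&mut v[1]);
///     // ...then shrink the length so the Vec never touches that slot again.
///     v.set_len(1);
/// }
/// assert_eq!(v, [String::from("Hello")]);
/// ```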
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
    // Code here does not matter - this is replaced by the
    // real drop glue by the compiler.
    drop_in_place(to_drop)
}

/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_null")]
pub const fn null<T>() -> *const T { 0 as *const T }

/// Creates a null mutable raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_null_mut")]
pub const fn null_mut<T>() -> *mut T { 0 as *mut T }

/// Swaps the values at two mutable locations of the same type, without
/// deinitializing either. They may overlap, unlike `mem::swap` which is
/// otherwise equivalent.
///
/// # Safety
///
/// This function copies the memory through the raw pointers passed to it
/// as arguments.
///
/// Ensure that these pointers are valid before calling `swap`.
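///
/// # Examples
///
/// An illustrative sketch of swapping the contents of two local variables
/// through raw pointers:
///
/// ```
/// use std::ptr;
///
/// let mut a = 1;
/// let mut b = 2;
/// unsafe {
///     ptr::swap(&mut a, &mut b);
/// }
/// assert_eq!((a, b), (2, 1));
/// ```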
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
    // Give ourselves some scratch space to work with
    let mut tmp: T = mem::uninitialized();

    // Perform the swap
    copy_nonoverlapping(x, &mut tmp, 1);
    copy(y, x, 1); // `x` and `y` may overlap
    copy_nonoverlapping(&tmp, y, 1);

    // y and t now point to the same thing, but we need to completely forget `tmp`
    // because it's no longer relevant.
    mem::forget(tmp);
}

/// Swaps a sequence of values at two mutable locations of the same type.
///
/// # Safety
///
/// The two arguments must each point to the beginning of `count` locations
/// of valid memory, and the two memory ranges must not overlap.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(swap_nonoverlapping)]
///
/// use std::ptr;
///
/// let mut x = [1, 2, 3, 4];
/// let mut y = [7, 8, 9];
///
/// unsafe {
///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
/// }
///
/// assert_eq!(x, [7, 8, 3, 4]);
/// assert_eq!(y, [1, 2, 9]);
/// ```
#[unstable(feature = "swap_nonoverlapping", issue = "42818")]
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
    let x = x as *mut u8;
    let y = y as *mut u8;
    let len = mem::size_of::<T>() * count;
    swap_nonoverlapping_bytes(x, y, len)
}

unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    // It's also broken on big-endian powerpc64 and s390x. #42778
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                       target_endian = "big")),
               repr(simd))]
    struct Block(u64, u64, u64, u64);
    struct UnalignedBlock(u64, u64, u64, u64);

    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t: Block = mem::uninitialized();
        let t = &mut t as *mut _ as *mut u8;
        let x = x.offset(i as isize);
        let y = y.offset(i as isize);

        // Swap a block of bytes of x & y, using t as a temporary buffer
        // This should be optimized into efficient SIMD operations where available
        copy_nonoverlapping(x, t, block_size);
        copy_nonoverlapping(y, x, block_size);
        copy_nonoverlapping(t, y, block_size);
        i += block_size;
    }

    if i < len {
        // Swap any remaining bytes
        let mut t: UnalignedBlock = mem::uninitialized();
        let rem = len - i;

        let t = &mut t as *mut _ as *mut u8;
        let x = x.offset(i as isize);
        let y = y.offset(i as isize);

        copy_nonoverlapping(x, t, rem);
        copy_nonoverlapping(y, x, rem);
        copy_nonoverlapping(t, y, rem);
    }
}

/// Replaces the value at `dest` with `src`, returning the old
/// value, without dropping either.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
/// Otherwise, this operation is identical to `mem::replace`.
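///
/// # Examples
///
/// An illustrative sketch of replacing a value behind a raw pointer:
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("old");
/// let prev = unsafe { ptr::replace(&mut s, String::from("new")) };
/// assert_eq!(prev, "old");
/// assert_eq!(s, "new");
/// ```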
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
    mem::swap(&mut *dest, &mut src); // cannot overlap
    src
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
///
/// The pointer must be aligned; use `read_unaligned` if that is not the case.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
    let mut tmp: T = mem::uninitialized();
    copy_nonoverlapping(src, &mut tmp, 1);
    tmp
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// Unlike `read`, the pointer may be unaligned.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
/// ```
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn read_unaligned<T>(src: *const T) -> T {
    let mut tmp: T = mem::uninitialized();
    copy_nonoverlapping(src as *const u8,
                        &mut tmp as *mut T as *mut u8,
                        mem::size_of::<T>());
    tmp
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// The pointer must be aligned; use `write_unaligned` if that is not the case.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
    intrinsics::move_val_init(&mut *dst, src)
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike `write`, the pointer may be unaligned.
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_unaligned(y, z);
///     assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
/// ```
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
    copy_nonoverlapping(&src as *const T as *const u8,
                        dst as *mut u8,
                        mem::size_of::<T>());
    mem::forget(src);
}

/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    intrinsics::volatile_load(src)
}

/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    intrinsics::volatile_store(dst, src);
}

#[lang = "const_ptr"]
impl<T: ?Sized> *const T {
    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "Follow the rabbit";
    /// let ptr: *const u8 = s.as_ptr();
    /// assert!(!ptr.is_null());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_null(self) -> bool where T: Sized {
        self == null()
    }

    /// Returns `None` if the pointer is null, or else returns a reference to
    /// the value wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// While this method and its mutable counterpart are useful for
    /// null-safety, it is important to note that this is still an unsafe
    /// operation because the returned value could be pointing to invalid
    /// memory.
    ///
    /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
    /// not necessarily reflect the actual lifetime of the data.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_ref() {
    ///         println!("We got back the value: {}!", val_back);
    ///     }
    /// }
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
        // Check for null via a cast to a thin pointer, so fat pointers are only
        // considering their "data" part for null-ness.
        if (self as *const u8).is_null() {
            None
        } else {
            Some(&*self)
        }
    }

    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of an allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow or underflow an
    ///   `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1) as char);
    ///     println!("{}", *ptr.offset(2) as char);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
        intrinsics::offset(self, count)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.offset(count)` instead when possible, because `offset`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_offset(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_offset(step);
    /// }
    /// ```
    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
    pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
        unsafe {
            intrinsics::arith_offset(self, count)
        }
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// If the address difference between the two pointers is not a multiple of
    /// `mem::size_of::<T>()` then the result of the division is rounded towards
    /// zero.
    ///
    /// This function returns `None` if `T` is a zero-sized type.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(offset_to)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
    /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
    /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
    /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
    /// ```
    #[unstable(feature = "offset_to", issue = "41079")]
    pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
        let size = mem::size_of::<T>();
        if size == 0 {
            None
        } else {
            let diff = (other as isize).wrapping_sub(self as isize);
            Some(diff / size as isize)
        }
    }

    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of an allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow or underflow an
    ///   `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.add(1) as char);
    ///     println!("{}", *ptr.add(2) as char);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn add(self, count: usize) -> Self
        where T: Sized,
    {
        self.offset(count as isize)
    }

    /// Calculates the offset from a pointer (convenience for
    /// `.offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of an allocated object.
    ///
    /// * The computed offset cannot exceed `isize::MAX` **bytes**.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let s: &str = "123";
    ///
    /// unsafe {
    ///     let end: *const u8 = s.as_ptr().add(3);
    ///     println!("{}", *end.sub(1) as char);
    ///     println!("{}", *end.sub(2) as char);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn sub(self, count: usize) -> Self
        where T: Sized,
    {
        self.offset((count as isize).wrapping_neg())
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset(count as isize)`)
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.add(count)` instead when possible, because `add`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_add(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_add(step);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub fn wrapping_add(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset(count as isize)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.sub(count)` instead when possible, because `sub`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// // Iterate using a raw pointer in increments of two elements (backwards)
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let start_rounded_down = ptr.wrapping_sub(2);
    /// ptr = ptr.wrapping_add(4);
    /// let step = 2;
    ///
    /// // This loop prints "5, 3, 1, "
    /// while ptr != start_rounded_down {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_sub(step);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub fn wrapping_sub(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset((count as isize).wrapping_neg())
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// # Safety
    ///
    /// Beyond accepting a raw pointer, this is unsafe because it semantically
    /// moves the value out of `self` without preventing further usage of `self`.
    /// If `T` is not `Copy`, then care must be taken to ensure that the value at
    /// `self` is not used before the data is overwritten again (e.g. with `write`,
    /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
    /// because it will attempt to drop the value previously at `*self`.
    ///
    /// The pointer must be aligned; use `read_unaligned` if that is not the case.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let x = 12;
    /// let y = &x as *const i32;
    ///
    /// unsafe {
    ///     assert_eq!(y.read(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn read(self) -> T
        where T: Sized,
    {
        read(self)
    }

    /// Performs a volatile read of the value from `self` without moving it. This
    /// leaves the memory in `self` unchanged.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// # Notes
    ///
    /// Rust does not currently have a rigorously and formally defined memory model,
    /// so the precise semantics of what "volatile" means here is subject to change
    /// over time. That being said, the semantics will almost always end up pretty
    /// similar to [C11's definition of volatile][c11].
    ///
    /// The compiler shouldn't change the relative order or number of volatile
    /// memory operations. However, volatile memory operations on zero-sized types
    /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
    /// and may be ignored.
    ///
    /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
    ///
    /// # Safety
    ///
    /// Beyond accepting a raw pointer, this is unsafe because it semantically
    /// moves the value out of `self` without preventing further usage of `self`.
    /// If `T` is not `Copy`, then care must be taken to ensure that the value at
    /// `self` is not used before the data is overwritten again (e.g. with `write`,
    /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
    /// because it will attempt to drop the value previously at `*self`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let x = 12;
    /// let y = &x as *const i32;
    ///
    /// unsafe {
    ///     assert_eq!(y.read_volatile(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn read_volatile(self) -> T
        where T: Sized,
    {
        read_volatile(self)
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// Unlike `read`, the pointer may be unaligned.
    ///
    /// # Safety
    ///
    /// Beyond accepting a raw pointer, this is unsafe because it semantically
    /// moves the value out of `self` without preventing further usage of `self`.
    /// If `T` is not `Copy`, then care must be taken to ensure that the value at
    /// `self` is not used before the data is overwritten again (e.g. with `write`,
    /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
    /// because it will attempt to drop the value previously at `*self`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let x = 12;
    /// let y = &x as *const i32;
    ///
    /// unsafe {
    ///     assert_eq!(y.read_unaligned(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn read_unaligned(self) -> T
        where T: Sized,
    {
        read_unaligned(self)
    }

    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
    /// and destination may overlap.
    ///
    /// NOTE: this has the *same* argument order as `ptr::copy`.
    ///
    /// This is semantically equivalent to C's `memmove`.
    ///
    /// # Safety
    ///
    /// Care must be taken with the ownership of `self` and `dest`.
    /// This method semantically moves the values of `self` into `dest`.
    /// However it does not drop the contents of `dest`, or prevent the contents
    /// of `self` from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     ptr.copy_to(dst.as_mut_ptr(), elts);
    ///     dst
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn copy_to(self, dest: *mut T, count: usize)
        where T: Sized,
    {
        copy(self, dest, count)
    }

    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
    /// and destination may *not* overlap.
    ///
    /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
    ///
    /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
    ///
    /// # Safety
    ///
    /// Beyond requiring that the program must be allowed to access both regions
    /// of memory, it is Undefined Behavior for source and destination to
    /// overlap. Care must also be taken with the ownership of `self` and
    /// `dest`. This method semantically moves the values of `self` into `dest`.
    /// However it does not drop the contents of `dest`, or prevent the contents
    /// of `self` from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
    ///     dst
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
        where T: Sized,
    {
        copy_nonoverlapping(self, dest, count)
    }

    /// Computes the byte offset that needs to be applied in order to
    /// make the pointer aligned to `align`.
    /// If it is not possible to align the pointer, the implementation returns
    /// `usize::max_value()`.
    ///
    /// There are no guarantees whatsoever that offsetting the pointer will not
    /// overflow or go beyond the allocation that the pointer points into.
    /// It is up to the caller to ensure that the returned offset is correct
    /// in all terms other than alignment.
    ///
    /// # Examples
    ///
    /// Accessing adjacent `u8` as `u16`
    ///
    /// ```
    /// # #![feature(align_offset)]
    /// # fn foo(n: usize) {
    /// # use std::mem::align_of;
    /// # unsafe {
    /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
    /// let ptr = &x[n] as *const u8;
    /// let offset = ptr.align_offset(align_of::<u16>());
    /// if offset < x.len() - n - 1 {
    ///     let u16_ptr = ptr.offset(offset as isize) as *const u16;
    ///     assert_ne!(*u16_ptr, 500);
    /// } else {
    ///     // while the pointer can be aligned via `offset`, it would point
    ///     // outside the allocation
    /// }
    /// # } }
    /// ```
    #[unstable(feature = "align_offset", issue = "44488")]
    pub fn align_offset(self, align: usize) -> usize {
        unsafe {
            intrinsics::align_offset(self as *const _, align)
        }
    }
}

#[lang = "mut_ptr"]
impl<T: ?Sized> *mut T {
    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = [1, 2, 3];
    /// let ptr: *mut u32 = s.as_mut_ptr();
    /// assert!(!ptr.is_null());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_null(self) -> bool where T: Sized {
        self == null_mut()
    }

    /// Returns `None` if the pointer is null, or else returns a reference to
    /// the value wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// While this method and its mutable counterpart are useful for
    /// null-safety, it is important to note that this is still an unsafe
    /// operation because the returned value could be pointing to invalid
    /// memory.
    ///
    /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
    /// not necessarily reflect the actual lifetime of the data.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_ref() {
    ///         println!("We got back the value: {}!", val_back);
    ///     }
    /// }
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
        // Check for null via a cast to a thin pointer, so fat pointers are only
        // considering their "data" part for null-ness.
        if (self as *const u8).is_null() {
            None
        } else {
            Some(&*self)
        }
    }

    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of an allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow or underflow an
    ///   `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = [1, 2, 3];
    /// let ptr: *mut u32 = s.as_mut_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1));
    ///     println!("{}", *ptr.offset(2));
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
        intrinsics::offset(self, count) as *mut T
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.offset(count)` instead when possible, because `offset`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let mut data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *mut u8 = data.as_mut_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_offset(6);
    ///
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         *ptr = 0;
    ///     }
    ///     ptr = ptr.wrapping_offset(step);
    /// }
    /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
    /// ```
    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
    pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
        unsafe {
            intrinsics::arith_offset(self, count) as *mut T
        }
    }

    /// Returns `None` if the pointer is null, or else returns a mutable
    /// reference to the value wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// As with `as_ref`, this is unsafe because it cannot verify the validity
    /// of the returned pointer, nor can it ensure that the lifetime `'a`
    /// returned is indeed a valid lifetime for the contained data.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = [1, 2, 3];
    /// let ptr: *mut u32 = s.as_mut_ptr();
    /// let first_value = unsafe { ptr.as_mut().unwrap() };
    /// *first_value = 4;
    /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
        // Check for null via a cast to a thin pointer, so fat pointers are only
        // considering their "data" part for null-ness.
        if (self as *mut u8).is_null() {
            None
        } else {
            Some(&mut *self)
        }
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// If the address difference between the two pointers is not a multiple of
    /// `mem::size_of::<T>()` then the result of the division is rounded towards
    /// zero.
    ///
    /// This function returns `None` if `T` is a zero-sized type.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(offset_to)]
    ///
    /// let mut a = [0; 5];
    /// let ptr1: *mut i32 = &mut a[1];
    /// let ptr2: *mut i32 = &mut a[3];
    /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
    /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
    /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
    /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
    /// ```
    #[unstable(feature = "offset_to", issue = "41079")]
    pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
        let size = mem::size_of::<T>();
        if size == 0 {
            None
        } else {
            let diff = (other as isize).wrapping_sub(self as isize);
            Some(diff / size as isize)
        }
    }

    /// Computes the byte offset that needs to be applied in order to
    /// make the pointer aligned to `align`.
    /// If it is not possible to align the pointer, the implementation returns
    /// `usize::max_value()`.
    ///
    /// There are no guarantees whatsoever that offsetting the pointer will not
    /// overflow or go beyond the allocation that the pointer points into.
    /// It is up to the caller to ensure that the returned offset is correct
    /// in all terms other than alignment.
    ///
    /// # Examples
    ///
    /// Accessing adjacent `u8` as `u16`
    ///
    /// ```
    /// # #![feature(align_offset)]
    /// # fn foo(n: usize) {
    /// # use std::mem::align_of;
    /// # unsafe {
    /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
    /// let ptr = &x[n] as *const u8;
    /// let offset = ptr.align_offset(align_of::<u16>());
    /// if offset < x.len() - n - 1 {
    ///     let u16_ptr = ptr.offset(offset as isize) as *const u16;
    ///     assert_ne!(*u16_ptr, 500);
    /// } else {
    ///     // while the pointer can be aligned via `offset`, it would point
    ///     // outside the allocation
    /// }
    /// # } }
    /// ```
    #[unstable(feature = "align_offset", issue = "44488")]
    pub fn align_offset(self, align: usize) -> usize {
        unsafe {
            intrinsics::align_offset(self as *const _, align)
        }
    }

    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of an allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow or underflow an
    ///   `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.add(1) as char);
    ///     println!("{}", *ptr.add(2) as char);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn add(self, count: usize) -> Self
        where T: Sized,
    {
        self.offset(count as isize)
    }

    /// Calculates the offset from a pointer (convenience for
    /// `.offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of an allocated object.
    ///
    /// * The computed offset cannot exceed `isize::MAX` **bytes**.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let s: &str = "123";
    ///
    /// unsafe {
    ///     let end: *const u8 = s.as_ptr().add(3);
    ///     println!("{}", *end.sub(1) as char);
    ///     println!("{}", *end.sub(2) as char);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn sub(self, count: usize) -> Self
        where T: Sized,
    {
        self.offset((count as isize).wrapping_neg())
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset(count as isize)`)
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.add(count)` instead when possible, because `add`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_add(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_add(step);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub fn wrapping_add(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset(count as isize)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.sub(count)` instead when possible, because `sub`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// // Iterate using a raw pointer in increments of two elements (backwards)
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let start_rounded_down = ptr.wrapping_sub(2);
    /// ptr = ptr.wrapping_add(4);
    /// let step = 2;
    ///
    /// // This loop prints "5, 3, 1, "
    /// while ptr != start_rounded_down {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_sub(step);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub fn wrapping_sub(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset((count as isize).wrapping_neg())
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// # Safety
    ///
    /// Beyond accepting a raw pointer, this is unsafe because it semantically
    /// moves the value out of `self` without preventing further usage of `self`.
    /// If `T` is not `Copy`, then care must be taken to ensure that the value at
    /// `self` is not used before the data is overwritten again (e.g. with `write`,
    /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
    /// because it will attempt to drop the value previously at `*self`.
    ///
    /// The pointer must be aligned; use `read_unaligned` if that is not the case.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let x = 12;
    /// let y = &x as *const i32;
    ///
    /// unsafe {
    ///     assert_eq!(y.read(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn read(self) -> T
        where T: Sized,
    {
        read(self)
    }

    /// Performs a volatile read of the value from `self` without moving it. This
    /// leaves the memory in `self` unchanged.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// # Notes
    ///
    /// Rust does not currently have a rigorously and formally defined memory model,
    /// so the precise semantics of what "volatile" means here is subject to change
    /// over time. That being said, the semantics will almost always end up pretty
    /// similar to [C11's definition of volatile][c11].
    ///
    /// The compiler shouldn't change the relative order or number of volatile
    /// memory operations. However, volatile memory operations on zero-sized types
    /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
    /// and may be ignored.
    ///
    /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
    ///
    /// # Safety
    ///
    /// Beyond accepting a raw pointer, this is unsafe because it semantically
    /// moves the value out of `self` without preventing further usage of `self`.
    /// If `T` is not `Copy`, then care must be taken to ensure that the value at
    /// `self` is not used before the data is overwritten again (e.g. with `write`,
    /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
    /// because it will attempt to drop the value previously at `*self`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let x = 12;
    /// let y = &x as *const i32;
    ///
    /// unsafe {
    ///     assert_eq!(y.read_volatile(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn read_volatile(self) -> T
        where T: Sized,
    {
        read_volatile(self)
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// Unlike `read`, the pointer may be unaligned.
    ///
    /// # Safety
    ///
    /// Beyond accepting a raw pointer, this is unsafe because it semantically
    /// moves the value out of `self` without preventing further usage of `self`.
    /// If `T` is not `Copy`, then care must be taken to ensure that the value at
    /// `self` is not used before the data is overwritten again (e.g. with `write`,
    /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
    /// because it will attempt to drop the value previously at `*self`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let x = 12;
    /// let y = &x as *const i32;
    ///
    /// unsafe {
    ///     assert_eq!(y.read_unaligned(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn read_unaligned(self) -> T
        where T: Sized,
    {
        read_unaligned(self)
    }

    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
    /// and destination may overlap.
    ///
    /// NOTE: this has the *same* argument order as `ptr::copy`.
    ///
    /// This is semantically equivalent to C's `memmove`.
    ///
    /// # Safety
    ///
    /// Care must be taken with the ownership of `self` and `dest`.
    /// This method semantically moves the values of `self` into `dest`.
    /// However it does not drop the contents of `dest`, or prevent the contents
    /// of `self` from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     ptr.copy_to(dst.as_mut_ptr(), elts);
    ///     dst
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn copy_to(self, dest: *mut T, count: usize)
        where T: Sized,
    {
        copy(self, dest, count)
    }

    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
    /// and destination may *not* overlap.
    ///
    /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
    ///
    /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
    ///
    /// # Safety
    ///
    /// Beyond requiring that the program must be allowed to access both regions
    /// of memory, it is Undefined Behavior for source and destination to
    /// overlap. Care must also be taken with the ownership of `self` and
    /// `dest`. This method semantically moves the values of `self` into `dest`.
    /// However it does not drop the contents of `dest`, or prevent the contents
    /// of `self` from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
    ///     dst
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
        where T: Sized,
    {
        copy_nonoverlapping(self, dest, count)
    }

    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
    /// and destination may overlap.
    ///
    /// NOTE: this has the *opposite* argument order of `ptr::copy`.
    ///
    /// This is semantically equivalent to C's `memmove`.
    ///
    /// # Safety
    ///
    /// Care must be taken with the ownership of `src` and `self`.
    /// This method semantically moves the values of `src` into `self`.
    /// However it does not drop the contents of `self`, or prevent the contents
    /// of `src` from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     dst.as_mut_ptr().copy_from(ptr, elts);
    ///     dst
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn copy_from(self, src: *const T, count: usize)
        where T: Sized,
    {
        copy(src, self, count)
    }

    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
    /// and destination may *not* overlap.
    ///
    /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
    ///
    /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
    ///
    /// # Safety
    ///
    /// Beyond requiring that the program must be allowed to access both regions
    /// of memory, it is Undefined Behavior for source and destination to
    /// overlap. Care must also be taken with the ownership of `src` and
    /// `self`. This method semantically moves the values of `src` into `self`.
    /// However it does not drop the contents of `self`, or prevent the contents
    /// of `src` from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
    ///     dst
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
        where T: Sized,
    {
        copy_nonoverlapping(src, self, count)
    }

    /// Executes the destructor (if any) of the pointed-to value.
    ///
    /// This has two use cases:
    ///
    /// * It is *required* to use `drop_in_place` to drop unsized types like
    ///   trait objects, because they can't be read out onto the stack and
    ///   dropped normally.
    ///
    /// * It is friendlier to the optimizer to do this over `ptr::read` when
    ///   dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
    ///   as the compiler doesn't need to prove that it's sound to elide the
    ///   copy.
    ///
    /// This has all the same safety problems as `ptr::read` with respect to
    /// invalid pointers, types, and double drops.
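    ///
    /// # Examples
    ///
    /// An illustrative sketch: running a `String`'s destructor exactly once by
    /// wrapping it in `ManuallyDrop` and dropping it through a raw pointer.
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// use std::mem::ManuallyDrop;
    ///
    /// let mut s = ManuallyDrop::new(String::from("hello"));
    /// let p: *mut String = &mut *s;
    /// unsafe {
    ///     // The wrapper suppresses the automatic drop, so this is the only
    ///     // time the destructor runs.
    ///     p.drop_in_place();
    /// }
    /// ```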
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn drop_in_place(self) {
        drop_in_place(self)
    }

    /// Overwrites a memory location with the given value without reading or
    /// dropping the old value.
    ///
    /// # Safety
    ///
    /// This operation is marked unsafe because it writes through a raw pointer.
    ///
    /// It does not drop the contents of `self`. This is safe, but it could leak
    /// allocations or resources, so care must be taken not to overwrite an object
    /// that should be dropped.
    ///
    /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
    /// location pointed to by `self`.
    ///
    /// This is appropriate for initializing uninitialized memory, or overwriting
    /// memory that has previously been `read` from.
    ///
    /// The pointer must be aligned; use `write_unaligned` if that is not the case.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 0;
    /// let y = &mut x as *mut i32;
    /// let z = 12;
    ///
    /// unsafe {
    ///     y.write(z);
    ///     assert_eq!(y.read(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn write(self, val: T)
        where T: Sized,
    {
        write(self, val)
    }

    /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
    /// bytes of memory starting at `self` to `val`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut vec = vec![0; 4];
    /// unsafe {
    ///     let vec_ptr = vec.as_mut_ptr();
    ///     vec_ptr.write_bytes(b'a', 2);
    /// }
    /// assert_eq!(vec, [b'a', b'a', 0, 0]);
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn write_bytes(self, val: u8, count: usize)
        where T: Sized,
    {
        write_bytes(self, val, count)
    }

    /// Performs a volatile write of a memory location with the given value without
    /// reading or dropping the old value.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// # Notes
    ///
    /// Rust does not currently have a rigorously and formally defined memory model,
    /// so the precise semantics of what "volatile" means here is subject to change
    /// over time. That being said, the semantics will almost always end up pretty
    /// similar to [C11's definition of volatile][c11].
    ///
    /// The compiler shouldn't change the relative order or number of volatile
    /// memory operations. However, volatile memory operations on zero-sized types
    /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
    /// and may be ignored.
    ///
    /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
    ///
    /// # Safety
    ///
    /// This operation is marked unsafe because it accepts a raw pointer.
    ///
    /// It does not drop the contents of `self`. This is safe, but it could leak
    /// allocations or resources, so care must be taken not to overwrite an object
    /// that should be dropped.
    ///
    /// This is appropriate for initializing uninitialized memory, or overwriting
    /// memory that has previously been `read` from.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 0;
    /// let y = &mut x as *mut i32;
    /// let z = 12;
    ///
    /// unsafe {
    ///     y.write_volatile(z);
    ///     assert_eq!(y.read_volatile(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn write_volatile(self, val: T)
        where T: Sized,
    {
        write_volatile(self, val)
    }
    /// Overwrites a memory location with the given value without reading or
    /// dropping the old value.
    ///
    /// Unlike `write`, the pointer may be unaligned.
    ///
    /// # Safety
    ///
    /// This operation is marked unsafe because it writes through a raw pointer.
    ///
    /// It does not drop the contents of `self`. This is safe, but it could leak
    /// allocations or resources, so care must be taken not to overwrite an object
    /// that should be dropped.
    ///
    /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
    /// location pointed to by `self`.
    ///
    /// This is appropriate for initializing uninitialized memory, or overwriting
    /// memory that has previously been `read` from.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 0;
    /// let y = &mut x as *mut i32;
    /// let z = 12;
    ///
    /// unsafe {
    ///     y.write_unaligned(z);
    ///     assert_eq!(y.read_unaligned(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn write_unaligned(self, val: T) where T: Sized {
        write_unaligned(self, val)
    }
    /// Replaces the value at `self` with `src`, returning the old
    /// value, without dropping either.
    ///
    /// # Safety
    ///
    /// This is only unsafe because it accepts a raw pointer.
    /// Otherwise, this operation is identical to `mem::replace`.
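    ///
    /// # Examples
    ///
    /// A minimal illustrative sketch (not part of the original documentation);
    /// the values are arbitrary:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 5;
    /// let p = &mut x as *mut i32;
    /// let old = unsafe { p.replace(7) };
    /// assert_eq!(old, 5);
    /// assert_eq!(x, 7);
    /// ```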
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn replace(self, src: T) -> T where T: Sized {
        replace(self, src)
    }
    /// Swaps the values at two mutable locations of the same type, without
    /// deinitializing either. They may overlap, unlike `mem::swap` which is
    /// otherwise equivalent.
    ///
    /// # Safety
    ///
    /// This function copies the memory through the raw pointers passed to it
    /// as arguments.
    ///
    /// Ensure that these pointers are valid before calling `swap`.
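    ///
    /// # Examples
    ///
    /// A minimal illustrative sketch (not part of the original documentation);
    /// the values are arbitrary:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut a = 1;
    /// let mut b = 2;
    /// unsafe {
    ///     (&mut a as *mut i32).swap(&mut b as *mut i32);
    /// }
    /// assert_eq!((a, b), (2, 1));
    /// ```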
    #[unstable(feature = "pointer_methods", issue = "43941")]
    pub unsafe fn swap(self, with: *mut T) where T: Sized {
        swap(self, with)
    }
}
// Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
    fn eq(&self, other: &*const T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *const T {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *mut T {
    fn eq(&self, other: &*mut T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *mut T {}
/// Compare raw pointers for equality.
///
/// This is the same as using the `==` operator, but less generic:
/// the arguments have to be `*const T` raw pointers,
/// not anything that implements `PartialEq`.
///
/// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
/// by their address rather than comparing the values they point to
/// (which is what the `PartialEq for &T` implementation does).
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let five = 5;
/// let other_five = 5;
/// let five_ref = &five;
/// let same_five_ref = &five;
/// let other_five_ref = &other_five;
///
/// assert!(five_ref == same_five_ref);
/// assert!(five_ref == other_five_ref);
///
/// assert!(ptr::eq(five_ref, same_five_ref));
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
    a == b
}
// Impls for function pointers
macro_rules! fnptr_impls_safety_abi {
    ($FnTy: ty, $($Arg: ident),*) => {
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialEq for $FnTy {
            fn eq(&self, other: &Self) -> bool {
                *self as usize == *other as usize
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Eq for $FnTy {}

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialOrd for $FnTy {
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                (*self as usize).partial_cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Ord for $FnTy {
            fn cmp(&self, other: &Self) -> Ordering {
                (*self as usize).cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> hash::Hash for $FnTy {
            fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
                state.write_usize(*self as usize)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }
    }
}
macro_rules! fnptr_impls_args {
    ($($Arg: ident),+) => {
        fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
    };
    () => {
        // No variadic functions with 0 parameters
        fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
    };
}
fnptr_impls_args! { }
fnptr_impls_args! { A }
fnptr_impls_args! { A, B }
fnptr_impls_args! { A, B, C }
fnptr_impls_args! { A, B, C, D }
fnptr_impls_args! { A, B, C, D, E }
fnptr_impls_args! { A, B, C, D, E, F }
fnptr_impls_args! { A, B, C, D, E, F, G }
fnptr_impls_args! { A, B, C, D, E, F, G, H }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
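// For illustration, a hypothetical snippet (not part of this module) showing
// what the impls generated above provide: function pointers can be compared,
// ordered, hashed, and formatted by address.
//
//     fn f() {}
//     fn g() {}
//     let a: fn() = f;
//     let b: fn() = g;
//     assert!(a == a);                 // PartialEq/Eq by address
//     assert!(a < b || b < a);         // PartialOrd/Ord by address
//     println!("{:p} {:?}", a, b);     // fmt::Pointer and fmt::Debug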
// Comparison for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *const T {
    fn cmp(&self, other: &*const T) -> Ordering {
        if self < other {
            Less
        } else if self == other {
            Equal
        } else {
            Greater
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *const T {
    fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
        Some(self.cmp(other))
    }

    fn lt(&self, other: &*const T) -> bool { *self < *other }

    fn le(&self, other: &*const T) -> bool { *self <= *other }

    fn gt(&self, other: &*const T) -> bool { *self > *other }

    fn ge(&self, other: &*const T) -> bool { *self >= *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *mut T {
    fn cmp(&self, other: &*mut T) -> Ordering {
        if self < other {
            Less
        } else if self == other {
            Equal
        } else {
            Greater
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *mut T {
    fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
        Some(self.cmp(other))
    }

    fn lt(&self, other: &*mut T) -> bool { *self < *other }

    fn le(&self, other: &*mut T) -> bool { *self <= *other }

    fn gt(&self, other: &*mut T) -> bool { *self > *other }

    fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
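// Illustrative note (hypothetical snippet, not part of this module): raw
// pointers compare and order purely by address, without inspecting the
// pointed-to values.
//
//     let arr = [1, 2];
//     let p0 = &arr[0] as *const i32;
//     let p1 = &arr[1] as *const i32;
//     assert!(p0 < p1);
//     assert_eq!(p0.cmp(&p1), std::cmp::Ordering::Less);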
/// A wrapper around a raw non-null `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. Useful for building abstractions like
/// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
///
/// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
/// the kind of strong aliasing guarantees an instance of `T` can expect:
/// the referent of the pointer should not be modified without a unique path to
/// its owning Unique.
///
/// If you're uncertain of whether it's correct to use `Unique` for your purposes,
/// consider using `Shared`, which has weaker semantics.
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
/// for any type which upholds Unique's aliasing requirements.
#[allow(missing_debug_implementations)]
#[unstable(feature = "unique", reason = "needs an RFC to flesh out design",
           issue = "27730")]
pub struct Unique<T: ?Sized> {
    pointer: NonZero<*const T>,
    // NOTE: this marker has no consequences for variance, but is necessary
    // for dropck to understand that we logically own a `T`.
    //
    // For details, see:
    // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
    _marker: PhantomData<T>,
}
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Send + ?Sized> Send for Unique<T> { }

/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
#[unstable(feature = "unique", issue = "27730")]
impl<T: Sized> Unique<T> {
    /// Creates a new `Unique` that is dangling, but well-aligned.
    ///
    /// This is useful for initializing types which lazily allocate, like
    /// `Vec::new` does.
    pub fn empty() -> Self {
        unsafe {
            let ptr = mem::align_of::<T>() as *mut T;
            Unique::new_unchecked(ptr)
        }
    }
}
#[unstable(feature = "unique", issue = "27730")]
impl<T: ?Sized> Unique<T> {
    /// Creates a new `Unique`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[unstable(feature = "unique", issue = "27730")]
    #[rustc_const_unstable(feature = "const_unique_new")]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        Unique { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
    }

    /// Creates a new `Unique` if `ptr` is non-null.
    pub fn new(ptr: *mut T) -> Option<Self> {
        NonZero::new(ptr as *const T).map(|nz| Unique { pointer: nz, _marker: PhantomData })
    }
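    // A hypothetical usage sketch (illustrative only; assumes the `unique`
    // feature gate and that `Unique` is importable from `ptr`):
    //
    //     let mut x = 5i32;
    //     let mut u = Unique::new(&mut x as *mut i32).expect("non-null");
    //     unsafe { *u.as_mut() = 7; }
    //     assert_eq!(x, 7);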
    /// Acquires the underlying `*mut` pointer.
    pub fn as_ptr(self) -> *mut T {
        self.pointer.get() as *mut T
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    pub unsafe fn as_ref(&self) -> &T {
        &*self.as_ptr()
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    pub unsafe fn as_mut(&mut self) -> &mut T {
        &mut *self.as_ptr()
    }
}
#[unstable(feature = "unique", issue = "27730")]
impl<T: ?Sized> Clone for Unique<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[unstable(feature = "unique", issue = "27730")]
impl<T: ?Sized> Copy for Unique<T> { }

#[unstable(feature = "unique", issue = "27730")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
#[unstable(feature = "unique", issue = "27730")]
impl<T: ?Sized> fmt::Pointer for Unique<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}
#[unstable(feature = "unique", issue = "27730")]
impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
    fn from(reference: &'a mut T) -> Self {
        Unique { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}

#[unstable(feature = "unique", issue = "27730")]
impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
    fn from(reference: &'a T) -> Self {
        Unique { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}
/// A wrapper around a raw `*mut T` that indicates that the possessor
/// of this wrapper has shared ownership of the referent. Useful for
/// building abstractions like `Rc<T>`, `Arc<T>`, or doubly-linked lists, which
/// internally use aliased raw pointers to manage the memory that they own.
///
/// This is similar to `Unique`, except that it doesn't make any aliasing
/// guarantees, and doesn't derive Send and Sync. Note that unlike `&T`,
/// Shared has no special mutability requirements. Shared may mutate data
/// aliased by other Shared pointers. More precise rules require Rust to
/// develop an actual aliasing model.
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<Shared<T>>` has the same size as `Shared<T>`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `Shared<T>` is covariant over `T`. If this is incorrect
/// for your use case, you should include some PhantomData in your type to
/// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
/// Usually this won't be necessary; covariance is correct for Rc, Arc, and LinkedList
/// because they provide a public API that follows the normal shared XOR mutable
/// rules of Rust.
#[allow(missing_debug_implementations)]
#[unstable(feature = "shared", reason = "needs an RFC to flesh out design",
           issue = "27730")]
pub struct Shared<T: ?Sized> {
    pointer: NonZero<*const T>,
    // NOTE: this marker has no consequences for variance, but is necessary
    // for dropck to understand that we logically own a `T`.
    //
    // For details, see:
    // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
    _marker: PhantomData<T>,
}
/// `Shared` pointers are not `Send` because the data they reference may be aliased.
// NB: This impl is unnecessary, but should provide better error messages.
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> !Send for Shared<T> { }

/// `Shared` pointers are not `Sync` because the data they reference may be aliased.
// NB: This impl is unnecessary, but should provide better error messages.
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> !Sync for Shared<T> { }
#[unstable(feature = "shared", issue = "27730")]
impl<T: Sized> Shared<T> {
    /// Creates a new `Shared` that is dangling, but well-aligned.
    ///
    /// This is useful for initializing types which lazily allocate, like
    /// `Vec::new` does.
    pub fn empty() -> Self {
        unsafe {
            let ptr = mem::align_of::<T>() as *mut T;
            Shared::new_unchecked(ptr)
        }
    }
}
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> Shared<T> {
    /// Creates a new `Shared`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[unstable(feature = "shared", issue = "27730")]
    #[rustc_const_unstable(feature = "const_shared_new")]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        Shared { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
    }

    /// Creates a new `Shared` if `ptr` is non-null.
    pub fn new(ptr: *mut T) -> Option<Self> {
        NonZero::new(ptr as *const T).map(|nz| Shared { pointer: nz, _marker: PhantomData })
    }
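    // A hypothetical usage sketch (illustrative only; assumes the `shared`
    // feature gate and that `Shared` is importable from `ptr`): two `Shared`
    // copies may alias the same location, much as `Rc` clones do.
    //
    //     let mut x = 0i32;
    //     let a = Shared::new(&mut x as *mut i32).expect("non-null");
    //     let b = a;                    // `Shared` is `Copy`; both alias `x`
    //     unsafe { *a.as_ptr() += 1; *b.as_ptr() += 1; }
    //     assert_eq!(x, 2);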
    /// Acquires the underlying `*mut` pointer.
    pub fn as_ptr(self) -> *mut T {
        self.pointer.get() as *mut T
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    pub unsafe fn as_ref(&self) -> &T {
        &*self.as_ptr()
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    pub unsafe fn as_mut(&mut self) -> &mut T {
        &mut *self.as_ptr()
    }

    /// Acquires the underlying pointer as a `*mut` pointer.
    #[rustc_deprecated(since = "1.19", reason = "renamed to `as_ptr` for ergonomics/consistency")]
    #[unstable(feature = "shared", issue = "27730")]
    pub unsafe fn as_mut_ptr(&self) -> *mut T {
        self.as_ptr()
    }
}
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> Clone for Shared<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> Copy for Shared<T> { }

#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Shared<U>> for Shared<T> where T: Unsize<U> { }
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> fmt::Pointer for Shared<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> From<Unique<T>> for Shared<T> {
    fn from(unique: Unique<T>) -> Self {
        Shared { pointer: unique.pointer, _marker: PhantomData }
    }
}

#[unstable(feature = "shared", issue = "27730")]
impl<'a, T: ?Sized> From<&'a mut T> for Shared<T> {
    fn from(reference: &'a mut T) -> Self {
        Shared { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}

#[unstable(feature = "shared", issue = "27730")]
impl<'a, T: ?Sized> From<&'a T> for Shared<T> {
    fn from(reference: &'a T) -> Self {
        Shared { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}