]> git.proxmox.com Git - rustc.git/blob - src/libcore/ptr.rs
New upstream version 1.23.0+dfsg1
[rustc.git] / src / libcore / ptr.rs
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 // FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
12
13 //! Raw, unsafe pointers, `*const T`, and `*mut T`.
14 //!
15 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
16
17 #![stable(feature = "rust1", since = "1.0.0")]
18
19 use convert::From;
20 use intrinsics;
21 use ops::CoerceUnsized;
22 use fmt;
23 use hash;
24 use marker::{PhantomData, Unsize};
25 use mem;
26 use nonzero::NonZero;
27
28 use cmp::Ordering::{self, Less, Equal, Greater};
29
30 #[stable(feature = "rust1", since = "1.0.0")]
31 pub use intrinsics::copy_nonoverlapping;
32
33 #[stable(feature = "rust1", since = "1.0.0")]
34 pub use intrinsics::copy;
35
36 #[stable(feature = "rust1", since = "1.0.0")]
37 pub use intrinsics::write_bytes;
38
/// Executes the destructor (if any) of the pointed-to value.
///
/// This has two use cases:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
///   trait objects, because they can't be read out onto the stack and
///   dropped normally.
///
/// * It is friendlier to the optimizer to do this over `ptr::read` when
///   dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
///   as the compiler doesn't need to prove that it's sound to elide the
///   copy.
///
/// # Safety
///
/// This has all the same safety problems as `ptr::read` with respect to
/// invalid pointers, types, and double drops.
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
    // Code here does not matter - this is replaced by the
    // real drop glue by the compiler: the `drop_in_place` lang item
    // causes the compiler to substitute generated drop glue for this
    // body, so the apparent infinite recursion never actually runs.
    drop_in_place(to_drop);
}
64
/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_null")]
// The plain `0 as *const T` cast is the form accepted in a `const fn`
// body; it produces the canonical null pointer for any sized `T`.
pub const fn null<T>() -> *const T { 0 as *const T }
79
/// Creates a null mutable raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_null_mut")]
// Mutable twin of `null`; same const-compatible integer cast.
pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
94
/// Swaps the values at two mutable locations of the same type, without
/// deinitializing either. They may overlap, unlike `mem::swap` which is
/// otherwise equivalent.
///
/// # Safety
///
/// This function copies the memory through the raw pointers passed to it
/// as arguments.
///
/// Ensure that these pointers are valid before calling `swap`.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
    // Give ourselves some scratch space to work with.
    // A fresh stack slot can never alias `x` or `y`, so the
    // nonoverlapping copies into/out of it below are sound.
    let mut tmp: T = mem::uninitialized();

    // Perform the swap
    copy_nonoverlapping(x, &mut tmp, 1);
    copy(y, x, 1); // `x` and `y` may overlap
    copy_nonoverlapping(&tmp, y, 1);

    // y and t now point to the same thing, but we need to completely forget `tmp`
    // because it's no longer relevant: running its destructor would
    // double-drop the value now owned by `*y`.
    mem::forget(tmp);
}
120
121 /// Swaps a sequence of values at two mutable locations of the same type.
122 ///
123 /// # Safety
124 ///
125 /// The two arguments must each point to the beginning of `count` locations
126 /// of valid memory, and the two memory ranges must not overlap.
127 ///
128 /// # Examples
129 ///
130 /// Basic usage:
131 ///
132 /// ```
133 /// #![feature(swap_nonoverlapping)]
134 ///
135 /// use std::ptr;
136 ///
137 /// let mut x = [1, 2, 3, 4];
138 /// let mut y = [7, 8, 9];
139 ///
140 /// unsafe {
141 /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
142 /// }
143 ///
144 /// assert_eq!(x, [7, 8, 3, 4]);
145 /// assert_eq!(y, [1, 2, 9]);
146 /// ```
147 #[inline]
148 #[unstable(feature = "swap_nonoverlapping", issue = "42818")]
149 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
150 let x = x as *mut u8;
151 let y = y as *mut u8;
152 let len = mem::size_of::<T>() * count;
153 swap_nonoverlapping_bytes(x, y, len)
154 }
155
// Byte-level workhorse for `swap_nonoverlapping`: exchanges `len` bytes
// between the disjoint regions at `x` and `y`, 32 bytes at a time.
#[inline]
unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    // It's also broken on big-endian powerpc64 and s390x. #42778
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                       target_endian = "big")),
               repr(simd))]
    struct Block(u64, u64, u64, u64);
    // Same 32-byte size as `Block`, but without the SIMD alignment
    // requirement; used for the sub-block-sized tail below.
    struct UnalignedBlock(u64, u64, u64, u64);

    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t: Block = mem::uninitialized();
        let t = &mut t as *mut _ as *mut u8;
        let x = x.offset(i as isize);
        let y = y.offset(i as isize);

        // Swap a block of bytes of x & y, using t as a temporary buffer
        // This should be optimized into efficient SIMD operations where available
        copy_nonoverlapping(x, t, block_size);
        copy_nonoverlapping(y, x, block_size);
        copy_nonoverlapping(t, y, block_size);
        i += block_size;
    }

    if i < len {
        // Swap any remaining bytes (fewer than `block_size` of them)
        let mut t: UnalignedBlock = mem::uninitialized();
        let rem = len - i;

        let t = &mut t as *mut _ as *mut u8;
        let x = x.offset(i as isize);
        let y = y.offset(i as isize);

        copy_nonoverlapping(x, t, rem);
        copy_nonoverlapping(y, x, rem);
        copy_nonoverlapping(t, y, rem);
    }
}
207
208 /// Replaces the value at `dest` with `src`, returning the old
209 /// value, without dropping either.
210 ///
211 /// # Safety
212 ///
213 /// This is only unsafe because it accepts a raw pointer.
214 /// Otherwise, this operation is identical to `mem::replace`.
215 #[inline]
216 #[stable(feature = "rust1", since = "1.0.0")]
217 pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
218 mem::swap(&mut *dest, &mut src); // cannot overlap
219 src
220 }
221
222 /// Reads the value from `src` without moving it. This leaves the
223 /// memory in `src` unchanged.
224 ///
225 /// # Safety
226 ///
227 /// Beyond accepting a raw pointer, this is unsafe because it semantically
228 /// moves the value out of `src` without preventing further usage of `src`.
229 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
230 /// `src` is not used before the data is overwritten again (e.g. with `write`,
231 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
232 /// because it will attempt to drop the value previously at `*src`.
233 ///
234 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
235 ///
236 /// # Examples
237 ///
238 /// Basic usage:
239 ///
240 /// ```
241 /// let x = 12;
242 /// let y = &x as *const i32;
243 ///
244 /// unsafe {
245 /// assert_eq!(std::ptr::read(y), 12);
246 /// }
247 /// ```
248 #[inline]
249 #[stable(feature = "rust1", since = "1.0.0")]
250 pub unsafe fn read<T>(src: *const T) -> T {
251 let mut tmp: T = mem::uninitialized();
252 copy_nonoverlapping(src, &mut tmp, 1);
253 tmp
254 }
255
/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// Unlike `read`, the pointer may be unaligned.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn read_unaligned<T>(src: *const T) -> T {
    let mut tmp: T = mem::uninitialized();
    // Copy as raw bytes (`u8` has alignment 1) so no aligned load of
    // `T` is ever issued on the possibly-unaligned `src`.
    copy_nonoverlapping(src as *const u8,
                        &mut tmp as *mut T as *mut u8,
                        mem::size_of::<T>());
    tmp
}
291
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// The pointer must be aligned; use `write_unaligned` if that is not the case.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
    // The intrinsic moves `src` into `*dst` without reading or
    // dropping whatever `*dst` previously held.
    intrinsics::move_val_init(&mut *dst, src)
}
330
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike `write`, the pointer may be unaligned.
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_unaligned(y, z);
///     assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
    // Copy as raw bytes so no aligned store of `T` is issued on the
    // possibly-unaligned `dst`.
    copy_nonoverlapping(&src as *const T as *const u8,
                        dst as *mut u8,
                        mem::size_of::<T>());
    // `src` now semantically lives at `*dst`; forget the local so its
    // destructor doesn't run on the moved-from copy.
    mem::forget(src);
}
372
/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    // The intrinsic carries the volatile guarantee: the load is not
    // elided or reordered relative to other volatile operations.
    intrinsics::volatile_load(src)
}
420
/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    // The intrinsic carries the volatile guarantee: the store is not
    // elided or reordered relative to other volatile operations.
    intrinsics::volatile_store(dst, src);
}
472
473 #[lang = "const_ptr"]
474 impl<T: ?Sized> *const T {
475 /// Returns `true` if the pointer is null.
476 ///
477 /// # Examples
478 ///
479 /// Basic usage:
480 ///
481 /// ```
482 /// let s: &str = "Follow the rabbit";
483 /// let ptr: *const u8 = s.as_ptr();
484 /// assert!(!ptr.is_null());
485 /// ```
486 #[stable(feature = "rust1", since = "1.0.0")]
487 #[inline]
488 pub fn is_null(self) -> bool where T: Sized {
489 self == null()
490 }
491
492 /// Returns `None` if the pointer is null, or else returns a reference to
493 /// the value wrapped in `Some`.
494 ///
495 /// # Safety
496 ///
497 /// While this method and its mutable counterpart are useful for
498 /// null-safety, it is important to note that this is still an unsafe
499 /// operation because the returned value could be pointing to invalid
500 /// memory.
501 ///
502 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
503 /// not necessarily reflect the actual lifetime of the data.
504 ///
505 /// # Examples
506 ///
507 /// Basic usage:
508 ///
509 /// ```
510 /// let ptr: *const u8 = &10u8 as *const u8;
511 ///
512 /// unsafe {
513 /// if let Some(val_back) = ptr.as_ref() {
514 /// println!("We got back the value: {}!", val_back);
515 /// }
516 /// }
517 /// ```
518 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
519 #[inline]
520 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
521 // Check for null via a cast to a thin pointer, so fat pointers are only
522 // considering their "data" part for null-ness.
523 if (self as *const u8).is_null() {
524 None
525 } else {
526 Some(&*self)
527 }
528 }
529
/// Calculates the offset from a pointer.
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
/// * Both the starting and resulting pointer must be either in bounds or one
///   byte past the end of an allocated object.
///
/// * The computed offset, **in bytes**, cannot overflow or underflow an
///   `isize`.
///
/// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum, **in bytes**, must fit in a usize.
///
/// The compiler and standard library generally tries to ensure allocations
/// never reach a size where an offset is a concern. For instance, `Vec`
/// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
/// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
///
/// Most platforms fundamentally can't even construct such an allocation.
/// For instance, no known 64-bit platform can ever serve a request
/// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
/// However, some 32-bit and 16-bit platforms may successfully serve a request for
/// more than `isize::MAX` bytes with things like Physical Address
/// Extension. As such, memory acquired directly from allocators or memory
/// mapped files *may* be too large to handle with this function.
///
/// Consider using `wrapping_offset` instead if these constraints are
/// difficult to satisfy. The only advantage of this method is that it
/// enables more aggressive compiler optimizations.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s: &str = "123";
/// let ptr: *const u8 = s.as_ptr();
///
/// unsafe {
///     println!("{}", *ptr.offset(1) as char);
///     println!("{}", *ptr.offset(2) as char);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
    // The `offset` intrinsic encodes the in-bounds requirement above,
    // which is what enables the more aggressive optimizations.
    intrinsics::offset(self, count)
}
584
/// Calculates the offset from a pointer using wrapping arithmetic.
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// The resulting pointer does not need to be in bounds, but it is
/// potentially hazardous to dereference (which requires `unsafe`).
///
/// Always use `.offset(count)` instead when possible, because `offset`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // Iterate using a raw pointer in increments of two elements
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_offset(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_offset(step);
/// }
/// ```
#[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
#[inline]
pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
    // `arith_offset` has no in-bounds requirement, so wrapping the
    // unsafe intrinsic in a safe method is sound; dereferencing the
    // result may still require care.
    unsafe {
        intrinsics::arith_offset(self, count)
    }
}
624
625 /// Calculates the distance between two pointers. The returned value is in
626 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
627 ///
628 /// If the address different between the two pointers ia not a multiple of
629 /// `mem::size_of::<T>()` then the result of the division is rounded towards
630 /// zero.
631 ///
632 /// This function returns `None` if `T` is a zero-sized typed.
633 ///
634 /// # Examples
635 ///
636 /// Basic usage:
637 ///
638 /// ```
639 /// #![feature(offset_to)]
640 ///
641 /// fn main() {
642 /// let a = [0; 5];
643 /// let ptr1: *const i32 = &a[1];
644 /// let ptr2: *const i32 = &a[3];
645 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
646 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
647 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
648 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
649 /// }
650 /// ```
651 #[unstable(feature = "offset_to", issue = "41079")]
652 #[inline]
653 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
654 let size = mem::size_of::<T>();
655 if size == 0 {
656 None
657 } else {
658 let diff = (other as isize).wrapping_sub(self as isize);
659 Some(diff / size as isize)
660 }
661 }
662
/// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
/// * Both the starting and resulting pointer must be either in bounds or one
///   byte past the end of an allocated object.
///
/// * The computed offset, **in bytes**, cannot overflow or underflow an
///   `isize`.
///
/// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum must fit in a `usize`.
///
/// The compiler and standard library generally tries to ensure allocations
/// never reach a size where an offset is a concern. For instance, `Vec`
/// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
/// `vec.as_ptr().add(vec.len())` is always safe.
///
/// Most platforms fundamentally can't even construct such an allocation.
/// For instance, no known 64-bit platform can ever serve a request
/// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
/// However, some 32-bit and 16-bit platforms may successfully serve a request for
/// more than `isize::MAX` bytes with things like Physical Address
/// Extension. As such, memory acquired directly from allocators or memory
/// mapped files *may* be too large to handle with this function.
///
/// Consider using `wrapping_offset` instead if these constraints are
/// difficult to satisfy. The only advantage of this method is that it
/// enables more aggressive compiler optimizations.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(pointer_methods)]
///
/// let s: &str = "123";
/// let ptr: *const u8 = s.as_ptr();
///
/// unsafe {
///     println!("{}", *ptr.add(1) as char);
///     println!("{}", *ptr.add(2) as char);
/// }
/// ```
#[unstable(feature = "pointer_methods", issue = "43941")]
#[inline]
pub unsafe fn add(self, count: usize) -> Self
    where T: Sized,
{
    // Delegates to `offset`; the same in-bounds/overflow rules apply.
    self.offset(count as isize)
}
721
/// Calculates the offset from a pointer (convenience for
/// `.offset((count as isize).wrapping_neg())`).
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
/// * Both the starting and resulting pointer must be either in bounds or one
///   byte past the end of an allocated object.
///
/// * The computed offset cannot exceed `isize::MAX` **bytes**.
///
/// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum must fit in a usize.
///
/// The compiler and standard library generally tries to ensure allocations
/// never reach a size where an offset is a concern. For instance, `Vec`
/// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
/// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
///
/// Most platforms fundamentally can't even construct such an allocation.
/// For instance, no known 64-bit platform can ever serve a request
/// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
/// However, some 32-bit and 16-bit platforms may successfully serve a request for
/// more than `isize::MAX` bytes with things like Physical Address
/// Extension. As such, memory acquired directly from allocators or memory
/// mapped files *may* be too large to handle with this function.
///
/// Consider using `wrapping_offset` instead if these constraints are
/// difficult to satisfy. The only advantage of this method is that it
/// enables more aggressive compiler optimizations.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(pointer_methods)]
///
/// let s: &str = "123";
///
/// unsafe {
///     let end: *const u8 = s.as_ptr().add(3);
///     println!("{}", *end.sub(1) as char);
///     println!("{}", *end.sub(2) as char);
/// }
/// ```
#[unstable(feature = "pointer_methods", issue = "43941")]
#[inline]
pub unsafe fn sub(self, count: usize) -> Self
    where T: Sized,
{
    // `wrapping_neg` avoids an overflow panic in the negation itself
    // (e.g. when `count as isize` is `isize::MIN`); the safety rules
    // above still bound the effective offset.
    self.offset((count as isize).wrapping_neg())
}
780
/// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset(count as isize)`)
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// The resulting pointer does not need to be in bounds, but it is
/// potentially hazardous to dereference (which requires `unsafe`).
///
/// Always use `.add(count)` instead when possible, because `add`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(pointer_methods)]
///
/// // Iterate using a raw pointer in increments of two elements
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_add(step);
/// }
/// ```
#[unstable(feature = "pointer_methods", issue = "43941")]
#[inline]
pub fn wrapping_add(self, count: usize) -> Self
    where T: Sized,
{
    // Delegates to the wrapping (not-necessarily-in-bounds) offset.
    self.wrapping_offset(count as isize)
}
823
/// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// The resulting pointer does not need to be in bounds, but it is
/// potentially hazardous to dereference (which requires `unsafe`).
///
/// Always use `.sub(count)` instead when possible, because `sub`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(pointer_methods)]
///
/// // Iterate using a raw pointer in increments of two elements (backwards)
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
/// // This loop prints "5, 3, 1, "
/// while ptr != start_rounded_down {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_sub(step);
/// }
/// ```
#[unstable(feature = "pointer_methods", issue = "43941")]
#[inline]
pub fn wrapping_sub(self, count: usize) -> Self
    where T: Sized,
{
    // Negate with wrapping arithmetic (no overflow panic even for
    // `isize::MIN`), then delegate to the wrapping offset.
    self.wrapping_offset((count as isize).wrapping_neg())
}
866
/// Reads the value from `self` without moving it. This leaves the
/// memory in `self` unchanged.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `self` without preventing further usage of `self`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `self` is not used before the data is overwritten again (e.g. with `write`,
/// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
/// because it will attempt to drop the value previously at `*self`.
///
/// The pointer must be aligned; use `read_unaligned` if that is not the case.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(pointer_methods)]
///
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read(), 12);
/// }
/// ```
#[unstable(feature = "pointer_methods", issue = "43941")]
#[inline]
pub unsafe fn read(self) -> T
    where T: Sized,
{
    // Method form of the free function `ptr::read`.
    read(self)
}
902
/// Performs a volatile read of the value from `self` without moving it. This
/// leaves the memory in `self` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `self` without preventing further usage of `self`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `self` is not used before the data is overwritten again (e.g. with `write`,
/// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
/// because it will attempt to drop the value previously at `*self`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(pointer_methods)]
///
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read_volatile(), 12);
/// }
/// ```
#[unstable(feature = "pointer_methods", issue = "43941")]
#[inline]
pub unsafe fn read_volatile(self) -> T
    where T: Sized,
{
    // Method form of the free function `ptr::read_volatile`.
    read_volatile(self)
}
954
955 /// Reads the value from `self` without moving it. This leaves the
956 /// memory in `self` unchanged.
957 ///
958 /// Unlike `read`, the pointer may be unaligned.
959 ///
960 /// # Safety
961 ///
962 /// Beyond accepting a raw pointer, this is unsafe because it semantically
963 /// moves the value out of `self` without preventing further usage of `self`.
964 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
965 /// `self` is not used before the data is overwritten again (e.g. with `write`,
966 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
967 /// because it will attempt to drop the value previously at `*self`.
968 ///
/// This method delegates to the free function `ptr::read_unaligned`; see its
/// documentation for additional discussion.
///
969 /// # Examples
970 ///
971 /// Basic usage:
972 ///
973 /// ```
974 /// #![feature(pointer_methods)]
975 ///
976 /// let x = 12;
977 /// let y = &x as *const i32;
978 ///
979 /// unsafe {
980 /// assert_eq!(y.read_unaligned(), 12);
981 /// }
982 /// ```
983 #[unstable(feature = "pointer_methods", issue = "43941")]
984 #[inline]
985 pub unsafe fn read_unaligned(self) -> T
986 where T: Sized,
987 {
988 read_unaligned(self)
989 }
990
991 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
992 /// and destination may overlap.
993 ///
994 /// NOTE: this has the *same* argument order as `ptr::copy`.
995 ///
996 /// This is semantically equivalent to C's `memmove`.
997 ///
998 /// # Safety
999 ///
1000 /// Care must be taken with the ownership of `self` and `dest`.
1001 /// This method semantically moves the values of `self` into `dest`.
1002 /// However it does not drop the contents of `dest`, or prevent the contents
1003 /// of `self` from being dropped or used.
1004 ///
1005 /// # Examples
1006 ///
1007 /// Efficiently create a Rust vector from an unsafe buffer:
1008 ///
1009 /// ```
1010 /// #![feature(pointer_methods)]
1011 ///
1012 /// # #[allow(dead_code)]
1013 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1014 /// let mut dst = Vec::with_capacity(elts);
1015 /// dst.set_len(elts);
1016 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1017 /// dst
1018 /// }
1019 /// ```
1020 #[unstable(feature = "pointer_methods", issue = "43941")]
1021 #[inline]
1022 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1023 where T: Sized,
1024 {
1025 copy(self, dest, count)
1026 }
1027
1028 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1029 /// and destination may *not* overlap.
1030 ///
1031 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1032 ///
1033 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1034 ///
1035 /// # Safety
1036 ///
1037 /// Beyond requiring that the program must be allowed to access both regions
1038 /// of memory, it is Undefined Behavior for source and destination to
1039 /// overlap. Care must also be taken with the ownership of `self` and
1040 /// `dest`. This method semantically moves the values of `self` into `dest`.
1041 /// However it does not drop the contents of `dest`, or prevent the contents
1042 /// of `self` from being dropped or used.
1043 ///
1044 /// # Examples
1045 ///
1046 /// Efficiently create a Rust vector from an unsafe buffer:
1047 ///
1048 /// ```
1049 /// #![feature(pointer_methods)]
1050 ///
1051 /// # #[allow(dead_code)]
1052 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1053 /// let mut dst = Vec::with_capacity(elts);
1054 /// dst.set_len(elts);
1055 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1056 /// dst
1057 /// }
1058 /// ```
1059 #[unstable(feature = "pointer_methods", issue = "43941")]
1060 #[inline]
1061 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1062 where T: Sized,
1063 {
1064 copy_nonoverlapping(self, dest, count)
1065 }
1066
1067 /// Computes the byte offset that needs to be applied in order to
1068 /// make the pointer aligned to `align`.
1069 /// If it is not possible to align the pointer, the implementation returns
1070 /// `usize::max_value()`.
1071 ///
1072 /// There are no guarantees whatsoever that offsetting the pointer will not
1073 /// overflow or go beyond the allocation that the pointer points into.
1074 /// It is up to the caller to ensure that the returned offset is correct
1075 /// in all terms other than alignment.
1076 ///
1077 /// # Examples
1078 ///
1079 /// Accessing adjacent `u8` as `u16`
1080 ///
1081 /// ```
1082 /// # #![feature(align_offset)]
1083 /// # fn foo(n: usize) {
1084 /// # use std::mem::align_of;
1085 /// # unsafe {
1086 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1087 /// let ptr = &x[n] as *const u8;
1088 /// let offset = ptr.align_offset(align_of::<u16>());
1089 /// if offset < x.len() - n - 1 {
1090 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1091 /// assert_ne!(*u16_ptr, 500);
1092 /// } else {
1093 /// // while the pointer can be aligned via `offset`, it would point
1094 /// // outside the allocation
1095 /// }
1096 /// # } }
1097 /// ```
1098 #[unstable(feature = "align_offset", issue = "44488")]
1099 pub fn align_offset(self, align: usize) -> usize {
1100 unsafe {
1101 intrinsics::align_offset(self as *const _, align)
1102 }
1103 }
1104 }
1105
1106 #[lang = "mut_ptr"]
1107 impl<T: ?Sized> *mut T {
1108 /// Returns `true` if the pointer is null.
///
/// This is equivalent to comparing the pointer against `null_mut()`.
1109 ///
1110 /// # Examples
1111 ///
1112 /// Basic usage:
1113 ///
1114 /// ```
1115 /// let mut s = [1, 2, 3];
1116 /// let ptr: *mut u32 = s.as_mut_ptr();
1117 /// assert!(!ptr.is_null());
1118 /// ```
1119 #[stable(feature = "rust1", since = "1.0.0")]
1120 #[inline]
1121 pub fn is_null(self) -> bool where T: Sized {
1122 self == null_mut()
1123 }
1124
1125 /// Returns `None` if the pointer is null, or else returns a reference to
1126 /// the value wrapped in `Some`.
1127 ///
1128 /// # Safety
1129 ///
1130 /// While this method and its mutable counterpart are useful for
1131 /// null-safety, it is important to note that this is still an unsafe
1132 /// operation because the returned value could be pointing to invalid
1133 /// memory.
1134 ///
1135 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1136 /// not necessarily reflect the actual lifetime of the data.
1137 ///
/// Note that when `T` is unsized (a fat pointer), only the pointer's data
/// component is considered by the null check.
///
1138 /// # Examples
1139 ///
1140 /// Basic usage:
1141 ///
1142 /// ```
1143 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1144 ///
1145 /// unsafe {
1146 /// if let Some(val_back) = ptr.as_ref() {
1147 /// println!("We got back the value: {}!", val_back);
1148 /// }
1149 /// }
1150 /// ```
1151 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1152 #[inline]
1153 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1154 // Check for null via a cast to a thin pointer, so fat pointers are only
1155 // considering their "data" part for null-ness.
1156 if (self as *const u8).is_null() {
1157 None
1158 } else {
1159 Some(&*self)
1160 }
1161 }
1162
1163 /// Calculates the offset from a pointer.
1164 ///
1165 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1166 /// offset of `3 * size_of::<T>()` bytes.
1167 ///
1168 /// # Safety
1169 ///
1170 /// If any of the following conditions are violated, the result is Undefined
1171 /// Behavior:
1172 ///
1173 /// * Both the starting and resulting pointer must be either in bounds or one
1174 /// byte past the end of an allocated object.
1175 ///
1176 /// * The computed offset, **in bytes**, cannot overflow or underflow an
1177 /// `isize`.
1178 ///
1179 /// * The offset being in bounds cannot rely on "wrapping around" the address
1180 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
1181 ///
1182 /// The compiler and standard library generally tries to ensure allocations
1183 /// never reach a size where an offset is a concern. For instance, `Vec`
1184 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1185 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
1186 ///
1187 /// Most platforms fundamentally can't even construct such an allocation.
1188 /// For instance, no known 64-bit platform can ever serve a request
1189 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1190 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1191 /// more than `isize::MAX` bytes with things like Physical Address
1192 /// Extension. As such, memory acquired directly from allocators or memory
1193 /// mapped files *may* be too large to handle with this function.
1194 ///
1195 /// Consider using `wrapping_offset` instead if these constraints are
1196 /// difficult to satisfy. The only advantage of this method is that it
1197 /// enables more aggressive compiler optimizations.
1198 ///
1199 /// # Examples
1200 ///
1201 /// Basic usage:
1202 ///
1203 /// ```
1204 /// let mut s = [1, 2, 3];
1205 /// let ptr: *mut u32 = s.as_mut_ptr();
1206 ///
1207 /// unsafe {
1208 /// println!("{}", *ptr.offset(1));
1209 /// println!("{}", *ptr.offset(2));
1210 /// }
1211 /// ```
1212 #[stable(feature = "rust1", since = "1.0.0")]
1213 #[inline]
1214 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1215 intrinsics::offset(self, count) as *mut T
1216 }
1217
1218 /// Calculates the offset from a pointer using wrapping arithmetic.
1219 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1220 /// offset of `3 * size_of::<T>()` bytes.
1221 ///
1222 /// # Safety
1223 ///
1224 /// The resulting pointer does not need to be in bounds, but it is
1225 /// potentially hazardous to dereference (which requires `unsafe`).
///
/// Note that calling `wrapping_offset` itself is safe; only dereferencing
/// the resulting pointer may require `unsafe`.
1226 ///
1227 /// Always use `.offset(count)` instead when possible, because `offset`
1228 /// allows the compiler to optimize better.
1229 ///
1230 /// # Examples
1231 ///
1232 /// Basic usage:
1233 ///
1234 /// ```
1235 /// // Iterate using a raw pointer in increments of two elements
1236 /// let mut data = [1u8, 2, 3, 4, 5];
1237 /// let mut ptr: *mut u8 = data.as_mut_ptr();
1238 /// let step = 2;
1239 /// let end_rounded_up = ptr.wrapping_offset(6);
1240 ///
1241 /// while ptr != end_rounded_up {
1242 /// unsafe {
1243 /// *ptr = 0;
1244 /// }
1245 /// ptr = ptr.wrapping_offset(step);
1246 /// }
1247 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1248 /// ```
1249 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1250 #[inline]
1251 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1252 unsafe {
1253 intrinsics::arith_offset(self, count) as *mut T
1254 }
1255 }
1256
1257 /// Returns `None` if the pointer is null, or else returns a mutable
1258 /// reference to the value wrapped in `Some`.
1259 ///
1260 /// # Safety
1261 ///
1262 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1263 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1264 /// returned is indeed a valid lifetime for the contained data.
1265 ///
/// Note that when `T` is unsized (a fat pointer), only the pointer's data
/// component is considered by the null check.
///
1266 /// # Examples
1267 ///
1268 /// Basic usage:
1269 ///
1270 /// ```
1271 /// let mut s = [1, 2, 3];
1272 /// let ptr: *mut u32 = s.as_mut_ptr();
1273 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1274 /// *first_value = 4;
1275 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1276 /// ```
1277 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1278 #[inline]
1279 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
1280 // Check for null via a cast to a thin pointer, so fat pointers are only
1281 // considering their "data" part for null-ness.
1282 if (self as *mut u8).is_null() {
1283 None
1284 } else {
1285 Some(&mut *self)
1286 }
1287 }
1288
1289 /// Calculates the distance between two pointers. The returned value is in
1290 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1291 ///
1292 /// If the address difference between the two pointers is not a multiple of
1293 /// `mem::size_of::<T>()` then the result of the division is rounded towards
1294 /// zero.
1295 ///
1296 /// This function returns `None` if `T` is a zero-sized type.
1297 ///
1298 /// # Examples
1299 ///
1300 /// Basic usage:
1301 ///
1302 /// ```
1303 /// #![feature(offset_to)]
1304 ///
1305 /// fn main() {
1306 /// let mut a = [0; 5];
1307 /// let ptr1: *mut i32 = &mut a[1];
1308 /// let ptr2: *mut i32 = &mut a[3];
1309 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
1310 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
1311 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
1312 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
1313 /// }
1314 /// ```
1315 #[unstable(feature = "offset_to", issue = "41079")]
1316 #[inline]
1317 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
1318 let size = mem::size_of::<T>();
1319 if size == 0 {
1320 None
1321 } else {
1322 let diff = (other as isize).wrapping_sub(self as isize);
1323 Some(diff / size as isize)
1324 }
1325 }
1326
1327 /// Computes the byte offset that needs to be applied in order to
1328 /// make the pointer aligned to `align`.
1329 /// If it is not possible to align the pointer, the implementation returns
1330 /// `usize::max_value()`.
1331 ///
1332 /// There are no guarantees whatsoever that offsetting the pointer will not
1333 /// overflow or go beyond the allocation that the pointer points into.
1334 /// It is up to the caller to ensure that the returned offset is correct
1335 /// in all terms other than alignment.
1336 ///
1337 /// # Examples
1338 ///
1339 /// Accessing adjacent `u8` as `u16`
1340 ///
1341 /// ```
1342 /// # #![feature(align_offset)]
1343 /// # fn foo(n: usize) {
1344 /// # use std::mem::align_of;
1345 /// # unsafe {
1346 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1347 /// let ptr = &x[n] as *const u8;
1348 /// let offset = ptr.align_offset(align_of::<u16>());
1349 /// if offset < x.len() - n - 1 {
1350 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1351 /// assert_ne!(*u16_ptr, 500);
1352 /// } else {
1353 /// // while the pointer can be aligned via `offset`, it would point
1354 /// // outside the allocation
1355 /// }
1356 /// # } }
1357 /// ```
1358 #[unstable(feature = "align_offset", issue = "44488")]
1359 pub fn align_offset(self, align: usize) -> usize {
1360 unsafe {
1361 intrinsics::align_offset(self as *const _, align)
1362 }
1363 }
1364
1365 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1366 ///
1367 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1368 /// offset of `3 * size_of::<T>()` bytes.
1369 ///
1370 /// # Safety
1371 ///
1372 /// If any of the following conditions are violated, the result is Undefined
1373 /// Behavior:
1374 ///
1375 /// * Both the starting and resulting pointer must be either in bounds or one
1376 /// byte past the end of an allocated object.
1377 ///
1378 /// * The computed offset, **in bytes**, cannot overflow or underflow an
1379 /// `isize`.
1380 ///
1381 /// * The offset being in bounds cannot rely on "wrapping around" the address
1382 /// space. That is, the infinite-precision sum must fit in a `usize`.
1383 ///
1384 /// The compiler and standard library generally tries to ensure allocations
1385 /// never reach a size where an offset is a concern. For instance, `Vec`
1386 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1387 /// `vec.as_ptr().add(vec.len())` is always safe.
1388 ///
1389 /// Most platforms fundamentally can't even construct such an allocation.
1390 /// For instance, no known 64-bit platform can ever serve a request
1391 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1392 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1393 /// more than `isize::MAX` bytes with things like Physical Address
1394 /// Extension. As such, memory acquired directly from allocators or memory
1395 /// mapped files *may* be too large to handle with this function.
1396 ///
1397 /// Consider using `wrapping_add` instead if these constraints are
1398 /// difficult to satisfy. The only advantage of this method is that it
1399 /// enables more aggressive compiler optimizations.
1400 ///
1401 /// # Examples
1402 ///
1403 /// Basic usage:
1404 ///
1405 /// ```
1406 /// #![feature(pointer_methods)]
1407 ///
1408 /// let s: &str = "123";
1409 /// let ptr: *const u8 = s.as_ptr();
1410 ///
1411 /// unsafe {
1412 /// println!("{}", *ptr.add(1) as char);
1413 /// println!("{}", *ptr.add(2) as char);
1414 /// }
1415 /// ```
1416 #[unstable(feature = "pointer_methods", issue = "43941")]
1417 #[inline]
1418 pub unsafe fn add(self, count: usize) -> Self
1419 where T: Sized,
1420 {
1421 self.offset(count as isize)
1422 }
1423
1424 /// Calculates the offset from a pointer (convenience for
1425 /// `.offset((count as isize).wrapping_neg())`).
1426 ///
1427 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1428 /// offset of `3 * size_of::<T>()` bytes.
1429 ///
1430 /// # Safety
1431 ///
1432 /// If any of the following conditions are violated, the result is Undefined
1433 /// Behavior:
1434 ///
1435 /// * Both the starting and resulting pointer must be either in bounds or one
1436 /// byte past the end of an allocated object.
1437 ///
1438 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1439 ///
1440 /// * The offset being in bounds cannot rely on "wrapping around" the address
1441 /// space. That is, the infinite-precision sum must fit in a usize.
1442 ///
1443 /// The compiler and standard library generally tries to ensure allocations
1444 /// never reach a size where an offset is a concern. For instance, `Vec`
1445 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1446 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1447 ///
1448 /// Most platforms fundamentally can't even construct such an allocation.
1449 /// For instance, no known 64-bit platform can ever serve a request
1450 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1451 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1452 /// more than `isize::MAX` bytes with things like Physical Address
1453 /// Extension. As such, memory acquired directly from allocators or memory
1454 /// mapped files *may* be too large to handle with this function.
1455 ///
1456 /// Consider using `wrapping_sub` instead if these constraints are
1457 /// difficult to satisfy. The only advantage of this method is that it
1458 /// enables more aggressive compiler optimizations.
1459 ///
1460 /// # Examples
1461 ///
1462 /// Basic usage:
1463 ///
1464 /// ```
1465 /// #![feature(pointer_methods)]
1466 ///
1467 /// let s: &str = "123";
1468 ///
1469 /// unsafe {
1470 /// let end: *const u8 = s.as_ptr().add(3);
1471 /// println!("{}", *end.sub(1) as char);
1472 /// println!("{}", *end.sub(2) as char);
1473 /// }
1474 /// ```
1475 #[unstable(feature = "pointer_methods", issue = "43941")]
1476 #[inline]
1477 pub unsafe fn sub(self, count: usize) -> Self
1478 where T: Sized,
1479 {
1480 self.offset((count as isize).wrapping_neg())
1481 }
1482
1483 /// Calculates the offset from a pointer using wrapping arithmetic.
1484 /// (convenience for `.wrapping_offset(count as isize)`)
1485 ///
1486 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1487 /// offset of `3 * size_of::<T>()` bytes.
1488 ///
1489 /// # Safety
1490 ///
1491 /// The resulting pointer does not need to be in bounds, but it is
1492 /// potentially hazardous to dereference (which requires `unsafe`).
///
/// Note that calling `wrapping_add` itself is safe; only dereferencing
/// the resulting pointer may require `unsafe`.
1493 ///
1494 /// Always use `.add(count)` instead when possible, because `add`
1495 /// allows the compiler to optimize better.
1496 ///
1497 /// # Examples
1498 ///
1499 /// Basic usage:
1500 ///
1501 /// ```
1502 /// #![feature(pointer_methods)]
1503 ///
1504 /// // Iterate using a raw pointer in increments of two elements
1505 /// let data = [1u8, 2, 3, 4, 5];
1506 /// let mut ptr: *const u8 = data.as_ptr();
1507 /// let step = 2;
1508 /// let end_rounded_up = ptr.wrapping_add(6);
1509 ///
1510 /// // This loop prints "1, 3, 5, "
1511 /// while ptr != end_rounded_up {
1512 /// unsafe {
1513 /// print!("{}, ", *ptr);
1514 /// }
1515 /// ptr = ptr.wrapping_add(step);
1516 /// }
1517 /// ```
1518 #[unstable(feature = "pointer_methods", issue = "43941")]
1519 #[inline]
1520 pub fn wrapping_add(self, count: usize) -> Self
1521 where T: Sized,
1522 {
1523 self.wrapping_offset(count as isize)
1524 }
1525
1526 /// Calculates the offset from a pointer using wrapping arithmetic.
1527 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1528 ///
1529 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1530 /// offset of `3 * size_of::<T>()` bytes.
1531 ///
1532 /// # Safety
1533 ///
1534 /// The resulting pointer does not need to be in bounds, but it is
1535 /// potentially hazardous to dereference (which requires `unsafe`).
1536 ///
1537 /// Always use `.sub(count)` instead when possible, because `sub`
1538 /// allows the compiler to optimize better.
1539 ///
1540 /// # Examples
1541 ///
1542 /// Basic usage:
1543 ///
1544 /// ```
1545 /// #![feature(pointer_methods)]
1546 ///
1547 /// // Iterate using a raw pointer in increments of two elements (backwards)
1548 /// let data = [1u8, 2, 3, 4, 5];
1549 /// let mut ptr: *const u8 = data.as_ptr();
1550 /// let start_rounded_down = ptr.wrapping_sub(2);
1551 /// ptr = ptr.wrapping_add(4);
1552 /// let step = 2;
1553 /// // This loop prints "5, 3, 1, "
1554 /// while ptr != start_rounded_down {
1555 /// unsafe {
1556 /// print!("{}, ", *ptr);
1557 /// }
1558 /// ptr = ptr.wrapping_sub(step);
1559 /// }
1560 /// ```
1561 #[unstable(feature = "pointer_methods", issue = "43941")]
1562 #[inline]
1563 pub fn wrapping_sub(self, count: usize) -> Self
1564 where T: Sized,
1565 {
1566 self.wrapping_offset((count as isize).wrapping_neg())
1567 }
1568
1569 /// Reads the value from `self` without moving it. This leaves the
1570 /// memory in `self` unchanged.
1571 ///
1572 /// # Safety
1573 ///
1574 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1575 /// moves the value out of `self` without preventing further usage of `self`.
1576 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1577 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1578 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1579 /// because it will attempt to drop the value previously at `*self`.
1580 ///
1581 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
1582 ///
/// This method delegates to the free function `ptr::read`; see its
/// documentation for additional discussion.
///
1583 /// # Examples
1584 ///
1585 /// Basic usage:
1586 ///
1587 /// ```
1588 /// #![feature(pointer_methods)]
1589 ///
1590 /// let x = 12;
1591 /// let y = &x as *const i32;
1592 ///
1593 /// unsafe {
1594 /// assert_eq!(y.read(), 12);
1595 /// }
1596 /// ```
1597 #[unstable(feature = "pointer_methods", issue = "43941")]
1598 #[inline]
1599 pub unsafe fn read(self) -> T
1600 where T: Sized,
1601 {
1602 read(self)
1603 }
1604
1605 /// Performs a volatile read of the value from `self` without moving it. This
1606 /// leaves the memory in `self` unchanged.
1607 ///
1608 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1609 /// to not be elided or reordered by the compiler across other volatile
1610 /// operations.
1611 ///
1612 /// # Notes
1613 ///
1614 /// Rust does not currently have a rigorously and formally defined memory model,
1615 /// so the precise semantics of what "volatile" means here is subject to change
1616 /// over time. That being said, the semantics will almost always end up pretty
1617 /// similar to [C11's definition of volatile][c11].
1618 ///
1619 /// The compiler shouldn't change the relative order or number of volatile
1620 /// memory operations. However, volatile memory operations on zero-sized types
1621 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1622 /// and may be ignored.
1623 ///
1624 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1625 ///
1626 /// # Safety
1627 ///
1628 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1629 /// moves the value out of `self` without preventing further usage of `self`.
1630 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1631 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1632 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1633 /// because it will attempt to drop the value previously at `*self`.
1634 ///
1635 /// # Examples
1636 ///
1637 /// Basic usage:
1638 ///
1639 /// ```
1640 /// #![feature(pointer_methods)]
1641 ///
1642 /// let x = 12;
1643 /// let y = &x as *const i32;
1644 ///
1645 /// unsafe {
1646 /// assert_eq!(y.read_volatile(), 12);
1647 /// }
1648 /// ```
1649 #[unstable(feature = "pointer_methods", issue = "43941")]
1650 #[inline]
1651 pub unsafe fn read_volatile(self) -> T
1652 where T: Sized,
1653 {
1654 read_volatile(self)
1655 }
1656
1657 /// Reads the value from `self` without moving it. This leaves the
1658 /// memory in `self` unchanged.
1659 ///
1660 /// Unlike `read`, the pointer may be unaligned.
1661 ///
1662 /// # Safety
1663 ///
1664 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1665 /// moves the value out of `self` without preventing further usage of `self`.
1666 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1667 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1668 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1669 /// because it will attempt to drop the value previously at `*self`.
1670 ///
/// This method delegates to the free function `ptr::read_unaligned`; see its
/// documentation for additional discussion.
///
1671 /// # Examples
1672 ///
1673 /// Basic usage:
1674 ///
1675 /// ```
1676 /// #![feature(pointer_methods)]
1677 ///
1678 /// let x = 12;
1679 /// let y = &x as *const i32;
1680 ///
1681 /// unsafe {
1682 /// assert_eq!(y.read_unaligned(), 12);
1683 /// }
1684 /// ```
1685 #[unstable(feature = "pointer_methods", issue = "43941")]
1686 #[inline]
1687 pub unsafe fn read_unaligned(self) -> T
1688 where T: Sized,
1689 {
1690 read_unaligned(self)
1691 }
1692
1693 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1694 /// and destination may overlap.
1695 ///
1696 /// NOTE: this has the *same* argument order as `ptr::copy`.
1697 ///
1698 /// This is semantically equivalent to C's `memmove`.
1699 ///
1700 /// # Safety
1701 ///
1702 /// Care must be taken with the ownership of `self` and `dest`.
1703 /// This method semantically moves the values of `self` into `dest`.
1704 /// However it does not drop the contents of `dest`, or prevent the contents
1705 /// of `self` from being dropped or used.
1706 ///
1707 /// # Examples
1708 ///
1709 /// Efficiently create a Rust vector from an unsafe buffer:
1710 ///
1711 /// ```
1712 /// #![feature(pointer_methods)]
1713 ///
1714 /// # #[allow(dead_code)]
1715 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1716 /// let mut dst = Vec::with_capacity(elts);
1717 /// dst.set_len(elts);
1718 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1719 /// dst
1720 /// }
1721 /// ```
1722 #[unstable(feature = "pointer_methods", issue = "43941")]
1723 #[inline]
1724 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1725 where T: Sized,
1726 {
1727 copy(self, dest, count)
1728 }
1729
1730 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1731 /// and destination may *not* overlap.
1732 ///
1733 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1734 ///
1735 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1736 ///
1737 /// # Safety
1738 ///
1739 /// Beyond requiring that the program must be allowed to access both regions
1740 /// of memory, it is Undefined Behavior for source and destination to
1741 /// overlap. Care must also be taken with the ownership of `self` and
1742 /// `dest`. This method semantically moves the values of `self` into `dest`.
1743 /// However it does not drop the contents of `dest`, or prevent the contents
1744 /// of `self` from being dropped or used.
1745 ///
1746 /// # Examples
1747 ///
1748 /// Efficiently create a Rust vector from an unsafe buffer:
1749 ///
1750 /// ```
1751 /// #![feature(pointer_methods)]
1752 ///
1753 /// # #[allow(dead_code)]
1754 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1755 /// let mut dst = Vec::with_capacity(elts);
1756 /// dst.set_len(elts);
1757 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1758 /// dst
1759 /// }
1760 /// ```
1761 #[unstable(feature = "pointer_methods", issue = "43941")]
1762 #[inline]
1763 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1764 where T: Sized,
1765 {
1766 copy_nonoverlapping(self, dest, count)
1767 }
1768
1769 /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
1770 /// and destination may overlap.
1771 ///
1772 /// NOTE: this has the *opposite* argument order of `ptr::copy`.
1773 ///
1774 /// This is semantically equivalent to C's `memmove`.
1775 ///
1776 /// # Safety
1777 ///
1778 /// Care must be taken with the ownership of `src` and `self`.
1779 /// This method semantically moves the values of `src` into `self`.
1780 /// However it does not drop the contents of `self`, or prevent the contents
1781 /// of `src` from being dropped or used.
1782 ///
1783 /// # Examples
1784 ///
1785 /// Efficiently create a Rust vector from an unsafe buffer:
1786 ///
1787 /// ```
1788 /// #![feature(pointer_methods)]
1789 ///
1790 /// # #[allow(dead_code)]
1791 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1792 /// let mut dst = Vec::with_capacity(elts);
1793 /// dst.set_len(elts);
1794 /// dst.as_mut_ptr().copy_from(ptr, elts);
1795 /// dst
1796 /// }
1797 /// ```
1798 #[unstable(feature = "pointer_methods", issue = "43941")]
1799 #[inline]
1800 pub unsafe fn copy_from(self, src: *const T, count: usize)
1801 where T: Sized,
1802 {
1803 copy(src, self, count)
1804 }
1805
1806 /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
1807 /// and destination may *not* overlap.
1808 ///
1809 /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
1810 ///
1811 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1812 ///
1813 /// # Safety
1814 ///
1815 /// Beyond requiring that the program must be allowed to access both regions
1816 /// of memory, it is Undefined Behavior for source and destination to
1817 /// overlap. Care must also be taken with the ownership of `src` and
1818 /// `self`. This method semantically moves the values of `src` into `self`.
1819 /// However it does not drop the contents of `self`, or prevent the contents
1820 /// of `src` from being dropped or used.
1821 ///
1822 /// # Examples
1823 ///
1824 /// Efficiently create a Rust vector from an unsafe buffer:
1825 ///
1826 /// ```
1827 /// #![feature(pointer_methods)]
1828 ///
1829 /// # #[allow(dead_code)]
1830 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1831 /// let mut dst = Vec::with_capacity(elts);
1832 /// dst.set_len(elts);
1833 /// dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
1834 /// dst
1835 /// }
1836 /// ```
1837 #[unstable(feature = "pointer_methods", issue = "43941")]
1838 #[inline]
1839 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
1840 where T: Sized,
1841 {
1842 copy_nonoverlapping(src, self, count)
1843 }
1844
    /// Executes the destructor (if any) of the pointed-to value.
    ///
    /// This has two use cases:
    ///
    /// * It is *required* to use `drop_in_place` to drop unsized types like
    ///   trait objects, because they can't be read out onto the stack and
    ///   dropped normally.
    ///
    /// * It is friendlier to the optimizer to do this over `ptr::read` when
    ///   dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
    ///   as the compiler doesn't need to prove that it's sound to elide the
    ///   copy.
    ///
    /// # Safety
    ///
    /// This has all the same safety problems as `ptr::read` with respect to
    /// invalid pointers, types, and double drops.
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn drop_in_place(self) {
        // Thin wrapper: forwards to the free function `drop_in_place`.
        // Unlike the other methods here there is no `where T: Sized` bound,
        // so this also works for unsized pointees (see the doc above).
        drop_in_place(self)
    }
1867
    /// Overwrites a memory location with the given value without reading or
    /// dropping the old value.
    ///
    /// # Safety
    ///
    /// This operation is marked unsafe because it writes through a raw pointer.
    ///
    /// It does not drop the contents of `self`. This is safe, but it could leak
    /// allocations or resources, so care must be taken not to overwrite an object
    /// that should be dropped.
    ///
    /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
    /// location pointed to by `self`.
    ///
    /// This is appropriate for initializing uninitialized memory, or overwriting
    /// memory that has previously been `read` from.
    ///
    /// The pointer must be aligned; use `write_unaligned` if that is not the case.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 0;
    /// let y = &mut x as *mut i32;
    /// let z = 12;
    ///
    /// unsafe {
    ///     y.write(z);
    ///     assert_eq!(y.read(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn write(self, val: T)
        where T: Sized,
    {
        // Thin wrapper: forwards to the free function `write`.
        write(self, val)
    }
1910
    /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
    /// bytes of memory starting at `self` to `val`.
    ///
    /// # Safety
    ///
    /// `self` must be valid for writes of `count * size_of::<T>()` bytes.
    /// Note that the previous contents are overwritten bytewise without
    /// being dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut vec = vec![0; 4];
    /// unsafe {
    ///     let vec_ptr = vec.as_mut_ptr();
    ///     vec_ptr.write_bytes(b'a', 2);
    /// }
    /// assert_eq!(vec, [b'a', b'a', 0, 0]);
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn write_bytes(self, val: u8, count: usize)
        where T: Sized,
    {
        // Thin wrapper: forwards to the free function `write_bytes`.
        write_bytes(self, val, count)
    }
1933
    /// Performs a volatile write of a memory location with the given value without
    /// reading or dropping the old value.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// # Notes
    ///
    /// Rust does not currently have a rigorously and formally defined memory model,
    /// so the precise semantics of what "volatile" means here is subject to change
    /// over time. That being said, the semantics will almost always end up pretty
    /// similar to [C11's definition of volatile][c11].
    ///
    /// The compiler shouldn't change the relative order or number of volatile
    /// memory operations. However, volatile memory operations on zero-sized types
    /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
    /// and may be ignored.
    ///
    /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
    ///
    /// # Safety
    ///
    /// This operation is marked unsafe because it accepts a raw pointer.
    ///
    /// It does not drop the contents of `self`. This is safe, but it could leak
    /// allocations or resources, so care must be taken not to overwrite an object
    /// that should be dropped.
    ///
    /// This is appropriate for initializing uninitialized memory, or overwriting
    /// memory that has previously been `read` from.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 0;
    /// let y = &mut x as *mut i32;
    /// let z = 12;
    ///
    /// unsafe {
    ///     y.write_volatile(z);
    ///     assert_eq!(y.read_volatile(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn write_volatile(self, val: T)
        where T: Sized,
    {
        // Thin wrapper: forwards to the free function `write_volatile`.
        write_volatile(self, val)
    }
1989
    /// Overwrites a memory location with the given value without reading or
    /// dropping the old value.
    ///
    /// Unlike `write`, the pointer may be unaligned.
    ///
    /// # Safety
    ///
    /// This operation is marked unsafe because it writes through a raw pointer.
    ///
    /// It does not drop the contents of `self`. This is safe, but it could leak
    /// allocations or resources, so care must be taken not to overwrite an object
    /// that should be dropped.
    ///
    /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
    /// location pointed to by `self`.
    ///
    /// This is appropriate for initializing uninitialized memory, or overwriting
    /// memory that has previously been `read` from.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(pointer_methods)]
    ///
    /// let mut x = 0;
    /// let y = &mut x as *mut i32;
    /// let z = 12;
    ///
    /// unsafe {
    ///     y.write_unaligned(z);
    ///     assert_eq!(y.read_unaligned(), 12);
    /// }
    /// ```
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn write_unaligned(self, val: T)
        where T: Sized,
    {
        // Thin wrapper: forwards to the free function `write_unaligned`.
        write_unaligned(self, val)
    }
2032
    /// Replaces the value at `self` with `src`, returning the old
    /// value, without dropping either.
    ///
    /// # Safety
    ///
    /// This is only unsafe because it accepts a raw pointer.
    /// Otherwise, this operation is identical to `mem::replace`.
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn replace(self, src: T) -> T
        where T: Sized,
    {
        // Thin wrapper: forwards to the free function `replace`.
        replace(self, src)
    }
2047
    /// Swaps the values at two mutable locations of the same type, without
    /// deinitializing either. They may overlap, unlike `mem::swap` which is
    /// otherwise equivalent.
    ///
    /// # Safety
    ///
    /// This function copies the memory through the raw pointers passed to it
    /// as arguments.
    ///
    /// Ensure that these pointers are valid before calling `swap`.
    #[unstable(feature = "pointer_methods", issue = "43941")]
    #[inline]
    pub unsafe fn swap(self, with: *mut T)
        where T: Sized,
    {
        // Thin wrapper: forwards to the free function `swap`.
        swap(self, with)
    }
2065 }
2066
2067 // Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
    // `==` on raw pointers is the compiler's built-in comparison, so this
    // compares the pointers themselves, not the pointees.
    #[inline]
    fn eq(&self, other: &*const T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *const T {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *mut T {
    // Same as the `*const T` impl above: built-in pointer comparison.
    #[inline]
    fn eq(&self, other: &*mut T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *mut T {}
2085
2086 /// Compare raw pointers for equality.
2087 ///
2088 /// This is the same as using the `==` operator, but less generic:
2089 /// the arguments have to be `*const T` raw pointers,
2090 /// not anything that implements `PartialEq`.
2091 ///
2092 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2093 /// by their address rather than comparing the values they point to
2094 /// (which is what the `PartialEq for &T` implementation does).
2095 ///
2096 /// # Examples
2097 ///
2098 /// ```
2099 /// use std::ptr;
2100 ///
2101 /// let five = 5;
2102 /// let other_five = 5;
2103 /// let five_ref = &five;
2104 /// let same_five_ref = &five;
2105 /// let other_five_ref = &other_five;
2106 ///
2107 /// assert!(five_ref == same_five_ref);
2108 /// assert!(five_ref == other_five_ref);
2109 ///
2110 /// assert!(ptr::eq(five_ref, same_five_ref));
2111 /// assert!(!ptr::eq(five_ref, other_five_ref));
2112 /// ```
2113 #[stable(feature = "ptr_eq", since = "1.17.0")]
2114 #[inline]
2115 pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
2116 a == b
2117 }
2118
2119 // Impls for function pointers
macro_rules! fnptr_impls_safety_abi {
    // For one concrete function-pointer type `$FnTy`, generate the
    // comparison, hashing, and formatting impls. Every one of them operates
    // on the function's address, obtained by casting the pointer to `usize`
    // (or `*const ()` for formatting).
    ($FnTy: ty, $($Arg: ident),*) => {
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialEq for $FnTy {
            #[inline]
            fn eq(&self, other: &Self) -> bool {
                *self as usize == *other as usize
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Eq for $FnTy {}

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialOrd for $FnTy {
            #[inline]
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                (*self as usize).partial_cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Ord for $FnTy {
            #[inline]
            fn cmp(&self, other: &Self) -> Ordering {
                (*self as usize).cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> hash::Hash for $FnTy {
            fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
                state.write_usize(*self as usize)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }

        // `Debug` for function pointers prints the address, identical to
        // the `Pointer` impl above.
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }
    }
}
2171
macro_rules! fnptr_impls_args {
    // One or more arguments: also generate the C-variadic (`...`) forms,
    // which require at least one named parameter.
    ($($Arg: ident),+) => {
        fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
    };
    // Zero-argument case, without the variadic forms.
    () => {
        // No variadic functions with 0 parameters
        fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
    };
}
2189
// Instantiate the impls for function pointers of 0 through 12 arguments.
fnptr_impls_args! { }
fnptr_impls_args! { A }
fnptr_impls_args! { A, B }
fnptr_impls_args! { A, B, C }
fnptr_impls_args! { A, B, C, D }
fnptr_impls_args! { A, B, C, D, E }
fnptr_impls_args! { A, B, C, D, E, F }
fnptr_impls_args! { A, B, C, D, E, F, G }
fnptr_impls_args! { A, B, C, D, E, F, G, H }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2203
2204 // Comparison for pointers
2205 #[stable(feature = "rust1", since = "1.0.0")]
2206 impl<T: ?Sized> Ord for *const T {
2207 #[inline]
2208 fn cmp(&self, other: &*const T) -> Ordering {
2209 if self < other {
2210 Less
2211 } else if self == other {
2212 Equal
2213 } else {
2214 Greater
2215 }
2216 }
2217 }
2218
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *const T {
    #[inline]
    fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
        // Raw-pointer ordering is total, so this is always `Some`.
        Some(self.cmp(other))
    }

    // The individual operators are overridden to use the compiler's
    // built-in pointer comparisons directly.
    #[inline]
    fn lt(&self, other: &*const T) -> bool { *self < *other }

    #[inline]
    fn le(&self, other: &*const T) -> bool { *self <= *other }

    #[inline]
    fn gt(&self, other: &*const T) -> bool { *self > *other }

    #[inline]
    fn ge(&self, other: &*const T) -> bool { *self >= *other }
}
2238
2239 #[stable(feature = "rust1", since = "1.0.0")]
2240 impl<T: ?Sized> Ord for *mut T {
2241 #[inline]
2242 fn cmp(&self, other: &*mut T) -> Ordering {
2243 if self < other {
2244 Less
2245 } else if self == other {
2246 Equal
2247 } else {
2248 Greater
2249 }
2250 }
2251 }
2252
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *mut T {
    #[inline]
    fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
        // Raw-pointer ordering is total, so this is always `Some`.
        Some(self.cmp(other))
    }

    // The individual operators are overridden to use the compiler's
    // built-in pointer comparisons directly.
    #[inline]
    fn lt(&self, other: &*mut T) -> bool { *self < *other }

    #[inline]
    fn le(&self, other: &*mut T) -> bool { *self <= *other }

    #[inline]
    fn gt(&self, other: &*mut T) -> bool { *self > *other }

    #[inline]
    fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
2272
/// A wrapper around a raw non-null `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. Useful for building abstractions like
/// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
///
/// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
/// the kind of strong aliasing guarantees an instance of `T` can expect:
/// the referent of the pointer should not be modified without a unique path to
/// its owning Unique.
///
/// If you're uncertain of whether it's correct to use `Unique` for your purposes,
/// consider using `Shared`, which has weaker semantics.
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
/// for any type which upholds Unique's aliasing requirements.
#[allow(missing_debug_implementations)]
#[unstable(feature = "unique", reason = "needs an RFC to flesh out design",
           issue = "27730")]
pub struct Unique<T: ?Sized> {
    // Stored as `NonZero<*const T>`: `NonZero` provides the null-pointer
    // niche described above, and `*const` (rather than `*mut`) is what
    // makes `Unique<T>` covariant over `T`.
    pointer: NonZero<*const T>,
    // NOTE: this marker has no consequences for variance, but is necessary
    // for dropck to understand that we logically own a `T`.
    //
    // For details, see:
    // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
    _marker: PhantomData<T>,
}
2305
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
// SAFETY justification: per the invariant documented above, a `Unique<T>`
// is the only path to its referent, so sending it across threads is as
// safe as sending the `T` itself.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Send + ?Sized> Send for Unique<T> { }

/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2319
2320 #[unstable(feature = "unique", issue = "27730")]
2321 impl<T: Sized> Unique<T> {
2322 /// Creates a new `Unique` that is dangling, but well-aligned.
2323 ///
2324 /// This is useful for initializing types which lazily allocate, like
2325 /// `Vec::new` does.
2326 pub fn empty() -> Self {
2327 unsafe {
2328 let ptr = mem::align_of::<T>() as *mut T;
2329 Unique::new_unchecked(ptr)
2330 }
2331 }
2332 }
2333
#[unstable(feature = "unique", issue = "27730")]
impl<T: ?Sized> Unique<T> {
    /// Creates a new `Unique`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[unstable(feature = "unique", issue = "27730")]
    #[rustc_const_unstable(feature = "const_unique_new")]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        // The caller guarantees `ptr` is non-null, which is exactly what
        // `NonZero::new_unchecked` requires.
        Unique { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
    }

    /// Creates a new `Unique` if `ptr` is non-null.
    pub fn new(ptr: *mut T) -> Option<Self> {
        // `NonZero::new` returns `None` for a null pointer, so the null
        // check is delegated there.
        NonZero::new(ptr as *const T).map(|nz| Unique { pointer: nz, _marker: PhantomData })
    }

    /// Acquires the underlying `*mut` pointer.
    pub fn as_ptr(self) -> *mut T {
        self.pointer.get() as *mut T
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    pub unsafe fn as_ref(&self) -> &T {
        &*self.as_ptr()
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    pub unsafe fn as_mut(&mut self) -> &mut T {
        &mut *self.as_ptr()
    }
}
2375
#[unstable(feature = "unique", issue = "27730")]
// Implemented by hand (rather than derived) so that `Unique<T>: Clone`
// does not require `T: Clone`; only the pointer is copied.
impl<T: ?Sized> Clone for Unique<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[unstable(feature = "unique", issue = "27730")]
impl<T: ?Sized> Copy for Unique<T> { }

#[unstable(feature = "unique", issue = "27730")]
// Allows `Unique<T> -> Unique<U>` unsizing coercions (e.g. from a sized
// type to a trait object), mirroring raw-pointer coercions.
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }

#[unstable(feature = "unique", issue = "27730")]
// Formats as the address of the referent.
impl<T: ?Sized> fmt::Pointer for Unique<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}

#[unstable(feature = "unique", issue = "27730")]
// References are always non-null, so this conversion is infallible.
impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
    fn from(reference: &'a mut T) -> Self {
        Unique { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}

#[unstable(feature = "unique", issue = "27730")]
// NOTE(review): this constructs a `Unique` (which documents unique-ownership
// semantics) from a shared `&T`; callers are presumably expected to uphold
// the aliasing guarantees described on the type -- verify at use sites.
impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
    fn from(reference: &'a T) -> Self {
        Unique { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}
2409
/// A wrapper around a raw `*mut T` that indicates that the possessor
/// of this wrapper has shared ownership of the referent. Useful for
/// building abstractions like `Rc<T>`, `Arc<T>`, or doubly-linked lists, which
/// internally use aliased raw pointers to manage the memory that they own.
///
/// This is similar to `Unique`, except that it doesn't make any aliasing
/// guarantees, and doesn't derive Send and Sync. Note that unlike `&T`,
/// Shared has no special mutability requirements. Shared may mutate data
/// aliased by other Shared pointers. More precise rules require Rust to
/// develop an actual aliasing model.
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<Shared<T>>` has the same size as `Shared<T>`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `Shared<T>` is covariant over `T`. If this is incorrect
/// for your use case, you should include some PhantomData in your type to
/// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
/// Usually this won't be necessary; covariance is correct for Rc, Arc, and LinkedList
/// because they provide a public API that follows the normal shared XOR mutable
/// rules of Rust.
#[allow(missing_debug_implementations)]
#[unstable(feature = "shared", reason = "needs an RFC to flesh out design",
           issue = "27730")]
pub struct Shared<T: ?Sized> {
    // Stored as `NonZero<*const T>`: `NonZero` provides the null-pointer
    // niche described above, and `*const` (rather than `*mut`) is what
    // makes `Shared<T>` covariant over `T`.
    pointer: NonZero<*const T>,
    // NOTE: this marker has no consequences for variance, but is necessary
    // for dropck to understand that we logically own a `T`.
    //
    // For details, see:
    // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
    _marker: PhantomData<T>,
}
2444
/// `Shared` pointers are not `Send` because the data they reference may be aliased.
// NB: This impl is unnecessary, but should provide better error messages.
// (It is "unnecessary" because the wrapped raw pointer already prevents the
// auto-impl of `Send`.)
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> !Send for Shared<T> { }

/// `Shared` pointers are not `Sync` because the data they reference may be aliased.
// NB: This impl is unnecessary, but should provide better error messages.
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> !Sync for Shared<T> { }
2454
2455 #[unstable(feature = "shared", issue = "27730")]
2456 impl<T: Sized> Shared<T> {
2457 /// Creates a new `Shared` that is dangling, but well-aligned.
2458 ///
2459 /// This is useful for initializing types which lazily allocate, like
2460 /// `Vec::new` does.
2461 pub fn empty() -> Self {
2462 unsafe {
2463 let ptr = mem::align_of::<T>() as *mut T;
2464 Shared::new_unchecked(ptr)
2465 }
2466 }
2467 }
2468
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> Shared<T> {
    /// Creates a new `Shared`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[unstable(feature = "shared", issue = "27730")]
    #[rustc_const_unstable(feature = "const_shared_new")]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        // The caller guarantees `ptr` is non-null, which is exactly what
        // `NonZero::new_unchecked` requires.
        Shared { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
    }

    /// Creates a new `Shared` if `ptr` is non-null.
    pub fn new(ptr: *mut T) -> Option<Self> {
        // `NonZero::new` returns `None` for a null pointer, so the null
        // check is delegated there.
        NonZero::new(ptr as *const T).map(|nz| Shared { pointer: nz, _marker: PhantomData })
    }

    /// Acquires the underlying `*mut` pointer.
    pub fn as_ptr(self) -> *mut T {
        self.pointer.get() as *mut T
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    pub unsafe fn as_ref(&self) -> &T {
        &*self.as_ptr()
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    pub unsafe fn as_mut(&mut self) -> &mut T {
        &mut *self.as_ptr()
    }

    /// Acquires the underlying pointer as a `*mut` pointer.
    // Kept only for backwards compatibility; simply forwards to `as_ptr`.
    #[rustc_deprecated(since = "1.19", reason = "renamed to `as_ptr` for ergonomics/consistency")]
    #[unstable(feature = "shared", issue = "27730")]
    pub unsafe fn as_mut_ptr(&self) -> *mut T {
        self.as_ptr()
    }
}
2517
#[unstable(feature = "shared", issue = "27730")]
// Implemented by hand (rather than derived) so that `Shared<T>: Clone`
// does not require `T: Clone`; only the pointer is copied.
impl<T: ?Sized> Clone for Shared<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> Copy for Shared<T> { }

#[unstable(feature = "shared", issue = "27730")]
// Allows `Shared<T> -> Shared<U>` unsizing coercions (e.g. from a sized
// type to a trait object), mirroring raw-pointer coercions.
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Shared<U>> for Shared<T> where T: Unsize<U> { }

#[unstable(feature = "shared", issue = "27730")]
// Formats as the address of the referent.
impl<T: ?Sized> fmt::Pointer for Shared<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}

#[unstable(feature = "shared", issue = "27730")]
// A `Unique` is always non-null, so this conversion is infallible; it
// reuses the inner `NonZero` pointer directly.
impl<T: ?Sized> From<Unique<T>> for Shared<T> {
    fn from(unique: Unique<T>) -> Self {
        Shared { pointer: unique.pointer, _marker: PhantomData }
    }
}

#[unstable(feature = "shared", issue = "27730")]
// References are always non-null, so these conversions are infallible.
impl<'a, T: ?Sized> From<&'a mut T> for Shared<T> {
    fn from(reference: &'a mut T) -> Self {
        Shared { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}

#[unstable(feature = "shared", issue = "27730")]
impl<'a, T: ?Sized> From<&'a T> for Shared<T> {
    fn from(reference: &'a T) -> Self {
        Shared { pointer: NonZero::from(reference), _marker: PhantomData }
    }
}