1 use core
::cell
::RefCell
;
2 use core
::mem
::{self, MaybeUninit}
;
3 use core
::num
::NonZeroUsize
;
6 use std
::fmt
::{Debug, Display}
;
/// `slice_from_raw_parts` round-trips a slice's pointer + length, both in a
/// const initializer and at runtime.
#[test]
fn test_const_from_raw_parts() {
    // Reassemble a slice from its raw parts inside a const context.
    const SLICE: &[u8] = &[1, 2, 3, 4];
    const FROM_RAW: &[u8] = unsafe { &*slice_from_raw_parts(SLICE.as_ptr(), SLICE.len()) };
    assert_eq!(SLICE, FROM_RAW);

    // A shorter length than the backing slice yields a prefix view.
    let slice = &[1, 2, 3, 4, 5];
    let from_raw = unsafe { &*slice_from_raw_parts(slice.as_ptr(), 2) };
    assert_eq!(&slice[..2], from_raw);
}
/// Raw-pointer aliasing of a struct: reading/writing the first field through
/// a cast `*mut isize`, whole-struct writes through `*mut Pair`, and untyped
/// element `copy` between `Vec` buffers.
#[test]
fn test() {
    unsafe {
        // `repr(C)` guarantees `fst` sits at offset 0, which the
        // `*mut Pair -> *mut isize` cast below relies on.
        #[repr(C)]
        struct Pair {
            fst: isize,
            snd: isize,
        }
        let mut p = Pair { fst: 10, snd: 20 };
        let pptr: *mut Pair = &mut p;
        let iptr: *mut isize = pptr as *mut isize;
        assert_eq!(*iptr, 10);
        *iptr = 30;
        assert_eq!(*iptr, 30);
        assert_eq!(p.fst, 30);

        // Writing the whole struct is observable through the field alias.
        *pptr = Pair { fst: 50, snd: 60 };
        assert_eq!(*iptr, 50);
        assert_eq!(p.fst, 50);
        assert_eq!(p.snd, 60);

        let v0 = vec![32000u16, 32001u16, 32002u16];
        let mut v1 = vec![0u16, 0u16, 0u16];

        // Copy single elements between arbitrary offsets of the two buffers.
        copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
        assert!(v1[0] == 0u16 && v1[1] == 32001u16 && v1[2] == 0u16);
        copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
        assert!(v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 0u16);
        copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
        assert!(v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 32000u16);
    }
}
/// `is_null` on const/mut raw pointers: the null pointer itself, wrapping
/// offsets off null, unsized pointees (slices, trait objects), and an opaque
/// foreign type.
#[test]
fn test_is_null() {
    let p: *const isize = null();
    assert!(p.is_null());

    // One element past null is no longer null.
    let q = p.wrapping_offset(1);
    assert!(!q.is_null());

    let mp: *mut isize = null_mut();
    assert!(mp.is_null());

    let mq = mp.wrapping_offset(1);
    assert!(!mq.is_null());

    // Pointers to unsized types -- slices
    let s: &mut [u8] = &mut [1, 2, 3];
    let cs: *const [u8] = s;
    assert!(!cs.is_null());

    let ms: *mut [u8] = s;
    assert!(!ms.is_null());

    let cz: *const [u8] = &[];
    assert!(!cz.is_null());

    let mz: *mut [u8] = &mut [];
    assert!(!mz.is_null());

    let ncs: *const [u8] = null::<[u8; 3]>();
    assert!(ncs.is_null());

    let nms: *mut [u8] = null_mut::<[u8; 3]>();
    assert!(nms.is_null());

    // Pointers to unsized types -- trait objects
    let ci: *const dyn ToString = &3;
    assert!(!ci.is_null());

    let mi: *mut dyn ToString = &mut 3;
    assert!(!mi.is_null());

    let nci: *const dyn ToString = null::<isize>();
    assert!(nci.is_null());

    let nmi: *mut dyn ToString = null_mut::<isize>();
    assert!(nmi.is_null());

    // NOTE(review): upstream declares `Extern` via the unstable
    // `extern_types` feature; an uninhabited enum stands in here as a
    // stable opaque type — confirm against the original file.
    enum Extern {}
    let ec: *const Extern = null::<Extern>();
    assert!(ec.is_null());

    let em: *mut Extern = null_mut::<Extern>();
    assert!(em.is_null());
}
/// `as_ref` on raw pointers: `None` for null, `Some` otherwise, including
/// unsized pointees (slices and trait objects).
#[test]
fn test_as_ref() {
    unsafe {
        let p: *const isize = null();
        assert_eq!(p.as_ref(), None);

        let q: *const isize = &2;
        assert_eq!(q.as_ref().unwrap(), &2);

        let p: *mut isize = null_mut();
        assert_eq!(p.as_ref(), None);

        let q: *mut isize = &mut 2;
        assert_eq!(q.as_ref().unwrap(), &2);

        // Lifetime inference
        let u = 2isize;
        {
            let p = &u as *const isize;
            assert_eq!(p.as_ref().unwrap(), &2);
        }

        // Pointers to unsized types -- slices
        let s: &mut [u8] = &mut [1, 2, 3];
        let cs: *const [u8] = s;
        assert_eq!(cs.as_ref(), Some(&*s));

        let ms: *mut [u8] = s;
        assert_eq!(ms.as_ref(), Some(&*s));

        let cz: *const [u8] = &[];
        assert_eq!(cz.as_ref(), Some(&[][..]));

        let mz: *mut [u8] = &mut [];
        assert_eq!(mz.as_ref(), Some(&[][..]));

        let ncs: *const [u8] = null::<[u8; 3]>();
        assert_eq!(ncs.as_ref(), None);

        let nms: *mut [u8] = null_mut::<[u8; 3]>();
        assert_eq!(nms.as_ref(), None);

        // Pointers to unsized types -- trait objects
        let ci: *const dyn ToString = &3;
        assert!(ci.as_ref().is_some());

        let mi: *mut dyn ToString = &mut 3;
        assert!(mi.as_ref().is_some());

        let nci: *const dyn ToString = null::<isize>();
        assert!(nci.as_ref().is_none());

        let nmi: *mut dyn ToString = null_mut::<isize>();
        assert!(nmi.as_ref().is_none());
    }
}
/// `as_mut` on raw pointers: `None` for null, `Some` otherwise, including
/// unsized pointees (slices and trait objects).
#[test]
fn test_as_mut() {
    unsafe {
        let p: *mut isize = null_mut();
        assert!(p.as_mut() == None);

        let q: *mut isize = &mut 2;
        assert!(q.as_mut().unwrap() == &mut 2);

        // Lifetime inference
        let mut u = 2isize;
        {
            let p = &mut u as *mut isize;
            assert!(p.as_mut().unwrap() == &mut 2);
        }

        // Pointers to unsized types -- slices
        let s: &mut [u8] = &mut [1, 2, 3];
        let ms: *mut [u8] = s;
        assert_eq!(ms.as_mut(), Some(&mut [1, 2, 3][..]));

        let mz: *mut [u8] = &mut [];
        assert_eq!(mz.as_mut(), Some(&mut [][..]));

        let nms: *mut [u8] = null_mut::<[u8; 3]>();
        assert_eq!(nms.as_mut(), None);

        // Pointers to unsized types -- trait objects
        let mi: *mut dyn ToString = &mut 3;
        assert!(mi.as_mut().is_some());

        let nmi: *mut dyn ToString = null_mut::<isize>();
        assert!(nmi.as_mut().is_none());
    }
}
/// Walking a buffer forwards with `offset(1)`: reading through a const
/// pointer and writing through a mut pointer.
#[test]
fn test_ptr_addition() {
    unsafe {
        let xs = vec![5; 16];
        let mut ptr = xs.as_ptr();
        let end = ptr.offset(16);

        // Every element is visible while walking to one-past-the-end.
        while ptr < end {
            assert_eq!(*ptr, 5);
            ptr = ptr.offset(1);
        }

        let mut xs_mut = vec![5; 16];
        let mut m_ptr = xs_mut.as_mut_ptr();
        let m_end = m_ptr.offset(16);

        // Bump every element from 5 to 10 through the raw pointer.
        while m_ptr < m_end {
            *m_ptr += 5;
            m_ptr = m_ptr.offset(1);
        }

        assert!(xs_mut == vec![10; 16]);
    }
}
/// Walking a buffer backwards with negative offsets, reading and writing.
#[test]
fn test_ptr_subtraction() {
    unsafe {
        let xs = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        let ptr = xs.as_ptr();

        // Each element is reachable via an offset from the base pointer.
        for idx in 0..10 {
            assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
        }

        let mut xs_mut = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        let m_start = xs_mut.as_mut_ptr();
        let mut m_ptr = m_start.offset(9);

        // Double every element, walking from the back to the front.
        loop {
            *m_ptr += *m_ptr;
            if m_ptr == m_start {
                break;
            }
            m_ptr = m_ptr.offset(-1);
        }

        assert_eq!(xs_mut, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]);
    }
}
/// `write_bytes` fills a buffer with the given byte value.
#[test]
fn test_set_memory() {
    let mut xs = [0u8; 20];
    let ptr = xs.as_mut_ptr();
    unsafe {
        write_bytes(ptr, 5u8, xs.len());
    }
    assert!(xs == [5u8; 20]);
}
/// `write_bytes` also works inside a const initializer.
#[test]
fn test_set_memory_const() {
    const XS: [u8; 20] = {
        let mut xs = [0u8; 20];
        let ptr = xs.as_mut_ptr();
        unsafe {
            ptr.write_bytes(5u8, xs.len());
        }
        xs
    };

    assert!(XS == [5u8; 20]);
}
/// `NonNull` works with an unsized (slice) pointee.
#[test]
fn test_unsized_nonnull() {
    let xs: &[i32] = &[1, 2, 3];
    // SAFETY: `xs` is a valid, non-null reference.
    let ptr = unsafe { NonNull::new_unchecked(xs as *const [i32] as *mut [i32]) };
    let ys = unsafe { ptr.as_ref() };
    let zs: &[i32] = &[1, 2, 3];
    assert!(ys == zs);
}
289 fn test_const_nonnull_new() {
291 assert
!(NonNull
::new(core
::ptr
::null_mut
::<()>()).is_none());
293 let value
= &mut 0u32;
294 let mut ptr
= NonNull
::new(value
).unwrap();
295 unsafe { *ptr.as_mut() = 42 }
;
297 let reference
= unsafe { &*ptr.as_ref() }
;
298 assert
!(*reference
== *value
);
299 assert
!(*reference
== 42);
/// Equality and `Hash` on a variadic `extern "C"` fn pointer.
#[test]
#[cfg(unix)] // printf may not be available on other platforms
#[allow(deprecated)] // For SipHasher
pub fn test_variadic_fnptr() {
    use core::ffi;
    use core::hash::{Hash, SipHasher};
    extern "C" {
        // This needs to use the correct function signature even though it isn't called as some
        // codegen backends make it UB to declare a function with multiple conflicting signatures
        // (like LLVM) while others straight up return an error (like Cranelift).
        fn printf(_: *const ffi::c_char, ...) -> ffi::c_int;
    }
    let p: unsafe extern "C" fn(*const ffi::c_char, ...) -> ffi::c_int = printf;
    let q = p.clone();
    assert_eq!(p, q);

    // Equal fn pointers feed the hasher identically; `hash` returns `()`.
    let mut s = SipHasher::new();
    assert_eq!(p.hash(&mut s), q.hash(&mut s));
}
/// `write_unaligned` must not drop the overwritten destination: only the
/// finally-live payload (0) is ever dropped; the overwritten 1 is not.
#[test]
fn write_unaligned_drop() {
    thread_local! {
        // Records the payload of every `Dropper` whose `drop` actually runs.
        static DROPS: RefCell<Vec<u32>> = RefCell::new(Vec::new());
    }

    struct Dropper(u32);

    impl Drop for Dropper {
        fn drop(&mut self) {
            DROPS.with(|d| d.borrow_mut().push(self.0));
        }
    }

    {
        let c = Dropper(0);
        let mut t = Dropper(1);
        unsafe {
            // Moves `c` into `t` without dropping the old `t`.
            write_unaligned(&mut t, c);
        }
    }
    DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
}
348 fn align_offset_zst() {
349 // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
350 // all, because no amount of elements will align the pointer.
353 assert_eq
!(ptr
::invalid
::<()>(p
).align_offset(p
), 0);
355 assert_eq
!(ptr
::invalid
::<()>(p
+ 1).align_offset(p
), !0);
357 p
= (p
+ 1).next_power_of_two();
362 fn align_offset_zst_const() {
364 // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
365 // all, because no amount of elements will align the pointer.
368 assert
!(ptr
::invalid
::<()>(p
).align_offset(p
) == 0);
370 assert
!(ptr
::invalid
::<()>(p
+ 1).align_offset(p
) == !0);
372 p
= (p
+ 1).next_power_of_two();
378 fn align_offset_stride_one() {
379 // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
383 for ptr
in 1..2 * align
{
384 let expected
= ptr
% align
;
385 let offset
= if expected
== 0 { 0 }
else { align - expected }
;
387 ptr
::invalid
::<u8>(ptr
).align_offset(align
),
389 "ptr = {}, align = {}, size = 1",
394 align
= (align
+ 1).next_power_of_two();
399 fn align_offset_stride_one_const() {
401 // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
406 while ptr
< 2 * align
{
407 let expected
= ptr
% align
;
408 let offset
= if expected
== 0 { 0 }
else { align - expected }
;
409 assert
!(ptr
::invalid
::<u8>(ptr
).align_offset(align
) == offset
);
412 align
= (align
+ 1).next_power_of_two();
418 fn align_offset_various_strides() {
419 unsafe fn test_stride
<T
>(ptr
: *const T
, align
: usize) -> bool
{
420 let numptr
= ptr
as usize;
421 let mut expected
= usize::MAX
;
422 // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
424 if (numptr
+ el
* ::std
::mem
::size_of
::<T
>()) % align
== 0 {
429 let got
= ptr
.align_offset(align
);
432 "aligning {:p} (with stride of {}) to {}, expected {}, got {}",
434 ::std
::mem
::size_of
::<T
>(),
444 // For pointers of stride != 1, we verify the algorithm against the naivest possible
449 let limit
= if cfg
!(miri
) { 32 }
else { 1024 }
;
450 while align
< limit
{
451 for ptr
in 1usize
..4 * align
{
455 x
|= test_stride
::<A3
>(ptr
::invalid
::<A3
>(ptr
), align
);
458 x
|= test_stride
::<A4
>(ptr
::invalid
::<A4
>(ptr
), align
);
462 x
|= test_stride
::<A5
>(ptr
::invalid
::<A5
>(ptr
), align
);
466 x
|= test_stride
::<A6
>(ptr
::invalid
::<A6
>(ptr
), align
);
469 struct A7(u32, u16, u8);
470 x
|= test_stride
::<A7
>(ptr
::invalid
::<A7
>(ptr
), align
);
474 x
|= test_stride
::<A8
>(ptr
::invalid
::<A8
>(ptr
), align
);
477 struct A9(u32, u32, u8);
478 x
|= test_stride
::<A9
>(ptr
::invalid
::<A9
>(ptr
), align
);
481 struct A10(u32, u32, u16);
482 x
|= test_stride
::<A10
>(ptr
::invalid
::<A10
>(ptr
), align
);
484 x
|= test_stride
::<u32>(ptr
::invalid
::<u32>(ptr
), align
);
485 x
|= test_stride
::<u128
>(ptr
::invalid
::<u128
>(ptr
), align
);
488 align
= (align
+ 1).next_power_of_two();
494 fn align_offset_various_strides_const() {
495 const unsafe fn test_stride
<T
>(ptr
: *const T
, numptr
: usize, align
: usize) {
496 let mut expected
= usize::MAX
;
497 // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
500 if (numptr
+ el
* ::std
::mem
::size_of
::<T
>()) % align
== 0 {
506 let got
= ptr
.align_offset(align
);
507 assert
!(got
== expected
);
511 // For pointers of stride != 1, we verify the algorithm against the naivest possible
515 while align
< limit
{
517 while ptr
< 4 * align
{
521 test_stride
::<A3
>(ptr
::invalid
::<A3
>(ptr
), ptr
, align
);
524 test_stride
::<A4
>(ptr
::invalid
::<A4
>(ptr
), ptr
, align
);
528 test_stride
::<A5
>(ptr
::invalid
::<A5
>(ptr
), ptr
, align
);
532 test_stride
::<A6
>(ptr
::invalid
::<A6
>(ptr
), ptr
, align
);
535 struct A7(u32, u16, u8);
536 test_stride
::<A7
>(ptr
::invalid
::<A7
>(ptr
), ptr
, align
);
540 test_stride
::<A8
>(ptr
::invalid
::<A8
>(ptr
), ptr
, align
);
543 struct A9(u32, u32, u8);
544 test_stride
::<A9
>(ptr
::invalid
::<A9
>(ptr
), ptr
, align
);
547 struct A10(u32, u32, u16);
548 test_stride
::<A10
>(ptr
::invalid
::<A10
>(ptr
), ptr
, align
);
550 test_stride
::<u32>(ptr
::invalid
::<u32>(ptr
), ptr
, align
);
551 test_stride
::<u128
>(ptr
::invalid
::<u128
>(ptr
), ptr
, align
);
555 align
= (align
+ 1).next_power_of_two();
561 fn align_offset_with_provenance_const() {
563 // On some platforms (e.g. msp430-none-elf), the alignment of `i32` is less than 4.
565 struct AlignedI32(i32);
567 let data
= AlignedI32(42);
569 // `stride % align == 0` (usual case)
571 let ptr
: *const i32 = &data
.0;
572 assert
!(ptr
.align_offset(1) == 0);
573 assert
!(ptr
.align_offset(2) == 0);
574 assert
!(ptr
.align_offset(4) == 0);
575 assert
!(ptr
.align_offset(8) == usize::MAX
);
576 assert
!(ptr
.wrapping_byte_add(1).align_offset(1) == 0);
577 assert
!(ptr
.wrapping_byte_add(1).align_offset(2) == usize::MAX
);
578 assert
!(ptr
.wrapping_byte_add(2).align_offset(1) == 0);
579 assert
!(ptr
.wrapping_byte_add(2).align_offset(2) == 0);
580 assert
!(ptr
.wrapping_byte_add(2).align_offset(4) == usize::MAX
);
581 assert
!(ptr
.wrapping_byte_add(3).align_offset(1) == 0);
582 assert
!(ptr
.wrapping_byte_add(3).align_offset(2) == usize::MAX
);
584 assert
!(ptr
.wrapping_add(42).align_offset(4) == 0);
585 assert
!(ptr
.wrapping_add(42).align_offset(8) == usize::MAX
);
587 let ptr1
: *const i8 = ptr
.cast();
588 assert
!(ptr1
.align_offset(1) == 0);
589 assert
!(ptr1
.align_offset(2) == 0);
590 assert
!(ptr1
.align_offset(4) == 0);
591 assert
!(ptr1
.align_offset(8) == usize::MAX
);
592 assert
!(ptr1
.wrapping_byte_add(1).align_offset(1) == 0);
593 assert
!(ptr1
.wrapping_byte_add(1).align_offset(2) == 1);
594 assert
!(ptr1
.wrapping_byte_add(1).align_offset(4) == 3);
595 assert
!(ptr1
.wrapping_byte_add(1).align_offset(8) == usize::MAX
);
596 assert
!(ptr1
.wrapping_byte_add(2).align_offset(1) == 0);
597 assert
!(ptr1
.wrapping_byte_add(2).align_offset(2) == 0);
598 assert
!(ptr1
.wrapping_byte_add(2).align_offset(4) == 2);
599 assert
!(ptr1
.wrapping_byte_add(2).align_offset(8) == usize::MAX
);
600 assert
!(ptr1
.wrapping_byte_add(3).align_offset(1) == 0);
601 assert
!(ptr1
.wrapping_byte_add(3).align_offset(2) == 1);
602 assert
!(ptr1
.wrapping_byte_add(3).align_offset(4) == 1);
603 assert
!(ptr1
.wrapping_byte_add(3).align_offset(8) == usize::MAX
);
605 let ptr2
: *const i16 = ptr
.cast();
606 assert
!(ptr2
.align_offset(1) == 0);
607 assert
!(ptr2
.align_offset(2) == 0);
608 assert
!(ptr2
.align_offset(4) == 0);
609 assert
!(ptr2
.align_offset(8) == usize::MAX
);
610 assert
!(ptr2
.wrapping_byte_add(1).align_offset(1) == 0);
611 assert
!(ptr2
.wrapping_byte_add(1).align_offset(2) == usize::MAX
);
612 assert
!(ptr2
.wrapping_byte_add(2).align_offset(1) == 0);
613 assert
!(ptr2
.wrapping_byte_add(2).align_offset(2) == 0);
614 assert
!(ptr2
.wrapping_byte_add(2).align_offset(4) == 1);
615 assert
!(ptr2
.wrapping_byte_add(2).align_offset(8) == usize::MAX
);
616 assert
!(ptr2
.wrapping_byte_add(3).align_offset(1) == 0);
617 assert
!(ptr2
.wrapping_byte_add(3).align_offset(2) == usize::MAX
);
619 let ptr3
: *const i64 = ptr
.cast();
620 assert
!(ptr3
.align_offset(1) == 0);
621 assert
!(ptr3
.align_offset(2) == 0);
622 assert
!(ptr3
.align_offset(4) == 0);
623 assert
!(ptr3
.align_offset(8) == usize::MAX
);
624 assert
!(ptr3
.wrapping_byte_add(1).align_offset(1) == 0);
625 assert
!(ptr3
.wrapping_byte_add(1).align_offset(2) == usize::MAX
);
627 // `stride % align != 0` (edge case)
629 let ptr4
: *const [u8; 3] = ptr
.cast();
630 assert
!(ptr4
.align_offset(1) == 0);
631 assert
!(ptr4
.align_offset(2) == 0);
632 assert
!(ptr4
.align_offset(4) == 0);
633 assert
!(ptr4
.align_offset(8) == usize::MAX
);
634 assert
!(ptr4
.wrapping_byte_add(1).align_offset(1) == 0);
635 assert
!(ptr4
.wrapping_byte_add(1).align_offset(2) == 1);
636 assert
!(ptr4
.wrapping_byte_add(1).align_offset(4) == 1);
637 assert
!(ptr4
.wrapping_byte_add(1).align_offset(8) == usize::MAX
);
638 assert
!(ptr4
.wrapping_byte_add(2).align_offset(1) == 0);
639 assert
!(ptr4
.wrapping_byte_add(2).align_offset(2) == 0);
640 assert
!(ptr4
.wrapping_byte_add(2).align_offset(4) == 2);
641 assert
!(ptr4
.wrapping_byte_add(2).align_offset(8) == usize::MAX
);
642 assert
!(ptr4
.wrapping_byte_add(3).align_offset(1) == 0);
643 assert
!(ptr4
.wrapping_byte_add(3).align_offset(2) == 1);
644 assert
!(ptr4
.wrapping_byte_add(3).align_offset(4) == 3);
645 assert
!(ptr4
.wrapping_byte_add(3).align_offset(8) == usize::MAX
);
647 let ptr5
: *const [u8; 5] = ptr
.cast();
648 assert
!(ptr5
.align_offset(1) == 0);
649 assert
!(ptr5
.align_offset(2) == 0);
650 assert
!(ptr5
.align_offset(4) == 0);
651 assert
!(ptr5
.align_offset(8) == usize::MAX
);
652 assert
!(ptr5
.wrapping_byte_add(1).align_offset(1) == 0);
653 assert
!(ptr5
.wrapping_byte_add(1).align_offset(2) == 1);
654 assert
!(ptr5
.wrapping_byte_add(1).align_offset(4) == 3);
655 assert
!(ptr5
.wrapping_byte_add(1).align_offset(8) == usize::MAX
);
656 assert
!(ptr5
.wrapping_byte_add(2).align_offset(1) == 0);
657 assert
!(ptr5
.wrapping_byte_add(2).align_offset(2) == 0);
658 assert
!(ptr5
.wrapping_byte_add(2).align_offset(4) == 2);
659 assert
!(ptr5
.wrapping_byte_add(2).align_offset(8) == usize::MAX
);
660 assert
!(ptr5
.wrapping_byte_add(3).align_offset(1) == 0);
661 assert
!(ptr5
.wrapping_byte_add(3).align_offset(2) == 1);
662 assert
!(ptr5
.wrapping_byte_add(3).align_offset(4) == 1);
663 assert
!(ptr5
.wrapping_byte_add(3).align_offset(8) == usize::MAX
);
668 fn align_offset_issue_103361() {
669 #[cfg(target_pointer_width = "64")]
670 const SIZE
: usize = 1 << 47;
671 #[cfg(target_pointer_width = "32")]
672 const SIZE
: usize = 1 << 30;
673 #[cfg(target_pointer_width = "16")]
674 const SIZE
: usize = 1 << 13;
675 struct HugeSize([u8; SIZE
- 1]);
676 let _
= ptr
::invalid
::<HugeSize
>(SIZE
).align_offset(SIZE
);
680 fn align_offset_issue_103361_const() {
681 #[cfg(target_pointer_width = "64")]
682 const SIZE
: usize = 1 << 47;
683 #[cfg(target_pointer_width = "32")]
684 const SIZE
: usize = 1 << 30;
685 #[cfg(target_pointer_width = "16")]
686 const SIZE
: usize = 1 << 13;
687 struct HugeSize([u8; SIZE
- 1]);
690 assert
!(ptr
::invalid
::<HugeSize
>(SIZE
- 1).align_offset(SIZE
) == SIZE
- 1);
691 assert
!(ptr
::invalid
::<HugeSize
>(SIZE
).align_offset(SIZE
) == 0);
692 assert
!(ptr
::invalid
::<HugeSize
>(SIZE
+ 1).align_offset(SIZE
) == 1);
699 let ptr
: *const i32 = &data
;
700 assert
!(ptr
.is_aligned());
701 assert
!(ptr
.is_aligned_to(1));
702 assert
!(ptr
.is_aligned_to(2));
703 assert
!(ptr
.is_aligned_to(4));
704 assert
!(ptr
.wrapping_byte_add(2).is_aligned_to(1));
705 assert
!(ptr
.wrapping_byte_add(2).is_aligned_to(2));
706 assert
!(!ptr
.wrapping_byte_add(2).is_aligned_to(4));
708 // At runtime either `ptr` or `ptr+1` is aligned to 8.
709 assert_ne
!(ptr
.is_aligned_to(8), ptr
.wrapping_add(1).is_aligned_to(8));
713 fn is_aligned_const() {
716 let ptr
: *const i32 = &data
;
717 assert
!(ptr
.is_aligned());
718 assert
!(ptr
.is_aligned_to(1));
719 assert
!(ptr
.is_aligned_to(2));
720 assert
!(ptr
.is_aligned_to(4));
721 assert
!(ptr
.wrapping_byte_add(2).is_aligned_to(1));
722 assert
!(ptr
.wrapping_byte_add(2).is_aligned_to(2));
723 assert
!(!ptr
.wrapping_byte_add(2).is_aligned_to(4));
725 // At comptime neither `ptr` nor `ptr+1` is aligned to 8.
726 assert
!(!ptr
.is_aligned_to(8));
727 assert
!(!ptr
.wrapping_add(1).is_aligned_to(8));
734 let ptr1
: *mut i32 = &mut a
[1];
735 let ptr2
: *mut i32 = &mut a
[3];
737 assert_eq
!(ptr2
.offset_from(ptr1
), 2);
738 assert_eq
!(ptr1
.offset_from(ptr2
), -2);
739 assert_eq
!(ptr1
.offset(2), ptr2
);
740 assert_eq
!(ptr2
.offset(-2), ptr1
);
747 struct Pair
<A
, B
: ?Sized
>(A
, B
);
751 let () = metadata(&());
752 let () = metadata(&Unit
);
753 let () = metadata(&4_u32);
754 let () = metadata(&String
::new());
755 let () = metadata(&Some(4_u32));
756 let () = metadata(&ptr_metadata
);
757 let () = metadata(&|| {}
);
758 let () = metadata(&[4, 7]);
759 let () = metadata(&(4, String
::new()));
760 let () = metadata(&Pair(4, String
::new()));
761 let () = metadata(ptr
::null
::<()>() as *const Extern
);
762 let () = metadata(ptr
::null
::<()>() as *const <&u32 as std
::ops
::Deref
>::Target
);
764 assert_eq
!(metadata("foo"), 3_usize
);
765 assert_eq
!(metadata(&[4, 7][..]), 2_usize
);
767 let dst_tuple
: &(bool
, [u8]) = &(true, [0x66, 0x6F, 0x6F]);
768 let dst_struct
: &Pair
<bool
, [u8]> = &Pair(true, [0x66, 0x6F, 0x6F]);
769 assert_eq
!(metadata(dst_tuple
), 3_usize
);
770 assert_eq
!(metadata(dst_struct
), 3_usize
);
772 let dst_tuple
: &(bool
, str) = std
::mem
::transmute(dst_tuple
);
773 let dst_struct
: &Pair
<bool
, str> = std
::mem
::transmute(dst_struct
);
774 assert_eq
!(&dst_tuple
.1, "foo");
775 assert_eq
!(&dst_struct
.1, "foo");
776 assert_eq
!(metadata(dst_tuple
), 3_usize
);
777 assert_eq
!(metadata(dst_struct
), 3_usize
);
780 let vtable_1
: DynMetadata
<dyn Debug
> = metadata(&4_u16 as &dyn Debug
);
781 let vtable_2
: DynMetadata
<dyn Display
> = metadata(&4_u16 as &dyn Display
);
782 let vtable_3
: DynMetadata
<dyn Display
> = metadata(&4_u32 as &dyn Display
);
783 let vtable_4
: DynMetadata
<dyn Display
> = metadata(&(true, 7_u32) as &(bool
, dyn Display
));
784 let vtable_5
: DynMetadata
<dyn Display
> =
785 metadata(&Pair(true, 7_u32) as &Pair
<bool
, dyn Display
>);
787 let address_1
: *const () = std
::mem
::transmute(vtable_1
);
788 let address_2
: *const () = std
::mem
::transmute(vtable_2
);
789 let address_3
: *const () = std
::mem
::transmute(vtable_3
);
790 let address_4
: *const () = std
::mem
::transmute(vtable_4
);
791 let address_5
: *const () = std
::mem
::transmute(vtable_5
);
792 // Different trait => different vtable pointer
793 assert_ne
!(address_1
, address_2
);
794 // Different erased type => different vtable pointer
795 assert_ne
!(address_2
, address_3
);
796 // Same erased type and same trait => same vtable pointer
797 assert_eq
!(address_3
, address_4
);
798 assert_eq
!(address_3
, address_5
);
803 fn ptr_metadata_bounds() {
804 fn metadata_eq_method_address
<T
: ?Sized
>() -> usize {
805 // The `Metadata` associated type has an `Ord` bound, so this is valid:
806 <<T
as Pointee
>::Metadata
as PartialEq
>::eq
as usize
808 // "Synthetic" trait impls generated by the compiler like those of `Pointee`
809 // are not checked for bounds of associated type.
810 // So with a buggy core we could have both:
811 // * `<dyn Display as Pointee>::Metadata == DynMetadata`
812 // * `DynMetadata: !PartialEq`
813 // … and cause an ICE here:
814 metadata_eq_method_address
::<dyn Display
>();
816 // For this reason, let’s check here that bounds are satisfied:
818 let _
= static_assert_expected_bounds_for_metadata
::<()>;
819 let _
= static_assert_expected_bounds_for_metadata
::<usize>;
820 let _
= static_assert_expected_bounds_for_metadata
::<DynMetadata
<dyn Display
>>;
821 fn _static_assert_associated_type
<T
: ?Sized
>() {
822 let _
= static_assert_expected_bounds_for_metadata
::<<T
as Pointee
>::Metadata
>;
825 fn static_assert_expected_bounds_for_metadata
<Meta
>()
827 // Keep this in sync with the associated type in `library/core/src/ptr/metadata.rs`
828 Meta
: Copy
+ Send
+ Sync
+ Ord
+ std
::hash
::Hash
+ Unpin
,
837 struct Something([u8; 47]);
839 let value
= Something([0; 47]);
840 let trait_object
: &dyn Debug
= &value
;
841 let meta
= metadata(trait_object
);
843 assert_eq
!(meta
.size_of(), 64);
844 assert_eq
!(meta
.size_of(), std
::mem
::size_of
::<Something
>());
845 assert_eq
!(meta
.align_of(), 32);
846 assert_eq
!(meta
.align_of(), std
::mem
::align_of
::<Something
>());
847 assert_eq
!(meta
.layout(), std
::alloc
::Layout
::new
::<Something
>());
849 assert
!(format
!("{meta:?}").starts_with("DynMetadata(0x"));
853 fn from_raw_parts() {
854 let mut value
= 5_u32;
855 let address
= &mut value
as *mut _
as *mut ();
856 let trait_object
: &dyn Display
= &mut value
;
857 let vtable
= metadata(trait_object
);
858 let trait_object
= NonNull
::from(trait_object
);
860 assert_eq
!(ptr
::from_raw_parts(address
, vtable
), trait_object
.as_ptr());
861 assert_eq
!(ptr
::from_raw_parts_mut(address
, vtable
), trait_object
.as_ptr());
862 assert_eq
!(NonNull
::from_raw_parts(NonNull
::new(address
).unwrap(), vtable
), trait_object
);
864 let mut array
= [5_u32, 5, 5, 5, 5];
865 let address
= &mut array
as *mut _
as *mut ();
866 let array_ptr
= NonNull
::from(&mut array
);
867 let slice_ptr
= NonNull
::from(&mut array
[..]);
869 assert_eq
!(ptr
::from_raw_parts(address
, ()), array_ptr
.as_ptr());
870 assert_eq
!(ptr
::from_raw_parts_mut(address
, ()), array_ptr
.as_ptr());
871 assert_eq
!(NonNull
::from_raw_parts(NonNull
::new(address
).unwrap(), ()), array_ptr
);
873 assert_eq
!(ptr
::from_raw_parts(address
, 5), slice_ptr
.as_ptr());
874 assert_eq
!(ptr
::from_raw_parts_mut(address
, 5), slice_ptr
.as_ptr());
875 assert_eq
!(NonNull
::from_raw_parts(NonNull
::new(address
).unwrap(), 5), slice_ptr
);
880 let foo
= ThinBox
::<dyn Display
>::new(4);
881 assert_eq
!(foo
.to_string(), "4");
883 let bar
= ThinBox
::<dyn Display
>::new(7);
884 assert_eq
!(bar
.to_string(), "7");
886 // A slightly more interesting library that could be built on top of metadata APIs.
888 // * It could be generalized to any `T: ?Sized` (not just trait object)
889 // if `{size,align}_of_for_meta<T: ?Sized>(T::Metadata)` are added.
890 // * Constructing a `ThinBox` without consuming and deallocating a `Box`
891 // requires either the unstable `Unsize` marker trait,
892 // or the unstable `unsized_locals` language feature,
893 // or taking `&dyn T` and restricting to `T: Copy`.
896 use std
::marker
::PhantomData
;
900 T
: ?Sized
+ Pointee
<Metadata
= DynMetadata
<T
>>,
902 ptr
: NonNull
<DynMetadata
<T
>>,
903 phantom
: PhantomData
<T
>,
908 T
: ?Sized
+ Pointee
<Metadata
= DynMetadata
<T
>>,
910 pub fn new
<Value
: std
::marker
::Unsize
<T
>>(value
: Value
) -> Self {
911 let unsized_
: &T
= &value
;
912 let meta
= metadata(unsized_
);
913 let meta_layout
= Layout
::for_value(&meta
);
914 let value_layout
= Layout
::for_value(&value
);
915 let (layout
, offset
) = meta_layout
.extend(value_layout
).unwrap();
916 // `DynMetadata` is pointer-sized:
917 assert
!(layout
.size() > 0);
918 // If `ThinBox<T>` is generalized to any `T: ?Sized`,
919 // handle ZSTs with a dangling pointer without going through `alloc()`,
920 // like `Box<T>` does.
922 let ptr
= NonNull
::new(alloc(layout
))
923 .unwrap_or_else(|| handle_alloc_error(layout
))
924 .cast
::<DynMetadata
<T
>>();
925 ptr
.as_ptr().write(meta
);
926 ptr
.as_ptr().byte_add(offset
).cast
::<Value
>().write(value
);
927 Self { ptr, phantom: PhantomData }
931 fn meta(&self) -> DynMetadata
<T
> {
932 unsafe { *self.ptr.as_ref() }
935 fn layout(&self) -> (Layout
, usize) {
936 let meta
= self.meta();
937 Layout
::for_value(&meta
).extend(meta
.layout()).unwrap()
940 fn value_ptr(&self) -> *const T
{
941 let (_
, offset
) = self.layout();
942 let data_ptr
= unsafe { self.ptr.cast::<u8>().as_ptr().add(offset) }
;
943 ptr
::from_raw_parts(data_ptr
.cast(), self.meta())
946 fn value_mut_ptr(&mut self) -> *mut T
{
947 let (_
, offset
) = self.layout();
948 // FIXME: can this line be shared with the same in `value_ptr()`
949 // without upsetting Stacked Borrows?
950 let data_ptr
= unsafe { self.ptr.cast::<u8>().as_ptr().add(offset) }
;
951 from_raw_parts_mut(data_ptr
.cast(), self.meta())
955 impl<T
> std
::ops
::Deref
for ThinBox
<T
>
957 T
: ?Sized
+ Pointee
<Metadata
= DynMetadata
<T
>>,
961 fn deref(&self) -> &T
{
962 unsafe { &*self.value_ptr() }
966 impl<T
> std
::ops
::DerefMut
for ThinBox
<T
>
968 T
: ?Sized
+ Pointee
<Metadata
= DynMetadata
<T
>>,
970 fn deref_mut(&mut self) -> &mut T
{
971 unsafe { &mut *self.value_mut_ptr() }
975 impl<T
> std
::ops
::Drop
for ThinBox
<T
>
977 T
: ?Sized
+ Pointee
<Metadata
= DynMetadata
<T
>>,
980 let (layout
, _
) = self.layout();
982 drop_in_place
::<T
>(&mut **self);
983 dealloc(self.ptr
.cast().as_ptr(), layout
);
990 fn nonnull_tagged_pointer_with_provenance() {
991 let raw_pointer
= Box
::into_raw(Box
::new(10));
993 let mut p
= TaggedPointer
::new(raw_pointer
).unwrap();
994 assert_eq
!(p
.tag(), 0);
997 assert_eq
!(p
.tag(), 1);
998 assert_eq
!(unsafe { *p.pointer().as_ptr() }
, 10);
1001 assert_eq
!(p
.tag(), 3);
1002 assert_eq
!(unsafe { *p.pointer().as_ptr() }
, 10);
1004 unsafe { Box::from_raw(p.pointer().as_ptr()) }
;
1006 /// A non-null pointer type which carries several bits of metadata and maintains provenance.
1007 #[repr(transparent)]
1008 pub struct TaggedPointer
<T
>(NonNull
<T
>);
1010 impl<T
> Clone
for TaggedPointer
<T
> {
1011 fn clone(&self) -> Self {
1016 impl<T
> Copy
for TaggedPointer
<T
> {}
1018 impl<T
> TaggedPointer
<T
> {
1019 /// The ABI-required minimum alignment of the `P` type.
1020 pub const ALIGNMENT
: usize = core
::mem
::align_of
::<T
>();
1021 /// A mask for data-carrying bits of the address.
1022 pub const DATA_MASK
: usize = !Self::ADDRESS_MASK
;
1023 /// Number of available bits of storage in the address.
1024 pub const NUM_BITS
: u32 = Self::ALIGNMENT
.trailing_zeros();
1025 /// A mask for the non-data-carrying bits of the address.
1026 pub const ADDRESS_MASK
: usize = usize::MAX
<< Self::NUM_BITS
;
1028 /// Create a new tagged pointer from a possibly null pointer.
1029 pub fn new(pointer
: *mut T
) -> Option
<TaggedPointer
<T
>> {
1030 Some(TaggedPointer(NonNull
::new(pointer
)?
))
1033 /// Consume this tagged pointer and produce a raw mutable pointer to the
1034 /// memory location.
1035 pub fn pointer(self) -> NonNull
<T
> {
1036 // SAFETY: The `addr` guaranteed to have bits set in the Self::ADDRESS_MASK, so the result will be non-null.
1037 self.0.map_addr(|addr
| unsafe {
1038 NonZeroUsize
::new_unchecked(addr
.get() & Self::ADDRESS_MASK
)
1042 /// Consume this tagged pointer and produce the data it carries.
1043 pub fn tag(&self) -> usize {
1044 self.0.addr().get() & Self::DATA_MASK
1047 /// Update the data this tagged pointer carries to a new value.
1048 pub fn set_tag(&mut self, data
: usize) {
1050 data
& Self::ADDRESS_MASK
,
1052 "cannot set more data beyond the lowest NUM_BITS"
1054 let data
= data
& Self::DATA_MASK
;
1056 // SAFETY: This value will always be non-zero because the upper bits (from
1057 // ADDRESS_MASK) will always be non-zero. This a property of the type and its
1059 self.0 = self.0.map_addr(|addr
| unsafe {
1060 NonZeroUsize
::new_unchecked((addr
.get() & Self::ADDRESS_MASK
) | data
)
/// `swap`/`copy` (and `_nonoverlapping` variants) called at `bool` type on
/// bytes that are not valid `bool`s must perform untyped copies.
#[test]
fn swap_copy_untyped() {
    // We call `{swap,copy}{,_nonoverlapping}` at `bool` type on data that is not a valid bool.
    // These should all do untyped copies, so this should work fine.
    // NOTE(review): the initial values and final asserts were missing from the
    // extracted source; 5/6 chosen self-consistently — confirm against upstream.
    let mut x = 5u8;
    let mut y = 6u8;

    let ptr1 = &mut x as *mut u8 as *mut bool;
    let ptr2 = &mut y as *mut u8 as *mut bool;

    unsafe {
        // Swapping twice restores the original bytes.
        ptr::swap(ptr1, ptr2);
        ptr::swap_nonoverlapping(ptr1, ptr2, 1);
    }
    assert_eq!(x, 5);
    assert_eq!(y, 6);

    unsafe {
        // Both copies write `x`'s byte over `y`.
        ptr::copy(ptr1, ptr2, 1);
        ptr::copy_nonoverlapping(ptr1, ptr2, 1);
    }
    assert_eq!(x, 5);
    assert_eq!(y, 5);
}
1092 fn test_const_copy() {
1095 let mut ptr2
= &666;
1097 // Copy ptr1 to ptr2, bytewise.
1100 &ptr1
as *const _
as *const MaybeUninit
<u8>,
1101 &mut ptr2
as *mut _
as *mut MaybeUninit
<u8>,
1102 mem
::size_of
::<&i32>(),
1106 // Make sure they still work.
1107 assert
!(*ptr1
== 1);
1108 assert
!(*ptr2
== 1);
1113 let mut ptr2
= &666;
1115 // Copy ptr1 to ptr2, bytewise.
1117 ptr
::copy_nonoverlapping(
1118 &ptr1
as *const _
as *const MaybeUninit
<u8>,
1119 &mut ptr2
as *mut _
as *mut MaybeUninit
<u8>,
1120 mem
::size_of
::<&i32>(),
1124 // Make sure they still work.
1125 assert
!(*ptr1
== 1);
1126 assert
!(*ptr2
== 1);