]> git.proxmox.com Git - rustc.git/blob - library/core/tests/ptr.rs
New upstream version 1.68.2+dfsg1
[rustc.git] / library / core / tests / ptr.rs
1 use core::cell::RefCell;
2 use core::mem::{self, MaybeUninit};
3 use core::num::NonZeroUsize;
4 use core::ptr;
5 use core::ptr::*;
6 use std::fmt::{Debug, Display};
7
#[test]
fn test_const_from_raw_parts() {
    // `slice_from_raw_parts` must be usable in const evaluation and
    // round-trip a slice exactly.
    const SLICE: &[u8] = &[1, 2, 3, 4];
    const FROM_RAW: &[u8] = unsafe { &*slice_from_raw_parts(SLICE.as_ptr(), SLICE.len()) };
    assert_eq!(SLICE, FROM_RAW);

    // At runtime, a shorter length yields a prefix of the original slice.
    let data = &[1, 2, 3, 4, 5];
    let prefix = unsafe { &*slice_from_raw_parts(data.as_ptr(), 2) };
    assert_eq!(prefix, &data[..2]);
}
18
#[test]
fn test() {
    unsafe {
        // A `repr(C)` struct can be viewed through a pointer to its first field.
        #[repr(C)]
        struct Pair {
            fst: isize,
            snd: isize,
        }
        let mut pair = Pair { fst: 10, snd: 20 };
        let pair_ptr: *mut Pair = &mut pair;
        let fst_ptr: *mut isize = pair_ptr as *mut isize;
        assert_eq!(*fst_ptr, 10);
        *fst_ptr = 30;
        assert_eq!(*fst_ptr, 30);
        assert_eq!(pair.fst, 30);

        // Writing the whole struct is visible through the field pointer too.
        *pair_ptr = Pair { fst: 50, snd: 60 };
        assert_eq!(*fst_ptr, 50);
        assert_eq!(pair.fst, 50);
        assert_eq!(pair.snd, 60);

        // Single-element `copy` between two buffers at various offsets.
        let src = vec![32000u16, 32001u16, 32002u16];
        let mut dst = vec![0u16, 0u16, 0u16];

        copy(src.as_ptr().offset(1), dst.as_mut_ptr().offset(1), 1);
        assert_eq!(dst, [0u16, 32001u16, 0u16]);
        copy(src.as_ptr().offset(2), dst.as_mut_ptr(), 1);
        assert_eq!(dst, [32002u16, 32001u16, 0u16]);
        copy(src.as_ptr(), dst.as_mut_ptr().offset(2), 1);
        assert_eq!(dst, [32002u16, 32001u16, 32000u16]);
    }
}
51
#[test]
fn test_is_null() {
    // Thin pointers: only the literal null address counts as null.
    let p: *const isize = null();
    assert!(p.is_null());

    // A dangling-but-nonzero address is not null.
    let q = p.wrapping_offset(1);
    assert!(!q.is_null());

    let mp: *mut isize = null_mut();
    assert!(mp.is_null());

    let mq = mp.wrapping_offset(1);
    assert!(!mq.is_null());

    // Pointers to unsized types -- slices
    // (`is_null` looks only at the data-pointer half of a fat pointer.)
    let s: &mut [u8] = &mut [1, 2, 3];
    let cs: *const [u8] = s;
    assert!(!cs.is_null());

    let ms: *mut [u8] = s;
    assert!(!ms.is_null());

    // A pointer to an empty slice is dangling but still non-null.
    let cz: *const [u8] = &[];
    assert!(!cz.is_null());

    let mz: *mut [u8] = &mut [];
    assert!(!mz.is_null());

    // A null array pointer unsizes to a null slice pointer.
    let ncs: *const [u8] = null::<[u8; 3]>();
    assert!(ncs.is_null());

    let nms: *mut [u8] = null_mut::<[u8; 3]>();
    assert!(nms.is_null());

    // Pointers to unsized types -- trait objects
    let ci: *const dyn ToString = &3;
    assert!(!ci.is_null());

    let mi: *mut dyn ToString = &mut 3;
    assert!(!mi.is_null());

    let nci: *const dyn ToString = null::<isize>();
    assert!(nci.is_null());

    let nmi: *mut dyn ToString = null_mut::<isize>();
    assert!(nmi.is_null());

    // Extern (opaque) types: pointers to them are thin; null-ness behaves the same.
    extern "C" {
        type Extern;
    }
    let ec: *const Extern = null::<Extern>();
    assert!(ec.is_null());

    let em: *mut Extern = null_mut::<Extern>();
    assert!(em.is_null());
}
108
#[test]
fn test_as_ref() {
    unsafe {
        // Null raw pointers convert to `None`.
        let null_const: *const isize = null();
        assert_eq!(null_const.as_ref(), None);

        let valid_const: *const isize = &2;
        assert_eq!(valid_const.as_ref().unwrap(), &2);

        let null_mut_ptr: *mut isize = null_mut();
        assert_eq!(null_mut_ptr.as_ref(), None);

        let valid_mut: *mut isize = &mut 2;
        assert_eq!(valid_mut.as_ref().unwrap(), &2);

        // The lifetime of the returned reference is inferred from the use site.
        let u = 2isize;
        {
            let inner = &u as *const isize;
            assert_eq!(inner.as_ref().unwrap(), &2);
        }

        // Fat pointers to slices.
        let s: &mut [u8] = &mut [1, 2, 3];
        let cs: *const [u8] = s;
        assert_eq!(cs.as_ref(), Some(&*s));

        let ms: *mut [u8] = s;
        assert_eq!(ms.as_ref(), Some(&*s));

        let cz: *const [u8] = &[];
        assert_eq!(cz.as_ref(), Some(&[][..]));

        let mz: *mut [u8] = &mut [];
        assert_eq!(mz.as_ref(), Some(&[][..]));

        let ncs: *const [u8] = null::<[u8; 3]>();
        assert_eq!(ncs.as_ref(), None);

        let nms: *mut [u8] = null_mut::<[u8; 3]>();
        assert_eq!(nms.as_ref(), None);

        // Fat pointers to trait objects.
        let ci: *const dyn ToString = &3;
        assert!(ci.as_ref().is_some());

        let mi: *mut dyn ToString = &mut 3;
        assert!(mi.as_ref().is_some());

        let nci: *const dyn ToString = null::<isize>();
        assert!(nci.as_ref().is_none());

        let nmi: *mut dyn ToString = null_mut::<isize>();
        assert!(nmi.as_ref().is_none());
    }
}
165
#[test]
fn test_as_mut() {
    unsafe {
        // Null converts to `None`.
        let null_ptr: *mut isize = null_mut();
        assert!(null_ptr.as_mut().is_none());

        let two_ptr: *mut isize = &mut 2;
        assert!(two_ptr.as_mut().unwrap() == &mut 2);

        // The lifetime of the returned reference is inferred from the use site.
        let mut u = 2isize;
        {
            let inner = &mut u as *mut isize;
            assert!(inner.as_mut().unwrap() == &mut 2);
        }

        // Fat pointers to slices.
        let s: &mut [u8] = &mut [1, 2, 3];
        let ms: *mut [u8] = s;
        assert_eq!(ms.as_mut(), Some(&mut [1, 2, 3][..]));

        let mz: *mut [u8] = &mut [];
        assert_eq!(mz.as_mut(), Some(&mut [][..]));

        let nms: *mut [u8] = null_mut::<[u8; 3]>();
        assert_eq!(nms.as_mut(), None);

        // Fat pointers to trait objects.
        let mi: *mut dyn ToString = &mut 3;
        assert!(mi.as_mut().is_some());

        let nmi: *mut dyn ToString = null_mut::<isize>();
        assert!(nmi.as_mut().is_none());
    }
}
201
#[test]
fn test_ptr_addition() {
    unsafe {
        // Walk a buffer by repeatedly offsetting a raw pointer forward.
        let xs = vec![5; 16];
        let mut cursor = xs.as_ptr();
        let end = cursor.offset(16);

        while cursor < end {
            assert_eq!(*cursor, 5);
            cursor = cursor.offset(1);
        }

        // Same walk with a mutable pointer, bumping each element.
        let mut xs_mut = xs;
        let mut m_cursor = xs_mut.as_mut_ptr();
        let m_end = m_cursor.offset(16);

        while m_cursor < m_end {
            *m_cursor += 5;
            m_cursor = m_cursor.offset(1);
        }

        assert_eq!(xs_mut, vec![10; 16]);
    }
}
226
#[test]
fn test_ptr_subtraction() {
    unsafe {
        let xs = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        let ptr = xs.as_ptr();

        // Read the elements back to front; each element equals its index.
        for idx in (0..10isize).rev() {
            assert_eq!(*(ptr.offset(idx)), idx);
        }

        // Double every element, walking a mutable pointer from the last
        // element down to the first.
        let mut xs_mut = xs;
        let m_start = xs_mut.as_mut_ptr();
        let mut m_ptr = m_start.offset(9);

        loop {
            *m_ptr += *m_ptr;
            if m_ptr == m_start {
                break;
            }
            m_ptr = m_ptr.offset(-1);
        }

        assert_eq!(xs_mut, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]);
    }
}
254
#[test]
fn test_set_memory() {
    // `write_bytes` must fill the whole buffer with the given byte.
    let mut buf = [0u8; 20];
    unsafe {
        write_bytes(buf.as_mut_ptr(), 5u8, buf.len());
    }
    assert_eq!(buf, [5u8; 20]);
}
264
#[test]
fn test_set_memory_const() {
    // Const-eval counterpart of `test_set_memory`, using the
    // `pointer::write_bytes` method inside a constant initializer.
    const XS: [u8; 20] = {
        let mut xs = [0u8; 20];
        let ptr = xs.as_mut_ptr();
        unsafe {
            ptr.write_bytes(5u8, xs.len());
        }
        xs
    };

    assert!(XS == [5u8; 20]);
}
278
#[test]
fn test_unsized_nonnull() {
    // `NonNull` can carry a fat (slice) pointer and hand the slice back intact.
    let xs: &[i32] = &[1, 2, 3];
    let nn = unsafe { NonNull::new_unchecked(xs as *const [i32] as *mut [i32]) };
    let roundtripped = unsafe { nn.as_ref() };
    assert_eq!(roundtripped, &[1, 2, 3][..]);
}
287
#[test]
fn test_const_nonnull_new() {
    // `NonNull::new` must work in const evaluation: it rejects null and
    // accepts (and round-trips writes through) a real reference.
    const {
        assert!(NonNull::new(core::ptr::null_mut::<()>()).is_none());

        let value = &mut 0u32;
        let mut ptr = NonNull::new(value).unwrap();
        unsafe { *ptr.as_mut() = 42 };

        let reference = unsafe { &*ptr.as_ref() };
        assert!(*reference == *value);
        assert!(*reference == 42);
    };
}
302
#[test]
#[cfg(unix)] // printf may not be available on other platforms
#[allow(deprecated)] // For SipHasher
pub fn test_variadic_fnptr() {
    use core::ffi;
    use core::hash::{Hash, Hasher, SipHasher};
    extern "C" {
        // This needs to use the correct function signature even though it isn't called as some
        // codegen backends make it UB to declare a function with multiple conflicting signatures
        // (like LLVM) while others straight up return an error (like Cranelift).
        fn printf(_: *const ffi::c_char, ...) -> ffi::c_int;
    }
    // Variadic fn pointers must support Eq/Ord/Hash like ordinary fn pointers.
    let p: unsafe extern "C" fn(*const ffi::c_char, ...) -> ffi::c_int = printf;
    let q = p.clone();
    assert_eq!(p, q);
    assert!(!(p < q));
    // Equal pointers must produce equal hashes. Hash into two *independent*
    // hashers and compare the digests; the previous version fed both values
    // into the same hasher and compared the `()` return values of `hash`,
    // which asserted nothing.
    let mut s1 = SipHasher::new();
    let mut s2 = SipHasher::new();
    p.hash(&mut s1);
    q.hash(&mut s2);
    assert_eq!(s1.finish(), s2.finish());
}
322
#[test]
fn write_unaligned_drop() {
    // Record which `Dropper` values actually run their destructor.
    thread_local! {
        static LOG: RefCell<Vec<u32>> = RefCell::new(Vec::new());
    }

    struct Dropper(u32);

    impl Drop for Dropper {
        fn drop(&mut self) {
            LOG.with(|log| log.borrow_mut().push(self.0));
        }
    }

    {
        let src = Dropper(0);
        let mut dst = Dropper(1);
        // `write_unaligned` moves `src` over `dst` without dropping the old
        // `dst`, so only the value 0 is ever dropped (at end of scope).
        unsafe {
            write_unaligned(&mut dst, src);
        }
    }
    LOG.with(|log| assert_eq!(*log.borrow(), [0]));
}
346
#[test]
fn align_offset_zst() {
    // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
    // all, because no amount of elements will align the pointer.
    let mut p = 1;
    while p < 1024 {
        // An address equal to the alignment is already aligned: offset 0.
        assert_eq!(ptr::invalid::<()>(p).align_offset(p), 0);
        if p != 1 {
            // A misaligned ZST pointer can never be fixed up; `!0` is usize::MAX.
            assert_eq!(ptr::invalid::<()>(p + 1).align_offset(p), !0);
        }
        // Only power-of-two alignments are valid arguments.
        p = (p + 1).next_power_of_two();
    }
}
360
#[test]
fn align_offset_zst_const() {
    // Const-eval counterpart of `align_offset_zst`.
    const {
        // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
        // all, because no amount of elements will align the pointer.
        let mut p = 1;
        while p < 1024 {
            assert!(ptr::invalid::<()>(p).align_offset(p) == 0);
            if p != 1 {
                // `!0` is usize::MAX, meaning "cannot be aligned".
                assert!(ptr::invalid::<()>(p + 1).align_offset(p) == !0);
            }
            p = (p + 1).next_power_of_two();
        }
    }
}
376
#[test]
fn align_offset_stride_one() {
    // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
    // number of bytes.
    let mut align = 1;
    while align < 1024 {
        // Check every address in [1, 2*align): covers both sides of a boundary.
        for ptr in 1..2 * align {
            let expected = ptr % align;
            let offset = if expected == 0 { 0 } else { align - expected };
            assert_eq!(
                ptr::invalid::<u8>(ptr).align_offset(align),
                offset,
                "ptr = {}, align = {}, size = 1",
                ptr,
                align
            );
        }
        align = (align + 1).next_power_of_two();
    }
}
397
#[test]
fn align_offset_stride_one_const() {
    // Const-eval counterpart of `align_offset_stride_one`.
    const {
        // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
        // number of bytes.
        let mut align = 1;
        while align < 1024 {
            let mut ptr = 1;
            while ptr < 2 * align {
                let expected = ptr % align;
                let offset = if expected == 0 { 0 } else { align - expected };
                assert!(ptr::invalid::<u8>(ptr).align_offset(align) == offset);
                ptr += 1;
            }
            align = (align + 1).next_power_of_two();
        }
    }
}
416
#[test]
fn align_offset_various_strides() {
    // Returns `true` on a MISMATCH between `align_offset` and the naive
    // reference computation, so the caller can accumulate failures and
    // report them all before asserting.
    unsafe fn test_stride<T>(ptr: *const T, align: usize) -> bool {
        let numptr = ptr as usize;
        let mut expected = usize::MAX;
        // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
        for el in 0..align {
            if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
                expected = el;
                break;
            }
        }
        let got = ptr.align_offset(align);
        if got != expected {
            eprintln!(
                "aligning {:p} (with stride of {}) to {}, expected {}, got {}",
                ptr,
                ::std::mem::size_of::<T>(),
                align,
                expected,
                got
            );
            return true;
        }
        return false;
    }

    // For pointers of stride != 1, we verify the algorithm against the naivest possible
    // implementation
    let mut align = 1;
    let mut x = false;
    // Miri is too slow
    let limit = if cfg!(miri) { 32 } else { 1024 };
    while align < limit {
        for ptr in 1usize..4 * align {
            unsafe {
                // `#[repr(packed)]` gives structs with odd sizes/strides;
                // the numeric suffix of each name is its size in bytes.
                #[repr(packed)]
                struct A3(u16, u8);
                x |= test_stride::<A3>(ptr::invalid::<A3>(ptr), align);

                struct A4(u32);
                x |= test_stride::<A4>(ptr::invalid::<A4>(ptr), align);

                #[repr(packed)]
                struct A5(u32, u8);
                x |= test_stride::<A5>(ptr::invalid::<A5>(ptr), align);

                #[repr(packed)]
                struct A6(u32, u16);
                x |= test_stride::<A6>(ptr::invalid::<A6>(ptr), align);

                #[repr(packed)]
                struct A7(u32, u16, u8);
                x |= test_stride::<A7>(ptr::invalid::<A7>(ptr), align);

                #[repr(packed)]
                struct A8(u32, u32);
                x |= test_stride::<A8>(ptr::invalid::<A8>(ptr), align);

                #[repr(packed)]
                struct A9(u32, u32, u8);
                x |= test_stride::<A9>(ptr::invalid::<A9>(ptr), align);

                #[repr(packed)]
                struct A10(u32, u32, u16);
                x |= test_stride::<A10>(ptr::invalid::<A10>(ptr), align);

                x |= test_stride::<u32>(ptr::invalid::<u32>(ptr), align);
                x |= test_stride::<u128>(ptr::invalid::<u128>(ptr), align);
            }
        }
        align = (align + 1).next_power_of_two();
    }
    // `x` accumulated any mismatch across all strides/alignments/addresses.
    assert!(!x);
}
492
#[test]
fn align_offset_various_strides_const() {
    // Const-eval counterpart of `align_offset_various_strides`: panics (via
    // `assert!`) on the first mismatch instead of accumulating failures.
    const unsafe fn test_stride<T>(ptr: *const T, numptr: usize, align: usize) {
        let mut expected = usize::MAX;
        // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
        let mut el = 0;
        while el < align {
            if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
                expected = el;
                break;
            }
            el += 1;
        }
        let got = ptr.align_offset(align);
        assert!(got == expected);
    }

    const {
        // For pointers of stride != 1, we verify the algorithm against the naivest possible
        // implementation
        let mut align = 1;
        // Smaller limit than the runtime test: const eval is slow.
        let limit = 32;
        while align < limit {
            let mut ptr = 1;
            while ptr < 4 * align {
                unsafe {
                    #[repr(packed)]
                    struct A3(u16, u8);
                    test_stride::<A3>(ptr::invalid::<A3>(ptr), ptr, align);

                    struct A4(u32);
                    test_stride::<A4>(ptr::invalid::<A4>(ptr), ptr, align);

                    #[repr(packed)]
                    struct A5(u32, u8);
                    test_stride::<A5>(ptr::invalid::<A5>(ptr), ptr, align);

                    #[repr(packed)]
                    struct A6(u32, u16);
                    test_stride::<A6>(ptr::invalid::<A6>(ptr), ptr, align);

                    #[repr(packed)]
                    struct A7(u32, u16, u8);
                    test_stride::<A7>(ptr::invalid::<A7>(ptr), ptr, align);

                    #[repr(packed)]
                    struct A8(u32, u32);
                    test_stride::<A8>(ptr::invalid::<A8>(ptr), ptr, align);

                    #[repr(packed)]
                    struct A9(u32, u32, u8);
                    test_stride::<A9>(ptr::invalid::<A9>(ptr), ptr, align);

                    #[repr(packed)]
                    struct A10(u32, u32, u16);
                    test_stride::<A10>(ptr::invalid::<A10>(ptr), ptr, align);

                    test_stride::<u32>(ptr::invalid::<u32>(ptr), ptr, align);
                    test_stride::<u128>(ptr::invalid::<u128>(ptr), ptr, align);
                }
                ptr += 1;
            }
            align = (align + 1).next_power_of_two();
        }
    }
}
559
#[test]
fn align_offset_with_provenance_const() {
    // In const eval the absolute address of an allocation is unknown, so
    // `align_offset` can only succeed when alignment is provable from the
    // allocation's own alignment; otherwise it returns `usize::MAX`.
    const {
        // On some platforms (e.g. msp430-none-elf), the alignment of `i32` is less than 4.
        #[repr(align(4))]
        struct AlignedI32(i32);

        let data = AlignedI32(42);

        // `stride % align == 0` (usual case)

        let ptr: *const i32 = &data.0;
        assert!(ptr.align_offset(1) == 0);
        assert!(ptr.align_offset(2) == 0);
        assert!(ptr.align_offset(4) == 0);
        // 8-alignment cannot be proven from a 4-aligned allocation.
        assert!(ptr.align_offset(8) == usize::MAX);
        assert!(ptr.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr.wrapping_byte_add(1).align_offset(2) == usize::MAX);
        assert!(ptr.wrapping_byte_add(2).align_offset(1) == 0);
        assert!(ptr.wrapping_byte_add(2).align_offset(2) == 0);
        assert!(ptr.wrapping_byte_add(2).align_offset(4) == usize::MAX);
        assert!(ptr.wrapping_byte_add(3).align_offset(1) == 0);
        assert!(ptr.wrapping_byte_add(3).align_offset(2) == usize::MAX);

        assert!(ptr.wrapping_add(42).align_offset(4) == 0);
        assert!(ptr.wrapping_add(42).align_offset(8) == usize::MAX);

        // Stride 1: any provable misalignment can be walked off byte by byte.
        let ptr1: *const i8 = ptr.cast();
        assert!(ptr1.align_offset(1) == 0);
        assert!(ptr1.align_offset(2) == 0);
        assert!(ptr1.align_offset(4) == 0);
        assert!(ptr1.align_offset(8) == usize::MAX);
        assert!(ptr1.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr1.wrapping_byte_add(1).align_offset(2) == 1);
        assert!(ptr1.wrapping_byte_add(1).align_offset(4) == 3);
        assert!(ptr1.wrapping_byte_add(1).align_offset(8) == usize::MAX);
        assert!(ptr1.wrapping_byte_add(2).align_offset(1) == 0);
        assert!(ptr1.wrapping_byte_add(2).align_offset(2) == 0);
        assert!(ptr1.wrapping_byte_add(2).align_offset(4) == 2);
        assert!(ptr1.wrapping_byte_add(2).align_offset(8) == usize::MAX);
        assert!(ptr1.wrapping_byte_add(3).align_offset(1) == 0);
        assert!(ptr1.wrapping_byte_add(3).align_offset(2) == 1);
        assert!(ptr1.wrapping_byte_add(3).align_offset(4) == 1);
        assert!(ptr1.wrapping_byte_add(3).align_offset(8) == usize::MAX);

        // Stride 2.
        let ptr2: *const i16 = ptr.cast();
        assert!(ptr2.align_offset(1) == 0);
        assert!(ptr2.align_offset(2) == 0);
        assert!(ptr2.align_offset(4) == 0);
        assert!(ptr2.align_offset(8) == usize::MAX);
        assert!(ptr2.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr2.wrapping_byte_add(1).align_offset(2) == usize::MAX);
        assert!(ptr2.wrapping_byte_add(2).align_offset(1) == 0);
        assert!(ptr2.wrapping_byte_add(2).align_offset(2) == 0);
        assert!(ptr2.wrapping_byte_add(2).align_offset(4) == 1);
        assert!(ptr2.wrapping_byte_add(2).align_offset(8) == usize::MAX);
        assert!(ptr2.wrapping_byte_add(3).align_offset(1) == 0);
        assert!(ptr2.wrapping_byte_add(3).align_offset(2) == usize::MAX);

        // Stride 8 (larger than the provable 4-alignment).
        let ptr3: *const i64 = ptr.cast();
        assert!(ptr3.align_offset(1) == 0);
        assert!(ptr3.align_offset(2) == 0);
        assert!(ptr3.align_offset(4) == 0);
        assert!(ptr3.align_offset(8) == usize::MAX);
        assert!(ptr3.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr3.wrapping_byte_add(1).align_offset(2) == usize::MAX);

        // `stride % align != 0` (edge case)

        let ptr4: *const [u8; 3] = ptr.cast();
        assert!(ptr4.align_offset(1) == 0);
        assert!(ptr4.align_offset(2) == 0);
        assert!(ptr4.align_offset(4) == 0);
        assert!(ptr4.align_offset(8) == usize::MAX);
        assert!(ptr4.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr4.wrapping_byte_add(1).align_offset(2) == 1);
        assert!(ptr4.wrapping_byte_add(1).align_offset(4) == 1);
        assert!(ptr4.wrapping_byte_add(1).align_offset(8) == usize::MAX);
        assert!(ptr4.wrapping_byte_add(2).align_offset(1) == 0);
        assert!(ptr4.wrapping_byte_add(2).align_offset(2) == 0);
        assert!(ptr4.wrapping_byte_add(2).align_offset(4) == 2);
        assert!(ptr4.wrapping_byte_add(2).align_offset(8) == usize::MAX);
        assert!(ptr4.wrapping_byte_add(3).align_offset(1) == 0);
        assert!(ptr4.wrapping_byte_add(3).align_offset(2) == 1);
        assert!(ptr4.wrapping_byte_add(3).align_offset(4) == 3);
        assert!(ptr4.wrapping_byte_add(3).align_offset(8) == usize::MAX);

        let ptr5: *const [u8; 5] = ptr.cast();
        assert!(ptr5.align_offset(1) == 0);
        assert!(ptr5.align_offset(2) == 0);
        assert!(ptr5.align_offset(4) == 0);
        assert!(ptr5.align_offset(8) == usize::MAX);
        assert!(ptr5.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr5.wrapping_byte_add(1).align_offset(2) == 1);
        assert!(ptr5.wrapping_byte_add(1).align_offset(4) == 3);
        assert!(ptr5.wrapping_byte_add(1).align_offset(8) == usize::MAX);
        assert!(ptr5.wrapping_byte_add(2).align_offset(1) == 0);
        assert!(ptr5.wrapping_byte_add(2).align_offset(2) == 0);
        assert!(ptr5.wrapping_byte_add(2).align_offset(4) == 2);
        assert!(ptr5.wrapping_byte_add(2).align_offset(8) == usize::MAX);
        assert!(ptr5.wrapping_byte_add(3).align_offset(1) == 0);
        assert!(ptr5.wrapping_byte_add(3).align_offset(2) == 1);
        assert!(ptr5.wrapping_byte_add(3).align_offset(4) == 1);
        assert!(ptr5.wrapping_byte_add(3).align_offset(8) == usize::MAX);
    }
}
666
#[test]
fn align_offset_issue_103361() {
    // Regression test for rust-lang/rust#103361: `align_offset` overflowed
    // internally for huge strides; merely computing it must not panic.
    #[cfg(target_pointer_width = "64")]
    const SIZE: usize = 1 << 47;
    #[cfg(target_pointer_width = "32")]
    const SIZE: usize = 1 << 30;
    #[cfg(target_pointer_width = "16")]
    const SIZE: usize = 1 << 13;
    struct HugeSize([u8; SIZE - 1]);
    let _ = ptr::invalid::<HugeSize>(SIZE).align_offset(SIZE);
}
678
#[test]
fn align_offset_issue_103361_const() {
    #[cfg(target_pointer_width = "64")]
    const SIZE: usize = 1 << 47;
    #[cfg(target_pointer_width = "32")]
    const SIZE: usize = 1 << 30;
    #[cfg(target_pointer_width = "16")]
    const SIZE: usize = 1 << 13;
    struct HugeSize([u8; SIZE - 1]);

    // Const-eval counterpart of `align_offset_issue_103361`, additionally
    // checking exact results around the huge-stride boundary.
    const {
        assert!(ptr::invalid::<HugeSize>(SIZE - 1).align_offset(SIZE) == SIZE - 1);
        assert!(ptr::invalid::<HugeSize>(SIZE).align_offset(SIZE) == 0);
        assert!(ptr::invalid::<HugeSize>(SIZE + 1).align_offset(SIZE) == 1);
    }
}
695
#[test]
fn is_aligned() {
    let data = 42;
    let ptr: *const i32 = &data;
    // A real reference is always aligned for its own type...
    assert!(ptr.is_aligned());
    // ...and therefore for every alignment up to that of the type.
    assert!(ptr.is_aligned_to(1));
    assert!(ptr.is_aligned_to(2));
    assert!(ptr.is_aligned_to(4));
    // Two bytes past an aligned address is 2-aligned but not 4-aligned.
    assert!(ptr.wrapping_byte_add(2).is_aligned_to(1));
    assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
    assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));

    // At runtime either `ptr` or `ptr+1` is aligned to 8.
    assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
}
711
#[test]
fn is_aligned_const() {
    // Const-eval counterpart of `is_aligned`: alignment beyond what the
    // allocation guarantees cannot be proven at compile time.
    const {
        let data = 42;
        let ptr: *const i32 = &data;
        assert!(ptr.is_aligned());
        assert!(ptr.is_aligned_to(1));
        assert!(ptr.is_aligned_to(2));
        assert!(ptr.is_aligned_to(4));
        assert!(ptr.wrapping_byte_add(2).is_aligned_to(1));
        assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
        assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));

        // At comptime neither `ptr` nor `ptr+1` is aligned to 8.
        assert!(!ptr.is_aligned_to(8));
        assert!(!ptr.wrapping_add(1).is_aligned_to(8));
    }
}
730
#[test]
fn offset_from() {
    // Distances between in-bounds pointers into the same array are measured
    // in elements and are signed.
    let mut a = [0; 5];
    let lo: *mut i32 = &mut a[1];
    let hi: *mut i32 = &mut a[3];
    unsafe {
        assert_eq!(hi.offset_from(lo), 2);
        assert_eq!(lo.offset_from(hi), -2);
        assert_eq!(lo.offset(2), hi);
        assert_eq!(hi.offset(-2), lo);
    }
}
743
#[test]
fn ptr_metadata() {
    struct Unit;
    struct Pair<A, B: ?Sized>(A, B);
    extern "C" {
        type Extern;
    }
    // Sized types (and extern types) have `()` metadata.
    let () = metadata(&());
    let () = metadata(&Unit);
    let () = metadata(&4_u32);
    let () = metadata(&String::new());
    let () = metadata(&Some(4_u32));
    let () = metadata(&ptr_metadata);
    let () = metadata(&|| {});
    let () = metadata(&[4, 7]);
    let () = metadata(&(4, String::new()));
    let () = metadata(&Pair(4, String::new()));
    let () = metadata(ptr::null::<()>() as *const Extern);
    let () = metadata(ptr::null::<()>() as *const <&u32 as std::ops::Deref>::Target);

    // `str` and slices carry their length as metadata.
    assert_eq!(metadata("foo"), 3_usize);
    assert_eq!(metadata(&[4, 7][..]), 2_usize);

    // For a struct/tuple whose last field is unsized, the metadata is that
    // of the last field.
    let dst_tuple: &(bool, [u8]) = &(true, [0x66, 0x6F, 0x6F]);
    let dst_struct: &Pair<bool, [u8]> = &Pair(true, [0x66, 0x6F, 0x6F]);
    assert_eq!(metadata(dst_tuple), 3_usize);
    assert_eq!(metadata(dst_struct), 3_usize);
    unsafe {
        let dst_tuple: &(bool, str) = std::mem::transmute(dst_tuple);
        let dst_struct: &Pair<bool, str> = std::mem::transmute(dst_struct);
        assert_eq!(&dst_tuple.1, "foo");
        assert_eq!(&dst_struct.1, "foo");
        assert_eq!(metadata(dst_tuple), 3_usize);
        assert_eq!(metadata(dst_struct), 3_usize);
    }

    // Trait objects carry a vtable pointer; it is determined by the pair
    // (erased type, trait).
    let vtable_1: DynMetadata<dyn Debug> = metadata(&4_u16 as &dyn Debug);
    let vtable_2: DynMetadata<dyn Display> = metadata(&4_u16 as &dyn Display);
    let vtable_3: DynMetadata<dyn Display> = metadata(&4_u32 as &dyn Display);
    let vtable_4: DynMetadata<dyn Display> = metadata(&(true, 7_u32) as &(bool, dyn Display));
    let vtable_5: DynMetadata<dyn Display> =
        metadata(&Pair(true, 7_u32) as &Pair<bool, dyn Display>);
    unsafe {
        let address_1: *const () = std::mem::transmute(vtable_1);
        let address_2: *const () = std::mem::transmute(vtable_2);
        let address_3: *const () = std::mem::transmute(vtable_3);
        let address_4: *const () = std::mem::transmute(vtable_4);
        let address_5: *const () = std::mem::transmute(vtable_5);
        // Different trait => different vtable pointer
        assert_ne!(address_1, address_2);
        // Different erased type => different vtable pointer
        assert_ne!(address_2, address_3);
        // Same erased type and same trait => same vtable pointer
        assert_eq!(address_3, address_4);
        assert_eq!(address_3, address_5);
    }
}
801
#[test]
fn ptr_metadata_bounds() {
    // Compile-time check that `<T as Pointee>::Metadata` satisfies the trait
    // bounds promised by the `Pointee` definition.
    fn metadata_eq_method_address<T: ?Sized>() -> usize {
        // The `Metadata` associated type has an `Ord` bound, so this is valid:
        <<T as Pointee>::Metadata as PartialEq>::eq as usize
    }
    // "Synthetic" trait impls generated by the compiler like those of `Pointee`
    // are not checked for bounds of associated type.
    // So with a buggy core we could have both:
    // * `<dyn Display as Pointee>::Metadata == DynMetadata`
    // * `DynMetadata: !PartialEq`
    // … and cause an ICE here:
    metadata_eq_method_address::<dyn Display>();

    // For this reason, let’s check here that bounds are satisfied:

    let _ = static_assert_expected_bounds_for_metadata::<()>;
    let _ = static_assert_expected_bounds_for_metadata::<usize>;
    let _ = static_assert_expected_bounds_for_metadata::<DynMetadata<dyn Display>>;
    fn _static_assert_associated_type<T: ?Sized>() {
        let _ = static_assert_expected_bounds_for_metadata::<<T as Pointee>::Metadata>;
    }

    fn static_assert_expected_bounds_for_metadata<Meta>()
    where
        // Keep this in sync with the associated type in `library/core/src/ptr/metadata.rs`
        Meta: Copy + Send + Sync + Ord + std::hash::Hash + Unpin,
    {
    }
}
832
#[test]
fn dyn_metadata() {
    #[derive(Debug)]
    #[repr(align(32))]
    struct Something([u8; 47]);

    let value = Something([0; 47]);
    let trait_object: &dyn Debug = &value;
    let meta = metadata(trait_object);

    // 47 bytes padded up to the 32-byte alignment gives size 64.
    assert_eq!(meta.size_of(), 64);
    assert_eq!(meta.size_of(), std::mem::size_of::<Something>());
    assert_eq!(meta.align_of(), 32);
    assert_eq!(meta.align_of(), std::mem::align_of::<Something>());
    assert_eq!(meta.layout(), std::alloc::Layout::new::<Something>());

    // Debug formatting exposes the vtable address.
    assert!(format!("{meta:?}").starts_with("DynMetadata(0x"));
}
851
#[test]
fn from_raw_parts() {
    let mut value = 5_u32;
    let address = &mut value as *mut _ as *mut ();
    let trait_object: &dyn Display = &mut value;
    let vtable = metadata(trait_object);
    let trait_object = NonNull::from(trait_object);

    // Reassembling (data address, vtable) reproduces the original fat pointer.
    assert_eq!(ptr::from_raw_parts(address, vtable), trait_object.as_ptr());
    assert_eq!(ptr::from_raw_parts_mut(address, vtable), trait_object.as_ptr());
    assert_eq!(NonNull::from_raw_parts(NonNull::new(address).unwrap(), vtable), trait_object);

    let mut array = [5_u32, 5, 5, 5, 5];
    let address = &mut array as *mut _ as *mut ();
    let array_ptr = NonNull::from(&mut array);
    let slice_ptr = NonNull::from(&mut array[..]);

    // `()` metadata yields a thin pointer to the whole array...
    assert_eq!(ptr::from_raw_parts(address, ()), array_ptr.as_ptr());
    assert_eq!(ptr::from_raw_parts_mut(address, ()), array_ptr.as_ptr());
    assert_eq!(NonNull::from_raw_parts(NonNull::new(address).unwrap(), ()), array_ptr);

    // ...while a length yields a slice pointer to the same memory.
    assert_eq!(ptr::from_raw_parts(address, 5), slice_ptr.as_ptr());
    assert_eq!(ptr::from_raw_parts_mut(address, 5), slice_ptr.as_ptr());
    assert_eq!(NonNull::from_raw_parts(NonNull::new(address).unwrap(), 5), slice_ptr);
}
877
#[test]
fn thin_box() {
    let foo = ThinBox::<dyn Display>::new(4);
    assert_eq!(foo.to_string(), "4");
    drop(foo);
    let bar = ThinBox::<dyn Display>::new(7);
    assert_eq!(bar.to_string(), "7");

    // A slightly more interesting library that could be built on top of metadata APIs.
    //
    // * It could be generalized to any `T: ?Sized` (not just trait object)
    //   if `{size,align}_of_for_meta<T: ?Sized>(T::Metadata)` are added.
    // * Constructing a `ThinBox` without consuming and deallocating a `Box`
    //   requires either the unstable `Unsize` marker trait,
    //   or the unstable `unsized_locals` language feature,
    //   or taking `&dyn T` and restricting to `T: Copy`.

    use std::alloc::*;
    use std::marker::PhantomData;

    /// A pointer-sized box for unsized values: the `DynMetadata` header is
    /// stored inline at the start of the allocation, followed by the value.
    struct ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        // Points at the metadata header; the value lives after it.
        ptr: NonNull<DynMetadata<T>>,
        phantom: PhantomData<T>,
    }

    impl<T> ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        pub fn new<Value: std::marker::Unsize<T>>(value: Value) -> Self {
            let unsized_: &T = &value;
            let meta = metadata(unsized_);
            let meta_layout = Layout::for_value(&meta);
            let value_layout = Layout::for_value(&value);
            // Header layout extended by the value layout; `offset` is where
            // the value starts relative to the allocation base.
            let (layout, offset) = meta_layout.extend(value_layout).unwrap();
            // `DynMetadata` is pointer-sized:
            assert!(layout.size() > 0);
            // If `ThinBox<T>` is generalized to any `T: ?Sized`,
            // handle ZSTs with a dangling pointer without going through `alloc()`,
            // like `Box<T>` does.
            unsafe {
                let ptr = NonNull::new(alloc(layout))
                    .unwrap_or_else(|| handle_alloc_error(layout))
                    .cast::<DynMetadata<T>>();
                ptr.as_ptr().write(meta);
                ptr.as_ptr().byte_add(offset).cast::<Value>().write(value);
                Self { ptr, phantom: PhantomData }
            }
        }

        // Reads the metadata header back out of the allocation.
        fn meta(&self) -> DynMetadata<T> {
            unsafe { *self.ptr.as_ref() }
        }

        // Full allocation layout plus the value's offset within it.
        fn layout(&self) -> (Layout, usize) {
            let meta = self.meta();
            Layout::for_value(&meta).extend(meta.layout()).unwrap()
        }

        // Reconstructs a fat `*const T` from the stored address and metadata.
        fn value_ptr(&self) -> *const T {
            let (_, offset) = self.layout();
            let data_ptr = unsafe { self.ptr.cast::<u8>().as_ptr().add(offset) };
            ptr::from_raw_parts(data_ptr.cast(), self.meta())
        }

        fn value_mut_ptr(&mut self) -> *mut T {
            let (_, offset) = self.layout();
            // FIXME: can this line be shared with the same in `value_ptr()`
            // without upsetting Stacked Borrows?
            let data_ptr = unsafe { self.ptr.cast::<u8>().as_ptr().add(offset) };
            from_raw_parts_mut(data_ptr.cast(), self.meta())
        }
    }

    impl<T> std::ops::Deref for ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        type Target = T;

        fn deref(&self) -> &T {
            unsafe { &*self.value_ptr() }
        }
    }

    impl<T> std::ops::DerefMut for ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        fn deref_mut(&mut self) -> &mut T {
            unsafe { &mut *self.value_mut_ptr() }
        }
    }

    impl<T> std::ops::Drop for ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        fn drop(&mut self) {
            let (layout, _) = self.layout();
            unsafe {
                // Drop the value in place, then free the whole allocation.
                drop_in_place::<T>(&mut **self);
                dealloc(self.ptr.cast().as_ptr(), layout);
            }
        }
    }
}
988
#[test]
fn nonnull_tagged_pointer_with_provenance() {
    let raw_pointer = Box::into_raw(Box::new(10));

    // A freshly created tagged pointer starts with an empty tag.
    let mut p = TaggedPointer::new(raw_pointer).unwrap();
    assert_eq!(p.tag(), 0);

    // Setting a tag must not disturb the pointee or the pointer's provenance.
    p.set_tag(1);
    assert_eq!(p.tag(), 1);
    assert_eq!(unsafe { *p.pointer().as_ptr() }, 10);

    p.set_tag(3);
    assert_eq!(p.tag(), 3);
    assert_eq!(unsafe { *p.pointer().as_ptr() }, 10);

    // Reconstruct the box so the allocation is freed.
    unsafe { Box::from_raw(p.pointer().as_ptr()) };

    /// A non-null pointer type which carries several bits of metadata and maintains provenance.
    #[repr(transparent)]
    pub struct TaggedPointer<T>(NonNull<T>);

    impl<T> Clone for TaggedPointer<T> {
        fn clone(&self) -> Self {
            Self(self.0)
        }
    }

    impl<T> Copy for TaggedPointer<T> {}

    impl<T> TaggedPointer<T> {
        /// The ABI-required minimum alignment of the `P` type.
        pub const ALIGNMENT: usize = core::mem::align_of::<T>();
        /// A mask for data-carrying bits of the address.
        pub const DATA_MASK: usize = !Self::ADDRESS_MASK;
        /// Number of available bits of storage in the address.
        pub const NUM_BITS: u32 = Self::ALIGNMENT.trailing_zeros();
        /// A mask for the non-data-carrying bits of the address.
        pub const ADDRESS_MASK: usize = usize::MAX << Self::NUM_BITS;

        /// Create a new tagged pointer from a possibly null pointer.
        pub fn new(pointer: *mut T) -> Option<TaggedPointer<T>> {
            Some(TaggedPointer(NonNull::new(pointer)?))
        }

        /// Consume this tagged pointer and produce a raw mutable pointer to the
        /// memory location.
        pub fn pointer(self) -> NonNull<T> {
            // SAFETY: The `addr` is guaranteed to have bits set in Self::ADDRESS_MASK, so the result will be non-null.
            self.0.map_addr(|addr| unsafe {
                NonZeroUsize::new_unchecked(addr.get() & Self::ADDRESS_MASK)
            })
        }

        /// Consume this tagged pointer and produce the data it carries.
        pub fn tag(&self) -> usize {
            self.0.addr().get() & Self::DATA_MASK
        }

        /// Update the data this tagged pointer carries to a new value.
        pub fn set_tag(&mut self, data: usize) {
            assert_eq!(
                data & Self::ADDRESS_MASK,
                0,
                "cannot set more data beyond the lowest NUM_BITS"
            );
            let data = data & Self::DATA_MASK;

            // SAFETY: This value will always be non-zero because the upper bits (from
            // ADDRESS_MASK) will always be non-zero. This a property of the type and its
            // construction.
            self.0 = self.0.map_addr(|addr| unsafe {
                NonZeroUsize::new_unchecked((addr.get() & Self::ADDRESS_MASK) | data)
            })
        }
    }
}
1065
#[test]
fn swap_copy_untyped() {
    // `{swap,copy}{,_nonoverlapping}` are untyped byte copies, so calling
    // them at type `bool` on bytes that are not valid `bool`s must still work.
    let mut a = 5u8;
    let mut b = 6u8;

    let a_ptr = &mut a as *mut u8 as *mut bool;
    let b_ptr = &mut b as *mut u8 as *mut bool;

    unsafe {
        ptr::swap(a_ptr, b_ptr);
        ptr::swap_nonoverlapping(a_ptr, b_ptr, 1);
    }
    // Two swaps cancel out.
    assert_eq!(a, 5);
    assert_eq!(b, 6);

    unsafe {
        ptr::copy(a_ptr, b_ptr, 1);
        ptr::copy_nonoverlapping(a_ptr, b_ptr, 1);
    }
    assert_eq!(a, 5);
    assert_eq!(b, 5);
}
1090
#[test]
fn test_const_copy() {
    // `ptr::copy` in const eval, copying a *pointer value* bytewise through
    // `MaybeUninit<u8>`: the copy must preserve provenance so the copied
    // pointer stays dereferenceable.
    const {
        let ptr1 = &1;
        let mut ptr2 = &666;

        // Copy ptr1 to ptr2, bytewise.
        unsafe {
            ptr::copy(
                &ptr1 as *const _ as *const MaybeUninit<u8>,
                &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
                mem::size_of::<&i32>(),
            );
        }

        // Make sure they still work.
        assert!(*ptr1 == 1);
        assert!(*ptr2 == 1);
    };

    // Same check with `copy_nonoverlapping`.
    const {
        let ptr1 = &1;
        let mut ptr2 = &666;

        // Copy ptr1 to ptr2, bytewise.
        unsafe {
            ptr::copy_nonoverlapping(
                &ptr1 as *const _ as *const MaybeUninit<u8>,
                &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
                mem::size_of::<&i32>(),
            );
        }

        // Make sure they still work.
        assert!(*ptr1 == 1);
        assert!(*ptr2 == 1);
    };
}