use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::slice;
use core::sync::atomic::{AtomicUsize, Ordering};

use crate::alloc::alloc;
use crate::alloc::boxed::Box;
use crate::guard::Guard;
use const_fn::const_fn;
use crossbeam_utils::atomic::AtomicConsume;

/// Given an ordering for the success case of a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}

/// The error returned on a failed compare-and-set operation.
pub struct CompareAndSetError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    pub new: P,
}

impl<'g, T: 'g, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CompareAndSetError")
            .field("current", &self.current)
            .field("new", &self.new)
            .finish()
    }
}

/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker
    /// than the success ordering.
    fn failure(&self) -> Ordering;
}

impl CompareAndSetOrdering for Ordering {
    #[inline]
    fn success(&self) -> Ordering {
        *self
    }

    #[inline]
    fn failure(&self) -> Ordering {
        strongest_failure_ordering(*self)
    }
}

impl CompareAndSetOrdering for (Ordering, Ordering) {
    #[inline]
    fn success(&self) -> Ordering {
        self.0
    }

    #[inline]
    fn failure(&self) -> Ordering {
        self.1
    }
}

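// A small sanity test (added here, not part of the upstream source) showing how the two
// `CompareAndSetOrdering` impls behave: a single `Ordering` derives its failure ordering via
// `strongest_failure_ordering`, while a pair spells both orderings out explicitly.
#[cfg(test)]
#[test]
fn compare_and_set_ordering_impls() {
    use self::Ordering::*;
    // Single ordering: failure is the strongest valid weakening of the success ordering.
    assert_eq!(AcqRel.success(), AcqRel);
    assert_eq!(AcqRel.failure(), Acquire);
    assert_eq!(Release.failure(), Relaxed);
    // Pair of orderings: success and failure are taken verbatim.
    assert_eq!((SeqCst, Relaxed).success(), SeqCst);
    assert_eq!((SeqCst, Relaxed).failure(), Relaxed);
}
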
/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
fn low_bits<T: ?Sized + Pointable>() -> usize {
    (1 << T::ALIGN.trailing_zeros()) - 1
}

/// Panics if the pointer is not properly aligned.
#[inline]
fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
    assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
}

/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
///
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
#[inline]
fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
}

/// Decomposes a tagged pointer `data` into the pointer and the tag.
#[inline]
fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
    (data & !low_bits::<T>(), data & low_bits::<T>())
}

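// A quick round-trip test (added, not in the upstream source) for the tag helpers above:
// composing and then decomposing a tag recovers the original pointer, and tags wider than
// the unused low bits are truncated. The address `0x1000` is an arbitrary aligned value.
#[cfg(test)]
#[test]
fn compose_decompose_tag() {
    let mask = low_bits::<u64>();
    let ptr = 0x1000_usize;
    let tagged = compose_tag::<u64>(ptr, 1);
    assert_eq!(decompose_tag::<u64>(tagged), (ptr, 1));
    // An over-wide tag is truncated to the available low bits.
    assert_eq!(decompose_tag::<u64>(compose_tag::<u64>(ptr, !0)), (ptr, mask));
}
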
/// Types that are pointed to by a single word.
///
/// In concurrent programming, it is necessary to represent an object within a word because atomic
/// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait
/// qualifies such types that are pointed to by a single word.
///
/// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is
/// allocated on the heap and owned by a single-word pointer. This trait is also implemented for
/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
/// size and elements.
///
/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In
/// particular, Crossbeam supports dynamically sized slices as follows.
///
/// ```
/// use std::mem::MaybeUninit;
/// use crossbeam_epoch::Owned;
///
/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
/// ```
pub trait Pointable {
    /// The alignment of the pointer.
    const ALIGN: usize;

    /// The type for initializers.
    type Init;

    /// Initializes an object with the given initializer and returns a pointer to it.
    ///
    /// # Safety
    ///
    /// The result should be a multiple of `ALIGN`.
    unsafe fn init(init: Self::Init) -> usize;

    /// Dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
    unsafe fn deref<'a>(ptr: usize) -> &'a Self;

    /// Mutably dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;

    /// Drops the object pointed to by the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn drop(ptr: usize);
}

impl<T> Pointable for T {
    const ALIGN: usize = mem::align_of::<T>();

    type Init = T;

    unsafe fn init(init: Self::Init) -> usize {
        Box::into_raw(Box::new(init)) as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        &*(ptr as *const T)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        &mut *(ptr as *mut T)
    }

    unsafe fn drop(ptr: usize) {
        drop(Box::from_raw(ptr as *mut T));
    }
}

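// A minimal round-trip test (added, not in the upstream source) for the sized `Pointable`
// impl above: `init` boxes the value, `deref` reads it back, and `drop` releases the box.
#[cfg(test)]
#[test]
fn pointable_sized_round_trip() {
    unsafe {
        let ptr = <i32 as Pointable>::init(7);
        assert_eq!(*<i32 as Pointable>::deref(ptr), 7);
        <i32 as Pointable>::drop(ptr);
    }
}
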
/// Array with size.
///
/// # Memory layout
///
/// An array consisting of a size and elements:
///
/// ```text
///          elements
///          |
///          |
/// ------------------------------------
/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
/// ------------------------------------
/// ```
///
/// Its memory layout is different from that of `Box<[T]>` in that the size is stored in the
/// allocation (not alongside the pointer as in `Box<[T]>`).
///
/// Elements are not present in the type, but they will be in the allocation.
// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use
// [`alloc::alloc::Layout::extend`] instead.
#[repr(C)]
struct Array<T> {
    size: usize,
    elements: [MaybeUninit<T>; 0],
}

impl<T> Pointable for [MaybeUninit<T>] {
    const ALIGN: usize = mem::align_of::<Array<T>>();

    type Init = usize;

    unsafe fn init(len: Self::Init) -> usize {
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * len;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        let ptr = alloc::alloc(layout) as *mut Array<T>;
        // Store the number of elements, not the allocation size in bytes: `deref` and `drop`
        // below interpret this field as the element count.
        (*ptr).size = len;
        ptr as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        let array = &*(ptr as *const Array<T>);
        slice::from_raw_parts(array.elements.as_ptr() as *const _, array.size)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        // Go through a mutable reference so the returned slice may legally be mutated.
        let array = &mut *(ptr as *mut Array<T>);
        slice::from_raw_parts_mut(array.elements.as_mut_ptr() as *mut _, array.size)
    }

    unsafe fn drop(ptr: usize) {
        let array = &*(ptr as *mut Array<T>);
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.size;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        alloc::dealloc(ptr as *mut u8, layout);
    }
}

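// A sanity test (added, not in the upstream source) pinning the element-count fix above:
// a dynamically sized slice allocation must dereference to a slice of the requested length.
#[cfg(test)]
#[test]
fn pointable_slice_len() {
    let o = Owned::<[MaybeUninit<i32>]>::init(10);
    assert_eq!(o.len(), 10);
}
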
/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// Crossbeam supports dynamically sized types. See [`Pointable`] for details.
pub struct Atomic<T: ?Sized + Pointable> {
    data: AtomicUsize,
    _marker: PhantomData<*mut T>,
}

unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}

impl<T> Atomic<T> {
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// ```
    pub fn new(init: T) -> Atomic<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Atomic<T> {
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Atomic<T> {
        Self::from(Owned::init(init))
    }

    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    fn from_usize(data: usize) -> Self {
        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[const_fn(feature = "nightly")]
    pub const fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load(ord)) }
    }

    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model, since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice, since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load_consume(guard);
    /// ```
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load_consume()) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// ```
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange(current.into_usize(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: Atomic::compare_and_set
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }

    /// Takes ownership of the pointee.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. Because [`Atomic`] doesn't have a
    /// destructor and so never drops the pointee while [`Owned`] does, this is suitable for
    /// destructors of data structures.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any Shared or reference to it ourselves.
    ///         unsafe {
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
    ///         }
    ///     }
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        Owned::from_usize(self.data.into_inner())
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_tag::<T>(data);

        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_tag::<T>(data);
        fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
    /// atomics or fences.
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
        Atomic::from_usize(data)
    }
}

impl<T: ?Sized + Pointable> Default for Atomic<T> {
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// ```
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
pub trait Pointer<T: ?Sized + Pointable> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Safety
    ///
    /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
    /// not be converted back by `Pointer::from_usize()` more than once.
    unsafe fn from_usize(data: usize) -> Self;
}

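// An illustrative test (added, not in the upstream source) of the `Pointer` contract above:
// `into_usize` releases ownership into a machine word, and `from_usize` reclaims it exactly once.
#[cfg(test)]
#[test]
fn pointer_round_trip() {
    let data = Owned::new(92_i32).into_usize();
    // Safety: `data` came from `into_usize` and is converted back exactly once.
    let o = unsafe { Owned::<i32>::from_usize(data) };
    assert_eq!(*o, 92);
}
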
/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Owned<T: ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<Box<T>>,
}

impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
    #[inline]
    fn into_usize(self) -> usize {
        let data = self.data;
        mem::forget(self);
        data
    }

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Panics
    ///
    /// Panics if the data is zero in debug mode.
    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data,
            _marker: PhantomData,
        }
    }
}

impl<T> Owned<T> {
    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Safety
    ///
    /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
    /// back by `Owned::from_raw()` multiple times.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        Self::from_usize(raw)
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        mem::forget(self);
        unsafe { Box::from_raw(raw as *mut _) }
    }

    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    pub fn new(init: T) -> Owned<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Owned<T> {
    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Owned<T> {
        unsafe { Self::from_usize(T::init(init)) }
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// ```
    #[allow(clippy::needless_lifetimes)]
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(2);
    /// assert_eq!(o.tag(), 2);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        let data = self.into_usize();
        unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
    }
}

impl<T: ?Sized + Pointable> Drop for Owned<T> {
    fn drop(&mut self) {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe {
            T::drop(raw);
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Owned")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: Clone> Clone for Owned<T> {
    fn clone(&self) -> Self {
        Owned::new((**self).clone()).with_tag(self.tag())
    }
}

impl<T: ?Sized + Pointable> Deref for Owned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref(raw) }
    }
}

impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
    fn deref_mut(&mut self) -> &mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref_mut(raw) }
    }
}

impl<T> From<T> for Owned<T> {
    fn from(t: T) -> Self {
        Owned::new(t)
    }
}

impl<T> From<Box<T>> for Owned<T> {
    /// Returns a new owned pointer pointing to `b`.
    ///
    /// # Panics
    ///
    /// Panics if the pointer (the `Box`) is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::from(Box::new(1234));
    /// ```
    fn from(b: Box<T>) -> Self {
        unsafe { Self::from_raw(Box::into_raw(b)) }
    }
}

impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
    fn borrow(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
    fn borrow_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
    fn as_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<(&'g (), *const T)>,
}

impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
    fn clone(&self) -> Self {
        Self {
            data: self.data,
            _marker: PhantomData,
        }
    }
}

impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}

impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
    #[inline]
    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Shared<'g, T> {
    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw as *const _
    }
}

impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn is_null(&self) -> bool {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw == 0
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    #[allow(clippy::should_implement_trait)]
    pub unsafe fn deref(&self) -> &'g T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref(raw)
    }

    /// Dereferences the pointer.
    ///
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// * There is no guarantee that there are no more threads attempting to read/write from/to the
    ///   actual object at the same time.
    ///
    ///   The user must know that there are no concurrent accesses towards the object itself.
    ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// ```
    #[allow(clippy::should_implement_trait)]
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref_mut(raw)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        if raw == 0 {
            None
        } else {
            Some(T::deref(raw))
        }
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
        Owned::from_usize(self.data)
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
    }
}

impl<T> From<*const T> for Shared<'_, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
    /// assert!(!p.is_null());
    /// ```
    fn from(raw: *const T) -> Self {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        unsafe { Self::from_usize(raw) }
    }
}

impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}

impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(test)]
mod tests {
    use super::Shared;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }
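
    #[test]
    fn tag_is_truncated() {
        // An added check (not in the upstream tests): a tag wider than the unused low
        // bits of an `i64` pointer is truncated to those bits, and the (null) pointer
        // itself is left untouched.
        let p = Shared::<i64>::null().with_tag(!0);
        assert_eq!(p.tag(), super::low_bits::<i64>());
        assert!(p.is_null());
    }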
}