1 use core
::borrow
::{Borrow, BorrowMut}
;
4 use core
::marker
::PhantomData
;
5 use core
::mem
::{self, MaybeUninit}
;
6 use core
::ops
::{Deref, DerefMut}
;
8 use core
::sync
::atomic
::Ordering
;
10 use crate::alloc
::alloc
;
11 use crate::alloc
::boxed
::Box
;
12 use crate::guard
::Guard
;
13 use crate::primitive
::sync
::atomic
::AtomicUsize
;
14 use crossbeam_utils
::atomic
::AtomicConsume
;
/// Given ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        // `SeqCst` (and any future variants) keep the strongest allowed failure ordering.
        _ => SeqCst,
    }
}
28 /// The error returned on failed compare-and-set operation.
29 // TODO: remove in the next major version.
30 #[deprecated(note = "Use `CompareExchangeError` instead")]
31 pub type CompareAndSetError
<'g
, T
, P
> = CompareExchangeError
<'g
, T
, P
>;
33 /// The error returned on failed compare-and-swap operation.
34 pub struct CompareExchangeError
<'g
, T
: ?Sized
+ Pointable
, P
: Pointer
<T
>> {
35 /// The value in the atomic pointer at the time of the failed operation.
36 pub current
: Shared
<'g
, T
>,
38 /// The new value, which the operation failed to store.
42 impl<T
, P
: Pointer
<T
> + fmt
::Debug
> fmt
::Debug
for CompareExchangeError
<'_
, T
, P
> {
43 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
44 f
.debug_struct("CompareExchangeError")
45 .field("current", &self.current
)
46 .field("new", &self.new
)
/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
// TODO: remove in the next major version.
#[deprecated(
    note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
            use `compare_exchange` or `compare_exchange_weak instead`"
)]
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
    /// the success ordering.
    fn failure(&self) -> Ordering;
}
79 impl CompareAndSetOrdering
for Ordering
{
81 fn success(&self) -> Ordering
{
86 fn failure(&self) -> Ordering
{
87 strongest_failure_ordering(*self)
92 impl CompareAndSetOrdering
for (Ordering
, Ordering
) {
94 fn success(&self) -> Ordering
{
99 fn failure(&self) -> Ordering
{
104 /// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
106 fn low_bits
<T
: ?Sized
+ Pointable
>() -> usize {
107 (1 << T
::ALIGN
.trailing_zeros()) - 1
110 /// Panics if the pointer is not properly unaligned.
112 fn ensure_aligned
<T
: ?Sized
+ Pointable
>(raw
: usize) {
113 assert_eq
!(raw
& low_bits
::<T
>(), 0, "unaligned pointer");
116 /// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
118 /// `tag` is truncated to fit into the unused bits of the pointer to `T`.
120 fn compose_tag
<T
: ?Sized
+ Pointable
>(data
: usize, tag
: usize) -> usize {
121 (data
& !low_bits
::<T
>()) | (tag
& low_bits
::<T
>())
124 /// Decomposes a tagged pointer `data` into the pointer and the tag.
126 fn decompose_tag
<T
: ?Sized
+ Pointable
>(data
: usize) -> (usize, usize) {
127 (data
& !low_bits
::<T
>(), data
& low_bits
::<T
>())
/// Types that are pointed to by a single word.
///
/// In concurrent programming, it is necessary to represent an object within a word because atomic
/// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait
/// qualifies such types that are pointed to by a single word.
///
/// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is
/// allocated in heap and it is owned by a single-word pointer. This trait is also implemented for
/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
/// size and elements.
///
/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In
/// particular, Crossbeam supports dynamically sized slices as follows.
///
/// ```
/// use std::mem::MaybeUninit;
/// use crossbeam_epoch::Owned;
///
/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
/// ```
pub trait Pointable {
    /// The alignment of pointer.
    const ALIGN: usize;

    /// The type for initializers.
    type Init;

    /// Initializes a with the given initializer.
    ///
    /// # Safety
    ///
    /// The result should be a multiple of `ALIGN`.
    unsafe fn init(init: Self::Init) -> usize;

    /// Dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
    unsafe fn deref<'a>(ptr: usize) -> &'a Self;

    /// Mutably dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;

    /// Drops the object pointed to by the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn drop(ptr: usize);
}
194 impl<T
> Pointable
for T
{
195 const ALIGN
: usize = mem
::align_of
::<T
>();
199 unsafe fn init(init
: Self::Init
) -> usize {
200 Box
::into_raw(Box
::new(init
)) as usize
203 unsafe fn deref
<'a
>(ptr
: usize) -> &'a
Self {
207 unsafe fn deref_mut
<'a
>(ptr
: usize) -> &'a
mut Self {
208 &mut *(ptr
as *mut T
)
211 unsafe fn drop(ptr
: usize) {
212 drop(Box
::from_raw(ptr
as *mut T
));
/// An array consisting of size and elements:
///
/// ```text
/// ------------------------------------
/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
/// ------------------------------------
/// ```
///
/// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not
/// along with pointer as in `Box<[T]>`).
///
/// Elements are not present in the type, but they will be in the allocation.
// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use
// [`alloc::alloc::Layout::extend`] instead.
#[repr(C)]
struct Array<T> {
    /// The number of elements (not the number of bytes).
    len: usize,
    // Zero-sized tail: the real elements live directly after `len` in the allocation.
    elements: [MaybeUninit<T>; 0],
}
246 impl<T
> Pointable
for [MaybeUninit
<T
>] {
247 const ALIGN
: usize = mem
::align_of
::<Array
<T
>>();
251 unsafe fn init(len
: Self::Init
) -> usize {
252 let size
= mem
::size_of
::<Array
<T
>>() + mem
::size_of
::<MaybeUninit
<T
>>() * len
;
253 let align
= mem
::align_of
::<Array
<T
>>();
254 let layout
= alloc
::Layout
::from_size_align(size
, align
).unwrap();
255 let ptr
= alloc
::alloc(layout
) as *mut Array
<T
>;
257 alloc
::handle_alloc_error(layout
);
263 unsafe fn deref
<'a
>(ptr
: usize) -> &'a
Self {
264 let array
= &*(ptr
as *const Array
<T
>);
265 slice
::from_raw_parts(array
.elements
.as_ptr() as *const _
, array
.len
)
268 unsafe fn deref_mut
<'a
>(ptr
: usize) -> &'a
mut Self {
269 let array
= &*(ptr
as *mut Array
<T
>);
270 slice
::from_raw_parts_mut(array
.elements
.as_ptr() as *mut _
, array
.len
)
273 unsafe fn drop(ptr
: usize) {
274 let array
= &*(ptr
as *mut Array
<T
>);
275 let size
= mem
::size_of
::<Array
<T
>>() + mem
::size_of
::<MaybeUninit
<T
>>() * array
.len
;
276 let align
= mem
::align_of
::<Array
<T
>>();
277 let layout
= alloc
::Layout
::from_size_align(size
, align
).unwrap();
278 alloc
::dealloc(ptr
as *mut u8, layout
);
282 /// An atomic pointer that can be safely shared between threads.
284 /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
285 /// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
286 /// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
288 /// Any method that loads the pointer must be passed a reference to a [`Guard`].
290 /// Crossbeam supports dynamically sized types. See [`Pointable`] for details.
291 pub struct Atomic
<T
: ?Sized
+ Pointable
> {
293 _marker
: PhantomData
<*mut T
>,
296 unsafe impl<T
: ?Sized
+ Pointable
+ Send
+ Sync
> Send
for Atomic
<T
> {}
297 unsafe impl<T
: ?Sized
+ Pointable
+ Send
+ Sync
> Sync
for Atomic
<T
> {}
300 /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
305 /// use crossbeam_epoch::Atomic;
307 /// let a = Atomic::new(1234);
309 pub fn new(init
: T
) -> Atomic
<T
> {
314 impl<T
: ?Sized
+ Pointable
> Atomic
<T
> {
315 /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
320 /// use crossbeam_epoch::Atomic;
322 /// let a = Atomic::<i32>::init(1234);
324 pub fn init(init
: T
::Init
) -> Atomic
<T
> {
325 Self::from(Owned
::init(init
))
328 /// Returns a new atomic pointer pointing to the tagged pointer `data`.
329 fn from_usize(data
: usize) -> Self {
331 data
: AtomicUsize
::new(data
),
332 _marker
: PhantomData
,
336 /// Returns a new null atomic pointer.
341 /// use crossbeam_epoch::Atomic;
343 /// let a = Atomic::<i32>::null();
345 #[cfg(all(crossbeam_const_fn_trait_bound, not(crossbeam_loom)))]
346 pub const fn null() -> Atomic
<T
> {
348 data
: AtomicUsize
::new(0),
349 _marker
: PhantomData
,
353 /// Returns a new null atomic pointer.
354 #[cfg(not(all(crossbeam_const_fn_trait_bound, not(crossbeam_loom))))]
355 pub fn null() -> Atomic
<T
> {
357 data
: AtomicUsize
::new(0),
358 _marker
: PhantomData
,
362 /// Loads a `Shared` from the atomic pointer.
364 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
370 /// use crossbeam_epoch::{self as epoch, Atomic};
371 /// use std::sync::atomic::Ordering::SeqCst;
373 /// let a = Atomic::new(1234);
374 /// let guard = &epoch::pin();
375 /// let p = a.load(SeqCst, guard);
377 pub fn load
<'g
>(&self, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
378 unsafe { Shared::from_usize(self.data.load(ord)) }
381 /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
383 /// This is similar to the "acquire" ordering, except that an ordering is
384 /// only guaranteed with operations that "depend on" the result of the load.
385 /// However consume loads are usually much faster than acquire loads on
386 /// architectures with a weak memory model since they don't require memory
387 /// fence instructions.
389 /// The exact definition of "depend on" is a bit vague, but it works as you
390 /// would expect in practice since a lot of software, especially the Linux
391 /// kernel, rely on this behavior.
396 /// use crossbeam_epoch::{self as epoch, Atomic};
398 /// let a = Atomic::new(1234);
399 /// let guard = &epoch::pin();
400 /// let p = a.load_consume(guard);
402 pub fn load_consume
<'g
>(&self, _
: &'g Guard
) -> Shared
<'g
, T
> {
403 unsafe { Shared::from_usize(self.data.load_consume()) }
406 /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
408 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
414 /// use crossbeam_epoch::{Atomic, Owned, Shared};
415 /// use std::sync::atomic::Ordering::SeqCst;
417 /// let a = Atomic::new(1234);
418 /// a.store(Shared::null(), SeqCst);
419 /// a.store(Owned::new(1234), SeqCst);
421 pub fn store
<P
: Pointer
<T
>>(&self, new
: P
, ord
: Ordering
) {
422 self.data
.store(new
.into_usize(), ord
);
425 /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
428 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
434 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
435 /// use std::sync::atomic::Ordering::SeqCst;
437 /// let a = Atomic::new(1234);
438 /// let guard = &epoch::pin();
439 /// let p = a.swap(Shared::null(), SeqCst, guard);
441 pub fn swap
<'g
, P
: Pointer
<T
>>(&self, new
: P
, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
442 unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
445 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
446 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
447 /// same object, but with different tags, will not be considered equal.
449 /// The return value is a result indicating whether the new pointer was written. On success the
450 /// pointer that was written is returned. On failure the actual current value and `new` are
453 /// This method takes two `Ordering` arguments to describe the memory
454 /// ordering of this operation. `success` describes the required ordering for the
455 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
456 /// `failure` describes the required ordering for the load operation that takes place when
457 /// the comparison fails. Using `Acquire` as success ordering makes the store part
458 /// of this operation `Relaxed`, and using `Release` makes the successful load
459 /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
460 /// and must be equivalent to or weaker than the success ordering.
465 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
466 /// use std::sync::atomic::Ordering::SeqCst;
468 /// let a = Atomic::new(1234);
470 /// let guard = &epoch::pin();
471 /// let curr = a.load(SeqCst, guard);
472 /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
473 /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
475 pub fn compare_exchange
<'g
, P
>(
477 current
: Shared
<'_
, T
>,
482 ) -> Result
<Shared
<'g
, T
>, CompareExchangeError
<'g
, T
, P
>>
486 let new
= new
.into_usize();
488 .compare_exchange(current
.into_usize(), new
, success
, failure
)
489 .map(|_
| unsafe { Shared::from_usize(new) }
)
490 .map_err(|current
| unsafe {
491 CompareExchangeError
{
492 current
: Shared
::from_usize(current
),
493 new
: P
::from_usize(new
),
498 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
499 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
500 /// same object, but with different tags, will not be considered equal.
502 /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
503 /// succeeds, which can result in more efficient code on some platforms. The return value is a
504 /// result indicating whether the new pointer was written. On success the pointer that was
505 /// written is returned. On failure the actual current value and `new` are returned.
507 /// This method takes two `Ordering` arguments to describe the memory
508 /// ordering of this operation. `success` describes the required ordering for the
509 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
510 /// `failure` describes the required ordering for the load operation that takes place when
511 /// the comparison fails. Using `Acquire` as success ordering makes the store part
512 /// of this operation `Relaxed`, and using `Release` makes the successful load
513 /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
514 /// and must be equivalent to or weaker than the success ordering.
516 /// [`compare_exchange`]: Atomic::compare_exchange
521 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
522 /// use std::sync::atomic::Ordering::SeqCst;
524 /// let a = Atomic::new(1234);
525 /// let guard = &epoch::pin();
527 /// let mut new = Owned::new(5678);
528 /// let mut ptr = a.load(SeqCst, guard);
530 /// match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
536 /// ptr = err.current;
542 /// let mut curr = a.load(SeqCst, guard);
544 /// match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
546 /// Err(err) => curr = err.current,
550 pub fn compare_exchange_weak
<'g
, P
>(
552 current
: Shared
<'_
, T
>,
557 ) -> Result
<Shared
<'g
, T
>, CompareExchangeError
<'g
, T
, P
>>
561 let new
= new
.into_usize();
563 .compare_exchange_weak(current
.into_usize(), new
, success
, failure
)
564 .map(|_
| unsafe { Shared::from_usize(new) }
)
565 .map_err(|current
| unsafe {
566 CompareExchangeError
{
567 current
: Shared
::from_usize(current
),
568 new
: P
::from_usize(new
),
573 /// Fetches the pointer, and then applies a function to it that returns a new value.
574 /// Returns a `Result` of `Ok(previous_value)` if the function returned `Some`, else `Err(_)`.
576 /// Note that the given function may be called multiple times if the value has been changed by
577 /// other threads in the meantime, as long as the function returns `Some(_)`, but the function
578 /// will have been applied only once to the stored value.
580 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
581 /// ordering of this operation. The first describes the required ordering for
582 /// when the operation finally succeeds while the second describes the
583 /// required ordering for loads. These correspond to the success and failure
584 /// orderings of [`Atomic::compare_exchange`] respectively.
586 /// Using [`Acquire`] as success ordering makes the store part of this
587 /// operation [`Relaxed`], and using [`Release`] makes the final successful
588 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
589 /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
590 /// success ordering.
592 /// [`Relaxed`]: Ordering::Relaxed
593 /// [`Acquire`]: Ordering::Acquire
594 /// [`Release`]: Ordering::Release
595 /// [`SeqCst`]: Ordering::SeqCst
600 /// use crossbeam_epoch::{self as epoch, Atomic};
601 /// use std::sync::atomic::Ordering::SeqCst;
603 /// let a = Atomic::new(1234);
604 /// let guard = &epoch::pin();
606 /// let res1 = a.fetch_update(SeqCst, SeqCst, guard, |x| Some(x.with_tag(1)));
607 /// assert!(res1.is_ok());
609 /// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |x| None);
610 /// assert!(res2.is_err());
612 pub fn fetch_update
<'g
, F
>(
615 fail_order
: Ordering
,
618 ) -> Result
<Shared
<'g
, T
>, Shared
<'g
, T
>>
620 F
: FnMut(Shared
<'g
, T
>) -> Option
<Shared
<'g
, T
>>,
622 let mut prev
= self.load(fail_order
, guard
);
623 while let Some(next
) = func(prev
) {
624 match self.compare_exchange_weak(prev
, next
, set_order
, fail_order
, guard
) {
625 Ok(shared
) => return Ok(shared
),
626 Err(next_prev
) => prev
= next_prev
.current
,
632 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
633 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
634 /// same object, but with different tags, will not be considered equal.
636 /// The return value is a result indicating whether the new pointer was written. On success the
637 /// pointer that was written is returned. On failure the actual current value and `new` are
640 /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
641 /// ordering of this operation.
643 /// # Migrating to `compare_exchange`
645 /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
646 /// memory orderings:
648 /// Original | Success | Failure
649 /// -------- | ------- | -------
650 /// Relaxed | Relaxed | Relaxed
651 /// Acquire | Acquire | Acquire
652 /// Release | Release | Relaxed
653 /// AcqRel | AcqRel | Acquire
654 /// SeqCst | SeqCst | SeqCst
659 /// # #![allow(deprecated)]
660 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
661 /// use std::sync::atomic::Ordering::SeqCst;
663 /// let a = Atomic::new(1234);
665 /// let guard = &epoch::pin();
666 /// let curr = a.load(SeqCst, guard);
667 /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
668 /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
670 // TODO: remove in the next major version.
672 #[deprecated(note = "Use `compare_exchange` instead")]
673 pub fn compare_and_set
<'g
, O
, P
>(
675 current
: Shared
<'_
, T
>,
679 ) -> Result
<Shared
<'g
, T
>, CompareAndSetError
<'g
, T
, P
>>
681 O
: CompareAndSetOrdering
,
684 self.compare_exchange(current
, new
, ord
.success(), ord
.failure(), guard
)
687 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
688 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
689 /// same object, but with different tags, will not be considered equal.
691 /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
692 /// succeeds, which can result in more efficient code on some platforms. The return value is a
693 /// result indicating whether the new pointer was written. On success the pointer that was
694 /// written is returned. On failure the actual current value and `new` are returned.
696 /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
697 /// ordering of this operation.
699 /// [`compare_and_set`]: Atomic::compare_and_set
701 /// # Migrating to `compare_exchange_weak`
703 /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for
704 /// memory orderings:
706 /// Original | Success | Failure
707 /// -------- | ------- | -------
708 /// Relaxed | Relaxed | Relaxed
709 /// Acquire | Acquire | Acquire
710 /// Release | Release | Relaxed
711 /// AcqRel | AcqRel | Acquire
712 /// SeqCst | SeqCst | SeqCst
717 /// # #![allow(deprecated)]
718 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
719 /// use std::sync::atomic::Ordering::SeqCst;
721 /// let a = Atomic::new(1234);
722 /// let guard = &epoch::pin();
724 /// let mut new = Owned::new(5678);
725 /// let mut ptr = a.load(SeqCst, guard);
727 /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
733 /// ptr = err.current;
739 /// let mut curr = a.load(SeqCst, guard);
741 /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
743 /// Err(err) => curr = err.current,
747 // TODO: remove in the next major version.
749 #[deprecated(note = "Use `compare_exchange_weak` instead")]
750 pub fn compare_and_set_weak
<'g
, O
, P
>(
752 current
: Shared
<'_
, T
>,
756 ) -> Result
<Shared
<'g
, T
>, CompareAndSetError
<'g
, T
, P
>>
758 O
: CompareAndSetOrdering
,
761 self.compare_exchange_weak(current
, new
, ord
.success(), ord
.failure(), guard
)
764 /// Bitwise "and" with the current tag.
766 /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
767 /// new tag to the result. Returns the previous pointer.
769 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
775 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
776 /// use std::sync::atomic::Ordering::SeqCst;
778 /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
779 /// let guard = &epoch::pin();
780 /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
781 /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
783 pub fn fetch_and
<'g
>(&self, val
: usize, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
784 unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
787 /// Bitwise "or" with the current tag.
789 /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
790 /// new tag to the result. Returns the previous pointer.
792 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
798 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
799 /// use std::sync::atomic::Ordering::SeqCst;
801 /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
802 /// let guard = &epoch::pin();
803 /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
804 /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
806 pub fn fetch_or
<'g
>(&self, val
: usize, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
807 unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
810 /// Bitwise "xor" with the current tag.
812 /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
813 /// new tag to the result. Returns the previous pointer.
815 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
821 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
822 /// use std::sync::atomic::Ordering::SeqCst;
824 /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
825 /// let guard = &epoch::pin();
826 /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
827 /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
829 pub fn fetch_xor
<'g
>(&self, val
: usize, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
830 unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
833 /// Takes ownership of the pointee.
835 /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
836 /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
837 /// destructors of data structures.
841 /// Panics if this pointer is null, but only in debug mode.
845 /// This method may be called only if the pointer is valid and nobody else is holding a
846 /// reference to the same object.
852 /// # use crossbeam_epoch::Atomic;
853 /// struct DataStructure {
854 /// ptr: Atomic<usize>,
857 /// impl Drop for DataStructure {
858 /// fn drop(&mut self) {
859 /// // By now the DataStructure lives only in our thread and we are sure we don't hold
860 /// // any Shared or & to it ourselves.
862 /// drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
867 pub unsafe fn into_owned(self) -> Owned
<T
> {
868 #[cfg(crossbeam_loom)]
870 // FIXME: loom does not yet support into_inner, so we use unsync_load for now,
871 // which should have the same synchronization properties:
872 // https://github.com/tokio-rs/loom/issues/117
873 Owned
::from_usize(self.data
.unsync_load())
875 #[cfg(not(crossbeam_loom))]
877 Owned
::from_usize(self.data
.into_inner())
882 impl<T
: ?Sized
+ Pointable
> fmt
::Debug
for Atomic
<T
> {
883 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
884 let data
= self.data
.load(Ordering
::SeqCst
);
885 let (raw
, tag
) = decompose_tag
::<T
>(data
);
887 f
.debug_struct("Atomic")
894 impl<T
: ?Sized
+ Pointable
> fmt
::Pointer
for Atomic
<T
> {
895 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
896 let data
= self.data
.load(Ordering
::SeqCst
);
897 let (raw
, _
) = decompose_tag
::<T
>(data
);
898 fmt
::Pointer
::fmt(&(unsafe { T::deref(raw) as *const _ }
), f
)
902 impl<T
: ?Sized
+ Pointable
> Clone
for Atomic
<T
> {
903 /// Returns a copy of the atomic value.
905 /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
906 /// atomics or fences.
907 fn clone(&self) -> Self {
908 let data
= self.data
.load(Ordering
::Relaxed
);
909 Atomic
::from_usize(data
)
913 impl<T
: ?Sized
+ Pointable
> Default
for Atomic
<T
> {
914 fn default() -> Self {
919 impl<T
: ?Sized
+ Pointable
> From
<Owned
<T
>> for Atomic
<T
> {
920 /// Returns a new atomic pointer pointing to `owned`.
925 /// use crossbeam_epoch::{Atomic, Owned};
927 /// let a = Atomic::<i32>::from(Owned::new(1234));
929 fn from(owned
: Owned
<T
>) -> Self {
930 let data
= owned
.data
;
932 Self::from_usize(data
)
936 impl<T
> From
<Box
<T
>> for Atomic
<T
> {
937 fn from(b
: Box
<T
>) -> Self {
938 Self::from(Owned
::from(b
))
942 impl<T
> From
<T
> for Atomic
<T
> {
943 fn from(t
: T
) -> Self {
948 impl<'g
, T
: ?Sized
+ Pointable
> From
<Shared
<'g
, T
>> for Atomic
<T
> {
949 /// Returns a new atomic pointer pointing to `ptr`.
954 /// use crossbeam_epoch::{Atomic, Shared};
956 /// let a = Atomic::<i32>::from(Shared::<i32>::null());
958 fn from(ptr
: Shared
<'g
, T
>) -> Self {
959 Self::from_usize(ptr
.data
)
963 impl<T
> From
<*const T
> for Atomic
<T
> {
964 /// Returns a new atomic pointer pointing to `raw`.
970 /// use crossbeam_epoch::Atomic;
972 /// let a = Atomic::<i32>::from(ptr::null::<i32>());
974 fn from(raw
: *const T
) -> Self {
975 Self::from_usize(raw
as usize)
979 /// A trait for either `Owned` or `Shared` pointers.
980 pub trait Pointer
<T
: ?Sized
+ Pointable
> {
981 /// Returns the machine representation of the pointer.
982 fn into_usize(self) -> usize;
984 /// Returns a new pointer pointing to the tagged pointer `data`.
988 /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
989 /// not be converted back by `Pointer::from_usize()` multiple times.
990 unsafe fn from_usize(data
: usize) -> Self;
993 /// An owned heap-allocated object.
995 /// This type is very similar to `Box<T>`.
997 /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
998 /// least significant bits of the address.
999 pub struct Owned
<T
: ?Sized
+ Pointable
> {
1001 _marker
: PhantomData
<Box
<T
>>,
1004 impl<T
: ?Sized
+ Pointable
> Pointer
<T
> for Owned
<T
> {
1006 fn into_usize(self) -> usize {
1007 let data
= self.data
;
1012 /// Returns a new pointer pointing to the tagged pointer `data`.
1016 /// Panics if the data is zero in debug mode.
1018 unsafe fn from_usize(data
: usize) -> Self {
1019 debug_assert
!(data
!= 0, "converting zero into `Owned`");
1022 _marker
: PhantomData
,
1028 /// Returns a new owned pointer pointing to `raw`.
1030 /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
1031 /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
1032 /// the same raw pointer.
1036 /// Panics if `raw` is not properly aligned.
1040 /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
1041 /// back by `Owned::from_raw()` multiple times.
1046 /// use crossbeam_epoch::Owned;
1048 /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
1050 pub unsafe fn from_raw(raw
: *mut T
) -> Owned
<T
> {
1051 let raw
= raw
as usize;
1052 ensure_aligned
::<T
>(raw
);
1053 Self::from_usize(raw
)
1056 /// Converts the owned pointer into a `Box`.
1061 /// use crossbeam_epoch::Owned;
1063 /// let o = Owned::new(1234);
1064 /// let b: Box<i32> = o.into_box();
1065 /// assert_eq!(*b, 1234);
1067 pub fn into_box(self) -> Box
<T
> {
1068 let (raw
, _
) = decompose_tag
::<T
>(self.data
);
1070 unsafe { Box::from_raw(raw as *mut _) }
1073 /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
1078 /// use crossbeam_epoch::Owned;
1080 /// let o = Owned::new(1234);
1082 pub fn new(init
: T
) -> Owned
<T
> {
1087 impl<T
: ?Sized
+ Pointable
> Owned
<T
> {
1088 /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
1093 /// use crossbeam_epoch::Owned;
1095 /// let o = Owned::<i32>::init(1234);
1097 pub fn init(init
: T
::Init
) -> Owned
<T
> {
1098 unsafe { Self::from_usize(T::init(init)) }
1101 /// Converts the owned pointer into a [`Shared`].
1106 /// use crossbeam_epoch::{self as epoch, Owned};
1108 /// let o = Owned::new(1234);
1109 /// let guard = &epoch::pin();
1110 /// let p = o.into_shared(guard);
1112 #[allow(clippy::needless_lifetimes)]
1113 pub fn into_shared
<'g
>(self, _
: &'g Guard
) -> Shared
<'g
, T
> {
1114 unsafe { Shared::from_usize(self.into_usize()) }
1117 /// Returns the tag stored within the pointer.
1122 /// use crossbeam_epoch::Owned;
1124 /// assert_eq!(Owned::new(1234).tag(), 0);
1126 pub fn tag(&self) -> usize {
1127 let (_
, tag
) = decompose_tag
::<T
>(self.data
);
1131 /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the
1132 /// unused bits of the pointer to `T`.
1137 /// use crossbeam_epoch::Owned;
1139 /// let o = Owned::new(0u64);
1140 /// assert_eq!(o.tag(), 0);
1141 /// let o = o.with_tag(2);
1142 /// assert_eq!(o.tag(), 2);
1144 pub fn with_tag(self, tag
: usize) -> Owned
<T
> {
1145 let data
= self.into_usize();
1146 unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
1150 impl<T
: ?Sized
+ Pointable
> Drop
for Owned
<T
> {
1151 fn drop(&mut self) {
1152 let (raw
, _
) = decompose_tag
::<T
>(self.data
);
1159 impl<T
: ?Sized
+ Pointable
> fmt
::Debug
for Owned
<T
> {
1160 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
1161 let (raw
, tag
) = decompose_tag
::<T
>(self.data
);
1163 f
.debug_struct("Owned")
1170 impl<T
: Clone
> Clone
for Owned
<T
> {
1171 fn clone(&self) -> Self {
1172 Owned
::new((**self).clone()).with_tag(self.tag())
1176 impl<T
: ?Sized
+ Pointable
> Deref
for Owned
<T
> {
1179 fn deref(&self) -> &T
{
1180 let (raw
, _
) = decompose_tag
::<T
>(self.data
);
1181 unsafe { T::deref(raw) }
1185 impl<T
: ?Sized
+ Pointable
> DerefMut
for Owned
<T
> {
1186 fn deref_mut(&mut self) -> &mut T
{
1187 let (raw
, _
) = decompose_tag
::<T
>(self.data
);
1188 unsafe { T::deref_mut(raw) }
1192 impl<T
> From
<T
> for Owned
<T
> {
1193 fn from(t
: T
) -> Self {
1198 impl<T
> From
<Box
<T
>> for Owned
<T
> {
1199 /// Returns a new owned pointer pointing to `b`.
1203 /// Panics if the pointer (the `Box`) is not properly aligned.
1208 /// use crossbeam_epoch::Owned;
1210 /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
1212 fn from(b
: Box
<T
>) -> Self {
1213 unsafe { Self::from_raw(Box::into_raw(b)) }
1217 impl<T
: ?Sized
+ Pointable
> Borrow
<T
> for Owned
<T
> {
1218 fn borrow(&self) -> &T
{
1223 impl<T
: ?Sized
+ Pointable
> BorrowMut
<T
> for Owned
<T
> {
1224 fn borrow_mut(&mut self) -> &mut T
{
1229 impl<T
: ?Sized
+ Pointable
> AsRef
<T
> for Owned
<T
> {
1230 fn as_ref(&self) -> &T
{
1235 impl<T
: ?Sized
+ Pointable
> AsMut
<T
> for Owned
<T
> {
1236 fn as_mut(&mut self) -> &mut T
{
1241 /// A pointer to an object protected by the epoch GC.
1243 /// The pointer is valid for use only during the lifetime `'g`.
1245 /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
1246 /// least significant bits of the address.
1247 pub struct Shared
<'g
, T
: 'g
+ ?Sized
+ Pointable
> {
1249 _marker
: PhantomData
<(&'
g (), *const T
)>,
1252 impl<T
: ?Sized
+ Pointable
> Clone
for Shared
<'_
, T
> {
1253 fn clone(&self) -> Self {
1256 _marker
: PhantomData
,
1261 impl<T
: ?Sized
+ Pointable
> Copy
for Shared
<'_
, T
> {}
1263 impl<T
: ?Sized
+ Pointable
> Pointer
<T
> for Shared
<'_
, T
> {
1265 fn into_usize(self) -> usize {
1270 unsafe fn from_usize(data
: usize) -> Self {
1273 _marker
: PhantomData
,
1278 impl<'g
, T
> Shared
<'g
, T
> {
1279 /// Converts the pointer to a raw pointer (without the tag).
1284 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1285 /// use std::sync::atomic::Ordering::SeqCst;
1287 /// let o = Owned::new(1234);
1288 /// let raw = &*o as *const _;
1289 /// let a = Atomic::from(o);
1291 /// let guard = &epoch::pin();
1292 /// let p = a.load(SeqCst, guard);
1293 /// assert_eq!(p.as_raw(), raw);
1295 pub fn as_raw(&self) -> *const T
{
1296 let (raw
, _
) = decompose_tag
::<T
>(self.data
);
1301 impl<'g
, T
: ?Sized
+ Pointable
> Shared
<'g
, T
> {
1302 /// Returns a new null pointer.
1307 /// use crossbeam_epoch::Shared;
1309 /// let p = Shared::<i32>::null();
1310 /// assert!(p.is_null());
1312 pub fn null() -> Shared
<'g
, T
> {
1315 _marker
: PhantomData
,
1319 /// Returns `true` if the pointer is null.
1324 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1325 /// use std::sync::atomic::Ordering::SeqCst;
1327 /// let a = Atomic::null();
1328 /// let guard = &epoch::pin();
1329 /// assert!(a.load(SeqCst, guard).is_null());
1330 /// a.store(Owned::new(1234), SeqCst);
1331 /// assert!(!a.load(SeqCst, guard).is_null());
1333 pub fn is_null(&self) -> bool
{
1334 let (raw
, _
) = decompose_tag
::<T
>(self.data
);
1338 /// Dereferences the pointer.
1340 /// Returns a reference to the pointee that is valid during the lifetime `'g`.
1344 /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
1346 /// Another concern is the possibility of data races due to lack of proper synchronization.
1347 /// For example, consider the following scenario:
1349 /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
1350 /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
1352 /// The problem is that relaxed orderings don't synchronize initialization of the object with
1353 /// the read from the second thread. This is a data race. A possible solution would be to use
1354 /// `Release` and `Acquire` orderings.
1359 /// use crossbeam_epoch::{self as epoch, Atomic};
1360 /// use std::sync::atomic::Ordering::SeqCst;
1362 /// let a = Atomic::new(1234);
1363 /// let guard = &epoch::pin();
1364 /// let p = a.load(SeqCst, guard);
1366 /// assert_eq!(p.deref(), &1234);
1369 pub unsafe fn deref(&self) -> &'g T
{
1370 let (raw
, _
) = decompose_tag
::<T
>(self.data
);
1374 /// Dereferences the pointer.
1376 /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
1380 /// * There is no guarantee that there are no more threads attempting to read/write from/to the
1381 /// actual object at the same time.
1383 /// The user must know that there are no concurrent accesses towards the object itself.
1385 /// * Other than the above, all safety concerns of `deref()` applies here.
1390 /// use crossbeam_epoch::{self as epoch, Atomic};
1391 /// use std::sync::atomic::Ordering::SeqCst;
1393 /// let a = Atomic::new(vec![1, 2, 3, 4]);
1394 /// let guard = &epoch::pin();
1396 /// let mut p = a.load(SeqCst, guard);
1398 /// assert!(!p.is_null());
1399 /// let b = p.deref_mut();
1400 /// assert_eq!(b, &vec![1, 2, 3, 4]);
1402 /// assert_eq!(b, &vec![1, 2, 3, 4, 5]);
1405 /// let p = a.load(SeqCst, guard);
1407 /// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
1410 pub unsafe fn deref_mut(&mut self) -> &'g
mut T
{
1411 let (raw
, _
) = decompose_tag
::<T
>(self.data
);
1415 /// Converts the pointer to a reference.
1417 /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
1421 /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
1423 /// Another concern is the possibility of data races due to lack of proper synchronization.
1424 /// For example, consider the following scenario:
1426 /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
1427 /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
1429 /// The problem is that relaxed orderings don't synchronize initialization of the object with
1430 /// the read from the second thread. This is a data race. A possible solution would be to use
1431 /// `Release` and `Acquire` orderings.
1436 /// use crossbeam_epoch::{self as epoch, Atomic};
1437 /// use std::sync::atomic::Ordering::SeqCst;
1439 /// let a = Atomic::new(1234);
1440 /// let guard = &epoch::pin();
1441 /// let p = a.load(SeqCst, guard);
1443 /// assert_eq!(p.as_ref(), Some(&1234));
1446 pub unsafe fn as_ref(&self) -> Option
<&'g T
> {
1447 let (raw
, _
) = decompose_tag
::<T
>(self.data
);
1455 /// Takes ownership of the pointee.
1459 /// Panics if this pointer is null, but only in debug mode.
1463 /// This method may be called only if the pointer is valid and nobody else is holding a
1464 /// reference to the same object.
1469 /// use crossbeam_epoch::{self as epoch, Atomic};
1470 /// use std::sync::atomic::Ordering::SeqCst;
1472 /// let a = Atomic::new(1234);
1474 /// let guard = &epoch::unprotected();
1475 /// let p = a.load(SeqCst, guard);
1476 /// drop(p.into_owned());
1479 pub unsafe fn into_owned(self) -> Owned
<T
> {
1480 debug_assert
!(!self.is_null(), "converting a null `Shared` into `Owned`");
1481 Owned
::from_usize(self.data
)
1484 /// Returns the tag stored within the pointer.
1489 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1490 /// use std::sync::atomic::Ordering::SeqCst;
1492 /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
1493 /// let guard = &epoch::pin();
1494 /// let p = a.load(SeqCst, guard);
1495 /// assert_eq!(p.tag(), 2);
1497 pub fn tag(&self) -> usize {
1498 let (_
, tag
) = decompose_tag
::<T
>(self.data
);
1502 /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the
1503 /// unused bits of the pointer to `T`.
1508 /// use crossbeam_epoch::{self as epoch, Atomic};
1509 /// use std::sync::atomic::Ordering::SeqCst;
1511 /// let a = Atomic::new(0u64);
1512 /// let guard = &epoch::pin();
1513 /// let p1 = a.load(SeqCst, guard);
1514 /// let p2 = p1.with_tag(2);
1516 /// assert_eq!(p1.tag(), 0);
1517 /// assert_eq!(p2.tag(), 2);
1518 /// assert_eq!(p1.as_raw(), p2.as_raw());
1520 pub fn with_tag(&self, tag
: usize) -> Shared
<'g
, T
> {
1521 unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
1525 impl<T
> From
<*const T
> for Shared
<'_
, T
> {
1526 /// Returns a new pointer pointing to `raw`.
1530 /// Panics if `raw` is not properly aligned.
1535 /// use crossbeam_epoch::Shared;
1537 /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
1538 /// assert!(!p.is_null());
1540 fn from(raw
: *const T
) -> Self {
1541 let raw
= raw
as usize;
1542 ensure_aligned
::<T
>(raw
);
1543 unsafe { Self::from_usize(raw) }
1547 impl<'g
, T
: ?Sized
+ Pointable
> PartialEq
<Shared
<'g
, T
>> for Shared
<'g
, T
> {
1548 fn eq(&self, other
: &Self) -> bool
{
1549 self.data
== other
.data
1553 impl<T
: ?Sized
+ Pointable
> Eq
for Shared
<'_
, T
> {}
1555 impl<'g
, T
: ?Sized
+ Pointable
> PartialOrd
<Shared
<'g
, T
>> for Shared
<'g
, T
> {
1556 fn partial_cmp(&self, other
: &Self) -> Option
<cmp
::Ordering
> {
1557 self.data
.partial_cmp(&other
.data
)
1561 impl<T
: ?Sized
+ Pointable
> Ord
for Shared
<'_
, T
> {
1562 fn cmp(&self, other
: &Self) -> cmp
::Ordering
{
1563 self.data
.cmp(&other
.data
)
1567 impl<T
: ?Sized
+ Pointable
> fmt
::Debug
for Shared
<'_
, T
> {
1568 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
1569 let (raw
, tag
) = decompose_tag
::<T
>(self.data
);
1571 f
.debug_struct("Shared")
1578 impl<T
: ?Sized
+ Pointable
> fmt
::Pointer
for Shared
<'_
, T
> {
1579 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
1580 fmt
::Pointer
::fmt(&(unsafe { self.deref() as *const _ }
), f
)
1584 impl<T
: ?Sized
+ Pointable
> Default
for Shared
<'_
, T
> {
1585 fn default() -> Self {
#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use super::{Owned, Shared};
    use std::mem::MaybeUninit;

    // Tag values that fit the alignment of the pointee must be accepted.
    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }

    // `Atomic::null` must be usable in a `static` (const-constructible).
    #[rustversion::since(1.61)]
    #[test]
    fn const_atomic_null() {
        use super::Atomic;
        static _U: Atomic<u8> = Atomic::<u8>::null();
    }

    // `Owned::init` on a slice type must allocate the requested length.
    #[test]
    fn array_init() {
        let owned = Owned::<[MaybeUninit<usize>]>::init(10);
        let arr: &[MaybeUninit<usize>] = &*owned;
        assert_eq!(arr.len(), 10);
    }
}