use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::slice;
use core::sync::atomic::{AtomicUsize, Ordering};

use crate::alloc::alloc;
use crate::alloc::boxed::Box;
use crate::guard::Guard;
use const_fn::const_fn;
use crossbeam_utils::atomic::AtomicConsume;

/// Given ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}

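// For instance (illustrative only): a `SeqCst` success ordering keeps `SeqCst` on
// failure, while `Release` falls back to `Relaxed`, since a failed
// compare-exchange performs no store and may not use a release ordering:
//
//     assert_eq!(strongest_failure_ordering(Ordering::SeqCst), Ordering::SeqCst);
//     assert_eq!(strongest_failure_ordering(Ordering::Release), Ordering::Relaxed);
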
/// The error returned on failed compare-and-set operation.
pub struct CompareAndSetError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    pub new: P,
}

impl<'g, T: 'g, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CompareAndSetError")
            .field("current", &self.current)
            .field("new", &self.new)
            .finish()
    }
}

/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
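///
/// For example (a minimal illustration of both forms):
///
/// ```
/// use crossbeam_epoch::CompareAndSetOrdering;
/// use std::sync::atomic::Ordering;
///
/// // One ordering: the failure ordering is derived automatically.
/// assert_eq!(Ordering::AcqRel.success(), Ordering::AcqRel);
/// assert_eq!(Ordering::AcqRel.failure(), Ordering::Acquire);
///
/// // A pair: success and failure orderings are given explicitly.
/// assert_eq!((Ordering::AcqRel, Ordering::Relaxed).failure(), Ordering::Relaxed);
/// ```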
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
    /// the success ordering.
    fn failure(&self) -> Ordering;
}

impl CompareAndSetOrdering for Ordering {
    #[inline]
    fn success(&self) -> Ordering {
        *self
    }

    #[inline]
    fn failure(&self) -> Ordering {
        strongest_failure_ordering(*self)
    }
}

impl CompareAndSetOrdering for (Ordering, Ordering) {
    #[inline]
    fn success(&self) -> Ordering {
        self.0
    }

    #[inline]
    fn failure(&self) -> Ordering {
        self.1
    }
}

/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
fn low_bits<T: ?Sized + Pointable>() -> usize {
    (1 << T::ALIGN.trailing_zeros()) - 1
}

/// Panics if the pointer is not properly aligned.
#[inline]
fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
    assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
}

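// Illustrative values, assuming `u64` has alignment 8 (true on typical 64-bit
// targets): `low_bits::<u64>()` is `0b111`, so `ensure_aligned::<u64>` accepts
// an address like 0x1000 and panics on 0x1001.
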
/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
///
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
#[inline]
fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
}

/// Decomposes a tagged pointer `data` into the pointer and the tag.
#[inline]
fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
    (data & !low_bits::<T>(), data & low_bits::<T>())
}

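// A minimal illustration of tag round-tripping, assuming a pointee whose
// alignment is 8 (as `u64` is on typical 64-bit targets), so the low three
// bits of the address are free:
//
//     let data = compose_tag::<u64>(0x1000, 0b101); // 0x1005
//     assert_eq!(decompose_tag::<u64>(data), (0x1000, 0b101));
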
/// Types that are pointed to by a single word.
///
/// In concurrent programming, it is necessary to represent an object within a word because atomic
/// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait
/// qualifies such types that are pointed to by a single word.
///
/// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is
/// allocated on the heap and owned by a single-word pointer. This trait is also implemented for
/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of
/// array size and elements.
///
/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In
/// particular, Crossbeam supports dynamically sized slices as follows.
///
/// ```
/// use std::mem::MaybeUninit;
/// use crossbeam_epoch::Owned;
///
/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
/// ```
pub trait Pointable {
    /// The alignment of pointer.
    const ALIGN: usize;

    /// The type for initializers.
    type Init;

    /// Initializes a pointer to an object with the given initializer and returns its address.
    ///
    /// # Safety
    ///
    /// The result should be a multiple of `ALIGN`.
    unsafe fn init(init: Self::Init) -> usize;

    /// Dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
    unsafe fn deref<'a>(ptr: usize) -> &'a Self;

    /// Mutably dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;

    /// Drops the object pointed to by the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn drop(ptr: usize);
}

impl<T> Pointable for T {
    const ALIGN: usize = mem::align_of::<T>();

    type Init = T;

    unsafe fn init(init: Self::Init) -> usize {
        Box::into_raw(Box::new(init)) as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        &*(ptr as *const T)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        &mut *(ptr as *mut T)
    }

    unsafe fn drop(ptr: usize) {
        drop(Box::from_raw(ptr as *mut T));
    }
}

/// An array consisting of a size and the elements that follow it:
///
/// ```text
/// ------------------------------------
/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
/// ------------------------------------
/// ```
///
/// Its memory layout is different from that of `Box<[T]>` in that the size is in the allocation
/// (not alongside the pointer as in `Box<[T]>`).
///
/// Elements are not present in the type, but they will be in the allocation.
// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use
// [`alloc::alloc::Layout::extend`] instead.
#[repr(C)]
struct Array<T> {
    size: usize,
    elements: [MaybeUninit<T>; 0],
}

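// A sketch of the layout arithmetic (illustrative, for typical 64-bit targets):
// an `Array<u64>` holding 7 elements occupies
// `size_of::<Array<u64>>() + 7 * size_of::<MaybeUninit<u64>>()` bytes,
// i.e. 8 + 56 = 64, with the size header followed directly by the elements.
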
impl<T> Pointable for [MaybeUninit<T>] {
    const ALIGN: usize = mem::align_of::<Array<T>>();

    type Init = usize;

    unsafe fn init(size: Self::Init) -> usize {
        // `size` is the number of elements; compute the allocation size in
        // bytes under a separate name so the element count isn't shadowed.
        let byte_size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * size;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(byte_size, align).unwrap();
        let ptr = alloc::alloc(layout) as *mut Array<T>;
        // Store the element count so `deref` and `drop` can reconstruct the
        // slice and the layout.
        (*ptr).size = size;
        ptr as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        let array = &*(ptr as *const Array<T>);
        slice::from_raw_parts(array.elements.as_ptr() as *const _, array.size)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        let array = &mut *(ptr as *mut Array<T>);
        slice::from_raw_parts_mut(array.elements.as_mut_ptr() as *mut _, array.size)
    }

    unsafe fn drop(ptr: usize) {
        let array = &*(ptr as *mut Array<T>);
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.size;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        alloc::dealloc(ptr as *mut u8, layout);
    }
}

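// A sketch of using the slice implementation end to end (illustrative):
//
//     let o = Owned::<[MaybeUninit<i32>]>::init(3); // allocate [i32; 3], uninitialized
//     assert_eq!(o.len(), 3);                       // `deref` recovers the length
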
/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// Crossbeam supports dynamically sized types. See [`Pointable`] for details.
pub struct Atomic<T: ?Sized + Pointable> {
    data: AtomicUsize,
    _marker: PhantomData<*mut T>,
}

unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}

impl<T> Atomic<T> {
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// ```
    pub fn new(init: T) -> Atomic<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Atomic<T> {
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Atomic<T> {
        Self::from(Owned::init(init))
    }

    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    fn from_usize(data: usize) -> Self {
        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[const_fn(feature = "nightly")]
    pub const fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load(ord)) }
    }

    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load_consume(guard);
    /// ```
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load_consume()) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// ```
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange(current.into_usize(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: Atomic::compare_and_set
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }

    /// Takes ownership of the pointee.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
    /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
    /// destructors of data structures.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any Shared or &Atomic to it ourselves.
    ///         unsafe {
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
    ///         }
    ///     }
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        Owned::from_usize(self.data.into_inner())
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_tag::<T>(data);

        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_tag::<T>(data);
        fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
    /// atomics or fences.
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
        Atomic::from_usize(data)
    }
}

impl<T: ?Sized + Pointable> Default for Atomic<T> {
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// ```
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        // Forget `owned` so its destructor doesn't free the allocation that
        // the new `Atomic` now points to.
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
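///
/// For example, a round trip through the machine representation looks like this
/// (a minimal illustration; the `usize` must be converted back exactly once):
///
/// ```
/// use crossbeam_epoch::{Owned, Pointer};
///
/// let data = Owned::new(1234).into_usize();
/// // Safety: `data` came from `into_usize` and is converted back exactly once.
/// let o: Owned<i32> = unsafe { Pointer::from_usize(data) };
/// assert_eq!(*o, 1234);
/// ```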
pub trait Pointer<T: ?Sized + Pointable> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Safety
    ///
    /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
    /// not be converted back by `Pointer::from_usize()` multiple times.
    unsafe fn from_usize(data: usize) -> Self;
}

/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Owned<T: ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<Box<T>>,
}

impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
    #[inline]
    fn into_usize(self) -> usize {
        let data = self.data;
        // Forget `self` so its destructor doesn't run; ownership is
        // transferred through the returned `usize`.
        mem::forget(self);
        data
    }

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Panics
    ///
    /// Panics if the data is zero in debug mode.
    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data,
            _marker: PhantomData,
        }
    }
}

impl<T> Owned<T> {
    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Safety
    ///
    /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
    /// back by `Owned::from_raw()` multiple times.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        Self::from_usize(raw)
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        // Forget `self` so its destructor doesn't free the allocation now
        // owned by the returned `Box`.
        mem::forget(self);
        unsafe { Box::from_raw(raw as *mut _) }
    }

    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    pub fn new(init: T) -> Owned<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Owned<T> {
    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Owned<T> {
        unsafe { Self::from_usize(T::init(init)) }
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// ```
    #[allow(clippy::needless_lifetimes)]
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(2);
    /// assert_eq!(o.tag(), 2);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        let data = self.into_usize();
        unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
    }
}

impl<T: ?Sized + Pointable> Drop for Owned<T> {
    fn drop(&mut self) {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe {
            T::drop(raw);
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Owned")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: Clone> Clone for Owned<T> {
    fn clone(&self) -> Self {
        Owned::new((**self).clone()).with_tag(self.tag())
    }
}

impl<T: ?Sized + Pointable> Deref for Owned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref(raw) }
    }
}

impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
    fn deref_mut(&mut self) -> &mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref_mut(raw) }
    }
}

impl<T> From<T> for Owned<T> {
    fn from(t: T) -> Self {
        Owned::new(t)
    }
}

impl<T> From<Box<T>> for Owned<T> {
    /// Returns a new owned pointer pointing to `b`.
    ///
    /// # Panics
    ///
    /// Panics if the pointer (the `Box`) is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    fn from(b: Box<T>) -> Self {
        unsafe { Self::from_raw(Box::into_raw(b)) }
    }
}

impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
    fn borrow(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
    fn borrow_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
    fn as_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<(&'g (), *const T)>,
}

impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
    fn clone(&self) -> Self {
        Shared {
            data: self.data,
            _marker: PhantomData,
        }
    }
}

impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}

impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
    #[inline]
    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Shared<'g, T> {
    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw as *const _
    }
}

impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn is_null(&self) -> bool {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw == 0
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    #[allow(clippy::should_implement_trait)]
    pub unsafe fn deref(&self) -> &'g T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref(raw)
    }

    /// Dereferences the pointer.
    ///
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// * There is no guarantee that there are no more threads attempting to read/write from/to the
    ///   actual object at the same time.
    ///
    ///   The user must know that there are no concurrent accesses towards the object itself.
    ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// ```
    #[allow(clippy::should_implement_trait)]
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref_mut(raw)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        if raw == 0 {
            None
        } else {
            Some(T::deref(raw))
        }
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
        Owned::from_usize(self.data)
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
    }
}

impl<T> From<*const T> for Shared<'_, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
    /// assert!(!p.is_null());
    /// ```
    fn from(raw: *const T) -> Self {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        unsafe { Self::from_usize(raw) }
    }
}

impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}

impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(test)]
mod tests {
    use super::Shared;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }
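
    // Illustrative addition: assumes `u64` has alignment 8 (true on the
    // targets we test), so the low three bits are free for tags and a tag of 5
    // must survive a `with_tag`/`tag` round trip unchanged.
    #[test]
    fn tag_round_trip_u64() {
        let p = Shared::<u64>::null().with_tag(5);
        assert_eq!(p.tag(), 5);
    }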

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }
}