use alloc::boxed::Box;

use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::sync::atomic::{AtomicUsize, Ordering};

use crossbeam_utils::atomic::AtomicConsume;

use guard::Guard;
/// Given ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
///
/// The failure ordering of a compare-exchange cannot be `Release` or `AcqRel`, so the release
/// half of the success ordering is dropped and everything else falls back to `SeqCst`.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}
26 /// The error returned on failed compare-and-set operation.
27 pub struct CompareAndSetError
<'g
, T
: 'g
, P
: Pointer
<T
>> {
28 /// The value in the atomic pointer at the time of the failed operation.
29 pub current
: Shared
<'g
, T
>,
31 /// The new value, which the operation failed to store.
35 impl<'g
, T
: 'g
, P
: Pointer
<T
> + fmt
::Debug
> fmt
::Debug
for CompareAndSetError
<'g
, T
, P
> {
36 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
37 f
.debug_struct("CompareAndSetError")
38 .field("current", &self.current
)
39 .field("new", &self.new
)
/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
    /// the success ordering.
    fn failure(&self) -> Ordering;
}
66 impl CompareAndSetOrdering
for Ordering
{
68 fn success(&self) -> Ordering
{
73 fn failure(&self) -> Ordering
{
74 strongest_failure_ordering(*self)
78 impl CompareAndSetOrdering
for (Ordering
, Ordering
) {
80 fn success(&self) -> Ordering
{
85 fn failure(&self) -> Ordering
{
90 /// Panics if the pointer is not properly unaligned.
92 fn ensure_aligned
<T
>(raw
: *const T
) {
93 assert_eq
!(raw
as usize & low_bits
::<T
>(), 0, "unaligned pointer");
/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
fn low_bits<T>() -> usize {
    (1 << mem::align_of::<T>().trailing_zeros()) - 1
}
102 /// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
104 /// `tag` is truncated to fit into the unused bits of the pointer to `T`.
106 fn data_with_tag
<T
>(data
: usize, tag
: usize) -> usize {
107 (data
& !low_bits
::<T
>()) | (tag
& low_bits
::<T
>())
110 /// Decomposes a tagged pointer `data` into the pointer and the tag.
112 fn decompose_data
<T
>(data
: usize) -> (*mut T
, usize) {
113 let raw
= (data
& !low_bits
::<T
>()) as *mut T
;
114 let tag
= data
& low_bits
::<T
>();
/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. More precisely, a tag should be less than `(1 <<
/// mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// [`Guard`]: struct.Guard.html
pub struct Atomic<T> {
    // Tagged pointer stored as a machine word.
    data: AtomicUsize,
    // `Atomic<T>` conceptually owns a `*mut T`.
    _marker: PhantomData<*mut T>,
}
132 unsafe impl<T
: Send
+ Sync
> Send
for Atomic
<T
> {}
133 unsafe impl<T
: Send
+ Sync
> Sync
for Atomic
<T
> {}
136 /// Returns a new atomic pointer pointing to the tagged pointer `data`.
137 fn from_usize(data
: usize) -> Self {
139 data
: AtomicUsize
::new(data
),
140 _marker
: PhantomData
,
144 /// Returns a new null atomic pointer.
149 /// use crossbeam_epoch::Atomic;
151 /// let a = Atomic::<i32>::null();
153 #[cfg(not(has_min_const_fn))]
154 pub fn null() -> Atomic
<T
> {
156 data
: AtomicUsize
::new(0),
157 _marker
: PhantomData
,
161 /// Returns a new null atomic pointer.
166 /// use crossbeam_epoch::Atomic;
168 /// let a = Atomic::<i32>::null();
170 #[cfg(has_min_const_fn)]
171 pub const fn null() -> Atomic
<T
> {
173 data
: AtomicUsize
::new(0),
174 _marker
: PhantomData
,
178 /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
183 /// use crossbeam_epoch::Atomic;
185 /// let a = Atomic::new(1234);
187 pub fn new(value
: T
) -> Atomic
<T
> {
188 Self::from(Owned
::new(value
))
191 /// Loads a `Shared` from the atomic pointer.
193 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
196 /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
201 /// use crossbeam_epoch::{self as epoch, Atomic};
202 /// use std::sync::atomic::Ordering::SeqCst;
204 /// let a = Atomic::new(1234);
205 /// let guard = &epoch::pin();
206 /// let p = a.load(SeqCst, guard);
208 pub fn load
<'g
>(&self, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
209 unsafe { Shared::from_usize(self.data.load(ord)) }
212 /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
214 /// This is similar to the "acquire" ordering, except that an ordering is
215 /// only guaranteed with operations that "depend on" the result of the load.
216 /// However consume loads are usually much faster than acquire loads on
217 /// architectures with a weak memory model since they don't require memory
218 /// fence instructions.
220 /// The exact definition of "depend on" is a bit vague, but it works as you
221 /// would expect in practice since a lot of software, especially the Linux
222 /// kernel, rely on this behavior.
227 /// use crossbeam_epoch::{self as epoch, Atomic};
229 /// let a = Atomic::new(1234);
230 /// let guard = &epoch::pin();
231 /// let p = a.load_consume(guard);
233 pub fn load_consume
<'g
>(&self, _
: &'g Guard
) -> Shared
<'g
, T
> {
234 unsafe { Shared::from_usize(self.data.load_consume()) }
237 /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
239 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
242 /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
247 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
248 /// use std::sync::atomic::Ordering::SeqCst;
250 /// let a = Atomic::new(1234);
251 /// a.store(Shared::null(), SeqCst);
252 /// a.store(Owned::new(1234), SeqCst);
254 pub fn store
<'g
, P
: Pointer
<T
>>(&self, new
: P
, ord
: Ordering
) {
255 self.data
.store(new
.into_usize(), ord
);
258 /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
261 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
264 /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
269 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
270 /// use std::sync::atomic::Ordering::SeqCst;
272 /// let a = Atomic::new(1234);
273 /// let guard = &epoch::pin();
274 /// let p = a.swap(Shared::null(), SeqCst, guard);
276 pub fn swap
<'g
, P
: Pointer
<T
>>(&self, new
: P
, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
277 unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
280 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
281 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
282 /// same object, but with different tags, will not be considered equal.
284 /// The return value is a result indicating whether the new pointer was written. On success the
285 /// pointer that was written is returned. On failure the actual current value and `new` are
288 /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
289 /// ordering of this operation.
291 /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
296 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
297 /// use std::sync::atomic::Ordering::SeqCst;
299 /// let a = Atomic::new(1234);
301 /// let guard = &epoch::pin();
302 /// let mut curr = a.load(SeqCst, guard);
303 /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
304 /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
306 pub fn compare_and_set
<'g
, O
, P
>(
312 ) -> Result
<Shared
<'g
, T
>, CompareAndSetError
<'g
, T
, P
>>
314 O
: CompareAndSetOrdering
,
317 let new
= new
.into_usize();
319 .compare_exchange(current
.into_usize(), new
, ord
.success(), ord
.failure())
320 .map(|_
| unsafe { Shared::from_usize(new) }
)
321 .map_err(|current
| unsafe {
323 current
: Shared
::from_usize(current
),
324 new
: P
::from_usize(new
),
329 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
330 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
331 /// same object, but with different tags, will not be considered equal.
333 /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
334 /// succeeds, which can result in more efficient code on some platforms. The return value is a
335 /// result indicating whether the new pointer was written. On success the pointer that was
336 /// written is returned. On failure the actual current value and `new` are returned.
338 /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
339 /// ordering of this operation.
341 /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set
342 /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
347 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
348 /// use std::sync::atomic::Ordering::SeqCst;
350 /// let a = Atomic::new(1234);
351 /// let guard = &epoch::pin();
353 /// let mut new = Owned::new(5678);
354 /// let mut ptr = a.load(SeqCst, guard);
356 /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
362 /// ptr = err.current;
368 /// let mut curr = a.load(SeqCst, guard);
370 /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
372 /// Err(err) => curr = err.current,
376 pub fn compare_and_set_weak
<'g
, O
, P
>(
382 ) -> Result
<Shared
<'g
, T
>, CompareAndSetError
<'g
, T
, P
>>
384 O
: CompareAndSetOrdering
,
387 let new
= new
.into_usize();
389 .compare_exchange_weak(current
.into_usize(), new
, ord
.success(), ord
.failure())
390 .map(|_
| unsafe { Shared::from_usize(new) }
)
391 .map_err(|current
| unsafe {
393 current
: Shared
::from_usize(current
),
394 new
: P
::from_usize(new
),
399 /// Bitwise "and" with the current tag.
401 /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
402 /// new tag to the result. Returns the previous pointer.
404 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
407 /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
412 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
413 /// use std::sync::atomic::Ordering::SeqCst;
415 /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
416 /// let guard = &epoch::pin();
417 /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
418 /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
420 pub fn fetch_and
<'g
>(&self, val
: usize, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
421 unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
424 /// Bitwise "or" with the current tag.
426 /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
427 /// new tag to the result. Returns the previous pointer.
429 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
432 /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
437 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
438 /// use std::sync::atomic::Ordering::SeqCst;
440 /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
441 /// let guard = &epoch::pin();
442 /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
443 /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
445 pub fn fetch_or
<'g
>(&self, val
: usize, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
446 unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
449 /// Bitwise "xor" with the current tag.
451 /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
452 /// new tag to the result. Returns the previous pointer.
454 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
457 /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
462 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
463 /// use std::sync::atomic::Ordering::SeqCst;
465 /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
466 /// let guard = &epoch::pin();
467 /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
468 /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
470 pub fn fetch_xor
<'g
>(&self, val
: usize, ord
: Ordering
, _
: &'g Guard
) -> Shared
<'g
, T
> {
471 unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
474 /// Takes ownership of the pointee.
476 /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
477 /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
478 /// destructors of data structures.
482 /// Panics if this pointer is null, but only in debug mode.
486 /// This method may be called only if the pointer is valid and nobody else is holding a
487 /// reference to the same object.
493 /// # use crossbeam_epoch::Atomic;
494 /// struct DataStructure {
495 /// ptr: Atomic<usize>,
498 /// impl Drop for DataStructure {
499 /// fn drop(&mut self) {
500 /// // By now the DataStructure lives only in our thread and we are sure we don't hold
501 /// // any Shared or & to it ourselves.
503 /// drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
508 pub unsafe fn into_owned(self) -> Owned
<T
> {
509 Owned
::from_usize(self.data
.into_inner())
513 impl<T
> fmt
::Debug
for Atomic
<T
> {
514 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
515 let data
= self.data
.load(Ordering
::SeqCst
);
516 let (raw
, tag
) = decompose_data
::<T
>(data
);
518 f
.debug_struct("Atomic")
525 impl<T
> fmt
::Pointer
for Atomic
<T
> {
526 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
527 let data
= self.data
.load(Ordering
::SeqCst
);
528 let (raw
, _
) = decompose_data
::<T
>(data
);
529 fmt
::Pointer
::fmt(&raw
, f
)
533 impl<T
> Clone
for Atomic
<T
> {
534 /// Returns a copy of the atomic value.
536 /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
537 /// atomics or fences.
538 fn clone(&self) -> Self {
539 let data
= self.data
.load(Ordering
::Relaxed
);
540 Atomic
::from_usize(data
)
544 impl<T
> Default
for Atomic
<T
> {
545 fn default() -> Self {
550 impl<T
> From
<Owned
<T
>> for Atomic
<T
> {
551 /// Returns a new atomic pointer pointing to `owned`.
556 /// use crossbeam_epoch::{Atomic, Owned};
558 /// let a = Atomic::<i32>::from(Owned::new(1234));
560 fn from(owned
: Owned
<T
>) -> Self {
561 let data
= owned
.data
;
563 Self::from_usize(data
)
567 impl<T
> From
<Box
<T
>> for Atomic
<T
> {
568 fn from(b
: Box
<T
>) -> Self {
569 Self::from(Owned
::from(b
))
573 impl<T
> From
<T
> for Atomic
<T
> {
574 fn from(t
: T
) -> Self {
579 impl<'g
, T
> From
<Shared
<'g
, T
>> for Atomic
<T
> {
580 /// Returns a new atomic pointer pointing to `ptr`.
585 /// use crossbeam_epoch::{Atomic, Shared};
587 /// let a = Atomic::<i32>::from(Shared::<i32>::null());
589 fn from(ptr
: Shared
<'g
, T
>) -> Self {
590 Self::from_usize(ptr
.data
)
594 impl<T
> From
<*const T
> for Atomic
<T
> {
595 /// Returns a new atomic pointer pointing to `raw`.
601 /// use crossbeam_epoch::Atomic;
603 /// let a = Atomic::<i32>::from(ptr::null::<i32>());
605 fn from(raw
: *const T
) -> Self {
606 Self::from_usize(raw
as usize)
/// A trait for either `Owned` or `Shared` pointers.
pub trait Pointer<T> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    unsafe fn from_usize(data: usize) -> Self;
}
/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Owned<T> {
    // Tagged pointer stored as a machine word.
    data: usize,
    // `Owned<T>` conceptually owns a `Box<T>`.
    _marker: PhantomData<Box<T>>,
}
630 impl<T
> Pointer
<T
> for Owned
<T
> {
632 fn into_usize(self) -> usize {
633 let data
= self.data
;
638 /// Returns a new pointer pointing to the tagged pointer `data`.
642 /// Panics if the data is zero in debug mode.
644 unsafe fn from_usize(data
: usize) -> Self {
645 debug_assert
!(data
!= 0, "converting zero into `Owned`");
648 _marker
: PhantomData
,
654 /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
659 /// use crossbeam_epoch::Owned;
661 /// let o = Owned::new(1234);
663 pub fn new(value
: T
) -> Owned
<T
> {
664 Self::from(Box
::new(value
))
667 /// Returns a new owned pointer pointing to `raw`.
669 /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
670 /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
671 /// the same raw pointer.
675 /// Panics if `raw` is not properly aligned.
680 /// use crossbeam_epoch::Owned;
682 /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
684 pub unsafe fn from_raw(raw
: *mut T
) -> Owned
<T
> {
686 Self::from_usize(raw
as usize)
689 /// Converts the owned pointer into a [`Shared`].
694 /// use crossbeam_epoch::{self as epoch, Owned};
696 /// let o = Owned::new(1234);
697 /// let guard = &epoch::pin();
698 /// let p = o.into_shared(guard);
701 /// [`Shared`]: struct.Shared.html
702 pub fn into_shared
<'g
>(self, _
: &'g Guard
) -> Shared
<'g
, T
> {
703 unsafe { Shared::from_usize(self.into_usize()) }
706 /// Converts the owned pointer into a `Box`.
711 /// use crossbeam_epoch::{self as epoch, Owned};
713 /// let o = Owned::new(1234);
714 /// let b: Box<i32> = o.into_box();
715 /// assert_eq!(*b, 1234);
717 pub fn into_box(self) -> Box
<T
> {
718 let (raw
, _
) = decompose_data
::<T
>(self.data
);
720 unsafe { Box::from_raw(raw) }
723 /// Returns the tag stored within the pointer.
728 /// use crossbeam_epoch::Owned;
730 /// assert_eq!(Owned::new(1234).tag(), 0);
732 pub fn tag(&self) -> usize {
733 let (_
, tag
) = decompose_data
::<T
>(self.data
);
737 /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the
738 /// unused bits of the pointer to `T`.
743 /// use crossbeam_epoch::Owned;
745 /// let o = Owned::new(0u64);
746 /// assert_eq!(o.tag(), 0);
747 /// let o = o.with_tag(2);
748 /// assert_eq!(o.tag(), 2);
750 pub fn with_tag(self, tag
: usize) -> Owned
<T
> {
751 let data
= self.into_usize();
752 unsafe { Self::from_usize(data_with_tag::<T>(data, tag)) }
756 impl<T
> Drop
for Owned
<T
> {
758 let (raw
, _
) = decompose_data
::<T
>(self.data
);
760 drop(Box
::from_raw(raw
));
765 impl<T
> fmt
::Debug
for Owned
<T
> {
766 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
767 let (raw
, tag
) = decompose_data
::<T
>(self.data
);
769 f
.debug_struct("Owned")
776 impl<T
: Clone
> Clone
for Owned
<T
> {
777 fn clone(&self) -> Self {
778 Owned
::new((**self).clone()).with_tag(self.tag())
782 impl<T
> Deref
for Owned
<T
> {
785 fn deref(&self) -> &T
{
786 let (raw
, _
) = decompose_data
::<T
>(self.data
);
791 impl<T
> DerefMut
for Owned
<T
> {
792 fn deref_mut(&mut self) -> &mut T
{
793 let (raw
, _
) = decompose_data
::<T
>(self.data
);
798 impl<T
> From
<T
> for Owned
<T
> {
799 fn from(t
: T
) -> Self {
804 impl<T
> From
<Box
<T
>> for Owned
<T
> {
805 /// Returns a new owned pointer pointing to `b`.
809 /// Panics if the pointer (the `Box`) is not properly aligned.
814 /// use crossbeam_epoch::Owned;
816 /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
818 fn from(b
: Box
<T
>) -> Self {
819 unsafe { Self::from_raw(Box::into_raw(b)) }
823 impl<T
> Borrow
<T
> for Owned
<T
> {
824 fn borrow(&self) -> &T
{
829 impl<T
> BorrowMut
<T
> for Owned
<T
> {
830 fn borrow_mut(&mut self) -> &mut T
{
835 impl<T
> AsRef
<T
> for Owned
<T
> {
836 fn as_ref(&self) -> &T
{
841 impl<T
> AsMut
<T
> for Owned
<T
> {
842 fn as_mut(&mut self) -> &mut T
{
/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Shared<'g, T: 'g> {
    // Tagged pointer stored as a machine word.
    data: usize,
    // Ties the pointer to the guard lifetime `'g` without owning the pointee.
    _marker: PhantomData<(&'g (), *const T)>,
}
858 impl<'g
, T
> Clone
for Shared
<'g
, T
> {
859 fn clone(&self) -> Self {
862 _marker
: PhantomData
,
867 impl<'g
, T
> Copy
for Shared
<'g
, T
> {}
869 impl<'g
, T
> Pointer
<T
> for Shared
<'g
, T
> {
871 fn into_usize(self) -> usize {
876 unsafe fn from_usize(data
: usize) -> Self {
879 _marker
: PhantomData
,
884 impl<'g
, T
> Shared
<'g
, T
> {
885 /// Returns a new null pointer.
890 /// use crossbeam_epoch::Shared;
892 /// let p = Shared::<i32>::null();
893 /// assert!(p.is_null());
895 pub fn null() -> Shared
<'g
, T
> {
898 _marker
: PhantomData
,
902 /// Returns `true` if the pointer is null.
907 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
908 /// use std::sync::atomic::Ordering::SeqCst;
910 /// let a = Atomic::null();
911 /// let guard = &epoch::pin();
912 /// assert!(a.load(SeqCst, guard).is_null());
913 /// a.store(Owned::new(1234), SeqCst);
914 /// assert!(!a.load(SeqCst, guard).is_null());
916 pub fn is_null(&self) -> bool
{
917 self.as_raw().is_null()
920 /// Converts the pointer to a raw pointer (without the tag).
925 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
926 /// use std::sync::atomic::Ordering::SeqCst;
928 /// let o = Owned::new(1234);
929 /// let raw = &*o as *const _;
930 /// let a = Atomic::from(o);
932 /// let guard = &epoch::pin();
933 /// let p = a.load(SeqCst, guard);
934 /// assert_eq!(p.as_raw(), raw);
936 pub fn as_raw(&self) -> *const T
{
937 let (raw
, _
) = decompose_data
::<T
>(self.data
);
941 /// Dereferences the pointer.
943 /// Returns a reference to the pointee that is valid during the lifetime `'g`.
947 /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
949 /// Another concern is the possiblity of data races due to lack of proper synchronization.
950 /// For example, consider the following scenario:
952 /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
953 /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
955 /// The problem is that relaxed orderings don't synchronize initialization of the object with
956 /// the read from the second thread. This is a data race. A possible solution would be to use
957 /// `Release` and `Acquire` orderings.
962 /// use crossbeam_epoch::{self as epoch, Atomic};
963 /// use std::sync::atomic::Ordering::SeqCst;
965 /// let a = Atomic::new(1234);
966 /// let guard = &epoch::pin();
967 /// let p = a.load(SeqCst, guard);
969 /// assert_eq!(p.deref(), &1234);
972 pub unsafe fn deref(&self) -> &'g T
{
976 /// Dereferences the pointer.
978 /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
982 /// * There is no guarantee that there are no more threads attempting to read/write from/to the
983 /// actual object at the same time.
985 /// The user must know that there are no concurrent accesses towards the object itself.
987 /// * Other than the above, all safety concerns of `deref()` applies here.
992 /// use crossbeam_epoch::{self as epoch, Atomic};
993 /// use std::sync::atomic::Ordering::SeqCst;
995 /// let a = Atomic::new(vec![1, 2, 3, 4]);
996 /// let guard = &epoch::pin();
998 /// let mut p = a.load(SeqCst, guard);
1000 /// assert!(!p.is_null());
1001 /// let b = p.deref_mut();
1002 /// assert_eq!(b, &vec![1, 2, 3, 4]);
1004 /// assert_eq!(b, &vec![1, 2, 3, 4, 5]);
1007 /// let p = a.load(SeqCst, guard);
1009 /// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
1012 pub unsafe fn deref_mut(&mut self) -> &'g
mut T
{
1013 &mut *(self.as_raw() as *mut T
)
1016 /// Converts the pointer to a reference.
1018 /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
1022 /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
1024 /// Another concern is the possiblity of data races due to lack of proper synchronization.
1025 /// For example, consider the following scenario:
1027 /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
1028 /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
1030 /// The problem is that relaxed orderings don't synchronize initialization of the object with
1031 /// the read from the second thread. This is a data race. A possible solution would be to use
1032 /// `Release` and `Acquire` orderings.
1037 /// use crossbeam_epoch::{self as epoch, Atomic};
1038 /// use std::sync::atomic::Ordering::SeqCst;
1040 /// let a = Atomic::new(1234);
1041 /// let guard = &epoch::pin();
1042 /// let p = a.load(SeqCst, guard);
1044 /// assert_eq!(p.as_ref(), Some(&1234));
1047 pub unsafe fn as_ref(&self) -> Option
<&'g T
> {
1048 self.as_raw().as_ref()
1051 /// Takes ownership of the pointee.
1055 /// Panics if this pointer is null, but only in debug mode.
1059 /// This method may be called only if the pointer is valid and nobody else is holding a
1060 /// reference to the same object.
1065 /// use crossbeam_epoch::{self as epoch, Atomic};
1066 /// use std::sync::atomic::Ordering::SeqCst;
1068 /// let a = Atomic::new(1234);
1070 /// let guard = &epoch::unprotected();
1071 /// let p = a.load(SeqCst, guard);
1072 /// drop(p.into_owned());
1075 pub unsafe fn into_owned(self) -> Owned
<T
> {
1077 self.as_raw() != ptr
::null(),
1078 "converting a null `Shared` into `Owned`"
1080 Owned
::from_usize(self.data
)
1083 /// Returns the tag stored within the pointer.
1088 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1089 /// use std::sync::atomic::Ordering::SeqCst;
1091 /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
1092 /// let guard = &epoch::pin();
1093 /// let p = a.load(SeqCst, guard);
1094 /// assert_eq!(p.tag(), 2);
1096 pub fn tag(&self) -> usize {
1097 let (_
, tag
) = decompose_data
::<T
>(self.data
);
1101 /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the
1102 /// unused bits of the pointer to `T`.
1107 /// use crossbeam_epoch::{self as epoch, Atomic};
1108 /// use std::sync::atomic::Ordering::SeqCst;
1110 /// let a = Atomic::new(0u64);
1111 /// let guard = &epoch::pin();
1112 /// let p1 = a.load(SeqCst, guard);
1113 /// let p2 = p1.with_tag(2);
1115 /// assert_eq!(p1.tag(), 0);
1116 /// assert_eq!(p2.tag(), 2);
1117 /// assert_eq!(p1.as_raw(), p2.as_raw());
1119 pub fn with_tag(&self, tag
: usize) -> Shared
<'g
, T
> {
1120 unsafe { Self::from_usize(data_with_tag::<T>(self.data, tag)) }
1124 impl<'g
, T
> From
<*const T
> for Shared
<'g
, T
> {
1125 /// Returns a new pointer pointing to `raw`.
1129 /// Panics if `raw` is not properly aligned.
1134 /// use crossbeam_epoch::Shared;
1136 /// let p = unsafe { Shared::from(Box::into_raw(Box::new(1234)) as *const _) };
1137 /// assert!(!p.is_null());
1139 fn from(raw
: *const T
) -> Self {
1140 ensure_aligned(raw
);
1141 unsafe { Self::from_usize(raw as usize) }
1145 impl<'g
, T
> PartialEq
<Shared
<'g
, T
>> for Shared
<'g
, T
> {
1146 fn eq(&self, other
: &Self) -> bool
{
1147 self.data
== other
.data
1151 impl<'g
, T
> Eq
for Shared
<'g
, T
> {}
1153 impl<'g
, T
> PartialOrd
<Shared
<'g
, T
>> for Shared
<'g
, T
> {
1154 fn partial_cmp(&self, other
: &Self) -> Option
<cmp
::Ordering
> {
1155 self.data
.partial_cmp(&other
.data
)
1159 impl<'g
, T
> Ord
for Shared
<'g
, T
> {
1160 fn cmp(&self, other
: &Self) -> cmp
::Ordering
{
1161 self.data
.cmp(&other
.data
)
1165 impl<'g
, T
> fmt
::Debug
for Shared
<'g
, T
> {
1166 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
1167 let (raw
, tag
) = decompose_data
::<T
>(self.data
);
1169 f
.debug_struct("Shared")
1176 impl<'g
, T
> fmt
::Pointer
for Shared
<'g
, T
> {
1177 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
1178 fmt
::Pointer
::fmt(&self.as_raw(), f
)
1182 impl<'g
, T
> Default
for Shared
<'g
, T
> {
1183 fn default() -> Self {
1194 Shared
::<i8>::null().with_tag(0);
1198 fn valid_tag_i64() {
1199 Shared
::<i64>::null().with_tag(7);