use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::slice;
use core::sync::atomic::Ordering;

use crate::alloc::alloc;
use crate::alloc::boxed::Box;
use crate::guard::Guard;
use crate::primitive::sync::atomic::AtomicUsize;
use crossbeam_utils::atomic::AtomicConsume;

/// Given ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}
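
// A quick illustration of the mapping above (a sketch; these equalities are
// implied by the `match`, not checked by any test in this file):
//
//     strongest_failure_ordering(Relaxed) == Relaxed
//     strongest_failure_ordering(Release) == Relaxed   // Release applies to stores only
//     strongest_failure_ordering(Acquire) == Acquire
//     strongest_failure_ordering(AcqRel)  == Acquire
//     strongest_failure_ordering(SeqCst)  == SeqCst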

/// The error returned on failed compare-and-set operation.
// TODO: remove in the next major version.
#[deprecated(note = "Use `CompareExchangeError` instead")]
pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>;

/// The error returned on failed compare-and-swap operation.
pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    pub new: P,
}

impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CompareExchangeError")
            .field("current", &self.current)
            .field("new", &self.new)
            .finish()
    }
}

/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
// TODO: remove in the next major version.
#[deprecated(
    note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
            use `compare_exchange` or `compare_exchange_weak` instead"
)]
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
    /// the success ordering.
    fn failure(&self) -> Ordering;
}

#[allow(deprecated)]
impl CompareAndSetOrdering for Ordering {
    #[inline]
    fn success(&self) -> Ordering {
        *self
    }

    #[inline]
    fn failure(&self) -> Ordering {
        strongest_failure_ordering(*self)
    }
}

#[allow(deprecated)]
impl CompareAndSetOrdering for (Ordering, Ordering) {
    #[inline]
    fn success(&self) -> Ordering {
        self.0
    }

    #[inline]
    fn failure(&self) -> Ordering {
        self.1
    }
}
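
// An illustrative equivalence (a sketch using the deprecated API): with the
// impls above, a single `Ordering` and an explicit pair can resolve to the
// same success/failure orderings, so these two calls behave identically:
//
//     a.compare_and_set(curr, new, SeqCst, guard)
//     a.compare_and_set(curr, new, (SeqCst, SeqCst), guard)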

/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
fn low_bits<T: ?Sized + Pointable>() -> usize {
    (1 << T::ALIGN.trailing_zeros()) - 1
}

/// Panics if the pointer is not properly aligned.
#[inline]
fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
    assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
}

/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
///
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
#[inline]
fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
}

/// Decomposes a tagged pointer `data` into the pointer and the tag.
#[inline]
fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
    (data & !low_bits::<T>(), data & low_bits::<T>())
}
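
// Tagging arithmetic, illustrated (a sketch; `u64` stands in for any type with
// alignment 8, so `low_bits::<u64>() == 0b111`):
//
//     let data = 0x1000;                              // an aligned address
//     let tagged = compose_tag::<u64>(data, 0b101);   // == 0x1005
//     assert_eq!(decompose_tag::<u64>(tagged), (0x1000, 0b101));
//     // A tag wider than the alignment allows is truncated:
//     assert_eq!(compose_tag::<u64>(data, 0b11111), data | 0b111);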

/// Types that are pointed to by a single word.
///
/// In concurrent programming, it is necessary to represent an object within a word because atomic
/// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait
/// qualifies such types that are pointed to by a single word.
///
/// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is
/// allocated on the heap and owned by a single-word pointer. This trait is also implemented for
/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
/// size and elements.
///
/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In
/// particular, Crossbeam supports dynamically sized slices as follows.
///
/// ```
/// use std::mem::MaybeUninit;
/// use crossbeam_epoch::Owned;
///
/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
/// ```
pub trait Pointable {
    /// The alignment of the pointer.
    const ALIGN: usize;

    /// The type for initializers.
    type Init;

    /// Initializes an object with the given initializer.
    ///
    /// # Safety
    ///
    /// The result should be a multiple of `ALIGN`.
    unsafe fn init(init: Self::Init) -> usize;

    /// Dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
    unsafe fn deref<'a>(ptr: usize) -> &'a Self;

    /// Mutably dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;

    /// Drops the object pointed to by the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn drop(ptr: usize);
}

impl<T> Pointable for T {
    const ALIGN: usize = mem::align_of::<T>();

    type Init = T;

    unsafe fn init(init: Self::Init) -> usize {
        Box::into_raw(Box::new(init)) as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        &*(ptr as *const T)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        &mut *(ptr as *mut T)
    }

    unsafe fn drop(ptr: usize) {
        drop(Box::from_raw(ptr as *mut T));
    }
}
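
// For a sized `T`, the impl above is a thin wrapper around `Box`; an
// illustrative round trip (a sketch, mirroring the safety contract above):
//
//     let p = unsafe { <i32 as Pointable>::init(7) };          // Box::into_raw as usize
//     assert_eq!(unsafe { <i32 as Pointable>::deref(p) }, &7);
//     unsafe { <i32 as Pointable>::drop(p) };                  // Box::from_raw, then drop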

/// Array with size.
///
/// # Memory layout
///
/// An array consisting of size and elements:
///
/// ```text
///          elements
///          |
///          |
/// ------------------------------------
/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
/// ------------------------------------
/// ```
///
/// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not
/// along with pointer as in `Box<[T]>`).
///
/// Elements are not present in the type, but they will be in the allocation.
// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use
// [`alloc::alloc::Layout::extend`] instead.
#[repr(C)]
struct Array<T> {
    /// The number of elements (not the number of bytes).
    len: usize,
    elements: [MaybeUninit<T>; 0],
}

impl<T> Pointable for [MaybeUninit<T>] {
    const ALIGN: usize = mem::align_of::<Array<T>>();

    type Init = usize;

    unsafe fn init(len: Self::Init) -> usize {
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * len;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        let ptr = alloc::alloc(layout) as *mut Array<T>;
        if ptr.is_null() {
            alloc::handle_alloc_error(layout);
        }
        (*ptr).len = len;
        ptr as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        let array = &*(ptr as *const Array<T>);
        slice::from_raw_parts(array.elements.as_ptr() as *const _, array.len)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        // Take a mutable reference so that the mutable slice is derived from a
        // mutable pointer rather than a shared one.
        let array = &mut *(ptr as *mut Array<T>);
        slice::from_raw_parts_mut(array.elements.as_mut_ptr() as *mut _, array.len)
    }

    unsafe fn drop(ptr: usize) {
        let array = &*(ptr as *mut Array<T>);
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.len;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        alloc::dealloc(ptr as *mut u8, layout);
    }
}
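
// A note on the TODO above (an assumption, not verified here): with a newer
// MSRV, the manual size computation in `init` and `drop` could be written as
//
//     let (layout, _offset) = alloc::Layout::new::<Array<T>>()
//         .extend(alloc::Layout::array::<MaybeUninit<T>>(len).unwrap())
//         .unwrap();
//
// which should yield the same size and alignment, since `elements` is the
// trailing zero-sized field of the `#[repr(C)]` struct.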

/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// Crossbeam supports dynamically sized types. See [`Pointable`] for details.
pub struct Atomic<T: ?Sized + Pointable> {
    data: AtomicUsize,
    _marker: PhantomData<*mut T>,
}

unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}

impl<T> Atomic<T> {
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// ```
    pub fn new(init: T) -> Atomic<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Atomic<T> {
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Atomic<T> {
        Self::from(Owned::init(init))
    }

    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    fn from_usize(data: usize) -> Self {
        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg(all(crossbeam_const_fn_trait_bound, not(crossbeam_loom)))]
    pub const fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    #[cfg(not(all(crossbeam_const_fn_trait_bound, not(crossbeam_loom))))]
    pub fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load(ord)) }
    }

    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load_consume(guard);
    /// ```
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load_consume()) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
    /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
    /// ```
    pub fn compare_exchange<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`compare_exchange`]: Atomic::compare_exchange
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    pub fn compare_exchange_weak<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange_weak(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Fetches the pointer, and then applies a function to it that returns a new value.
    /// Returns a `Result` of `Ok(previous_value)` if the function returned `Some`, else `Err(_)`.
    ///
    /// Note that the given function may be called multiple times if the value has been changed by
    /// other threads in the meantime, as long as the function returns `Some(_)`, but the function
    /// will have been applied only once to the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering for
    /// when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`Atomic::compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part of this
    /// operation [`Relaxed`], and using [`Release`] makes the final successful
    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
    /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
    /// success ordering.
    ///
    /// [`Relaxed`]: Ordering::Relaxed
    /// [`Acquire`]: Ordering::Acquire
    /// [`Release`]: Ordering::Release
    /// [`SeqCst`]: Ordering::SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let res1 = a.fetch_update(SeqCst, SeqCst, guard, |x| Some(x.with_tag(1)));
    /// assert!(res1.is_ok());
    ///
    /// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |x| None);
    /// assert!(res2.is_err());
    /// ```
    pub fn fetch_update<'g, F>(
        &self,
        set_order: Ordering,
        fail_order: Ordering,
        guard: &'g Guard,
        mut func: F,
    ) -> Result<Shared<'g, T>, Shared<'g, T>>
    where
        F: FnMut(Shared<'g, T>) -> Option<Shared<'g, T>>,
    {
        let mut prev = self.load(fail_order, guard);
        while let Some(next) = func(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fail_order, guard) {
                Ok(shared) => return Ok(shared),
                Err(next_prev) => prev = next_prev.current,
            }
        }
        Err(prev)
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// # Migrating to `compare_exchange`
    ///
    /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange(current, new, ord.success(), ord.failure(), guard)
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: Atomic::compare_and_set
    ///
    /// # Migrating to `compare_exchange_weak`
    ///
    /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange_weak` instead")]
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard)
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }
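
    // Why the masks differ between `fetch_and` above and `fetch_or`/`fetch_xor`
    // below: for "and", the non-tag bits of `val` are forced to 1
    // (`val | !low_bits::<T>()`); for "or" and "xor", they are forced to 0
    // (`val & low_bits::<T>()`). Either way, the pointer bits are preserved and
    // only the tag bits can change.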

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }

    /// Takes ownership of the pointee.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
    /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
    /// destructors of data structures.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any Shared or & to it ourselves.
    ///         unsafe {
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
    ///         }
    ///     }
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        #[cfg(crossbeam_loom)]
        {
            // FIXME: loom does not yet support into_inner, so we use unsync_load for now,
            // which should have the same synchronization properties:
            // https://github.com/tokio-rs/loom/issues/117
            Owned::from_usize(self.data.unsync_load())
        }
        #[cfg(not(crossbeam_loom))]
        {
            Owned::from_usize(self.data.into_inner())
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_tag::<T>(data);

        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_tag::<T>(data);
        fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
    /// atomics or fences.
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
        Atomic::from_usize(data)
    }
}

impl<T: ?Sized + Pointable> Default for Atomic<T> {
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// ```
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
pub trait Pointer<T: ?Sized + Pointable> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Safety
    ///
    /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
    /// not be converted back by `Pointer::from_usize()` multiple times.
    unsafe fn from_usize(data: usize) -> Self;
}
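
// The intended contract, sketched with `Owned` (illustrative; `from_usize`
// must consume each `into_usize` result at most once, or ownership would be
// duplicated):
//
//     let owned = Owned::new(1234);
//     let data = owned.into_usize();                  // ownership moves into `data`
//     let owned = unsafe { Owned::from_usize(data) }; // reclaimed exactly once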

/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Owned<T: ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<Box<T>>,
}

impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
    #[inline]
    fn into_usize(self) -> usize {
        let data = self.data;
        mem::forget(self);
        data
    }

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Panics
    ///
    /// Panics if the data is zero in debug mode.
    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data,
            _marker: PhantomData,
        }
    }
}

impl<T> Owned<T> {
    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Safety
    ///
    /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
    /// back by `Owned::from_raw()` multiple times.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        Self::from_usize(raw)
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        mem::forget(self);
        unsafe { Box::from_raw(raw as *mut _) }
    }

    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    pub fn new(init: T) -> Owned<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Owned<T> {
    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Owned<T> {
        unsafe { Self::from_usize(T::init(init)) }
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// ```
    #[allow(clippy::needless_lifetimes)]
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(2);
    /// assert_eq!(o.tag(), 2);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        let data = self.into_usize();
        unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
    }
}

impl<T: ?Sized + Pointable> Drop for Owned<T> {
    fn drop(&mut self) {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe {
            T::drop(raw);
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Owned")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: Clone> Clone for Owned<T> {
    fn clone(&self) -> Self {
        Owned::new((**self).clone()).with_tag(self.tag())
    }
}

impl<T: ?Sized + Pointable> Deref for Owned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref(raw) }
    }
}

impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
    fn deref_mut(&mut self) -> &mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref_mut(raw) }
    }
}

impl<T> From<T> for Owned<T> {
    fn from(t: T) -> Self {
        Owned::new(t)
    }
}

impl<T> From<Box<T>> for Owned<T> {
    /// Returns a new owned pointer pointing to `b`.
    ///
    /// # Panics
    ///
    /// Panics if the pointer (the `Box`) is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    fn from(b: Box<T>) -> Self {
        unsafe { Self::from_raw(Box::into_raw(b)) }
    }
}

impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
    fn borrow(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
    fn borrow_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
    fn as_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<(&'g (), *const T)>,
}

impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
    fn clone(&self) -> Self {
        Self {
            data: self.data,
            _marker: PhantomData,
        }
    }
}

impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}

impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
    #[inline]
    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Shared<'g, T> {
    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// ```
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw as *const _
    }
}

impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// ```
    pub fn is_null(&self) -> bool {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw == 0
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// ```
    pub unsafe fn deref(&self) -> &'g T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref(raw)
    }

    /// Dereferences the pointer.
    ///
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// * There is no guarantee that there are no more threads attempting to read/write from/to the
    ///   actual object at the same time.
    ///
    ///   The user must know that there are no concurrent accesses towards the object itself.
    ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// ```
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref_mut(raw)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// ```
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        if raw == 0 {
            None
        } else {
            Some(T::deref(raw))
        }
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
        Owned::from_usize(self.data)
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
    }
}

impl<T> From<*const T> for Shared<'_, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
    /// assert!(!p.is_null());
    /// ```
    fn from(raw: *const T) -> Self {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        unsafe { Self::from_usize(raw) }
    }
}

impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}

impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use super::{Owned, Shared};
    use std::mem::MaybeUninit;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }

    #[rustversion::since(1.61)]
    #[test]
    fn const_atomic_null() {
        use super::Atomic;
        static _U: Atomic<u8> = Atomic::<u8>::null();
    }

    #[test]
    fn array_init() {
        let owned = Owned::<[MaybeUninit<usize>]>::init(10);
        let arr: &[MaybeUninit<usize>] = &*owned;
        assert_eq!(arr.len(), 10);
    }
}