]> git.proxmox.com Git - rustc.git/blame - vendor/crossbeam-epoch-0.3.1/src/atomic.rs
New upstream version 1.39.0+dfsg1
[rustc.git] / vendor / crossbeam-epoch-0.3.1 / src / atomic.rs
CommitLineData
2c00a5a8
XL
1use core::borrow::{Borrow, BorrowMut};
2use core::cmp;
3use core::fmt;
4use core::marker::PhantomData;
5use core::mem;
6use core::ptr;
7use core::ops::{Deref, DerefMut};
8use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
9use core::sync::atomic::Ordering;
10use alloc::boxed::Box;
11
12use guard::Guard;
13
/// Maps the success ordering of a compare-exchange operation to the strongest
/// memory ordering that is legal for its failure case.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    // A failure ordering may not carry `Release` semantics, so the release
    // half is stripped; the acquire/seq-cst strength is preserved.
    if ord == Ordering::Relaxed || ord == Ordering::Release {
        Ordering::Relaxed
    } else if ord == Ordering::Acquire || ord == Ordering::AcqRel {
        Ordering::Acquire
    } else {
        Ordering::SeqCst
    }
}
24
/// The error returned on failed compare-and-set operation.
pub struct CompareAndSetError<'g, T: 'g, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    ///
    /// It is handed back to the caller so that a retry does not have to
    /// recreate (or reallocate) the pointer.
    pub new: P,
}
33
34impl<'g, T: 'g, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> {
35 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
36 f.debug_struct("CompareAndSetError")
37 .field("current", &self.current)
38 .field("new", &self.new)
39 .finish()
40 }
41}
42
/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
    /// the success ordering. (These are the rules of `compare_exchange` itself; violating them
    /// results in a panic at runtime.)
    fn failure(&self) -> Ordering;
}
64
65impl CompareAndSetOrdering for Ordering {
66 #[inline]
67 fn success(&self) -> Ordering {
68 *self
69 }
70
71 #[inline]
72 fn failure(&self) -> Ordering {
73 strongest_failure_ordering(*self)
74 }
75}
76
77impl CompareAndSetOrdering for (Ordering, Ordering) {
78 #[inline]
79 fn success(&self) -> Ordering {
80 self.0
81 }
82
83 #[inline]
84 fn failure(&self) -> Ordering {
85 self.1
86 }
87}
88
89/// Panics if the pointer is not properly unaligned.
90#[inline]
91fn ensure_aligned<T>(raw: *const T) {
92 assert_eq!(raw as usize & low_bits::<T>(), 0, "unaligned pointer");
93}
94
/// Returns a bitmask of the low-order bits that are always zero in a properly
/// aligned pointer to `T` — i.e. the bits available for storing a tag.
#[inline]
fn low_bits<T>() -> usize {
    // Alignment is always a power of two, so `align - 1` is exactly the mask
    // of its trailing zero bits.
    mem::align_of::<T>() - 1
}
100
101/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
102///
103/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
104#[inline]
105fn data_with_tag<T>(data: usize, tag: usize) -> usize {
106 (data & !low_bits::<T>()) | (tag & low_bits::<T>())
107}
108
109/// Decomposes a tagged pointer `data` into the pointer and the tag.
110#[inline]
111fn decompose_data<T>(data: usize) -> (*mut T, usize) {
112 let raw = (data & !low_bits::<T>()) as *mut T;
113 let tag = data & low_bits::<T>();
114 (raw, tag)
115}
116
/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. More precisely, a tag should be less than `(1 <<
/// mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// [`Guard`]: struct.Guard.html
pub struct Atomic<T> {
    // The tagged pointer: the raw address with the tag packed into the
    // alignment (low) bits.
    data: AtomicUsize,
    // `*mut T` opts out of the auto traits; `Send`/`Sync` are provided
    // manually (with `T: Send + Sync` bounds) elsewhere in this file.
    _marker: PhantomData<*mut T>,
}
130
// SAFETY(review): `Atomic<T>` can publish references to `T` across threads and
// hand ownership of `T` to another thread, so both impls require the full
// `T: Send + Sync` bound — confirm against how `Shared`/`Owned` are used.
unsafe impl<T: Send + Sync> Send for Atomic<T> {}
unsafe impl<T: Send + Sync> Sync for Atomic<T> {}
133
impl<T> Atomic<T> {
    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    ///
    /// Private constructor: callers are trusted to pass a valid tagged word.
    fn from_data(data: usize) -> Atomic<T> {
        Atomic {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg(not(feature = "nightly"))]
    pub fn null() -> Atomic<T> {
        Atomic {
            // Zero word == null pointer with zero tag.
            data: ATOMIC_USIZE_INIT,
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg(feature = "nightly")]
    pub const fn null() -> Atomic<T> {
        // Same as the stable version, but usable in const contexts.
        Self {
            data: ATOMIC_USIZE_INIT,
            _marker: PhantomData,
        }
    }

    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// ```
    pub fn new(value: T) -> Atomic<T> {
        Self::from(Owned::new(value))
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        // The guard is only used to bound the lifetime 'g of the result.
        unsafe { Shared::from_data(self.data.load(ord)) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    pub fn store<'g, P: Pointer<T>>(&self, new: P, ord: Ordering) {
        // `into_data` consumes `new`; for `Owned` this transfers ownership of
        // the allocation into the atomic. The old value is NOT reclaimed here.
        self.data.store(new.into_data(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_data(self.data.swap(new.into_data(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let mut curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// ```
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        // `new` is consumed up front; on failure it is reconstituted from the
        // same bits below so the caller can retry without reallocating.
        let new = new.into_data();
        self.data
            .compare_exchange(current.into_data(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_data(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_data(current),
                    // The exchange did not happen, so ownership of `new` is
                    // still ours — hand it back.
                    new: P::from_data(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set
    /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        // Identical to `compare_and_set` except for the weak exchange, which
        // may fail spuriously (hence the retry loops in the examples).
        let new = new.into_data();
        self.data
            .compare_exchange_weak(current.into_data(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_data(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_data(current),
                    new: P::from_data(new),
                }
            })
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        // Setting all non-tag bits of the mask to 1 makes the AND a no-op on
        // the pointer half: only tag bits can be cleared.
        unsafe { Shared::from_data(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        // Truncate `val` to the tag bits so the pointer half cannot be touched.
        unsafe { Shared::from_data(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        // Truncate `val` to the tag bits so the pointer half cannot be touched.
        unsafe { Shared::from_data(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }
}
448
449impl<T> fmt::Debug for Atomic<T> {
450 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
451 let data = self.data.load(Ordering::SeqCst);
452 let (raw, tag) = decompose_data::<T>(data);
453
454 f.debug_struct("Atomic")
455 .field("raw", &raw)
456 .field("tag", &tag)
457 .finish()
458 }
459}
460
461impl<T> fmt::Pointer for Atomic<T> {
462 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
463 let data = self.data.load(Ordering::SeqCst);
464 let (raw, _) = decompose_data::<T>(data);
465 fmt::Pointer::fmt(&raw, f)
466 }
467}
468
469impl<T> Clone for Atomic<T> {
470 /// Returns a copy of the atomic value.
471 ///
472 /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
473 /// atomics or fences.
474 fn clone(&self) -> Self {
475 let data = self.data.load(Ordering::Relaxed);
476 Atomic::from_data(data)
477 }
478}
479
480impl<T> Default for Atomic<T> {
481 fn default() -> Self {
482 Atomic::null()
483 }
484}
485
486impl<T> From<Owned<T>> for Atomic<T> {
487 /// Returns a new atomic pointer pointing to `owned`.
488 ///
489 /// # Examples
490 ///
491 /// ```
492 /// use crossbeam_epoch::{Atomic, Owned};
493 ///
494 /// let a = Atomic::<i32>::from(Owned::new(1234));
495 /// ```
496 fn from(owned: Owned<T>) -> Self {
497 let data = owned.data;
498 mem::forget(owned);
499 Self::from_data(data)
500 }
501}
502
503impl<T> From<Box<T>> for Atomic<T> {
504 fn from(b: Box<T>) -> Self {
505 Self::from(Owned::from(b))
506 }
507}
508
509impl<T> From<T> for Atomic<T> {
510 fn from(t: T) -> Self {
511 Self::new(t)
512 }
513}
514
515impl<'g, T> From<Shared<'g, T>> for Atomic<T> {
516 /// Returns a new atomic pointer pointing to `ptr`.
517 ///
518 /// # Examples
519 ///
520 /// ```
521 /// use crossbeam_epoch::{Atomic, Shared};
522 ///
523 /// let a = Atomic::<i32>::from(Shared::<i32>::null());
524 /// ```
525 fn from(ptr: Shared<'g, T>) -> Self {
526 Self::from_data(ptr.data)
527 }
528}
529
530impl<T> From<*const T> for Atomic<T> {
531 /// Returns a new atomic pointer pointing to `raw`.
532 ///
533 /// # Examples
534 ///
535 /// ```
536 /// use std::ptr;
537 /// use crossbeam_epoch::Atomic;
538 ///
539 /// let a = Atomic::<i32>::from(ptr::null::<i32>());
540 /// ```
541 fn from(raw: *const T) -> Self {
542 Self::from_data(raw as usize)
543 }
544}
545
/// A trait for either `Owned` or `Shared` pointers.
pub trait Pointer<T> {
    /// Returns the machine representation of the pointer.
    fn into_data(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Safety
    ///
    /// `data` must be a valid tagged pointer for the implementing type; for
    /// owning implementations it must also carry ownership of the allocation.
    unsafe fn from_data(data: usize) -> Self;
}
554
/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Owned<T> {
    // Tagged pointer to the heap allocation; expected to be non-null
    // (debug-asserted in `Pointer::from_data`).
    data: usize,
    // Owns a heap-allocated `T`, like `Box<T>` (drop check / auto traits).
    _marker: PhantomData<Box<T>>,
}
565
566impl<T> Pointer<T> for Owned<T> {
567 #[inline]
568 fn into_data(self) -> usize {
569 let data = self.data;
570 mem::forget(self);
571 data
572 }
573
574 /// Returns a new pointer pointing to the tagged pointer `data`.
575 ///
576 /// # Panics
577 ///
578 /// Panics if the data is zero in debug mode.
579 #[inline]
580 unsafe fn from_data(data: usize) -> Self {
581 debug_assert!(data != 0, "converting zero into `Owned`");
582 Owned {
583 data: data,
584 _marker: PhantomData,
585 }
586 }
587}
588
impl<T> Owned<T> {
    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    pub fn new(value: T) -> Owned<T> {
        Self::from(Box::new(value))
    }

    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        // An unaligned pointer would collide with the tag bits, so reject it.
        ensure_aligned(raw);
        Self::from_data(raw as usize)
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// ```
    ///
    /// [`Shared`]: struct.Shared.html
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        // Ownership is given up here; the allocation must later be reclaimed
        // through the returned `Shared` (e.g. `into_owned`) or it leaks.
        unsafe { Shared::from_data(self.into_data()) }
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_data::<T>(self.data);
        // Forget `self` so its `Drop` doesn't free the allocation we are
        // about to hand to `Box` (which would be a double free).
        mem::forget(self);
        unsafe { Box::from_raw(raw) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_data::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(5);
    /// assert_eq!(o.tag(), 5);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        // Round-trip through the raw representation to replace only the tag
        // bits; ownership is preserved (into_data forgets, from_data rebuilds).
        let data = self.into_data();
        unsafe { Self::from_data(data_with_tag::<T>(data, tag)) }
    }
}
691
692impl<T> Drop for Owned<T> {
693 fn drop(&mut self) {
694 let (raw, _) = decompose_data::<T>(self.data);
695 unsafe {
696 drop(Box::from_raw(raw));
697 }
698 }
699}
700
701impl<T> fmt::Debug for Owned<T> {
702 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
703 let (raw, tag) = decompose_data::<T>(self.data);
704
705 f.debug_struct("Owned")
706 .field("raw", &raw)
707 .field("tag", &tag)
708 .finish()
709 }
710}
711
712impl<T: Clone> Clone for Owned<T> {
713 fn clone(&self) -> Self {
714 Owned::new((**self).clone()).with_tag(self.tag())
715 }
716}
717
718impl<T> Deref for Owned<T> {
719 type Target = T;
720
721 fn deref(&self) -> &T {
722 let (raw, _) = decompose_data::<T>(self.data);
723 unsafe { &*raw }
724 }
725}
726
727impl<T> DerefMut for Owned<T> {
728 fn deref_mut(&mut self) -> &mut T {
729 let (raw, _) = decompose_data::<T>(self.data);
730 unsafe { &mut *raw }
731 }
732}
733
734impl<T> From<T> for Owned<T> {
735 fn from(t: T) -> Self {
736 Owned::new(t)
737 }
738}
739
740impl<T> From<Box<T>> for Owned<T> {
741 /// Returns a new owned pointer pointing to `b`.
742 ///
743 /// # Panics
744 ///
745 /// Panics if the pointer (the `Box`) is not properly aligned.
746 ///
747 /// # Examples
748 ///
749 /// ```
750 /// use crossbeam_epoch::Owned;
751 ///
752 /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
753 /// ```
754 fn from(b: Box<T>) -> Self {
755 unsafe { Self::from_raw(Box::into_raw(b)) }
756 }
757}
758
759impl<T> Borrow<T> for Owned<T> {
760 fn borrow(&self) -> &T {
761 &**self
762 }
763}
764
765impl<T> BorrowMut<T> for Owned<T> {
766 fn borrow_mut(&mut self) -> &mut T {
767 &mut **self
768 }
769}
770
771impl<T> AsRef<T> for Owned<T> {
772 fn as_ref(&self) -> &T {
773 &**self
774 }
775}
776
777impl<T> AsMut<T> for Owned<T> {
778 fn as_mut(&mut self) -> &mut T {
779 &mut **self
780 }
781}
782
/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Shared<'g, T: 'g> {
    // Tagged pointer value; may be null.
    data: usize,
    // `&'g ()` ties the pointer's validity to the guard lifetime; `*const T`
    // keeps the type non-Send/non-Sync by default.
    _marker: PhantomData<(&'g (), *const T)>,
}
793
794impl<'g, T> Clone for Shared<'g, T> {
795 fn clone(&self) -> Self {
796 Shared {
797 data: self.data,
798 _marker: PhantomData,
799 }
800 }
801}
802
// `Shared` is a plain tagged word (no ownership), so bitwise copy is valid.
impl<'g, T> Copy for Shared<'g, T> {}
804
805impl<'g, T> Pointer<T> for Shared<'g, T> {
806 #[inline]
807 fn into_data(self) -> usize {
808 self.data
809 }
810
811 #[inline]
812 unsafe fn from_data(data: usize) -> Self {
813 Shared {
814 data: data,
815 _marker: PhantomData,
816 }
817 }
818}
819
impl<'g, T> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// ```
    pub fn is_null(&self) -> bool {
        // Checked on the raw pointer, i.e. ignoring the tag bits.
        self.as_raw().is_null()
    }

    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// ```
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_data::<T>(self.data);
        raw
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// ```
    pub unsafe fn deref(&self) -> &'g T {
        // Note: no null check here (unlike `as_ref`); the caller guarantees
        // validity.
        &*self.as_raw()
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// ```
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        self.as_raw().as_ref()
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(
            self.as_raw() != ptr::null(),
            "converting a null `Shared` into `Owned`"
        );
        // The tag is carried over into the `Owned` unchanged.
        Owned::from_data(self.data)
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(5));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 5);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_data::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(5);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 5);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_data(data_with_tag::<T>(self.data, tag)) }
    }
}
1019
1020impl<'g, T> From<*const T> for Shared<'g, T> {
1021 /// Returns a new pointer pointing to `raw`.
1022 ///
1023 /// # Panics
1024 ///
1025 /// Panics if `raw` is not properly aligned.
1026 ///
1027 /// # Examples
1028 ///
1029 /// ```
1030 /// use crossbeam_epoch::Shared;
1031 ///
1032 /// let p = unsafe { Shared::from(Box::into_raw(Box::new(1234)) as *const _) };
1033 /// assert!(!p.is_null());
1034 /// ```
1035 fn from(raw: *const T) -> Self {
1036 ensure_aligned(raw);
1037 unsafe { Self::from_data(raw as usize) }
1038 }
1039}
1040
1041impl<'g, T> PartialEq<Shared<'g, T>> for Shared<'g, T> {
1042 fn eq(&self, other: &Self) -> bool {
1043 self.data == other.data
1044 }
1045}
1046
// Equality on the raw word is reflexive, so `Eq` holds.
impl<'g, T> Eq for Shared<'g, T> {}
1048
1049impl<'g, T> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
1050 fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
1051 self.data.partial_cmp(&other.data)
1052 }
1053}
1054
1055impl<'g, T> Ord for Shared<'g, T> {
1056 fn cmp(&self, other: &Self) -> cmp::Ordering {
1057 self.data.cmp(&other.data)
1058 }
1059}
1060
1061impl<'g, T> fmt::Debug for Shared<'g, T> {
1062 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1063 let (raw, tag) = decompose_data::<T>(self.data);
1064
1065 f.debug_struct("Shared")
1066 .field("raw", &raw)
1067 .field("tag", &tag)
1068 .finish()
1069 }
1070}
1071
1072impl<'g, T> fmt::Pointer for Shared<'g, T> {
1073 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1074 fmt::Pointer::fmt(&self.as_raw(), f)
1075 }
1076}
1077
1078impl<'g, T> Default for Shared<'g, T> {
1079 fn default() -> Self {
1080 Shared::null()
1081 }
1082}
1083
#[cfg(test)]
mod tests {
    use super::Shared;

    // Tags within the alignment bits of the pointee type must be accepted.
    #[test]
    fn valid_tag_i8() {
        let _ = Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        let _ = Shared::<i64>::null().with_tag(7);
    }
}