// vendor/crossbeam-epoch-0.8.2/src/atomic.rs (vendored in rustc.git, upstream 1.59.0+dfsg1)
use alloc::boxed::Box;
use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::sync::atomic::{AtomicUsize, Ordering};

use crossbeam_utils::atomic::AtomicConsume;
use guard::Guard;

/// Given an ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}
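
// A worked example of the mapping above — the strongest failure orderings for
// a few success orderings, read straight off the match arms:
//
//     strongest_failure_ordering(Release) == Relaxed
//     strongest_failure_ordering(AcqRel)  == Acquire
//     strongest_failure_ordering(SeqCst)  == SeqCst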

/// The error returned on a failed compare-and-set operation.
pub struct CompareAndSetError<'g, T: 'g, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    pub new: P,
}

impl<'g, T: 'g, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("CompareAndSetError")
            .field("current", &self.current)
            .field("new", &self.new)
            .finish()
    }
}

/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case. (A sketch of both forms follows the impls below.)
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker
    /// than the success ordering.
    fn failure(&self) -> Ordering;
}

impl CompareAndSetOrdering for Ordering {
    #[inline]
    fn success(&self) -> Ordering {
        *self
    }

    #[inline]
    fn failure(&self) -> Ordering {
        strongest_failure_ordering(*self)
    }
}

impl CompareAndSetOrdering for (Ordering, Ordering) {
    #[inline]
    fn success(&self) -> Ordering {
        self.0
    }

    #[inline]
    fn failure(&self) -> Ordering {
        self.1
    }
}
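
// A sketch of the two forms in use (hypothetical `a`, `curr`, `new`, and
// `guard` values; see `Atomic::compare_and_set` below for a complete example):
//
//     // 1. One `Ordering`: the failure ordering is derived automatically.
//     a.compare_and_set(curr, new, SeqCst, guard);
//     // 2. A `(success, failure)` pair, given explicitly.
//     a.compare_and_set(curr, new, (SeqCst, Relaxed), guard);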

/// Panics if the pointer is not properly aligned.
#[inline]
fn ensure_aligned<T>(raw: *const T) {
    assert_eq!(raw as usize & low_bits::<T>(), 0, "unaligned pointer");
}

/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
fn low_bits<T>() -> usize {
    (1 << mem::align_of::<T>().trailing_zeros()) - 1
}

/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
///
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
#[inline]
fn data_with_tag<T>(data: usize, tag: usize) -> usize {
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
}

/// Decomposes a tagged pointer `data` into the pointer and the tag.
#[inline]
fn decompose_data<T>(data: usize) -> (*mut T, usize) {
    let raw = (data & !low_bits::<T>()) as *mut T;
    let tag = data & low_bits::<T>();
    (raw, tag)
}
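
// A worked example of the tagging scheme: for `T = u64` on a typical 64-bit
// target the alignment is 8, so `low_bits::<u64>()` is `0b111` and tags 0
// through 7 fit in the pointer. Given an 8-aligned address, say `0x1000`:
//
//     data_with_tag::<u64>(0x1000, 5) == 0x1005
//     decompose_data::<u64>(0x1005)   == (0x1000 as *mut u64, 5)
//
// Tags wider than the mask are truncated: `data_with_tag::<u64>(0x1000, 9) == 0x1001`.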

/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. More precisely, a tag should be less than `(1 <<
/// mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// [`Guard`]: struct.Guard.html
pub struct Atomic<T> {
    data: AtomicUsize,
    _marker: PhantomData<*mut T>,
}

unsafe impl<T: Send + Sync> Send for Atomic<T> {}
unsafe impl<T: Send + Sync> Sync for Atomic<T> {}
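
// The `T: Send + Sync` bounds on both impls reflect how a shared `Atomic<T>`
// is used: other threads can obtain references to the pointee through loaded
// `Shared` pointers (requiring `Sync`) and can take ownership of it, e.g. via
// `into_owned` (requiring `Send`).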

impl<T> Atomic<T> {
    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    fn from_usize(data: usize) -> Self {
        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg(not(has_min_const_fn))]
    pub fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg(has_min_const_fn)]
    pub const fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// ```
    pub fn new(value: T) -> Atomic<T> {
        Self::from(Owned::new(value))
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load(ord)) }
    }

    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model, since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice, since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load_consume(guard);
    /// ```
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load_consume()) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    pub fn store<'g, P: Pointer<T>>(&self, new: P, ord: Ordering) {
        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let mut curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// ```
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange(current.into_usize(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set
    /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
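        // Force all non-tag bits of `val` to 1 so the "and" can only clear tag
        // bits and leaves the pointer part of the word untouched. (The "or" and
        // "xor" variants below instead mask `val` down to the tag bits.)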
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }

    /// Takes ownership of the pointee.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. Because [`Atomic`] doesn't have a
    /// destructor, it never drops the pointee, while [`Owned`] does; this makes the conversion
    /// suitable for destructors of data structures.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any Shared or & to it ourselves.
    ///         unsafe {
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
    ///         }
    ///     }
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        Owned::from_usize(self.data.into_inner())
    }
}

impl<T> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_data::<T>(data);

        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_data::<T>(data);
        fmt::Pointer::fmt(&raw, f)
    }
}

impl<T> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
    /// atomics or fences.
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
        Atomic::from_usize(data)
    }
}

impl<T> Default for Atomic<T> {
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// ```
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
pub trait Pointer<T> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    unsafe fn from_usize(data: usize) -> Self;
}
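
// Both implementations round-trip through the same tagged-`usize`
// representation; a sketch with hypothetical values (`from_usize` is unsafe
// because `data` must be a valid tagged pointer of the right type):
//
//     let o = Owned::new(1234);
//     let data = o.into_usize();
//     let o = unsafe { Owned::<i32>::from_usize(data) };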

/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Owned<T> {
    data: usize,
    _marker: PhantomData<Box<T>>,
}

impl<T> Pointer<T> for Owned<T> {
    #[inline]
    fn into_usize(self) -> usize {
        let data = self.data;
        mem::forget(self);
        data
    }

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Panics
    ///
    /// Panics if the data is zero in debug mode.
    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data,
            _marker: PhantomData,
        }
    }
}

impl<T> Owned<T> {
    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    pub fn new(value: T) -> Owned<T> {
        Self::from(Box::new(value))
    }

    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        ensure_aligned(raw);
        Self::from_usize(raw as usize)
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// ```
    ///
    /// [`Shared`]: struct.Shared.html
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_data::<T>(self.data);
        mem::forget(self);
        unsafe { Box::from_raw(raw) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_data::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(2);
    /// assert_eq!(o.tag(), 2);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        let data = self.into_usize();
        unsafe { Self::from_usize(data_with_tag::<T>(data, tag)) }
    }
}

impl<T> Drop for Owned<T> {
    fn drop(&mut self) {
        let (raw, _) = decompose_data::<T>(self.data);
        unsafe {
            drop(Box::from_raw(raw));
        }
    }
}

impl<T> fmt::Debug for Owned<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let (raw, tag) = decompose_data::<T>(self.data);

        f.debug_struct("Owned")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: Clone> Clone for Owned<T> {
    fn clone(&self) -> Self {
        Owned::new((**self).clone()).with_tag(self.tag())
    }
}

impl<T> Deref for Owned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let (raw, _) = decompose_data::<T>(self.data);
        unsafe { &*raw }
    }
}

impl<T> DerefMut for Owned<T> {
    fn deref_mut(&mut self) -> &mut T {
        let (raw, _) = decompose_data::<T>(self.data);
        unsafe { &mut *raw }
    }
}

impl<T> From<T> for Owned<T> {
    fn from(t: T) -> Self {
        Owned::new(t)
    }
}

impl<T> From<Box<T>> for Owned<T> {
    /// Returns a new owned pointer pointing to `b`.
    ///
    /// # Panics
    ///
    /// Panics if the pointer (the `Box`) is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    fn from(b: Box<T>) -> Self {
        unsafe { Self::from_raw(Box::into_raw(b)) }
    }
}

impl<T> Borrow<T> for Owned<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

impl<T> BorrowMut<T> for Owned<T> {
    fn borrow_mut(&mut self) -> &mut T {
        &mut **self
    }
}

impl<T> AsRef<T> for Owned<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

impl<T> AsMut<T> for Owned<T> {
    fn as_mut(&mut self) -> &mut T {
        &mut **self
    }
}

/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Shared<'g, T: 'g> {
    data: usize,
    _marker: PhantomData<(&'g (), *const T)>,
}
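
// A note on the marker: the `&'g ()` component ties a `Shared` to the guard
// lifetime `'g`, and the `*const T` component makes the type `!Send` and
// `!Sync`, so a loaded pointer cannot escape the pinned epoch it came from.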

impl<'g, T> Clone for Shared<'g, T> {
    fn clone(&self) -> Self {
        Shared {
            data: self.data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Copy for Shared<'g, T> {}

impl<'g, T> Pointer<T> for Shared<'g, T> {
    #[inline]
    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// ```
    pub fn is_null(&self) -> bool {
        self.as_raw().is_null()
    }

    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// ```
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_data::<T>(self.data);
        raw
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// ```
    pub unsafe fn deref(&self) -> &'g T {
        &*self.as_raw()
    }

    /// Dereferences the pointer.
    ///
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// * There is no guarantee that there are no more threads attempting to read/write from/to the
    ///   actual object at the same time.
    ///
    ///   The user must know that there are no concurrent accesses towards the object itself.
    ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// ```
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        &mut *(self.as_raw() as *mut T)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// ```
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        self.as_raw().as_ref()
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(
            self.as_raw() != ptr::null(),
            "converting a null `Shared` into `Owned`"
        );
        Owned::from_usize(self.data)
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_data::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(data_with_tag::<T>(self.data, tag)) }
    }
}

impl<'g, T> From<*const T> for Shared<'g, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = unsafe { Shared::from(Box::into_raw(Box::new(1234)) as *const _) };
    /// assert!(!p.is_null());
    /// ```
    fn from(raw: *const T) -> Self {
        ensure_aligned(raw);
        unsafe { Self::from_usize(raw as usize) }
    }
}

impl<'g, T> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<'g, T> Eq for Shared<'g, T> {}

impl<'g, T> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<'g, T> Ord for Shared<'g, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<'g, T> fmt::Debug for Shared<'g, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let (raw, tag) = decompose_data::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<'g, T> fmt::Pointer for Shared<'g, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_raw(), f)
    }
}

impl<'g, T> Default for Shared<'g, T> {
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(test)]
mod tests {
    use super::Shared;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }
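
    // An additional check, not in the original suite: `with_tag` truncates
    // tags that don't fit. `i64` is 8-aligned on typical targets (4-aligned on
    // some 32-bit ones), so the mask is 0b111 (resp. 0b11), and 9 truncates to
    // 1 under either mask.
    #[test]
    fn tag_truncation_i64() {
        assert_eq!(Shared::<i64>::null().with_tag(9).tag(), 1);
    }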
}