6 use crate::atomic
::Shared
;
7 use crate::collector
::Collector
;
8 use crate::deferred
::Deferred
;
9 use crate::internal
::Local
;
/// A guard that keeps the current thread pinned.
///
/// # Pinning
///
/// The current thread is pinned by calling [`pin`], which returns a new guard:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference.
/// // This is not really necessary, but makes passing references to the guard a bit easier.
/// let guard = &epoch::pin();
/// ```
///
/// When a guard gets dropped, the current thread is automatically unpinned.
///
/// # Pointers on the stack
///
/// Having a guard allows us to create pointers on the stack to heap-allocated objects.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// // Create a heap-allocated number.
/// let a = Atomic::new(777);
///
/// // Pin the current thread.
/// let guard = &epoch::pin();
///
/// // Load the heap-allocated object and create pointer `p` on the stack.
/// let p = a.load(SeqCst, guard);
///
/// // Dereference the pointer and print the value:
/// if let Some(num) = unsafe { p.as_ref() } {
///     println!("The number is {}.", num);
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// # Multiple guards
///
/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the
/// thread will actually be pinned only when the first guard is created and unpinned when the last
/// one is dropped, as the assertions below illustrate:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard1 = epoch::pin();
/// let guard2 = epoch::pin();
/// assert!(epoch::is_pinned());
/// // NOTE(review): the doc-test lines dropping `guard1`/`guard2` between these
/// // assertions fall in an extraction gap; confirm against the full file.
/// assert!(epoch::is_pinned());
/// assert!(!epoch::is_pinned());
/// ```
///
/// [`pin`]: super::pin
// NOTE(review): the `pub struct Guard {` header (and the struct's closing brace) fall in
// extraction gaps around the field below.
    // Pointer to the thread-local `Local` this guard pins. Null for the
    // `unprotected` dummy guard (see `unprotected()` at the bottom of this file,
    // which explicitly builds a `Guard` with `local: core::ptr::null()`).
    pub(crate) local: *const Local,
    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
    /// becomes full, some functions are moved into the global cache. At the same time, some
    /// functions from both local and global caches may get executed in order to incrementally
    /// clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
    /// never run, but the epoch-based garbage collection will make an effort to execute it
    /// reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the function will simply be
    /// executed immediately.
    // NOTE(review): extraction gap between the signature and the call below — presumably a
    // `where` clause bounding `F` (a callable returning `R`, likely `Send + 'static`, which is
    // what would make this safe wrapper over the unsafe `defer_unchecked` sound) plus the
    // opening brace and an `unsafe` block. Confirm the exact bounds against the full file.
    pub fn defer<F, R>(&self, f: F)
        self.defer_unchecked(f);
    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
    /// becomes full, some functions are moved into the global cache. At the same time, some
    /// functions from both local and global caches may get executed in order to incrementally
    /// clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
    /// never run, but the epoch-based garbage collection will make an effort to execute it
    /// reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the function will simply be
    /// executed immediately.
    ///
    /// # Safety
    ///
    /// The given function must not hold reference onto the stack. It is highly recommended that
    /// the passed function is **always** marked with `move` in order to prevent accidental
    /// borrows.
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// let message = "Hello!";
    /// // ALWAYS use `move` when sending a closure into `defer_unchecked`.
    /// unsafe {
    ///     guard.defer_unchecked(move || {
    ///         println!("{}", message);
    ///     });
    /// }
    /// ```
    ///
    /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by
    /// the closure must be `Send`.
    ///
    /// We intentionally didn't require `F: Send`, because Rust's type systems usually cannot prove
    /// `F: Send` for typical use cases. For example, consider the following code snippet, which
    /// exemplifies the typical use case of deferring the deallocation of a shared reference:
    ///
    /// ```ignore
    /// let shared = Owned::new(7i32).into_shared(guard);
    /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`!
    /// ```
    ///
    /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function,
    /// because it's called only after the grace period and `shared` is no longer shared with other
    /// threads. But we don't expect type systems to prove this.
    ///
    /// # Examples
    ///
    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
    /// deallocated. However, the current thread and other threads may be still holding references
    /// on the stack to that same object. Therefore it cannot be deallocated before those references
    /// get dropped. This method can defer deallocation until all those threads get unpinned and
    /// consequently drop all their references on the stack.
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new("foo");
    ///
    /// // Now suppose that `a` is shared among multiple threads and concurrently
    /// // accessed and modified...
    ///
    /// // Pin the current thread.
    /// let guard = &epoch::pin();
    ///
    /// // Steal the object currently stored in `a` and swap it with another one.
    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
    ///
    /// if !p.is_null() {
    ///     // The object `p` is pointing to is now unreachable.
    ///     // Defer its deallocation until all currently pinned threads get unpinned.
    ///     unsafe {
    ///         // ALWAYS use `move` when sending a closure into `defer_unchecked`.
    ///         guard.defer_unchecked(move || {
    ///             println!("{} is now being deallocated.", p.deref());
    ///             // Now we have unique access to the object pointed to by `p` and can turn it
    ///             // into an `Owned`. Dropping the `Owned` will deallocate the object.
    ///             drop(p.into_owned());
    ///         });
    ///     }
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub unsafe fn defer_unchecked<F, R>(&self, f: F)
        // NOTE(review): extraction gap — a `where` clause bounding `F` (a callable returning
        // `R`) and the opening brace are not visible in this chunk.
        if let Some(local) = self.local.as_ref() {
            // Wrap `f` in a `Deferred` whose result is dropped; `local` queues it until
            // the grace period has passed.
            local.defer(Deferred::new(move || drop(f())), self);
        // NOTE(review): the `else` arm (per the doc above, an unprotected guard should run
        // `f` immediately) and the closing braces fall in an extraction gap — confirm
        // against the full file.
201 /// Stores a destructor for an object so that it can be deallocated and dropped at some point
202 /// after all currently pinned threads get unpinned.
204 /// This method first stores the destructor into the thread-local (or handle-local) cache. If
205 /// this cache becomes full, some destructors are moved into the global cache. At the same
206 /// time, some destructors from both local and global caches may get executed in order to
207 /// incrementally clean up the caches as they fill up.
209 /// There is no guarantee when exactly the destructor will be executed. The only guarantee is
210 /// that it won't be executed until all currently pinned threads get unpinned. In theory, the
211 /// destructor might never run, but the epoch-based garbage collection will make an effort to
212 /// execute it reasonably soon.
214 /// If this method is called from an [`unprotected`] guard, the destructor will simply be
215 /// executed immediately.
219 /// The object must not be reachable by other threads anymore, otherwise it might be still in
220 /// use when the destructor runs.
222 /// Apart from that, keep in mind that another thread may execute the destructor, so the object
223 /// must be sendable to other threads.
225 /// We intentionally didn't require `T: Send`, because Rust's type systems usually cannot prove
226 /// `T: Send` for typical use cases. For example, consider the following code snippet, which
227 /// exemplifies the typical use case of deferring the deallocation of a shared reference:
230 /// let shared = Owned::new(7i32).into_shared(guard);
231 /// guard.defer_destroy(shared); // `Shared` is not `Send`!
234 /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because
235 /// it's called only after the grace period and `shared` is no longer shared with other
236 /// threads. But we don't expect type systems to prove this.
240 /// When a heap-allocated object in a data structure becomes unreachable, it has to be
241 /// deallocated. However, the current thread and other threads may be still holding references
242 /// on the stack to that same object. Therefore it cannot be deallocated before those references
243 /// get dropped. This method can defer deallocation until all those threads get unpinned and
244 /// consequently drop all their references on the stack.
247 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
248 /// use std::sync::atomic::Ordering::SeqCst;
250 /// let a = Atomic::new("foo");
252 /// // Now suppose that `a` is shared among multiple threads and concurrently
253 /// // accessed and modified...
255 /// // Pin the current thread.
256 /// let guard = &epoch::pin();
258 /// // Steal the object currently stored in `a` and swap it with another one.
259 /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
261 /// if !p.is_null() {
262 /// // The object `p` is pointing to is now unreachable.
263 /// // Defer its deallocation until all currently pinned threads get unpinned.
265 /// guard.defer_destroy(p);
268 /// # unsafe { drop(a.into_owned()); } // avoid leak
270 pub unsafe fn defer_destroy
<T
>(&self, ptr
: Shared
<'_
, T
>) {
271 self.defer_unchecked(move || ptr
.into_owned());
    /// Clears up the thread-local cache of deferred functions by executing them or moving into the
    /// global cache.
    ///
    /// Call this method after deferring execution of a function if you want to get it executed as
    /// soon as possible. Flushing will make sure it is residing in the global cache, so that
    /// any thread has a chance of taking the function and executing it.
    ///
    /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// guard.defer(move || {
    ///     println!("This better be printed as soon as possible!");
    /// });
    /// guard.flush();
    /// ```
    pub fn flush(&self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
        // NOTE(review): the body of this `if` (presumably the call that flushes `local`'s
        // deferred-function cache, per the doc above) and the closing braces fall in an
        // extraction gap — confirm against the full file.
    /// Unpins and then immediately re-pins the thread.
    ///
    /// This method is useful when you don't want to delay the advancement of the global epoch by
    /// holding an old epoch. For safety, you should not maintain any guard-based reference across
    /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this
    /// is the only active guard for the current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the call will be just no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// let p = a.load(SeqCst, &guard);
    /// assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// guard.repin();
    /// let p = a.load(SeqCst, &guard);
    /// assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn repin(&mut self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
        // NOTE(review): the body of this `if` (presumably the call that re-pins `local`)
        // and the closing braces fall in an extraction gap — confirm against the full file.
    /// Temporarily unpins the thread, executes the given function and then re-pins the thread.
    ///
    /// This method is useful when you need to perform a long-running operation (e.g. sleeping)
    /// and don't need to maintain any guard-based reference across the call (the latter is
    /// enforced by `&mut self`). The thread will only be unpinned if this is the only active
    /// guard for the current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the passed function is called
    /// directly without unpinning the thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// let p = a.load(SeqCst, &guard);
    /// assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
    /// let p = a.load(SeqCst, &guard);
    /// assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn repin_after<F, R>(&mut self, f: F) -> R
        // NOTE(review): extraction gap — the `where` clause (presumably `F: FnOnce() -> R`)
        // and the opening brace are not visible in this chunk.
        if let Some(local) = unsafe { self.local.as_ref() } {
            // We need to acquire a handle here to ensure the Local doesn't
            // disappear from under us.
            local.acquire_handle();
            // NOTE(review): the unpin of `local`, the invocation of `f`, and whatever
            // panic-safety plumbing surrounds it fall in an extraction gap between
            // this block and the re-pin block below — confirm against the full file.
        // Ensure the Guard is re-pinned even if the function panics
        if let Some(local) = unsafe { self.local.as_ref() } {
            // Re-pin without creating a new `Guard` value: forgetting the fresh guard
            // keeps the pin count raised, and `release_handle` undoes `acquire_handle`.
            mem::forget(local.pin());
            local.release_handle();
        // NOTE(review): the closing braces and the expression returning `R` run past
        // the visible lines of this chunk.
387 /// Returns the `Collector` associated with this guard.
389 /// This method is useful when you need to ensure that all guards used with
390 /// a data structure come from the same collector.
392 /// If this method is called from an [`unprotected`] guard, then `None` is returned.
397 /// use crossbeam_epoch as epoch;
399 /// let guard1 = epoch::pin();
400 /// let guard2 = epoch::pin();
401 /// assert!(guard1.collector() == guard2.collector());
403 pub fn collector(&self) -> Option
<&Collector
> {
404 unsafe { self.local.as_ref().map(|local| local.collector()) }
// Dropping a guard unpins the thread once this is the last active guard (see the
// struct-level docs: "When a guard gets dropped, the current thread is automatically
// unpinned").
impl Drop for Guard {
    // NOTE(review): extraction gap — the `fn drop(&mut self) {` header between the
    // `impl` line and the `if let` below is not visible in this chunk.
        if let Some(local) = unsafe { self.local.as_ref() } {
        // NOTE(review): the body of this `if` (presumably the call that unpins `local`;
        // a null `local` — the `unprotected` guard — is skipped by the `if let`) and the
        // closing braces fall in an extraction gap — confirm against the full file.
417 impl fmt
::Debug
for Guard
{
418 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
419 f
.pad("Guard { .. }")
/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
///
/// This guard should be used in special occasions only. Note that it doesn't actually keep any
/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
///
/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
/// execute the function immediately.
///
/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
///
/// # Safety
///
/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
/// [`Atomic`] is not being concurrently modified by other threads.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::Relaxed;
///
/// let a = Atomic::new(7);
///
/// unsafe {
///     // Load `a` without pinning the current thread.
///     a.load(Relaxed, epoch::unprotected());
///
///     // It's possible to create more dummy guards by calling `clone()`.
///     let dummy = &epoch::unprotected().clone();
///
///     dummy.defer(move || {
///         println!("This gets executed immediately.");
///     });
///
///     // Dropping `dummy` doesn't affect the current thread - it's just a noop.
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// The most common use of this function is when constructing or destructing a data structure.
///
/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
///
/// If we were to actually pin the current thread during destruction, that would just unnecessarily
/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
/// is very helpful.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::mem::ManuallyDrop;
/// use std::sync::atomic::Ordering::Relaxed;
///
/// struct Stack<T> {
///     head: Atomic<Node<T>>,
/// }
///
/// struct Node<T> {
///     data: ManuallyDrop<T>,
///     next: Atomic<Node<T>>,
/// }
///
/// impl<T> Drop for Stack<T> {
///     fn drop(&mut self) {
///         unsafe {
///             // Unprotected load.
///             let mut node = self.head.load(Relaxed, epoch::unprotected());
///
///             while let Some(n) = node.as_ref() {
///                 // Unprotected load.
///                 let next = n.next.load(Relaxed, epoch::unprotected());
///
///                 // Take ownership of the node, then drop its data and deallocate it.
///                 let mut o = node.into_owned();
///                 ManuallyDrop::drop(&mut o.data);
///                 drop(o);
///
///                 node = next;
///             }
///         }
///     }
/// }
/// ```
///
/// [`Atomic`]: super::Atomic
/// [`defer`]: Guard::defer
pub unsafe fn unprotected() -> &'static Guard {
    // An unprotected guard is just a `Guard` with its field `local` set to null.
    // We make a newtype over `Guard` because `Guard` isn't `Sync`, so can't be directly stored in
    // a `static` without the manual `Sync` assertion below.
    struct GuardWrapper(Guard);
    // SAFETY asserted by the author: the wrapper only ever holds the null-`local`
    // guard below, which carries no thread-affine state.
    unsafe impl Sync for GuardWrapper {}
    static UNPROTECTED: GuardWrapper = GuardWrapper(Guard {
        local: core::ptr::null(),
    // NOTE(review): the remainder of this function (closing the struct literal and the
    // `static`, and the expression producing the returned `&'static Guard` — presumably a
    // reference into `UNPROTECTED`, per the signature) runs past the end of this chunk.