// There's a lot of scary concurrent code in this module, but it is copied from
// `std::sync::Once` with two changes:
//   * no poisoning
//   * init function can fail

use std::{
    cell::{Cell, UnsafeCell},
    marker::PhantomData,
    panic::{RefUnwindSafe, UnwindSafe},
    sync::atomic::{AtomicBool, AtomicPtr, Ordering},
    thread::{self, Thread},
};

pub(crate) struct OnceCell<T> {
    // This `queue` field is the core of the implementation. It encodes two
    // pieces of information:
    //
    // * The current state of the cell (`INCOMPLETE`, `RUNNING`, `COMPLETE`)
    // * Linked list of threads waiting for the current cell.
    //
    // State is encoded in two low bits. Only `INCOMPLETE` and `RUNNING` states
    // allow waiters.
    queue: AtomicPtr<Waiter>,
    _marker: PhantomData<*mut Waiter>,
    value: UnsafeCell<Option<T>>,
}

// Why do we need `T: Send`?
// Thread A creates a `OnceCell` and shares it with
// scoped thread B, which fills the cell, which is
// then destroyed by A. That is, destructor observes
// a sent value.
unsafe impl<T: Sync + Send> Sync for OnceCell<T> {}
unsafe impl<T: Send> Send for OnceCell<T> {}

impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceCell<T> {}
impl<T: UnwindSafe> UnwindSafe for OnceCell<T> {}
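
// A sketch of the scenario above (illustrative only; uses std's scoped
// threads, and the `Ok::<_, ()>` error type is an assumption of the sketch):
//
//     let cell: OnceCell<String> = OnceCell::new();
//     std::thread::scope(|s| {
//         // thread B fills the cell...
//         s.spawn(|| { let _ = cell.initialize(|| Ok::<_, ()>("hi".to_owned())); });
//     });
//     drop(cell); // ...and thread A's destructor observes the sent value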

impl<T> OnceCell<T> {
    pub(crate) const fn new() -> OnceCell<T> {
        OnceCell {
            queue: AtomicPtr::new(INCOMPLETE_PTR),
            _marker: PhantomData,
            value: UnsafeCell::new(None),
        }
    }

    pub(crate) const fn with_value(value: T) -> OnceCell<T> {
        OnceCell {
            queue: AtomicPtr::new(COMPLETE_PTR),
            _marker: PhantomData,
            value: UnsafeCell::new(Some(value)),
        }
    }

    /// Safety: synchronizes with store to value via Release/(Acquire|SeqCst).
    pub(crate) fn is_initialized(&self) -> bool {
        // An `Acquire` load is enough because that makes all the initialization
        // operations visible to us, and, this being a fast path, weaker
        // ordering helps with performance. This `Acquire` synchronizes with
        // `SeqCst` operations on the slow path.
        self.queue.load(Ordering::Acquire) == COMPLETE_PTR
    }
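
    // For intuition (a sketch, not additional code): the initializing thread
    // publishes the value with a Release-or-stronger queue update, which this
    // Acquire load pairs with:
    //
    //     // initializer (via `Guard::drop`)   // reader (`is_initialized`)
    //     unsafe { *slot = Some(value) };      if queue.load(Acquire) == COMPLETE_PTR {
    //     queue.swap(COMPLETE_PTR, AcqRel);        // the `value` write is visible here
    //                                          }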

    /// Safety: synchronizes with store to value via SeqCst read from state,
    /// writes value only once because we never get to INCOMPLETE state after a
    /// successful write.
    pub(crate) fn initialize<F, E>(&self, f: F) -> Result<(), E>
    where
        F: FnOnce() -> Result<T, E>,
    {
        let mut f = Some(f);
        let mut res: Result<(), E> = Ok(());
        let slot: *mut Option<T> = self.value.get();
        initialize_or_wait(
            &self.queue,
            Some(&mut || {
                let f = unsafe { crate::unwrap_unchecked(f.take()) };
                match f() {
                    Ok(value) => {
                        unsafe { *slot = Some(value) };
                        true
                    }
                    Err(err) => {
                        res = Err(err);
                        false
                    }
                }
            }),
        );
        res
    }
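
    // A usage sketch (hypothetical crate-internal caller): a failed init
    // leaves the cell INCOMPLETE, so a later call can try again.
    //
    //     let cell: OnceCell<i32> = OnceCell::new();
    //     assert!(cell.initialize(|| Err::<i32, ()>(())).is_err());
    //     assert!(!cell.is_initialized());
    //     assert!(cell.initialize(|| Ok::<i32, ()>(92)).is_ok());
    //     assert!(cell.is_initialized());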

    pub(crate) fn wait(&self) {
        initialize_or_wait(&self.queue, None);
    }

    /// Get the reference to the underlying value, without checking if the cell
    /// is initialized.
    ///
    /// # Safety
    ///
    /// Caller must ensure that the cell is in initialized state, and that
    /// the contents are acquired by (synchronized to) this thread.
    pub(crate) unsafe fn get_unchecked(&self) -> &T {
        debug_assert!(self.is_initialized());
        let slot = &*self.value.get();
        crate::unwrap_unchecked(slot.as_ref())
    }

    /// Gets the mutable reference to the underlying value.
    /// Returns `None` if the cell is empty.
    pub(crate) fn get_mut(&mut self) -> Option<&mut T> {
        // Safe b/c we have a unique access.
        unsafe { &mut *self.value.get() }.as_mut()
    }

    /// Consumes this `OnceCell`, returning the wrapped value.
    /// Returns `None` if the cell was empty.
    pub(crate) fn into_inner(self) -> Option<T> {
        // Because `into_inner` takes `self` by value, the compiler statically
        // verifies that it is not currently borrowed.
        // So, it is safe to move out `Option<T>`.
        self.value.into_inner()
    }
}

// Three states that a OnceCell can be in, encoded into the lower bits of `queue` in
// the OnceCell structure.
const INCOMPLETE: usize = 0x0;
const RUNNING: usize = 0x1;
const COMPLETE: usize = 0x2;
const INCOMPLETE_PTR: *mut Waiter = INCOMPLETE as *mut Waiter;
const COMPLETE_PTR: *mut Waiter = COMPLETE as *mut Waiter;

// Mask to learn about the state. All other bits are the queue of waiters if
// this is in the RUNNING state.
const STATE_MASK: usize = 0x3;
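
// To illustrate the encoding (a sketch, not additional code): `Waiter` is
// 4-byte aligned, so the two low bits of its address are zero and can carry
// the state:
//
//     let head: *mut Waiter = /* list head */;
//     let tagged = strict::map_addr(head, |q| q | RUNNING);      // pointer | state
//     assert_eq!(strict::addr(tagged) & STATE_MASK, RUNNING);    // recover state
//     let head2 = strict::map_addr(tagged, |q| q & !STATE_MASK); // recover pointer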

/// Representation of a node in the linked list of waiters in the RUNNING state.
/// A waiter is stored on the stack of the waiting threads.
#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
struct Waiter {
    thread: Cell<Option<Thread>>,
    signaled: AtomicBool,
    next: *mut Waiter,
}

/// Drains and notifies the queue of waiters on drop.
struct Guard<'a> {
    queue: &'a AtomicPtr<Waiter>,
    new_queue: *mut Waiter,
}

impl Drop for Guard<'_> {
    fn drop(&mut self) {
        let queue = self.queue.swap(self.new_queue, Ordering::AcqRel);

        let state = strict::addr(queue) & STATE_MASK;
        assert_eq!(state, RUNNING);

        unsafe {
            let mut waiter = strict::map_addr(queue, |q| q & !STATE_MASK);
            while !waiter.is_null() {
                let next = (*waiter).next;
                let thread = (*waiter).thread.take().unwrap();
                (*waiter).signaled.store(true, Ordering::Release);
                // Once `signaled` is set, the waiting thread may return and
                // free its stack-allocated node, so `waiter` must not be
                // touched again; advance using the `next` read above.
                waiter = next;
                thread.unpark();
            }
        }
    }
}

// Corresponds to `std::sync::Once::call_inner`.
//
// Originally copied from std, but since modified to remove poisoning and to
// support wait.
//
// Note: this is intentionally monomorphic
fn initialize_or_wait(queue: &AtomicPtr<Waiter>, mut init: Option<&mut dyn FnMut() -> bool>) {
    let mut curr_queue = queue.load(Ordering::Acquire);

    loop {
        let curr_state = strict::addr(curr_queue) & STATE_MASK;
        match (curr_state, &mut init) {
            (COMPLETE, _) => return,
            (INCOMPLETE, Some(init)) => {
                let exchange = queue.compare_exchange(
                    curr_queue,
                    strict::map_addr(curr_queue, |q| (q & !STATE_MASK) | RUNNING),
                    Ordering::Acquire,
                    Ordering::Acquire,
                );
                if let Err(new_queue) = exchange {
                    curr_queue = new_queue;
                    continue;
                }
                let mut guard = Guard { queue, new_queue: INCOMPLETE_PTR };
                if init() {
                    guard.new_queue = COMPLETE_PTR;
                }
                return;
            }
            (INCOMPLETE, None) | (RUNNING, _) => {
                wait(&queue, curr_queue);
                curr_queue = queue.load(Ordering::Acquire);
            }
            _ => debug_assert!(false),
        }
    }
}
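
// In summary, the state machine driven by `initialize_or_wait` is:
//
//     INCOMPLETE --CAS--> RUNNING --init() == true--> COMPLETE
//         ^                  |
//         +--init() == false-+
//
// Losers of the CAS race, and callers of `wait`, park on the waiter list
// until `Guard::drop` drains it.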

fn wait(queue: &AtomicPtr<Waiter>, mut curr_queue: *mut Waiter) {
    let curr_state = strict::addr(curr_queue) & STATE_MASK;
    loop {
        let node = Waiter {
            thread: Cell::new(Some(thread::current())),
            signaled: AtomicBool::new(false),
            next: strict::map_addr(curr_queue, |q| q & !STATE_MASK),
        };
        let me = &node as *const Waiter as *mut Waiter;

        let exchange = queue.compare_exchange(
            curr_queue,
            strict::map_addr(me, |q| q | curr_state),
            Ordering::Release,
            Ordering::Relaxed,
        );
        if let Err(new_queue) = exchange {
            // The state changed under us: stop waiting and let the caller
            // re-examine the new state.
            if strict::addr(new_queue) & STATE_MASK != curr_state {
                return;
            }
            curr_queue = new_queue;
            continue;
        }

        while !node.signaled.load(Ordering::Acquire) {
            thread::park();
        }
        break;
    }
}

// Polyfill of strict provenance from https://crates.io/crates/sptr.
//
// Use free-standing function rather than a trait to keep things simple and
// avoid any potential conflicts with a future stable std API.
mod strict {
    pub(crate) fn addr<T>(ptr: *mut T) -> usize
    where
        T: Sized,
    {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing
        // the provenance).
        unsafe { core::mem::transmute(ptr) }
    }

    pub(crate) fn with_addr<T>(ptr: *mut T, addr: usize) -> *mut T
    where
        T: Sized,
    {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the meantime, this operation is defined to be "as if" it was a
        // wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        let self_addr = self::addr(ptr) as isize;
        let dest_addr = addr as isize;
        let offset = dest_addr.wrapping_sub(self_addr);

        // This is the canonical desugaring of this operation,
        // but `pointer::cast` was only stabilized in 1.38.
        // self.cast::<u8>().wrapping_offset(offset).cast::<T>()
        (ptr as *mut u8).wrapping_offset(offset) as *mut T
    }

    pub(crate) fn map_addr<T>(ptr: *mut T, f: impl FnOnce(usize) -> usize) -> *mut T
    where
        T: Sized,
    {
        self::with_addr(ptr, f(addr(ptr)))
    }
}

// These tests are snatched from std as well.
#[cfg(test)]
mod tests {
    use std::panic;
    use std::{sync::mpsc::channel, thread};

    use super::OnceCell;

    impl<T> OnceCell<T> {
        fn init(&self, f: impl FnOnce() -> T) {
            enum Void {}
            let _ = self.initialize(|| Ok::<T, Void>(f()));
        }
    }

    #[test]
    fn smoke_once() {
        static O: OnceCell<()> = OnceCell::new();
        let mut a = 0;
        O.init(|| a += 1);
        assert_eq!(a, 1);
        O.init(|| a += 1);
        assert_eq!(a, 1);
    }

    #[test]
    fn stampede_once() {
        static O: OnceCell<()> = OnceCell::new();
        static mut RUN: bool = false;

        let (tx, rx) = channel();
        for _ in 0..10 {
            let tx = tx.clone();
            thread::spawn(move || {
                for _ in 0..4 {
                    thread::yield_now()
                }
                unsafe {
                    O.init(|| {
                        assert!(!RUN);
                        RUN = true;
                    });
                    assert!(RUN);
                }
                tx.send(()).unwrap();
            });
        }

        unsafe {
            O.init(|| {
                assert!(!RUN);
                RUN = true;
            });
            assert!(RUN);
        }

        for _ in 0..10 {
            rx.recv().unwrap();
        }
    }

    #[test]
    fn poison_bad() {
        static O: OnceCell<()> = OnceCell::new();

        // poison the once
        let t = panic::catch_unwind(|| {
            O.init(|| panic!());
        });
        assert!(t.is_err());

        // we can subvert poisoning, however
        let mut called = false;
        O.init(|| {
            called = true;
        });
        assert!(called);

        // once any success happens, we stop propagating the poison
        O.init(|| {});
    }

    #[test]
    fn wait_for_force_to_finish() {
        static O: OnceCell<()> = OnceCell::new();

        // poison the once
        let t = panic::catch_unwind(|| {
            O.init(|| panic!());
        });
        assert!(t.is_err());

        // make sure someone's waiting inside the once via a force
        let (tx1, rx1) = channel();
        let (tx2, rx2) = channel();
        let t1 = thread::spawn(move || {
            O.init(|| {
                tx1.send(()).unwrap();
                rx2.recv().unwrap();
            });
        });

        rx1.recv().unwrap();

        // put another waiter on the once
        let t2 = thread::spawn(|| {
            let mut called = false;
            O.init(|| {
                called = true;
            });
            assert!(!called);
        });

        tx2.send(()).unwrap();

        assert!(t1.join().is_ok());
        assert!(t2.join().is_ok());
    }

    #[test]
    #[cfg(target_pointer_width = "64")]
    fn test_size() {
        use std::mem::size_of;

        assert_eq!(size_of::<OnceCell<u32>>(), 4 * size_of::<u32>());
    }
}