]>
Commit | Line | Data |
---|---|---|
1a4d82fc | 1 | //! Thread local storage |
c34b1796 | 2 | |
e9174d1e | 3 | #![unstable(feature = "thread_local_internals", issue = "0")] |
1a4d82fc | 4 | |
416331ca | 5 | use crate::error::Error; |
532ac7d7 | 6 | use crate::fmt; |
1a4d82fc JJ |
7 | |
/// A thread local storage key which owns its contents.
///
/// This key uses the fastest possible implementation available to it for the
/// target platform. It is instantiated with the [`thread_local!`] macro and the
/// primary method is the [`with`] method.
///
/// The [`with`] method yields a reference to the contained value which cannot be
/// sent across threads or escape the given closure.
///
/// # Initialization and Destruction
///
/// Initialization is dynamically performed on the first call to [`with`]
/// within a thread, and values that implement [`Drop`] get destructed when a
/// thread exits. Some caveats apply, which are explained below.
///
/// A `LocalKey`'s initializer cannot recursively depend on itself, and using
/// a `LocalKey` in this way will cause the initializer to infinitely recurse
/// on the first call to `with`.
///
/// # Examples
///
/// ```
/// use std::cell::RefCell;
/// use std::thread;
///
/// thread_local!(static FOO: RefCell<u32> = RefCell::new(1));
///
/// FOO.with(|f| {
///     assert_eq!(*f.borrow(), 1);
///     *f.borrow_mut() = 2;
/// });
///
/// // each thread starts out with the initial value of 1
/// let t = thread::spawn(move|| {
///     FOO.with(|f| {
///         assert_eq!(*f.borrow(), 1);
///         *f.borrow_mut() = 3;
///     });
/// });
///
/// // wait for the thread to complete and bail out on panic
/// t.join().unwrap();
///
/// // we retain our original value of 2 despite the child thread
/// FOO.with(|f| {
///     assert_eq!(*f.borrow(), 2);
/// });
/// ```
///
/// # Platform-specific behavior
///
/// Note that a "best effort" is made to ensure that destructors for types
/// stored in thread local storage are run, but not all platforms can guarantee
/// that destructors will be run for all types in thread local storage. For
/// example, there are a number of known caveats where destructors are not run:
///
/// 1. On Unix systems when pthread-based TLS is being used, destructors will
///    not be run for TLS values on the main thread when it exits. Note that the
///    application will exit immediately after the main thread exits as well.
/// 2. On all platforms it's possible for TLS to re-initialize other TLS slots
///    during destruction. Some platforms ensure that this cannot happen
///    infinitely by preventing re-initialization of any slot that has been
///    destroyed, but not all platforms have this guard. Those platforms that do
///    not guard typically have a synthetic limit after which point no more
///    destructors are run.
///
/// [`with`]: ../../std/thread/struct.LocalKey.html#method.with
/// [`thread_local!`]: ../../std/macro.thread_local.html
/// [`Drop`]: ../../std/ops/trait.Drop.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct LocalKey<T: 'static> {
    // This outer `LocalKey<T>` type is what's going to be stored in statics,
    // but actual data inside will sometimes be tagged with #[thread_local].
    // It's not valid for a true static to reference a #[thread_local] static,
    // so we get around that by exposing an accessor through a layer of function
    // indirection (this thunk).
    //
    // Note that the thunk is itself unsafe because the returned lifetime of the
    // slot where data lives, `'static`, is not actually valid. The lifetime
    // here is actually slightly shorter than the currently running thread!
    //
    // Although this is an extra layer of indirection, it should in theory be
    // trivially devirtualizable by LLVM because the value of `inner` never
    // changes and the constant should be readonly within a crate. This mainly
    // only runs into problems when TLS statics are exported across crates.
    //
    // The thunk returns `None` when the slot has been (or is being) destroyed.
    inner: unsafe fn() -> Option<&'static T>,
}
95 | ||
8bb4bdeb | 96 | #[stable(feature = "std_debug", since = "1.16.0")] |
32a655c1 | 97 | impl<T: 'static> fmt::Debug for LocalKey<T> { |
532ac7d7 | 98 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
32a655c1 SL |
99 | f.pad("LocalKey { .. }") |
100 | } | |
101 | } | |
102 | ||
/// Declare a new thread local storage key of type [`std::thread::LocalKey`].
///
/// # Syntax
///
/// The macro wraps any number of static declarations and makes them thread local.
/// Publicity and attributes for each static are allowed. Example:
///
/// ```
/// use std::cell::RefCell;
/// thread_local! {
///     pub static FOO: RefCell<u32> = RefCell::new(1);
///
///     #[allow(unused)]
///     static BAR: RefCell<f32> = RefCell::new(1.0);
/// }
/// # fn main() {}
/// ```
///
/// See [LocalKey documentation][`std::thread::LocalKey`] for more
/// information.
///
/// [`std::thread::LocalKey`]: ../std/thread/struct.LocalKey.html
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable(thread_local_internals)]
macro_rules! thread_local {
    // empty (base case for the recursion)
    () => {};

    // process multiple declarations: expand the first, then recurse on the rest
    ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr; $($rest:tt)*) => (
        $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
        $crate::thread_local!($($rest)*);
    );

    // handle a single declaration (no trailing semicolon)
    ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr) => (
        $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
    );
}
143 | ||
#[doc(hidden)]
#[unstable(feature = "thread_local_internals",
           reason = "should not be necessary",
           issue = "0")]
#[macro_export]
#[allow_internal_unstable(thread_local_internals, cfg_target_thread_local, thread_local)]
#[allow_internal_unsafe]
macro_rules! __thread_local_inner {
    // Internal rule: expands to a `LocalKey` value expression for one static.
    (@key $(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => {
        {
            #[inline]
            fn __init() -> $t { $init }

            // Three mutually exclusive backends are selected by `cfg`; exactly
            // one `__KEY` static survives compilation on any given target.
            unsafe fn __getit() -> $crate::option::Option<&'static $t> {
                // wasm32 without atomics: no threads, so a plain static works.
                #[cfg(all(target_arch = "wasm32", not(target_feature = "atomics")))]
                static __KEY: $crate::thread::__StaticLocalKeyInner<$t> =
                    $crate::thread::__StaticLocalKeyInner::new();

                // Targets with native `#[thread_local]` support: fast path.
                #[thread_local]
                #[cfg(all(
                    target_thread_local,
                    not(all(target_arch = "wasm32", not(target_feature = "atomics"))),
                ))]
                static __KEY: $crate::thread::__FastLocalKeyInner<$t> =
                    $crate::thread::__FastLocalKeyInner::new();

                // Fallback: OS-provided TLS keys (e.g. pthread keys).
                #[cfg(all(
                    not(target_thread_local),
                    not(all(target_arch = "wasm32", not(target_feature = "atomics"))),
                ))]
                static __KEY: $crate::thread::__OsLocalKeyInner<$t> =
                    $crate::thread::__OsLocalKeyInner::new();

                __KEY.get(__init)
            }

            unsafe {
                $crate::thread::LocalKey::new(__getit)
            }
        }
    };
    // Public-facing rule: declares the `const` item wrapping the key.
    ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => {
        $(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> =
            $crate::__thread_local_inner!(@key $(#[$attr])* $vis $name, $t, $init);
    }
}
190 | ||
/// An error returned by [`LocalKey::try_with`](struct.LocalKey.html#method.try_with).
#[stable(feature = "thread_local_try_with", since = "1.26.0")]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct AccessError {
    // Private zero-sized field: prevents construction outside this module
    // while leaving room to add payload fields later without breakage.
    _private: (),
}
197 | ||
0531ce1d | 198 | #[stable(feature = "thread_local_try_with", since = "1.26.0")] |
041b39d2 | 199 | impl fmt::Debug for AccessError { |
532ac7d7 | 200 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
041b39d2 XL |
201 | f.debug_struct("AccessError").finish() |
202 | } | |
203 | } | |
204 | ||
0531ce1d | 205 | #[stable(feature = "thread_local_try_with", since = "1.26.0")] |
041b39d2 | 206 | impl fmt::Display for AccessError { |
532ac7d7 | 207 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
041b39d2 XL |
208 | fmt::Display::fmt("already destroyed", f) |
209 | } | |
210 | } | |
211 | ||
#[stable(feature = "thread_local_try_with", since = "1.26.0")]
// `AccessError` carries no source/cause, so the default `Error` methods suffice.
impl Error for AccessError {}
214 | ||
c34b1796 | 215 | impl<T: 'static> LocalKey<T> { |
62682a34 SL |
216 | #[doc(hidden)] |
217 | #[unstable(feature = "thread_local_internals", | |
e9174d1e SL |
218 | reason = "recently added to create a key", |
219 | issue = "0")] | |
dc9dc135 | 220 | pub const unsafe fn new(inner: unsafe fn() -> Option<&'static T>) -> LocalKey<T> { |
62682a34 | 221 | LocalKey { |
3b2f2976 | 222 | inner, |
62682a34 SL |
223 | } |
224 | } | |
225 | ||
9346a6ac | 226 | /// Acquires a reference to the value in this TLS key. |
1a4d82fc JJ |
227 | /// |
228 | /// This will lazily initialize the value if this thread has not referenced | |
229 | /// this key yet. | |
230 | /// | |
231 | /// # Panics | |
232 | /// | |
233 | /// This function will `panic!()` if the key currently has its | |
234 | /// destructor running, and it **may** panic if the destructor has | |
235 | /// previously been run for this thread. | |
85aaf69f | 236 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
237 | pub fn with<F, R>(&'static self, f: F) -> R |
238 | where F: FnOnce(&T) -> R { | |
e74abb32 XL |
239 | self.try_with(f).expect("cannot access a Thread Local Storage value \ |
240 | during or after destruction") | |
1a4d82fc JJ |
241 | } |
242 | ||
041b39d2 XL |
243 | /// Acquires a reference to the value in this TLS key. |
244 | /// | |
245 | /// This will lazily initialize the value if this thread has not referenced | |
246 | /// this key yet. If the key has been destroyed (which may happen if this is called | |
8faf50e0 | 247 | /// in a destructor), this function will return an [`AccessError`](struct.AccessError.html). |
041b39d2 XL |
248 | /// |
249 | /// # Panics | |
250 | /// | |
251 | /// This function will still `panic!()` if the key is uninitialized and the | |
252 | /// key's initializer panics. | |
0531ce1d | 253 | #[stable(feature = "thread_local_try_with", since = "1.26.0")] |
041b39d2 | 254 | pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError> |
0531ce1d XL |
255 | where |
256 | F: FnOnce(&T) -> R, | |
257 | { | |
041b39d2 | 258 | unsafe { |
dc9dc135 | 259 | let thread_local = (self.inner)().ok_or(AccessError { |
041b39d2 XL |
260 | _private: (), |
261 | })?; | |
dc9dc135 XL |
262 | Ok(f(thread_local)) |
263 | } | |
264 | } | |
265 | } | |
266 | ||
// Common lazily-initialized TLS slot shared by the `fast` and `os` backends.
mod lazy {
    use crate::cell::UnsafeCell;
    use crate::mem;
    use crate::hint;

    // A lazily-initialized `Option<T>` cell. All methods are unsafe: callers
    // must uphold single-threaded access and the (slightly-too-long) `'static`
    // lifetime contract described on `LocalKey::inner`.
    pub struct LazyKeyInner<T> {
        inner: UnsafeCell<Option<T>>,
    }

    impl<T> LazyKeyInner<T> {
        // Starts out empty; `initialize` fills the slot on first use.
        pub const fn new() -> LazyKeyInner<T> {
            LazyKeyInner {
                inner: UnsafeCell::new(None),
            }
        }

        // Returns a reference to the value if it has been initialized.
        pub unsafe fn get(&self) -> Option<&'static T> {
            (*self.inner.get()).as_ref()
        }

        // Runs `init` and stores the result, returning a reference to it.
        pub unsafe fn initialize<F: FnOnce() -> T>(&self, init: F) -> &'static T {
            // Execute the initialization up front, *then* move it into our slot,
            // just in case initialization fails.
            let value = init();
            let ptr = self.inner.get();

            // Note that this can in theory just be `*ptr = Some(value)`, but
            // the compiler will currently codegen that pattern with something
            // like:
            //
            //      ptr::drop_in_place(ptr)
            //      ptr::write(ptr, Some(value))
            //
            // Due to this pattern it's possible for the destructor of the value in
            // `ptr` (e.g., if this is being recursively initialized) to re-access
            // TLS, in which case there will be a `&` and `&mut` pointer to the same
            // value (an aliasing violation). To avoid setting the "I'm running a
            // destructor" flag we just use `mem::replace` which should sequence the
            // operations a little differently and make this safe to call.
            mem::replace(&mut *ptr, Some(value));

            // After storing `Some` we want to get a reference to the contents of
            // what we just stored. While we could use `unwrap` here and it should
            // always work it empirically doesn't seem to always get optimized away,
            // which means that using something like `try_with` can pull in
            // panicking code and cause a large size bloat.
            match *ptr {
                Some(ref x) => x,
                None => hint::unreachable_unchecked(),
            }
        }

        // Moves the value out (used by destructors); leaves `None` behind.
        #[allow(unused)]
        pub unsafe fn take(&mut self) -> Option<T> {
            (*self.inner.get()).take()
        }
    }
}
324 | ||
/// On some platforms like wasm32 there's no threads, so no need to generate
/// thread locals and we can instead just use plain statics!
#[doc(hidden)]
#[cfg(all(target_arch = "wasm32", not(target_feature = "atomics")))]
pub mod statik {
    use super::lazy::LazyKeyInner;
    use crate::fmt;

    pub struct Key<T> {
        inner: LazyKeyInner<T>,
    }

    // Safe only because this module is compiled exclusively for targets
    // without threads (see the `cfg` above).
    unsafe impl<T> Sync for Key<T> { }

    impl<T> fmt::Debug for Key<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.pad("Key { .. }")
        }
    }

    impl<T> Key<T> {
        pub const fn new() -> Key<T> {
            Key {
                inner: LazyKeyInner::new(),
            }
        }

        // Always succeeds: with no threads there are no destructor races, so
        // the slot can never be observed in a "destroyed" state.
        pub unsafe fn get(&self, init: fn() -> T) -> Option<&'static T> {
            let value = match self.inner.get() {
                Some(ref value) => value,
                None => self.inner.initialize(init),
            };
            Some(value)
        }
    }
}
361 | ||
// Fast backend for targets supporting the `#[thread_local]` attribute.
#[doc(hidden)]
#[cfg(target_thread_local)]
pub mod fast {
    use super::lazy::LazyKeyInner;
    use crate::cell::Cell;
    use crate::fmt;
    use crate::mem;
    use crate::sys::fast_thread_local::register_dtor;

    #[derive(Copy, Clone)]
    enum DtorState {
        Unregistered,
        Registered,
        RunningOrHasRun,
    }

    // This data structure has been carefully constructed so that the fast path
    // only contains one branch on x86. That optimization is necessary to avoid
    // duplicated tls lookups on OSX.
    //
    // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
    pub struct Key<T> {
        // If `LazyKeyInner::get` returns `None`, that indicates either:
        //   * The value has never been initialized
        //   * The value is being recursively initialized
        //   * The value has already been destroyed or is being destroyed
        // To determine which kind of `None`, check `dtor_state`.
        //
        // This is very optimizer friendly for the fast path - initialized but
        // not yet dropped.
        inner: LazyKeyInner<T>,

        // Metadata to keep track of the state of the destructor. Remember that
        // this variable is thread-local, not global.
        dtor_state: Cell<DtorState>,
    }

    impl<T> fmt::Debug for Key<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.pad("Key { .. }")
        }
    }

    impl<T> Key<T> {
        pub const fn new() -> Key<T> {
            Key {
                inner: LazyKeyInner::new(),
                dtor_state: Cell::new(DtorState::Unregistered),
            }
        }

        // Fast path: a single check of the lazily-initialized slot; everything
        // else is pushed into the `#[cold]` slow path below.
        pub unsafe fn get<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
            match self.inner.get() {
                Some(val) => Some(val),
                None => self.try_initialize(init),
            }
        }

        // `try_initialize` is only called once per fast thread local variable,
        // except in corner cases where thread_local dtors reference other
        // thread_local's, or it is being recursively initialized.
        //
        // Macos: Inlining this function can cause two `tlv_get_addr` calls to
        // be performed for every call to `Key::get`. The #[cold] hint makes
        // that less likely.
        // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
        #[cold]
        unsafe fn try_initialize<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
            // Types without a destructor skip dtor registration entirely.
            if !mem::needs_drop::<T>() || self.try_register_dtor() {
                Some(self.inner.initialize(init))
            } else {
                None
            }
        }

        // `try_register_dtor` is only called once per fast thread local
        // variable, except in corner cases where thread_local dtors reference
        // other thread_local's, or it is being recursively initialized.
        unsafe fn try_register_dtor(&self) -> bool {
            match self.dtor_state.get() {
                DtorState::Unregistered => {
                    // dtor registration happens before initialization.
                    register_dtor(self as *const _ as *mut u8,
                                  destroy_value::<T>);
                    self.dtor_state.set(DtorState::Registered);
                    true
                }
                DtorState::Registered => {
                    // recursively initialized
                    true
                }
                DtorState::RunningOrHasRun => {
                    // Destroyed (or being destroyed): refuse to re-initialize.
                    false
                }
            }
        }
    }

    unsafe extern fn destroy_value<T>(ptr: *mut u8) {
        let ptr = ptr as *mut Key<T>;

        // Right before we run the user destructor be sure to set the
        // `Option<T>` to `None`, and `dtor_state` to `RunningOrHasRun`. This
        // causes future calls to `get` to run `try_initialize_drop` again,
        // which will now fail, and return `None`.
        let value = (*ptr).inner.take();
        (*ptr).dtor_state.set(DtorState::RunningOrHasRun);
        drop(value);
    }
}
472 | ||
// Fallback backend built on OS-provided TLS keys (e.g. pthread keys).
#[doc(hidden)]
pub mod os {
    use super::lazy::LazyKeyInner;
    use crate::cell::Cell;
    use crate::fmt;
    use crate::marker;
    use crate::ptr;
    use crate::sys_common::thread_local::StaticKey as OsStaticKey;

    pub struct Key<T> {
        // OS-TLS key that we'll use to key off.
        os: OsStaticKey,
        // PhantomData<Cell<T>> marks ownership of a non-Sync-ish T slot per
        // thread without storing one in the (shared) static itself.
        marker: marker::PhantomData<Cell<T>>,
    }

    impl<T> fmt::Debug for Key<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.pad("Key { .. }")
        }
    }

    // The static only holds the OS key; each thread's `Value<T>` lives in a
    // per-thread heap allocation, so sharing the key itself is sound.
    unsafe impl<T> Sync for Key<T> { }

    // Per-thread heap allocation: the lazily-initialized value plus a back
    // pointer to its key so the destructor can reset the OS slot.
    struct Value<T: 'static> {
        inner: LazyKeyInner<T>,
        key: &'static Key<T>,
    }

    impl<T: 'static> Key<T> {
        pub const fn new() -> Key<T> {
            Key {
                os: OsStaticKey::new(Some(destroy_value::<T>)),
                marker: marker::PhantomData
            }
        }

        pub unsafe fn get(&'static self, init: fn() -> T) -> Option<&'static T> {
            let ptr = self.os.get() as *mut Value<T>;
            // `> 1` excludes both null (never initialized) and the sentinel
            // value 1 (destructor currently running) set by `destroy_value`.
            if ptr as usize > 1 {
                if let Some(ref value) = (*ptr).inner.get() {
                    return Some(value);
                }
            }
            self.try_initialize(init)
        }

        // `try_initialize` is only called once per os thread local variable,
        // except in corner cases where thread_local dtors reference other
        // thread_local's, or it is being recursively initialized.
        unsafe fn try_initialize(&'static self, init: fn() -> T) -> Option<&'static T> {
            let ptr = self.os.get() as *mut Value<T>;
            if ptr as usize == 1 {
                // destructor is running
                return None
            }

            let ptr = if ptr.is_null() {
                // If the lookup returned null, we haven't initialized our own
                // local copy, so do that now.
                let ptr: Box<Value<T>> = box Value {
                    inner: LazyKeyInner::new(),
                    key: self,
                };
                let ptr = Box::into_raw(ptr);
                self.os.set(ptr as *mut u8);
                ptr
            } else {
                // recursive initialization
                ptr
            };

            Some((*ptr).inner.initialize(init))
        }
    }

    unsafe extern fn destroy_value<T: 'static>(ptr: *mut u8) {
        // The OS TLS ensures that this key contains a NULL value when this
        // destructor starts to run. We set it back to a sentinel value of 1 to
        // ensure that any future calls to `get` for this thread will return
        // `None`.
        //
        // Note that to prevent an infinite loop we reset it back to null right
        // before we return from the destructor ourselves.
        let ptr = Box::from_raw(ptr as *mut Value<T>);
        let key = ptr.key;
        key.os.set(1 as *mut u8);
        drop(ptr);
        key.os.set(ptr::null_mut());
    }
}
563 | ||
// These tests exercise drop ordering and TLS-destructor interleaving, so the
// exact sequencing of statements is load-bearing.
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
    use crate::sync::mpsc::{channel, Sender};
    use crate::cell::{Cell, UnsafeCell};
    use crate::thread;

    // Signals on a channel when dropped, letting tests observe TLS dtors.
    struct Foo(Sender<()>);

    impl Drop for Foo {
        fn drop(&mut self) {
            let Foo(ref s) = *self;
            s.send(()).unwrap();
        }
    }

    #[test]
    fn smoke_no_dtor() {
        thread_local!(static FOO: Cell<i32> = Cell::new(1));

        FOO.with(|f| {
            assert_eq!(f.get(), 1);
            f.set(2);
        });
        let (tx, rx) = channel();
        let _t = thread::spawn(move|| {
            // A fresh thread sees the initial value, not this thread's `2`.
            FOO.with(|f| {
                assert_eq!(f.get(), 1);
            });
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();

        FOO.with(|f| {
            assert_eq!(f.get(), 2);
        });
    }

    #[test]
    fn states() {
        struct Foo;
        impl Drop for Foo {
            fn drop(&mut self) {
                // Inside the dtor the key must report itself inaccessible.
                assert!(FOO.try_with(|_| ()).is_err());
            }
        }
        thread_local!(static FOO: Foo = Foo);

        thread::spawn(|| {
            assert!(FOO.try_with(|_| ()).is_ok());
        }).join().ok().expect("thread panicked");
    }

    #[test]
    fn smoke_dtor() {
        thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));

        let (tx, rx) = channel();
        let _t = thread::spawn(move|| unsafe {
            let mut tx = Some(tx);
            FOO.with(|f| {
                *f.get() = Some(Foo(tx.take().unwrap()));
            });
        });
        // Blocks until the spawned thread's TLS dtor sends on the channel.
        rx.recv().unwrap();
    }

    #[test]
    fn circular() {
        struct S1;
        struct S2;
        thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
        thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell::new(None));
        static mut HITS: u32 = 0;

        // S1's dtor re-initializes K2, whose dtor re-initializes K1; the HITS
        // counter pins the exact order the three drops are expected to run in.
        impl Drop for S1 {
            fn drop(&mut self) {
                unsafe {
                    HITS += 1;
                    if K2.try_with(|_| ()).is_err() {
                        assert_eq!(HITS, 3);
                    } else {
                        if HITS == 1 {
                            K2.with(|s| *s.get() = Some(S2));
                        } else {
                            assert_eq!(HITS, 3);
                        }
                    }
                }
            }
        }
        impl Drop for S2 {
            fn drop(&mut self) {
                unsafe {
                    HITS += 1;
                    assert!(K1.try_with(|_| ()).is_ok());
                    assert_eq!(HITS, 2);
                    K1.with(|s| *s.get() = Some(S1));
                }
            }
        }

        thread::spawn(move|| {
            drop(S1);
        }).join().ok().expect("thread panicked");
    }

    #[test]
    fn self_referential() {
        struct S1;
        thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));

        impl Drop for S1 {
            fn drop(&mut self) {
                // Re-entrant access from a value's own key must fail.
                assert!(K1.try_with(|_| ()).is_err());
            }
        }

        thread::spawn(move|| unsafe {
            K1.with(|s| *s.get() = Some(S1));
        }).join().ok().expect("thread panicked");
    }

    // Note that this test will deadlock if TLS destructors aren't run (this
    // requires the destructor to be run to pass the test).
    #[test]
    fn dtors_in_dtors_in_dtors() {
        struct S1(Sender<()>);
        thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
        thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));

        impl Drop for S1 {
            fn drop(&mut self) {
                let S1(ref tx) = *self;
                unsafe {
                    // Best-effort: K2 may already be gone on some platforms.
                    let _ = K2.try_with(|s| *s.get() = Some(Foo(tx.clone())));
                }
            }
        }

        let (tx, rx) = channel();
        let _t = thread::spawn(move|| unsafe {
            let mut tx = Some(tx);
            K1.with(|s| *s.get() = Some(S1(tx.take().unwrap())));
        });
        rx.recv().unwrap();
    }
}
711 | ||
// Tests for keys whose initializer is a runtime expression (not a constant).
#[cfg(test)]
mod dynamic_tests {
    use crate::cell::RefCell;
    use crate::collections::HashMap;

    /// An arbitrary function call is a valid TLS initializer.
    #[test]
    fn smoke() {
        fn square(i: i32) -> i32 { i * i }
        thread_local!(static FOO: i32 = square(3));

        FOO.with(|value| assert_eq!(*value, 9));
    }

    /// The initializer may build a non-trivial collection.
    #[test]
    fn hashmap() {
        fn map() -> RefCell<HashMap<i32, i32>> {
            let mut table = HashMap::new();
            table.insert(1, 2);
            RefCell::new(table)
        }
        thread_local!(static FOO: RefCell<HashMap<i32, i32>> = map());

        FOO.with(|table| assert_eq!(table.borrow()[&1], 2));
    }

    /// Interior mutability via `RefCell` works through `with`.
    #[test]
    fn refcell_vec() {
        thread_local!(static FOO: RefCell<Vec<u32>> = RefCell::new(vec![1, 2, 3]));

        FOO.with(|cell| {
            assert_eq!(cell.borrow().len(), 3);
            cell.borrow_mut().push(4);
            assert_eq!(cell.borrow()[3], 4);
        });
    }
}