1 // Copyright 2017 Amanieu d'Antras
3 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5 // http://opensource.org/licenses/MIT>, at your option. This file may not be
6 // copied, modified, or distributed except according to those terms.
8 //! Per-object thread-local storage
10 //! This library provides the `ThreadLocal` type which allows a separate copy of
11 //! an object to be used for each thread. This allows for per-object
12 //! thread-local storage, unlike the standard library's `thread_local!` macro
13 //! which only allows static thread-local storage.
15 //! Per-thread objects are not destroyed when a thread exits. Instead, objects
16 //! are only destroyed when the `ThreadLocal` containing them is destroyed.
10 //! You can also iterate over the thread-local values of all threads in a
19 //! `ThreadLocal` object using the `iter_mut` and `into_iter` methods. This can
20 //! only be done if you have mutable access to the `ThreadLocal` object, which
21 //! guarantees that you are the only thread currently accessing it.
23 //! A `CachedThreadLocal` type is also provided which wraps a `ThreadLocal` but
24 //! also uses a special fast path for the first thread that writes into it. The
25 //! fast path has very low overhead (<1ns per access) while keeping the same
26 //! performance as `ThreadLocal` for other threads.
28 //! Note that since thread IDs are recycled when a thread exits, it is possible
29 //! for one thread to retrieve the object of another thread. Since this can only
30 //! occur after a thread has exited this does not lead to any race conditions.
34 //! Basic usage of `ThreadLocal`:
37 //! use thread_local::ThreadLocal;
38 //! let tls: ThreadLocal<u32> = ThreadLocal::new();
39 //! assert_eq!(tls.get(), None);
40 //! assert_eq!(tls.get_or(|| Box::new(5)), &5);
41 //! assert_eq!(tls.get(), Some(&5));
44 //! Combining thread-local values into a single result:
47 //! use thread_local::ThreadLocal;
48 //! use std::sync::Arc;
49 //! use std::cell::Cell;
52 //! let tls = Arc::new(ThreadLocal::new());
54 //! // Create a bunch of threads to do stuff
56 //! let tls2 = tls.clone();
57 //! thread::spawn(move || {
58 //! // Increment a counter to count some event...
59 //! let cell = tls2.get_or(|| Box::new(Cell::new(0)));
60 //! cell.set(cell.get() + 1);
61 //! }).join().unwrap();
64 //! // Once all threads are done, collect the counter values and return the
65 //! // sum of all thread-local counter values.
66 //! let tls = Arc::try_unwrap(tls).unwrap();
67 //! let total = tls.into_iter().fold(0, |x, y| x + y.get());
68 //! assert_eq!(total, 5);
71 #![warn(missing_docs)]
extern crate lazy_static;
extern crate thread_id;
extern crate unreachable;

use std::cell::UnsafeCell;
use std::fmt;
use std::iter::Chain;
use std::marker::PhantomData;
use std::option::IntoIter as OptionIter;
use std::panic::UnwindSafe;
use std::sync::Mutex;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use unreachable::{UncheckedOptionExt, UncheckedResultExt};
89 /// Thread-local variable wrapper
91 /// See the [module-level documentation](index.html) for more.
92 pub struct ThreadLocal
<T
: ?Sized
+ Send
> {
93 // Pointer to the current top-level hash table
94 table
: AtomicPtr
<Table
<T
>>,
96 // Lock used to guard against concurrent modifications. This is only taken
97 // while writing to the table, not when reading from it. This also guards
98 // the counter for the total number of values in the hash table.
101 // PhantomData to indicate that we logically own T
102 marker
: PhantomData
<T
>,
105 struct Table
<T
: ?Sized
+ Send
> {
106 // Hash entries for the table
107 entries
: Box
<[TableEntry
<T
>]>,
109 // Number of bits used for the hash function
112 // Previous table, half the size of the current one
113 prev
: Option
<Box
<Table
<T
>>>,
// A single slot of the hash table.
struct TableEntry<T: ?Sized + Send> {
    // Current owner of this entry (a thread id), or 0 if this is an empty entry
    owner: AtomicUsize,

    // The object associated with this entry. This is only ever accessed by the
    // owner of the entry.
    data: UnsafeCell<Option<Box<T>>>,
}
// ThreadLocal is always Sync, even if T isn't: cross-thread table access goes
// through atomics, and each entry's `data` is only ever touched by the thread
// that owns it, so `T: Send` is the only requirement.
unsafe impl<T: ?Sized + Send> Sync for ThreadLocal<T> {}
128 impl<T
: ?Sized
+ Send
> Default
for ThreadLocal
<T
> {
129 fn default() -> ThreadLocal
<T
> {
134 impl<T
: ?Sized
+ Send
> Drop
for ThreadLocal
<T
> {
137 Box
::from_raw(self.table
.load(Ordering
::Relaxed
));
142 // Implementation of Clone for TableEntry, needed to make vec![] work
143 impl<T
: ?Sized
+ Send
> Clone
for TableEntry
<T
> {
144 fn clone(&self) -> TableEntry
<T
> {
146 owner
: AtomicUsize
::new(0),
147 data
: UnsafeCell
::new(None
),
// Hash function for the thread id: Fibonacci hashing, keeping the top `bits`
// bits of the product as the table index.
#[cfg(target_pointer_width = "32")]
fn hash(id: usize, bits: usize) -> usize {
    id.wrapping_mul(0x9E3779B9) >> (32 - bits)
}
// Hash function for the thread id (64-bit variant): Fibonacci hashing with the
// 64-bit golden-ratio constant, keeping the top `bits` bits as the index.
#[cfg(target_pointer_width = "64")]
fn hash(id: usize, bits: usize) -> usize {
    id.wrapping_mul(0x9E37_79B9_7F4A_7C15) >> (64 - bits)
}
164 impl<T
: ?Sized
+ Send
> ThreadLocal
<T
> {
165 /// Creates a new empty `ThreadLocal`.
166 pub fn new() -> ThreadLocal
<T
> {
167 let entry
= TableEntry
{
168 owner
: AtomicUsize
::new(0),
169 data
: UnsafeCell
::new(None
),
172 entries
: vec
![entry
; 2].into_boxed_slice(),
177 table
: AtomicPtr
::new(Box
::into_raw(Box
::new(table
))),
183 /// Returns the element for the current thread, if it exists.
184 pub fn get(&self) -> Option
<&T
> {
185 let id
= thread_id
::get();
189 /// Returns the element for the current thread, or creates it if it doesn't
191 pub fn get_or
<F
>(&self, create
: F
) -> &T
193 F
: FnOnce() -> Box
<T
>,
196 self.get_or_try(|| Ok
::<Box
<T
>, ()>(create()))
197 .unchecked_unwrap_ok()
201 /// Returns the element for the current thread, or creates it if it doesn't
202 /// exist. If `create` fails, that error is returned and no element is
204 pub fn get_or_try
<F
, E
>(&self, create
: F
) -> Result
<&T
, E
>
206 F
: FnOnce() -> Result
<Box
<T
>, E
>,
208 let id
= thread_id
::get();
209 match self.get_fast(id
) {
211 None
=> Ok(self.insert(id
, try
!(create()), true)),
215 // Simple hash table lookup function
216 fn lookup(id
: usize, table
: &Table
<T
>) -> Option
<&UnsafeCell
<Option
<Box
<T
>>>> {
217 // Because we use a Mutex to prevent concurrent modifications (but not
218 // reads) of the hash table, we can avoid any memory barriers here. No
219 // elements between our hash bucket and our value can have been modified
220 // since we inserted our thread-local value into the table.
221 for entry
in table
.entries
.iter().cycle().skip(hash(id
, table
.hash_bits
)) {
222 let owner
= entry
.owner
.load(Ordering
::Relaxed
);
224 return Some(&entry
.data
);
233 // Fast path: try to find our thread in the top-level hash table
234 fn get_fast(&self, id
: usize) -> Option
<&T
> {
235 let table
= unsafe { &*self.table.load(Ordering::Relaxed) }
;
236 match Self::lookup(id
, table
) {
237 Some(x
) => unsafe { Some((*x.get()).as_ref().unchecked_unwrap()) }
,
238 None
=> self.get_slow(id
, table
),
242 // Slow path: try to find our thread in the other hash tables, and then
243 // move it to the top-level hash table.
245 fn get_slow(&self, id
: usize, table_top
: &Table
<T
>) -> Option
<&T
> {
246 let mut current
= &table_top
.prev
;
247 while let Some(ref table
) = *current
{
248 if let Some(x
) = Self::lookup(id
, table
) {
249 let data
= unsafe { (*x.get()).take().unchecked_unwrap() }
;
250 return Some(self.insert(id
, data
, false));
252 current
= &table
.prev
;
258 fn insert(&self, id
: usize, data
: Box
<T
>, new
: bool
) -> &T
{
259 // Lock the Mutex to ensure only a single thread is modify the hash
261 let mut count
= self.lock
.lock().unwrap();
265 let table_raw
= self.table
.load(Ordering
::Relaxed
);
266 let table
= unsafe { &*table_raw }
;
268 // If the current top-level hash table is more than 75% full, add a new
269 // level with 2x the capacity. Elements will be moved up to the new top
270 // level table as they are accessed.
271 let table
= if *count
> table
.entries
.len() * 3 / 4 {
272 let entry
= TableEntry
{
273 owner
: AtomicUsize
::new(0),
274 data
: UnsafeCell
::new(None
),
276 let new_table
= Box
::into_raw(Box
::new(Table
{
277 entries
: vec
![entry
; table
.entries
.len() * 2].into_boxed_slice(),
278 hash_bits
: table
.hash_bits
+ 1,
279 prev
: unsafe { Some(Box::from_raw(table_raw)) }
,
281 self.table
.store(new_table
, Ordering
::Release
);
282 unsafe { &*new_table }
287 // Insert the new element into the top-level hash table
288 for entry
in table
.entries
.iter().cycle().skip(hash(id
, table
.hash_bits
)) {
289 let owner
= entry
.owner
.load(Ordering
::Relaxed
);
292 entry
.owner
.store(id
, Ordering
::Relaxed
);
293 *entry
.data
.get() = Some(data
);
294 return (*entry
.data
.get()).as_ref().unchecked_unwrap();
298 // This can happen if create() inserted a value into this
299 // ThreadLocal between our calls to get_fast() and insert(). We
300 // just return the existing value and drop the newly-allocated
303 return (*entry
.data
.get()).as_ref().unchecked_unwrap();
310 /// Returns a mutable iterator over the local values of all threads.
312 /// Since this call borrows the `ThreadLocal` mutably, this operation can
313 /// be done safely---the mutable borrow statically guarantees no other
314 /// threads are currently accessing their associated values.
315 pub fn iter_mut(&mut self) -> IterMut
<T
> {
317 remaining
: *self.lock
.lock().unwrap(),
319 table
: self.table
.load(Ordering
::Relaxed
),
327 /// Removes all thread-specific values from the `ThreadLocal`, effectively
328 /// reseting it to its original state.
330 /// Since this call borrows the `ThreadLocal` mutably, this operation can
331 /// be done safely---the mutable borrow statically guarantees no other
332 /// threads are currently accessing their associated values.
333 pub fn clear(&mut self) {
334 *self = ThreadLocal
::new();
338 impl<T
: ?Sized
+ Send
> IntoIterator
for ThreadLocal
<T
> {
340 type IntoIter
= IntoIter
<T
>;
342 fn into_iter(self) -> IntoIter
<T
> {
344 remaining
: *self.lock
.lock().unwrap(),
346 table
: self.table
.load(Ordering
::Relaxed
),
355 impl<'a
, T
: ?Sized
+ Send
+ 'a
> IntoIterator
for &'a
mut ThreadLocal
<T
> {
356 type Item
= &'a
mut Box
<T
>;
357 type IntoIter
= IterMut
<'a
, T
>;
359 fn into_iter(self) -> IterMut
<'a
, T
> {
364 impl<T
: Send
+ Default
> ThreadLocal
<T
> {
365 /// Returns the element for the current thread, or creates a default one if
366 /// it doesn't exist.
367 pub fn get_default(&self) -> &T
{
368 self.get_or(|| Box
::new(T
::default()))
372 impl<T
: ?Sized
+ Send
+ fmt
::Debug
> fmt
::Debug
for ThreadLocal
<T
> {
373 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
374 write
!(f
, "ThreadLocal {{ local_data: {:?} }}", self.get())
// Opt back into unwind safety: the raw pointers and UnsafeCell inside
// ThreadLocal would otherwise make it !UnwindSafe even though the public API
// never exposes a partially-updated state across a panic.
impl<T: ?Sized + Send + UnwindSafe> UnwindSafe for ThreadLocal<T> {}
380 struct RawIter
<T
: ?Sized
+ Send
> {
383 table
: *const Table
<T
>,
386 impl<T
: ?Sized
+ Send
> RawIter
<T
> {
387 fn next(&mut self) -> Option
<*mut Option
<Box
<T
>>> {
388 if self.remaining
== 0 {
393 let entries
= unsafe { &(*self.table).entries[..] }
;
394 while self.index
< entries
.len() {
395 let val
= entries
[self.index
].data
.get();
397 if unsafe { (*val).is_some() }
{
403 self.table
= unsafe { &**(*self.table).prev.as_ref().unchecked_unwrap() }
;
408 /// Mutable iterator over the contents of a `ThreadLocal`.
409 pub struct IterMut
<'a
, T
: ?Sized
+ Send
+ 'a
> {
411 marker
: PhantomData
<&'a
mut ThreadLocal
<T
>>,
414 impl<'a
, T
: ?Sized
+ Send
+ 'a
> Iterator
for IterMut
<'a
, T
> {
415 type Item
= &'a
mut Box
<T
>;
417 fn next(&mut self) -> Option
<&'a
mut Box
<T
>> {
418 self.raw
.next().map(|x
| unsafe {
419 (*x
).as_mut().unchecked_unwrap()
423 fn size_hint(&self) -> (usize, Option
<usize>) {
424 (self.raw
.remaining
, Some(self.raw
.remaining
))
// size_hint is exact (lower == upper == raw.remaining), so ExactSizeIterator
// is a free marker impl.
impl<'a, T: ?Sized + Send + 'a> ExactSizeIterator for IterMut<'a, T> {}
430 /// An iterator that moves out of a `ThreadLocal`.
431 pub struct IntoIter
<T
: ?Sized
+ Send
> {
433 _thread_local
: ThreadLocal
<T
>,
436 impl<T
: ?Sized
+ Send
> Iterator
for IntoIter
<T
> {
439 fn next(&mut self) -> Option
<Box
<T
>> {
441 |x
| unsafe { (*x).take().unchecked_unwrap() }
,
445 fn size_hint(&self) -> (usize, Option
<usize>) {
446 (self.raw
.remaining
, Some(self.raw
.remaining
))
// size_hint is exact (lower == upper == raw.remaining), so ExactSizeIterator
// is a free marker impl.
impl<T: ?Sized + Send> ExactSizeIterator for IntoIter<T> {}
452 /// Wrapper around `ThreadLocal` which adds a fast path for a single thread.
454 /// This has the same API as `ThreadLocal`, but will register the first thread
455 /// that sets a value as its owner. All accesses by the owner will go through
456 /// a special fast path which is much faster than the normal `ThreadLocal` path.
457 pub struct CachedThreadLocal
<T
: ?Sized
+ Send
> {
459 local
: UnsafeCell
<Option
<Box
<T
>>>,
460 global
: ThreadLocal
<T
>,
// CachedThreadLocal is always Sync, even if T isn't: the fast-path slot is
// claimed atomically via `owner` and only ever touched by the claiming thread,
// and the fallback ThreadLocal is itself Sync.
unsafe impl<T: ?Sized + Send> Sync for CachedThreadLocal<T> {}
466 impl<T
: ?Sized
+ Send
> Default
for CachedThreadLocal
<T
> {
467 fn default() -> CachedThreadLocal
<T
> {
468 CachedThreadLocal
::new()
472 impl<T
: ?Sized
+ Send
> CachedThreadLocal
<T
> {
473 /// Creates a new empty `CachedThreadLocal`.
474 pub fn new() -> CachedThreadLocal
<T
> {
476 owner
: AtomicUsize
::new(0),
477 local
: UnsafeCell
::new(None
),
478 global
: ThreadLocal
::new(),
482 /// Returns the element for the current thread, if it exists.
483 pub fn get(&self) -> Option
<&T
> {
484 let id
= thread_id
::get();
485 let owner
= self.owner
.load(Ordering
::Relaxed
);
487 return unsafe { Some((*self.local.get()).as_ref().unchecked_unwrap()) }
;
492 self.global
.get_fast(id
)
495 /// Returns the element for the current thread, or creates it if it doesn't
498 pub fn get_or
<F
>(&self, create
: F
) -> &T
500 F
: FnOnce() -> Box
<T
>,
503 self.get_or_try(|| Ok
::<Box
<T
>, ()>(create()))
504 .unchecked_unwrap_ok()
508 /// Returns the element for the current thread, or creates it if it doesn't
509 /// exist. If `create` fails, that error is returned and no element is
511 pub fn get_or_try
<F
, E
>(&self, create
: F
) -> Result
<&T
, E
>
513 F
: FnOnce() -> Result
<Box
<T
>, E
>,
515 let id
= thread_id
::get();
516 let owner
= self.owner
.load(Ordering
::Relaxed
);
518 return Ok(unsafe { (*self.local.get()).as_ref().unchecked_unwrap() }
);
520 self.get_or_try_slow(id
, owner
, create
)
525 fn get_or_try_slow
<F
, E
>(&self, id
: usize, owner
: usize, create
: F
) -> Result
<&T
, E
>
527 F
: FnOnce() -> Result
<Box
<T
>, E
>,
529 if owner
== 0 && self.owner
.compare_and_swap(0, id
, Ordering
::Relaxed
) == 0 {
531 (*self.local
.get()) = Some(try
!(create()));
532 return Ok((*self.local
.get()).as_ref().unchecked_unwrap());
535 match self.global
.get_fast(id
) {
537 None
=> Ok(self.global
.insert(id
, try
!(create()), true)),
541 /// Returns a mutable iterator over the local values of all threads.
543 /// Since this call borrows the `ThreadLocal` mutably, this operation can
544 /// be done safely---the mutable borrow statically guarantees no other
545 /// threads are currently accessing their associated values.
546 pub fn iter_mut(&mut self) -> CachedIterMut
<T
> {
548 (*self.local
.get()).as_mut().into_iter().chain(
555 /// Removes all thread-specific values from the `ThreadLocal`, effectively
556 /// reseting it to its original state.
558 /// Since this call borrows the `ThreadLocal` mutably, this operation can
559 /// be done safely---the mutable borrow statically guarantees no other
560 /// threads are currently accessing their associated values.
561 pub fn clear(&mut self) {
562 *self = CachedThreadLocal
::new();
566 impl<T
: ?Sized
+ Send
> IntoIterator
for CachedThreadLocal
<T
> {
568 type IntoIter
= CachedIntoIter
<T
>;
570 fn into_iter(self) -> CachedIntoIter
<T
> {
572 (*self.local
.get()).take().into_iter().chain(
580 impl<'a
, T
: ?Sized
+ Send
+ 'a
> IntoIterator
for &'a
mut CachedThreadLocal
<T
> {
581 type Item
= &'a
mut Box
<T
>;
582 type IntoIter
= CachedIterMut
<'a
, T
>;
584 fn into_iter(self) -> CachedIterMut
<'a
, T
> {
589 impl<T
: Send
+ Default
> CachedThreadLocal
<T
> {
590 /// Returns the element for the current thread, or creates a default one if
591 /// it doesn't exist.
592 pub fn get_default(&self) -> &T
{
593 self.get_or(|| Box
::new(T
::default()))
597 impl<T
: ?Sized
+ Send
+ fmt
::Debug
> fmt
::Debug
for CachedThreadLocal
<T
> {
598 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
599 write
!(f
, "ThreadLocal {{ local_data: {:?} }}", self.get())
/// Mutable iterator over the contents of a `CachedThreadLocal`.
///
/// Chains the owner thread's fast-path slot (an `Option` iterator) with the
/// regular `ThreadLocal` iterator.
pub type CachedIterMut<'a, T> = Chain<OptionIter<&'a mut Box<T>>, IterMut<'a, T>>;
/// An iterator that moves out of a `CachedThreadLocal`.
///
/// Chains the owner thread's fast-path slot (an `Option` iterator) with the
/// regular `ThreadLocal` consuming iterator.
pub type CachedIntoIter<T> = Chain<OptionIter<Box<T>>, IntoIter<T>>;
// Opt back into unwind safety, mirroring the impl for ThreadLocal: the
// UnsafeCell/raw-pointer internals would otherwise make this !UnwindSafe.
impl<T: ?Sized + Send + UnwindSafe> UnwindSafe for CachedThreadLocal<T> {}
613 use std
::cell
::RefCell
;
615 use std
::sync
::atomic
::AtomicUsize
;
616 use std
::sync
::atomic
::Ordering
::Relaxed
;
618 use super::{ThreadLocal, CachedThreadLocal}
;
620 fn make_create() -> Arc
<Fn() -> Box
<usize> + Send
+ Sync
> {
621 let count
= AtomicUsize
::new(0);
622 Arc
::new(move || Box
::new(count
.fetch_add(1, Relaxed
)))
#[test]
fn same_thread() {
    let create = make_create();
    let mut tls = ThreadLocal::new();
    assert_eq!(None, tls.get());
    assert_eq!("ThreadLocal { local_data: None }", format!("{:?}", &tls));
    // Repeated get_or on the same thread must reuse the first value.
    assert_eq!(0, *tls.get_or(|| create()));
    assert_eq!(Some(&0), tls.get());
    assert_eq!(0, *tls.get_or(|| create()));
    assert_eq!(Some(&0), tls.get());
    assert_eq!(0, *tls.get_or(|| create()));
    assert_eq!(Some(&0), tls.get());
    assert_eq!("ThreadLocal { local_data: Some(0) }", format!("{:?}", &tls));
    // clear() drops the value and resets to the empty state.
    tls.clear();
    assert_eq!(None, tls.get());
}
#[test]
fn same_thread_cached() {
    let create = make_create();
    let mut tls = CachedThreadLocal::new();
    assert_eq!(None, tls.get());
    assert_eq!("ThreadLocal { local_data: None }", format!("{:?}", &tls));
    // Repeated get_or on the same thread must reuse the first value.
    assert_eq!(0, *tls.get_or(|| create()));
    assert_eq!(Some(&0), tls.get());
    assert_eq!(0, *tls.get_or(|| create()));
    assert_eq!(Some(&0), tls.get());
    assert_eq!(0, *tls.get_or(|| create()));
    assert_eq!(Some(&0), tls.get());
    assert_eq!("ThreadLocal { local_data: Some(0) }", format!("{:?}", &tls));
    // clear() drops the value and resets to the empty state.
    tls.clear();
    assert_eq!(None, tls.get());
}
#[test]
fn different_thread() {
    let create = make_create();
    let tls = Arc::new(ThreadLocal::new());
    assert_eq!(None, tls.get());
    assert_eq!(0, *tls.get_or(|| create()));
    assert_eq!(Some(&0), tls.get());

    // A second thread gets its own, distinct value.
    let tls2 = tls.clone();
    let create2 = create.clone();
    thread::spawn(move || {
        assert_eq!(None, tls2.get());
        assert_eq!(1, *tls2.get_or(|| create2()));
        assert_eq!(Some(&1), tls2.get());
    }).join()
        .unwrap();

    // The main thread's value is unaffected.
    assert_eq!(Some(&0), tls.get());
    assert_eq!(0, *tls.get_or(|| create()));
}
#[test]
fn different_thread_cached() {
    let create = make_create();
    let tls = Arc::new(CachedThreadLocal::new());
    assert_eq!(None, tls.get());
    assert_eq!(0, *tls.get_or(|| create()));
    assert_eq!(Some(&0), tls.get());

    // A second thread gets its own, distinct value (bypassing the fast path,
    // which the first thread owns).
    let tls2 = tls.clone();
    let create2 = create.clone();
    thread::spawn(move || {
        assert_eq!(None, tls2.get());
        assert_eq!(1, *tls2.get_or(|| create2()));
        assert_eq!(Some(&1), tls2.get());
    }).join()
        .unwrap();

    // The owner thread's cached value is unaffected.
    assert_eq!(Some(&0), tls.get());
    assert_eq!(0, *tls.get_or(|| create()));
}
#[test]
fn iter() {
    let tls = Arc::new(ThreadLocal::new());
    tls.get_or(|| Box::new(1));

    // Populate values from two more threads.
    let tls2 = tls.clone();
    thread::spawn(move || {
        tls2.get_or(|| Box::new(2));
        let tls3 = tls2.clone();
        thread::spawn(move || { tls3.get_or(|| Box::new(3)); })
            .join()
            .unwrap();
    }).join()
        .unwrap();

    // With exclusive access, all three values are visible (order unspecified).
    let mut tls = Arc::try_unwrap(tls).unwrap();
    let mut v = tls.iter_mut().map(|x| **x).collect::<Vec<i32>>();
    v.sort();
    assert_eq!(vec![1, 2, 3], v);
    let mut v = tls.into_iter().map(|x| *x).collect::<Vec<i32>>();
    v.sort();
    assert_eq!(vec![1, 2, 3], v);
}
#[test]
fn iter_cached() {
    let tls = Arc::new(CachedThreadLocal::new());
    tls.get_or(|| Box::new(1));

    // Populate values from two more threads.
    let tls2 = tls.clone();
    thread::spawn(move || {
        tls2.get_or(|| Box::new(2));
        let tls3 = tls2.clone();
        thread::spawn(move || { tls3.get_or(|| Box::new(3)); })
            .join()
            .unwrap();
    }).join()
        .unwrap();

    // With exclusive access, all three values are visible (order unspecified).
    let mut tls = Arc::try_unwrap(tls).unwrap();
    let mut v = tls.iter_mut().map(|x| **x).collect::<Vec<i32>>();
    v.sort();
    assert_eq!(vec![1, 2, 3], v);
    let mut v = tls.into_iter().map(|x| *x).collect::<Vec<i32>>();
    v.sort();
    assert_eq!(vec![1, 2, 3], v);
}
#[test]
fn is_sync() {
    // Compile-time check: both wrappers must be Sync even when T is not
    // (e.g. RefCell<String>).
    fn foo<T: Sync>() {}
    foo::<ThreadLocal<String>>();
    foo::<ThreadLocal<RefCell<String>>>();
    foo::<CachedThreadLocal<String>>();
    foo::<CachedThreadLocal<RefCell<String>>>();
}