// src/libstd/sys/wasm/thread.rs (rustc 1.41.1)
use crate::ffi::CStr;
use crate::io;
use crate::sys::{unsupported, Void};
use crate::time::Duration;

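// `Void` is an uninhabited type, so a `Thread` value can never actually be
// constructed on this target.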
pub struct Thread(Void);

// The default minimum stack size used when no explicit size is requested;
// effectively unused here since spawning always fails.
pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;

impl Thread {
    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
    pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
        // Spawning threads is not supported on this target, so this always
        // returns an "unsupported" error.
        unsupported()
    }

    pub fn yield_now() {
        // There are no other threads to yield to, so this is a no-op.
    }

    pub fn set_name(_name: &CStr) {
        // Thread names aren't supported here, so the name is silently ignored.
    }

    #[cfg(not(target_feature = "atomics"))]
    pub fn sleep(_dur: Duration) {
        // Without the `atomics` feature there's no way to block the current
        // "thread", so sleeping isn't possible.
        panic!("can't sleep");
    }

    #[cfg(target_feature = "atomics")]
    pub fn sleep(dur: Duration) {
        use crate::arch::wasm32;
        use crate::cmp;

        // Use an atomic wait with a timeout to artificially block the current
        // thread. Nothing should ever notify us (return value 0) and the
        // comparison should never fail (return value 1), so we should always
        // resume execution through a timeout (return value 2).
        let mut nanos = dur.as_nanos();
        while nanos > 0 {
            let amt = cmp::min(i64::max_value() as u128, nanos);
            let mut x = 0;
            let val = unsafe { wasm32::i32_atomic_wait(&mut x, 0, amt as i64) };
            debug_assert_eq!(val, 2);
            nanos -= amt;
        }
    }

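    // `join` can never actually be called because a `Thread` value can never
    // be created; the empty match on the uninhabited `Void` makes that
    // explicit to the compiler.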
    pub fn join(self) {
        match self.0 {}
    }
}

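// Stack guard pages aren't used on this target, so there's never a guard
// region to report.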
pub mod guard {
    pub type Guard = !;
    pub unsafe fn current() -> Option<Guard> {
        None
    }
    pub unsafe fn init() -> Option<Guard> {
        None
    }
}

// This is only used by the atomics primitives when the `atomics` feature is
// enabled. In that mode we currently just use our own thread-local to store
// the current thread's ID, lazily initializing it from a global counter.
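// Note that 0 is reserved to mean "not yet assigned", so the first ID actually
// handed out is 1.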
#[cfg(target_feature = "atomics")]
pub fn my_id() -> u32 {
    use crate::sync::atomic::{AtomicU32, Ordering::SeqCst};

    static NEXT_ID: AtomicU32 = AtomicU32::new(0);

    #[thread_local]
    static mut MY_ID: u32 = 0;

    unsafe {
        // If our thread ID isn't set yet then we need to allocate one, using a
        // simple "atomically add to a global counter" strategy. This strategy
        // doesn't handle what happens when the counter overflows, however, so
        // for now we just abort everything once the counter overflows;
        // eventually we could have some sort of recycling scheme (or maybe
        // this is all totally irrelevant by that point!). In any case we're
        // using a CAS loop instead of a `fetch_add` to ensure that the global
        // counter never overflows.
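        // On overflow `checked_add` returns `None` and we fall into
        // `wasm32::unreachable()`, which executes the wasm `unreachable`
        // instruction and traps, aborting the whole program.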
        if MY_ID == 0 {
            let mut cur = NEXT_ID.load(SeqCst);
            MY_ID = loop {
                let next = cur.checked_add(1).unwrap_or_else(|| crate::arch::wasm32::unreachable());
                match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) {
                    Ok(_) => break next,
                    Err(i) => cur = i,
                }
            };
        }
        MY_ID
    }
}