]> git.proxmox.com Git - rustc.git/blame - vendor/rayon-core/src/latch.rs
New upstream version 1.68.2+dfsg1
[rustc.git] / vendor / rayon-core / src / latch.rs
CommitLineData
1b1a35ee 1use std::sync::atomic::{AtomicUsize, Ordering};
f035d41b 2use std::sync::{Arc, Condvar, Mutex};
1b1a35ee 3use std::usize;
2c00a5a8 4
1b1a35ee 5use crate::registry::{Registry, WorkerThread};
2c00a5a8
XL
6
/// We define various kinds of latches, which are all a primitive signaling
/// mechanism. A latch starts as false. Eventually someone calls `set()` and
/// it becomes true. You can test if it has been set by calling `probe()`.
///
/// Some kinds of latches, but not all, support a `wait()` operation
/// that will wait until the latch is set, blocking efficiently. That
/// is not part of the trait since it is not possible to do with all
/// latches.
///
/// The intention is that `set()` is called once, but `probe()` may be
/// called any number of times. Once `probe()` returns true, the memory
/// effects that occurred before `set()` become visible.
///
/// It'd probably be better to refactor the API into two paired types,
/// but that's a bit of work, and this is not a public API.
///
/// ## Memory ordering
///
/// Latches need to guarantee two things:
///
/// - Once `probe()` returns true, all memory effects from the `set()`
///   are visible (in other words, the set should synchronize-with
///   the probe).
/// - Once `set()` occurs, the next `probe()` *will* observe it. This
///   typically requires a seq-cst ordering. See [the "tickle-then-get-sleepy" scenario in the sleep
///   README](/src/sleep/README.md#tickle-then-get-sleepy) for details.
pub(super) trait Latch {
    /// Set the latch, signalling others.
    ///
    /// # WARNING
    ///
    /// Setting a latch triggers other threads to wake up and (in some
    /// cases) complete. This may, in turn, cause memory to be
    /// allocated and so forth. One must be very careful about this,
    /// and it's typically better to read all the fields you will need
    /// to access *before* a latch is set!
    fn set(&self);
}
45
1b1a35ee
XL
/// Conversion trait for latch types that embed a `CoreLatch`,
/// exposing that shared state machine to callers that need to
/// block on (or probe) the latch generically.
pub(super) trait AsCoreLatch {
    fn as_core_latch(&self) -> &CoreLatch;
}
49
1b1a35ee
XL
// The four states of a `CoreLatch`, stored in its atomic `state` word.

/// Latch is not set, owning thread is awake.
const UNSET: usize = 0;

/// Latch is not set, owning thread is going to sleep on this latch
/// (but has not yet fallen asleep).
const SLEEPY: usize = 1;

/// Latch is not set, owning thread is asleep on this latch and
/// must be awoken.
const SLEEPING: usize = 2;

/// Latch is set.
const SET: usize = 3;
63
2c00a5a8
XL
/// The core state machine shared by latches that a worker thread can
/// sleep on: a single atomic word holding one of the states above
/// (`UNSET`, `SLEEPY`, `SLEEPING`, `SET`).
#[derive(Debug)]
pub(super) struct CoreLatch {
    // One of UNSET, SLEEPY, SLEEPING, or SET.
    state: AtomicUsize,
}
71
1b1a35ee
XL
72impl CoreLatch {
73 #[inline]
74 fn new() -> Self {
75 Self {
76 state: AtomicUsize::new(0),
77 }
78 }
79
80 /// Returns the address of this core latch as an integer. Used
81 /// for logging.
82 #[inline]
83 pub(super) fn addr(&self) -> usize {
84 self as *const CoreLatch as usize
85 }
86
87 /// Invoked by owning thread as it prepares to sleep. Returns true
88 /// if the owning thread may proceed to fall asleep, false if the
89 /// latch was set in the meantime.
90 #[inline]
91 pub(super) fn get_sleepy(&self) -> bool {
92 self.state
93 .compare_exchange(UNSET, SLEEPY, Ordering::SeqCst, Ordering::Relaxed)
94 .is_ok()
95 }
96
97 /// Invoked by owning thread as it falls asleep sleep. Returns
98 /// true if the owning thread should block, or false if the latch
99 /// was set in the meantime.
100 #[inline]
101 pub(super) fn fall_asleep(&self) -> bool {
102 self.state
103 .compare_exchange(SLEEPY, SLEEPING, Ordering::SeqCst, Ordering::Relaxed)
104 .is_ok()
105 }
106
107 /// Invoked by owning thread as it falls asleep sleep. Returns
108 /// true if the owning thread should block, or false if the latch
109 /// was set in the meantime.
110 #[inline]
111 pub(super) fn wake_up(&self) {
112 if !self.probe() {
113 let _ =
114 self.state
115 .compare_exchange(SLEEPING, UNSET, Ordering::SeqCst, Ordering::Relaxed);
116 }
117 }
118
119 /// Set the latch. If this returns true, the owning thread was sleeping
120 /// and must be awoken.
121 ///
122 /// This is private because, typically, setting a latch involves
123 /// doing some wakeups; those are encapsulated in the surrounding
124 /// latch code.
125 #[inline]
126 fn set(&self) -> bool {
127 let old_state = self.state.swap(SET, Ordering::AcqRel);
128 old_state == SLEEPING
129 }
130
131 /// Test if this latch has been set.
132 #[inline]
133 pub(super) fn probe(&self) -> bool {
134 self.state.load(Ordering::Acquire) == SET
135 }
136}
137
/// Spin latches are the simplest, most efficient kind, but they do
/// not support a `wait()` operation. They just have a boolean flag
/// that becomes true when `set()` is called.
pub(super) struct SpinLatch<'r> {
    core_latch: CoreLatch,
    // Registry of the worker thread that owns (blocks on) this latch.
    registry: &'r Arc<Registry>,
    // Index of the owning worker, passed to
    // `notify_worker_latch_is_set` when the latch is set.
    target_worker_index: usize,
    // True if `set()` may be invoked from a thread outside the owning
    // registry; see `SpinLatch::cross`.
    cross: bool,
}
147
148impl<'r> SpinLatch<'r> {
149 /// Creates a new spin latch that is owned by `thread`. This means
150 /// that `thread` is the only thread that should be blocking on
151 /// this latch -- it also means that when the latch is set, we
152 /// will wake `thread` if it is sleeping.
153 #[inline]
154 pub(super) fn new(thread: &'r WorkerThread) -> SpinLatch<'r> {
155 SpinLatch {
156 core_latch: CoreLatch::new(),
157 registry: thread.registry(),
158 target_worker_index: thread.index(),
159 cross: false,
160 }
161 }
162
163 /// Creates a new spin latch for cross-threadpool blocking. Notably, we
164 /// need to make sure the registry is kept alive after setting, so we can
165 /// safely call the notification.
2c00a5a8 166 #[inline]
1b1a35ee 167 pub(super) fn cross(thread: &'r WorkerThread) -> SpinLatch<'r> {
416331ca 168 SpinLatch {
1b1a35ee
XL
169 cross: true,
170 ..SpinLatch::new(thread)
416331ca 171 }
2c00a5a8 172 }
1b1a35ee
XL
173
174 #[inline]
175 pub(super) fn probe(&self) -> bool {
176 self.core_latch.probe()
177 }
2c00a5a8
XL
178}
179
impl<'r> AsCoreLatch for SpinLatch<'r> {
    // Expose the embedded core latch so generic code can block on it.
    #[inline]
    fn as_core_latch(&self) -> &CoreLatch {
        &self.core_latch
    }
}
186
impl<'r> Latch for SpinLatch<'r> {
    // Sets the latch and, if the owning worker was sleeping, notifies
    // its registry to wake it. The ordering here is load-bearing: every
    // field of `self` must be read *before* `core_latch.set()`.
    #[inline]
    fn set(&self) {
        // Declared out here so the cloned Arc (if any) outlives the
        // `registry` borrow below, keeping the registry alive until
        // after the notification.
        let cross_registry;

        let registry: &Registry = if self.cross {
            // Ensure the registry stays alive while we notify it.
            // Otherwise, it would be possible that we set the spin
            // latch and the other thread sees it and exits, causing
            // the registry to be deallocated, all before we get a
            // chance to invoke `registry.notify_worker_latch_is_set`.
            cross_registry = Arc::clone(self.registry);
            &cross_registry
        } else {
            // If this is not a "cross-registry" spin-latch, then the
            // thread which is performing `set` is itself ensuring
            // that the registry stays alive. However, that doesn't
            // include this *particular* `Arc` handle if the waiting
            // thread then exits, so we must completely dereference it.
            self.registry
        };
        let target_worker_index = self.target_worker_index;

        // NOTE: Once we `set`, the target may proceed and invalidate `&self`!
        if self.core_latch.set() {
            // Subtle: at this point, we can no longer read from
            // `self`, because the thread owning this spin latch may
            // have awoken and deallocated the latch. Therefore, we
            // only use fields whose values we already read.
            registry.notify_worker_latch_is_set(target_worker_index);
        }
    }
}
220
/// A Latch starts as false and eventually becomes true. You can block
/// until it becomes true.
#[derive(Debug)]
pub(super) struct LockLatch {
    // The flag: false until `set()` is called.
    m: Mutex<bool>,
    // Signaled when the flag flips to true.
    v: Condvar,
}
228
229impl LockLatch {
230 #[inline]
416331ca 231 pub(super) fn new() -> LockLatch {
2c00a5a8
XL
232 LockLatch {
233 m: Mutex::new(false),
234 v: Condvar::new(),
235 }
236 }
237
e74abb32
XL
238 /// Block until latch is set, then resets this lock latch so it can be reused again.
239 pub(super) fn wait_and_reset(&self) {
240 let mut guard = self.m.lock().unwrap();
241 while !*guard {
242 guard = self.v.wait(guard).unwrap();
243 }
244 *guard = false;
245 }
246
2c00a5a8 247 /// Block until latch is set.
416331ca 248 pub(super) fn wait(&self) {
2c00a5a8
XL
249 let mut guard = self.m.lock().unwrap();
250 while !*guard {
251 guard = self.v.wait(guard).unwrap();
252 }
253 }
254}
255
2c00a5a8
XL
impl Latch for LockLatch {
    // Flip the flag under the mutex, then wake every waiter. Holding
    // the lock while notifying ensures no waiter can observe the flag
    // between checking it and going to sleep.
    #[inline]
    fn set(&self) {
        let mut guard = self.m.lock().unwrap();
        *guard = true;
        self.v.notify_all();
    }
}
264
/// Counting latches are used to implement scopes. They track a
/// counter. Unlike other latches, calling `set()` does not
/// necessarily make the latch be considered `set()`; instead, it just
/// decrements the counter. The latch is only "set" (in the sense that
/// `probe()` returns true) once the counter reaches zero.
///
/// Note: like a `SpinLatch`, count latches are always associated with
/// some registry that is probing them, which must be tickled when
/// they are set. *Unlike* a `SpinLatch`, they don't themselves hold a
/// reference to that registry. This is because in some cases the
/// registry owns the count-latch, and that would create a cycle. So a
/// `CountLatch` must be given a reference to its owning registry when
/// it is set. For this reason, it does not implement the `Latch`
/// trait (but it doesn't have to, as it is not used in those generic
/// contexts).
#[derive(Debug)]
pub(super) struct CountLatch {
    core_latch: CoreLatch,
    // Number of outstanding `set()` calls remaining before the
    // `core_latch` is actually set.
    counter: AtomicUsize,
}
285
286impl CountLatch {
287 #[inline]
416331ca 288 pub(super) fn new() -> CountLatch {
6522a427
EL
289 Self::with_count(1)
290 }
291
292 #[inline]
293 pub(super) fn with_count(n: usize) -> CountLatch {
416331ca 294 CountLatch {
1b1a35ee 295 core_latch: CoreLatch::new(),
6522a427 296 counter: AtomicUsize::new(n),
416331ca 297 }
2c00a5a8
XL
298 }
299
300 #[inline]
416331ca 301 pub(super) fn increment(&self) {
1b1a35ee 302 debug_assert!(!self.core_latch.probe());
2c00a5a8
XL
303 self.counter.fetch_add(1, Ordering::Relaxed);
304 }
2c00a5a8 305
1b1a35ee
XL
306 /// Decrements the latch counter by one. If this is the final
307 /// count, then the latch is **set**, and calls to `probe()` will
17df50a5 308 /// return true. Returns whether the latch was set.
2c00a5a8 309 #[inline]
17df50a5 310 pub(super) fn set(&self) -> bool {
1b1a35ee
XL
311 if self.counter.fetch_sub(1, Ordering::SeqCst) == 1 {
312 self.core_latch.set();
313 true
314 } else {
315 false
2c00a5a8
XL
316 }
317 }
2c00a5a8 318
1b1a35ee
XL
319 /// Decrements the latch counter by one and possibly set it. If
320 /// the latch is set, then the specific worker thread is tickled,
321 /// which should be the one that owns this latch.
2c00a5a8 322 #[inline]
1b1a35ee
XL
323 pub(super) fn set_and_tickle_one(&self, registry: &Registry, target_worker_index: usize) {
324 if self.set() {
325 registry.notify_worker_latch_is_set(target_worker_index);
326 }
2c00a5a8
XL
327 }
328}
329
impl AsCoreLatch for CountLatch {
    // Expose the embedded core latch so generic code can block on it.
    #[inline]
    fn as_core_latch(&self) -> &CoreLatch {
        &self.core_latch
    }
}
336
17df50a5
XL
/// A counting latch that also supports a blocking `wait()`: the inner
/// `LockLatch` is only set once the counter reaches zero (see the
/// `Latch` impl below).
#[derive(Debug)]
pub(super) struct CountLockLatch {
    lock_latch: LockLatch,
    // Number of outstanding `set()` calls remaining before
    // `lock_latch` is set.
    counter: AtomicUsize,
}
342
343impl CountLockLatch {
344 #[inline]
6522a427 345 pub(super) fn with_count(n: usize) -> CountLockLatch {
17df50a5
XL
346 CountLockLatch {
347 lock_latch: LockLatch::new(),
6522a427 348 counter: AtomicUsize::new(n),
17df50a5
XL
349 }
350 }
351
352 #[inline]
353 pub(super) fn increment(&self) {
354 let old_counter = self.counter.fetch_add(1, Ordering::Relaxed);
355 debug_assert!(old_counter != 0);
356 }
357
358 pub(super) fn wait(&self) {
359 self.lock_latch.wait();
360 }
361}
362
363impl Latch for CountLockLatch {
364 #[inline]
365 fn set(&self) {
366 if self.counter.fetch_sub(1, Ordering::SeqCst) == 1 {
367 self.lock_latch.set();
368 }
369 }
370}
371
e74abb32
XL
372impl<'a, L> Latch for &'a L
373where
374 L: Latch,
375{
1b1a35ee 376 #[inline]
e74abb32
XL
377 fn set(&self) {
378 L::set(self);
379 }
380}