]> git.proxmox.com Git - rustc.git/blame - vendor/rustc-rayon-core/src/thread_pool/test.rs
New upstream version 1.40.0+dfsg1
[rustc.git] / vendor / rustc-rayon-core / src / thread_pool / test.rs
CommitLineData
2c00a5a8
XL
1#![cfg(test)]
2
2c00a5a8 3use std::sync::atomic::{AtomicUsize, Ordering};
e74abb32
XL
4use std::sync::mpsc::channel;
5use std::sync::{Arc, Mutex};
2c00a5a8 6
2c00a5a8
XL
7use join;
8use thread_pool::ThreadPool;
9use unwind;
532ac7d7
XL
10#[allow(deprecated)]
11use Configuration;
12use ThreadPoolBuilder;
2c00a5a8
XL
13
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate() {
    // A panic raised inside `install` must propagate back to the caller.
    ThreadPoolBuilder::new()
        .build()
        .unwrap()
        .install(|| panic!("Hello, world!"));
}
22
#[test]
fn workers_stop() {
    let registry = {
        // The thread-pool is dropped at the end of this block.
        let thread_pool = ThreadPoolBuilder::new().num_threads(22).build().unwrap();
        let registry = thread_pool.install(|| {
            // Exercise the worker threads a bit first.
            join_a_lot(22);
            thread_pool.registry.clone()
        });
        assert_eq!(registry.num_threads(), 22);
        registry
    };

    // Once the thread-pool is dropped, the registry should terminate,
    // which should lead to the worker threads stopping.
    registry.wait_until_stopped();
}
43
44fn join_a_lot(n: usize) {
45 if n > 0 {
46 join(|| join_a_lot(n - 1), || join_a_lot(n - 1));
47 }
48}
49
#[test]
fn sleeper_stop() {
    use std::{thread, time};

    let registry = {
        // The thread-pool is dropped at the end of this block.
        let thread_pool = ThreadPoolBuilder::new().num_threads(22).build().unwrap();
        let registry = thread_pool.registry.clone();

        // Give at least some of the pool's threads a chance to fall asleep.
        thread::sleep(time::Duration::from_secs(1));
        registry
    };

    // Once the thread-pool is dropped, the registry should terminate,
    // which should lead to the worker threads stopping.
    registry.wait_until_stopped();
}
69
/// Create a start/exit handler that increments an atomic counter.
///
/// Returns the shared counter together with a closure suitable for
/// `start_handler`/`exit_handler`; every invocation bumps the counter
/// by one, regardless of the index argument.
fn count_handler() -> (Arc<AtomicUsize>, impl Fn(usize)) {
    let count = Arc::new(AtomicUsize::new(0));
    let shared = count.clone();
    let handler = move |_| {
        shared.fetch_add(1, Ordering::SeqCst);
    };
    (count, handler)
}
77
/// Wait until a counter is no longer shared, then return its value.
///
/// Polls once per second for up to a minute, panicking if the counter
/// is still shared after that (i.e. some thread never released its
/// handle).
fn wait_for_counter(mut counter: Arc<AtomicUsize>) -> usize {
    use std::{thread, time};

    let mut tries = 0;
    while tries < 60 {
        match Arc::try_unwrap(counter) {
            // Sole owner now — extract the final value.
            Ok(atomic) => return atomic.into_inner(),
            Err(shared) => {
                // Still shared; give the other threads time to finish.
                thread::sleep(time::Duration::from_secs(1));
                counter = shared;
            }
        }
        tries += 1;
    }

    // That's too long!
    panic!("Counter is still shared!");
}
95
#[test]
fn failed_thread_stack() {
    // Note: we first tried to force failure with a `usize::MAX` stack, but
    // macOS and Windows weren't fazed, or at least didn't fail the way we want.
    // They work with `isize::MAX`, but 32-bit platforms may feasibly allocate a
    // 2GB stack, so it might not fail until the second thread.
    let stack_size = ::std::isize::MAX as usize;

    let (start_count, start_handler) = count_handler();
    let (exit_count, exit_handler) = count_handler();

    let pool = ThreadPoolBuilder::new()
        .num_threads(10)
        .stack_size(stack_size)
        .start_handler(start_handler)
        .exit_handler(exit_handler)
        .build();
    assert!(pool.is_err(), "thread stack should have failed!");

    // With such a huge stack, 64-bit will probably fail on the first thread;
    // 32-bit might manage the first 2GB, but certainly fail the second.
    let started = wait_for_counter(start_count);
    assert!(started <= 1);
    assert_eq!(started, wait_for_counter(exit_count));
}
121
#[test]
fn panic_thread_name() {
    let (start_count, start_handler) = count_handler();
    let (exit_count, exit_handler) = count_handler();
    let builder = ThreadPoolBuilder::new()
        .num_threads(10)
        .start_handler(start_handler)
        .exit_handler(exit_handler)
        .thread_name(|i| {
            // Blow up while naming the sixth thread (index 5).
            if i >= 5 {
                panic!();
            }
            format!("panic_thread_name#{}", i)
        });

    let pool = unwind::halt_unwinding(|| builder.build());
    assert!(pool.is_err(), "thread-name panic should propagate!");

    // Assuming they're created in order, threads 0 through 4 should have
    // been started already, and then terminated by the panic.
    let started = wait_for_counter(start_count);
    let exited = wait_for_counter(exit_count);
    assert_eq!(started, 5);
    assert_eq!(exited, 5);
}
145
#[test]
fn self_install() {
    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();

    // A nested `install` into the same single-threaded pool must not
    // block waiting for an idle worker — if it did, nothing would ever
    // run the inner closure.
    let ran = pool.install(|| pool.install(|| true));
    assert!(ran);
}
153
#[test]
fn mutual_install() {
    let pool1 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
    let pool2 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();

    // Chain installs `pool1` -> `pool2` -> `pool1`. If inter-pool
    // installs blocked, there would be no threads left to run the
    // innermost closure!
    let ok = pool1.install(|| pool2.install(|| pool1.install(|| true)));
    assert!(ok);
}
172
#[test]
fn mutual_install_sleepy() {
    use std::{thread, time};

    // Pause long enough for a pool's worker to fall asleep.
    let nap = || thread::sleep(time::Duration::from_secs(1));

    let pool1 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
    let pool2 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();

    let ok = pool1.install(|| {
        // This creates a dependency from `pool1` -> `pool2`
        pool2.install(|| {
            // Give `pool1` time to fall asleep.
            nap();

            // This creates a dependency from `pool2` -> `pool1`
            pool1.install(|| {
                // Give `pool2` time to fall asleep.
                nap();

                // If they blocked on inter-pool installs, there would be no
                // threads left to run this!
                true
            })
        })
    });
    assert!(ok);
}
199
#[test]
#[allow(deprecated)]
fn check_thread_pool_new() {
    // The deprecated `Configuration`-based constructor must still honor
    // the requested thread count.
    let config = Configuration::new().num_threads(22);
    let pool = ThreadPool::new(config).unwrap();
    assert_eq!(pool.current_num_threads(), 22);
}
e74abb32
XL
206
/// Run ten indexed `$spawn`s inside a `$scope` on a single-threaded
/// pool and return the order in which they actually executed.
macro_rules! test_scope_order {
    ($scope:ident => $spawn:ident) => {{
        let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
        pool.install(|| {
            let order = Mutex::new(Vec::new());
            pool.$scope(|scope| {
                let order = &order;
                for i in 0..10 {
                    scope.$spawn(move |_| order.lock().unwrap().push(i));
                }
            });
            order.into_inner().unwrap()
        })
    }};
}
225
#[test]
fn scope_lifo_order() {
    let order = test_scope_order!(scope => spawn);
    // Plain `scope` spawns run newest-first (LIFO -> reversed).
    let expected = vec![9, 8, 7, 6, 5, 4, 3, 2, 1, 0];
    assert_eq!(order, expected);
}
232
#[test]
fn scope_fifo_order() {
    let order = test_scope_order!(scope_fifo => spawn_fifo);
    // `scope_fifo` spawns run oldest-first (FIFO -> natural order).
    let expected = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    assert_eq!(order, expected);
}
239
/// Run ten indexed `$spawn`s on a single-threaded pool and report the
/// order in which they executed, collected through a channel. The
/// sender is dropped when `install` returns, so the receiver's iterator
/// terminates once all spawned tasks have run.
macro_rules! test_spawn_order {
    ($spawn:ident) => {{
        let pool = &ThreadPoolBuilder::new().num_threads(1).build().unwrap();
        let (tx, rx) = channel();
        pool.install(move || {
            for i in 0..10 {
                let tx = tx.clone();
                pool.$spawn(move || tx.send(i).unwrap());
            }
        });
        rx.iter().collect::<Vec<i32>>()
    }};
}
256
#[test]
fn spawn_lifo_order() {
    let order = test_spawn_order!(spawn);
    // Plain `spawn`s run newest-first (LIFO -> reversed).
    let expected = vec![9, 8, 7, 6, 5, 4, 3, 2, 1, 0];
    assert_eq!(order, expected);
}
263
#[test]
fn spawn_fifo_order() {
    let order = test_spawn_order!(spawn_fifo);
    // `spawn_fifo`s run oldest-first (FIFO -> natural order).
    let expected = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    assert_eq!(order, expected);
}