/*
 *  This file is part of the SPL: Solaris Porting Layer.
 *
 *  Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *  UCRL-CODE-235197
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "splat-internal.h"

#define SPLAT_SUBSYSTEM_CONDVAR         0x0500
#define SPLAT_CONDVAR_NAME              "condvar"
#define SPLAT_CONDVAR_DESC              "Kernel Condition Variable Tests"

#define SPLAT_CONDVAR_TEST1_ID          0x0501
#define SPLAT_CONDVAR_TEST1_NAME        "signal1"
#define SPLAT_CONDVAR_TEST1_DESC        "Wake a single thread, cv_wait()/cv_signal()"

#define SPLAT_CONDVAR_TEST2_ID          0x0502
#define SPLAT_CONDVAR_TEST2_NAME        "broadcast1"
#define SPLAT_CONDVAR_TEST2_DESC        "Wake all threads, cv_wait()/cv_broadcast()"

#define SPLAT_CONDVAR_TEST3_ID          0x0503
#define SPLAT_CONDVAR_TEST3_NAME        "signal2"
#define SPLAT_CONDVAR_TEST3_DESC        "Wake a single thread, cv_wait_timeout()/cv_signal()"

#define SPLAT_CONDVAR_TEST4_ID          0x0504
#define SPLAT_CONDVAR_TEST4_NAME        "broadcast2"
#define SPLAT_CONDVAR_TEST4_DESC        "Wake all threads, cv_wait_timeout()/cv_broadcast()"

#define SPLAT_CONDVAR_TEST5_ID          0x0505
#define SPLAT_CONDVAR_TEST5_NAME        "timeout"
#define SPLAT_CONDVAR_TEST5_DESC        "Timeout thread, cv_wait_timeout()"

#define SPLAT_CONDVAR_TEST_MAGIC        0x115599DDUL
#define SPLAT_CONDVAR_TEST_NAME         "condvar_test"
#define SPLAT_CONDVAR_TEST_COUNT        8

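/*
 * Shared state for a single test run: the condition variable and its
 * paired mutex, plus a magic value the worker threads use to sanity
 * check the pointer they are handed.  condvar_thr_t is the per-thread
 * argument: an id used to build the thread name, the test name used
 * for logging, a pointer back to the shared state, and a return code
 * the timed tests use to report an unexpected timeout.
 */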
typedef struct condvar_priv {
        unsigned long cv_magic;
        struct file *cv_file;
        kcondvar_t cv_condvar;
        kmutex_t cv_mtx;
} condvar_priv_t;

typedef struct condvar_thr {
        int ct_id;
        const char *ct_name;
        condvar_priv_t *ct_cvp;
        int ct_rc;
} condvar_thr_t;

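/*
 * Waiter used by the signal1 and broadcast1 tests: takes the mutex,
 * blocks in cv_wait() until it is signaled or broadcast, then drops
 * the mutex and exits.  The waiter counts printed here come from the
 * condition variable's cv_waiters counter, which the tests below also
 * poll to track progress.
 */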
int
splat_condvar_test12_thread(void *arg)
{
        condvar_thr_t *ct = (condvar_thr_t *)arg;
        condvar_priv_t *cv = ct->ct_cvp;
        char name[16];

        ASSERT(cv->cv_magic == SPLAT_CONDVAR_TEST_MAGIC);
        snprintf(name, sizeof(name), "%s%d", SPLAT_CONDVAR_TEST_NAME, ct->ct_id);
        daemonize(name);

        mutex_enter(&cv->cv_mtx);
        splat_vprint(cv->cv_file, ct->ct_name,
                     "%s thread sleeping with %d waiters\n",
                     name, atomic_read(&cv->cv_condvar.cv_waiters));
        cv_wait(&cv->cv_condvar, &cv->cv_mtx);
        splat_vprint(cv->cv_file, ct->ct_name,
                     "%s thread woken %d waiters remain\n",
                     name, atomic_read(&cv->cv_condvar.cv_waiters));
        mutex_exit(&cv->cv_mtx);

        return 0;
}

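/*
 * signal1: spawn SPLAT_CONDVAR_TEST_COUNT waiters, wait until every one
 * of them is registered on the condition variable, then cv_signal() the
 * cv once per waiter.  After each signal the waiter count must drop by
 * exactly one; waking zero or more than one thread fails the test.
 */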
static int
splat_condvar_test1(struct file *file, void *arg)
{
        int i, count = 0, rc = 0;
        long pids[SPLAT_CONDVAR_TEST_COUNT];
        condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
        condvar_priv_t cv;

        cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
        cv.cv_file = file;
        mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
        cv_init(&cv.cv_condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);

        /* Create some threads; the exact number isn't important just as
         * long as we know how many we managed to create and should expect. */
        for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
                ct[i].ct_cvp = &cv;
                ct[i].ct_id = i;
                ct[i].ct_name = SPLAT_CONDVAR_TEST1_NAME;
                ct[i].ct_rc = 0;

                pids[i] = kernel_thread(splat_condvar_test12_thread, &ct[i], 0);
                if (pids[i] >= 0)
                        count++;
        }

        /* Wait until all threads are waiting on the condition variable */
        while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
                schedule();

        /* Wake a single thread at a time, wait until it exits */
        for (i = 1; i <= count; i++) {
                cv_signal(&cv.cv_condvar);

                while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
                        schedule();

                /* Correct behavior: exactly 1 thread woken */
                if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
                        continue;

                splat_vprint(file, SPLAT_CONDVAR_TEST1_NAME, "Attempted to "
                             "wake %d thread but %d threads woke\n",
                             1, count - atomic_read(&cv.cv_condvar.cv_waiters));
                rc = -EINVAL;
                break;
        }

        if (!rc)
                splat_vprint(file, SPLAT_CONDVAR_TEST1_NAME, "Correctly woke "
                             "%d sleeping threads %d at a time\n", count, 1);

        /* Wait until the last thread has dropped the mutex */
        while (mutex_owner(&cv.cv_mtx))
                schedule();

        /* Wake everything for the failure case */
        cv_broadcast(&cv.cv_condvar);
        cv_destroy(&cv.cv_condvar);
        mutex_destroy(&cv.cv_mtx);

        return rc;
}

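/*
 * broadcast1: spawn the same waiters as signal1, but wake them with a
 * single cv_broadcast() and then spin until the waiter count reaches
 * zero and the mutex has been released by the last thread out.
 */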
static int
splat_condvar_test2(struct file *file, void *arg)
{
        int i, count = 0, rc = 0;
        long pids[SPLAT_CONDVAR_TEST_COUNT];
        condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
        condvar_priv_t cv;

        cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
        cv.cv_file = file;
        mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
        cv_init(&cv.cv_condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);

        /* Create some threads; the exact number isn't important just as
         * long as we know how many we managed to create and should expect. */
        for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
                ct[i].ct_cvp = &cv;
                ct[i].ct_id = i;
                ct[i].ct_name = SPLAT_CONDVAR_TEST2_NAME;
                ct[i].ct_rc = 0;

                pids[i] = kernel_thread(splat_condvar_test12_thread, &ct[i], 0);
                if (pids[i] > 0)
                        count++;
        }

        /* Wait until all threads are waiting on the condition variable */
        while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
                schedule();

        /* Wake all threads waiting on the condition variable */
        cv_broadcast(&cv.cv_condvar);

        /* Wait until all threads have exited */
        while ((atomic_read(&cv.cv_condvar.cv_waiters) > 0) || mutex_owner(&cv.cv_mtx))
                schedule();

        splat_vprint(file, SPLAT_CONDVAR_TEST2_NAME, "Correctly woke all "
                     "%d sleeping threads at once\n", count);

        /* All waiters have exited; tear down the primitives */
        cv_destroy(&cv.cv_condvar);
        mutex_destroy(&cv.cv_mtx);

        return rc;
}

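/*
 * Timed waiter used by the signal2 and broadcast2 tests.  Identical to
 * the cv_wait() waiter above except that it sleeps in cv_timedwait()
 * with a 3 second ceiling; if the timeout fires before a wakeup the
 * thread records -ETIMEDOUT in its ct_rc so the parent test can fail.
 */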
int
splat_condvar_test34_thread(void *arg)
{
        condvar_thr_t *ct = (condvar_thr_t *)arg;
        condvar_priv_t *cv = ct->ct_cvp;
        char name[16];
        clock_t rc;

        ASSERT(cv->cv_magic == SPLAT_CONDVAR_TEST_MAGIC);
        snprintf(name, sizeof(name), "%s%d", SPLAT_CONDVAR_TEST_NAME, ct->ct_id);
        daemonize(name);

        mutex_enter(&cv->cv_mtx);
        splat_vprint(cv->cv_file, ct->ct_name,
                     "%s thread sleeping with %d waiters\n",
                     name, atomic_read(&cv->cv_condvar.cv_waiters));

        /* Sleep no longer than 3 seconds; for this test we should
         * never actually sleep that long without being woken up. */
        rc = cv_timedwait(&cv->cv_condvar, &cv->cv_mtx, lbolt + HZ * 3);
        if (rc == -1) {
                ct->ct_rc = -ETIMEDOUT;
                splat_vprint(cv->cv_file, ct->ct_name, "%s thread timed out, "
                             "should have been woken\n", name);
        } else {
                splat_vprint(cv->cv_file, ct->ct_name,
                             "%s thread woken %d waiters remain\n",
                             name, atomic_read(&cv->cv_condvar.cv_waiters));
        }

        mutex_exit(&cv->cv_mtx);

        return 0;
}

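/*
 * signal2: same wake-one-at-a-time pattern as signal1, but the waiters
 * sleep in cv_timedwait().  In addition to checking that each signal
 * wakes exactly one thread, the test fails if any waiter reports that
 * it timed out instead of being woken.
 */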
static int
splat_condvar_test3(struct file *file, void *arg)
{
        int i, count = 0, rc = 0;
        long pids[SPLAT_CONDVAR_TEST_COUNT];
        condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
        condvar_priv_t cv;

        cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
        cv.cv_file = file;
        mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
        cv_init(&cv.cv_condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);

        /* Create some threads; the exact number isn't important just as
         * long as we know how many we managed to create and should expect. */
        for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
                ct[i].ct_cvp = &cv;
                ct[i].ct_id = i;
                ct[i].ct_name = SPLAT_CONDVAR_TEST3_NAME;
                ct[i].ct_rc = 0;

                pids[i] = kernel_thread(splat_condvar_test34_thread, &ct[i], 0);
                if (pids[i] >= 0)
                        count++;
        }

        /* Wait until all threads are waiting on the condition variable */
        while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
                schedule();

        /* Wake a single thread at a time, wait until it exits */
        for (i = 1; i <= count; i++) {
                cv_signal(&cv.cv_condvar);

                while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
                        schedule();

                /* Correct behavior: exactly 1 thread woken */
                if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
                        continue;

                splat_vprint(file, SPLAT_CONDVAR_TEST3_NAME, "Attempted to "
                             "wake %d thread but %d threads woke\n",
                             1, count - atomic_read(&cv.cv_condvar.cv_waiters));
                rc = -EINVAL;
                break;
        }

        /* Validate no waiting thread timed out early */
        for (i = 0; i < count; i++)
                if (ct[i].ct_rc)
                        rc = ct[i].ct_rc;

        if (!rc)
                splat_vprint(file, SPLAT_CONDVAR_TEST3_NAME, "Correctly woke "
                             "%d sleeping threads %d at a time\n", count, 1);

        /* Wait until the last thread has dropped the mutex */
        while (mutex_owner(&cv.cv_mtx))
                schedule();

        /* Wake everything for the failure case */
        cv_broadcast(&cv.cv_condvar);
        cv_destroy(&cv.cv_condvar);
        mutex_destroy(&cv.cv_mtx);

        return rc;
}

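/*
 * broadcast2: drives the cv_timedwait() waiters through the same
 * wake-one-at-a-time sequence and checks as signal2, registered under
 * the broadcast2 test id.
 */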
static int
splat_condvar_test4(struct file *file, void *arg)
{
        int i, count = 0, rc = 0;
        long pids[SPLAT_CONDVAR_TEST_COUNT];
        condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
        condvar_priv_t cv;

        cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
        cv.cv_file = file;
        mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
        cv_init(&cv.cv_condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);

        /* Create some threads; the exact number isn't important just as
         * long as we know how many we managed to create and should expect. */
        for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
                ct[i].ct_cvp = &cv;
                ct[i].ct_id = i;
                ct[i].ct_name = SPLAT_CONDVAR_TEST4_NAME;
                ct[i].ct_rc = 0;

                pids[i] = kernel_thread(splat_condvar_test34_thread, &ct[i], 0);
                if (pids[i] >= 0)
                        count++;
        }

        /* Wait until all threads are waiting on the condition variable */
        while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
                schedule();

        /* Wake a single thread at a time, wait until it exits */
        for (i = 1; i <= count; i++) {
                cv_signal(&cv.cv_condvar);

                while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
                        schedule();

                /* Correct behavior: exactly 1 thread woken */
                if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
                        continue;

                splat_vprint(file, SPLAT_CONDVAR_TEST4_NAME, "Attempted to "
                             "wake %d thread but %d threads woke\n",
                             1, count - atomic_read(&cv.cv_condvar.cv_waiters));
                rc = -EINVAL;
                break;
        }

        /* Validate no waiting thread timed out early */
        for (i = 0; i < count; i++)
                if (ct[i].ct_rc)
                        rc = ct[i].ct_rc;

        if (!rc)
                splat_vprint(file, SPLAT_CONDVAR_TEST4_NAME, "Correctly woke "
                             "%d sleeping threads %d at a time\n", count, 1);

        /* Wait until the last thread has dropped the mutex */
        while (mutex_owner(&cv.cv_mtx))
                schedule();

        /* Wake everything for the failure case */
        cv_broadcast(&cv.cv_condvar);
        cv_destroy(&cv.cv_condvar);
        mutex_destroy(&cv.cv_mtx);

        return rc;
}

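/*
 * timeout: no other thread ever signals the cv, so cv_timedwait() must
 * return -1 after roughly one second.  The elapsed jiffies are measured
 * around the wait and converted to seconds with do_div() to confirm the
 * thread really slept for at least the full timeout.
 */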
static int
splat_condvar_test5(struct file *file, void *arg)
{
        kcondvar_t condvar;
        kmutex_t mtx;
        clock_t time_left, time_before, time_after, time_delta;
        int64_t whole_delta;
        int32_t remain_delta;
        int rc = 0;

        mutex_init(&mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
        cv_init(&condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);

        splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME, "Thread going to sleep for "
                     "%d second and expecting to be woken by timeout\n", 1);

        /* Allow a 1 second timeout, plenty long to validate correctness. */
        time_before = lbolt;
        mutex_enter(&mtx);
        time_left = cv_timedwait(&condvar, &mtx, lbolt + HZ);
        mutex_exit(&mtx);
        time_after = lbolt;
        time_delta = time_after - time_before; /* XXX - Handle jiffie wrap */
        whole_delta = time_delta;
        remain_delta = do_div(whole_delta, HZ);

        if (time_left == -1) {
                if (time_delta >= HZ) {
                        splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME,
                                     "Thread correctly timed out and was asleep "
                                     "for %d.%d seconds (%d second min)\n",
                                     (int)whole_delta, remain_delta, 1);
                } else {
                        splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME,
                                     "Thread correctly timed out but was only "
                                     "asleep for %d.%d seconds (%d second "
                                     "min)\n", (int)whole_delta, remain_delta, 1);
                        rc = -ETIMEDOUT;
                }
        } else {
                splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME,
                             "Thread exited after only %d.%d seconds, it "
                             "did not hit the %d second timeout\n",
                             (int)whole_delta, remain_delta, 1);
                rc = -ETIMEDOUT;
        }

        cv_destroy(&condvar);
        mutex_destroy(&mtx);

        return rc;
}

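/*
 * Registration boilerplate: allocate the subsystem descriptor and hook
 * the five tests above into the SPLAT framework.  splat_condvar_fini()
 * unwinds the registration in reverse order and frees the descriptor.
 */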
splat_subsystem_t *
splat_condvar_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_CONDVAR_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_CONDVAR_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_CONDVAR;

        SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST1_NAME, SPLAT_CONDVAR_TEST1_DESC,
                        SPLAT_CONDVAR_TEST1_ID, splat_condvar_test1);
        SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST2_NAME, SPLAT_CONDVAR_TEST2_DESC,
                        SPLAT_CONDVAR_TEST2_ID, splat_condvar_test2);
        SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST3_NAME, SPLAT_CONDVAR_TEST3_DESC,
                        SPLAT_CONDVAR_TEST3_ID, splat_condvar_test3);
        SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST4_NAME, SPLAT_CONDVAR_TEST4_DESC,
                        SPLAT_CONDVAR_TEST4_ID, splat_condvar_test4);
        SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST5_NAME, SPLAT_CONDVAR_TEST5_DESC,
                        SPLAT_CONDVAR_TEST5_ID, splat_condvar_test5);

        return sub;
}

void
splat_condvar_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST1_ID);

        kfree(sub);
}

int
splat_condvar_id(void)
{
        return SPLAT_SUBSYSTEM_CONDVAR;
}