[mirror_spl-debian.git] / modules/splat/splat-rwlock.c
1 /*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27 #include "splat-internal.h"
28
29 #define SPLAT_SUBSYSTEM_RWLOCK 0x0700
30 #define SPLAT_RWLOCK_NAME "rwlock"
31 #define SPLAT_RWLOCK_DESC "Kernel RW Lock Tests"
32
33 #define SPLAT_RWLOCK_TEST1_ID 0x0701
34 #define SPLAT_RWLOCK_TEST1_NAME "rwtest1"
35 #define SPLAT_RWLOCK_TEST1_DESC "Multiple Readers One Writer"
36
37 #define SPLAT_RWLOCK_TEST2_ID 0x0702
38 #define SPLAT_RWLOCK_TEST2_NAME "rwtest2"
39 #define SPLAT_RWLOCK_TEST2_DESC "Multiple Writers"
40
41 #define SPLAT_RWLOCK_TEST3_ID 0x0703
42 #define SPLAT_RWLOCK_TEST3_NAME "rwtest3"
43 #define SPLAT_RWLOCK_TEST3_DESC "Owner Verification"
44
45 #define SPLAT_RWLOCK_TEST4_ID 0x0704
46 #define SPLAT_RWLOCK_TEST4_NAME "rwtest4"
47 #define SPLAT_RWLOCK_TEST4_DESC "Trylock Test"
48
49 #define SPLAT_RWLOCK_TEST5_ID 0x0705
50 #define SPLAT_RWLOCK_TEST5_NAME "rwtest5"
51 #define SPLAT_RWLOCK_TEST5_DESC "Write Downgrade Test"
52
53 #define SPLAT_RWLOCK_TEST6_ID 0x0706
54 #define SPLAT_RWLOCK_TEST6_NAME "rwtest6"
55 #define SPLAT_RWLOCK_TEST6_DESC "Read Upgrade Test"
56
57 #define SPLAT_RWLOCK_TEST_MAGIC 0x115599DDUL
58 #define SPLAT_RWLOCK_TEST_NAME "rwlock_test"
59 #define SPLAT_RWLOCK_TEST_COUNT 8
60
61 #define SPLAT_RWLOCK_RELEASE_INIT 0
62 #define SPLAT_RWLOCK_RELEASE_WRITERS 1
63 #define SPLAT_RWLOCK_RELEASE_READERS 2
64
65 typedef struct rw_priv {
66 unsigned long rw_magic;
67 struct file *rw_file;
68 krwlock_t rwl;
69 spinlock_t rw_priv_lock;
70 wait_queue_head_t rw_waitq;
71 atomic_t rw_completed;
72 atomic_t rw_acquired;
73 atomic_t rw_waiters;
74 atomic_t rw_release;
75 } rw_priv_t;
76
77 typedef struct rw_thr {
78 int rwt_id;
79 const char *rwt_name;
80 rw_priv_t *rwt_rwp;
81 int rwt_rc;
82 } rw_thr_t;
83
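/*
 * Note for readers of this test: the delay passed to splat_rwlock_sleep()
 * below is expressed in jiffies, so callers use expressions such as
 * (1 * HZ) for roughly one second or (rnd * HZ / 1000) for a random delay
 * of up to ~255 ms.  Because the task state is TASK_INTERRUPTIBLE the
 * sleep may end early; that is acceptable here since it is only used to
 * stagger and pace the worker threads.
 */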
84 static inline void
85 splat_rwlock_sleep(signed long delay)
86 {
87 set_current_state(TASK_INTERRUPTIBLE);
88 schedule_timeout(delay);
89 }
90
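/*
 * splat_rwlock_lock_and_test() evaluates an arbitrary expression while
 * holding the given spinlock and returns the result, which lets the
 * shared counters below be sampled consistently from a wait_event
 * condition or a polling loop.  A sketch of how it is used later in
 * this file:
 *
 *   while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
 *          atomic_read(&rwv.rw_acquired) <= 0))
 *           splat_rwlock_sleep(1 * HZ);
 */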
91 #define splat_rwlock_lock_and_test(lock,test) \
92 ({ \
93 int ret = 0; \
94 \
95 spin_lock(lock); \
96 ret = (test) ? 1 : 0; \
97 spin_unlock(lock); \
98 ret; \
99 })
100
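/*
 * For reference, the SPL rwlock primitives exercised by these tests
 * follow the Solaris-style interface: a lock is set up with rw_init(),
 * taken shared with RW_READER or exclusive with RW_WRITER via
 * rw_enter(), dropped with rw_exit() and torn down with rw_destroy().
 * A minimal lifecycle sketch, mirroring what splat_init_rw_priv() and
 * the tests below do (the "example" name is only illustrative):
 *
 *   krwlock_t lock;
 *
 *   rw_init(&lock, "example", RW_DEFAULT, NULL);
 *   rw_enter(&lock, RW_READER);
 *   rw_exit(&lock);
 *   rw_enter(&lock, RW_WRITER);
 *   rw_exit(&lock);
 *   rw_destroy(&lock);
 */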
101 void splat_init_rw_priv(rw_priv_t *rwv, struct file *file)
102 {
103 rwv->rw_magic = SPLAT_RWLOCK_TEST_MAGIC;
104 rwv->rw_file = file;
105 spin_lock_init(&rwv->rw_priv_lock);
106 init_waitqueue_head(&rwv->rw_waitq);
107 atomic_set(&rwv->rw_completed, 0);
108 atomic_set(&rwv->rw_acquired, 0);
109 atomic_set(&rwv->rw_waiters, 0);
110 atomic_set(&rwv->rw_release, SPLAT_RWLOCK_RELEASE_INIT);
111
112 /* Initialize the read/write lock */
113 rw_init(&rwv->rwl, SPLAT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
114 }
115
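/*
 * Test 1 overview: splat_rwlock_test1() spawns one writer and
 * SPLAT_RWLOCK_TEST_COUNT - 1 readers.  The worker threads and the
 * control thread coordinate purely through the counters in rw_priv_t:
 *
 *   rw_waiters   - threads queued on the rwlock
 *   rw_acquired  - threads currently holding the rwlock
 *   rw_completed - threads that have finished their pass
 *   rw_release   - set by the control thread to RELEASE_WRITERS or
 *                  RELEASE_READERS to tell the holders to drop the lock
 *
 * The control thread first verifies that exactly one writer holds the
 * lock while every reader waits, then releases the writer and verifies
 * that multiple readers hold the lock concurrently before releasing
 * them as well.
 */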
116 int
117 splat_rwlock_test1_writer_thread(void *arg)
118 {
119 rw_thr_t *rwt = (rw_thr_t *)arg;
120 rw_priv_t *rwv = rwt->rwt_rwp;
121 uint8_t rnd = 0;
122 char name[16];
123
124 ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
125 snprintf(name, sizeof(name), "%s%d",
126 SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
127 daemonize(name);
128 get_random_bytes((void *)&rnd, 1);
129 splat_rwlock_sleep(rnd * HZ / 1000);
130
131 spin_lock(&rwv->rw_priv_lock);
132 splat_vprint(rwv->rw_file, rwt->rwt_name,
133 "%s writer thread trying to acquire rwlock with "
134 "%d holding lock and %d waiting\n",
135 name, atomic_read(&rwv->rw_acquired),
136 atomic_read(&rwv->rw_waiters));
137 atomic_inc(&rwv->rw_waiters);
138 spin_unlock(&rwv->rw_priv_lock);
139
140 /* Take the rwlock for writing and
141 * release it when we are told to */
142 rw_enter(&rwv->rwl, RW_WRITER);
143
144 spin_lock(&rwv->rw_priv_lock);
145 atomic_dec(&rwv->rw_waiters);
146 atomic_inc(&rwv->rw_acquired);
147 splat_vprint(rwv->rw_file, rwt->rwt_name,
148 "%s writer thread acquired rwlock with "
149 "%d holding lock and %d waiting\n",
150 name, atomic_read(&rwv->rw_acquired),
151 atomic_read(&rwv->rw_waiters));
152 spin_unlock(&rwv->rw_priv_lock);
153
154 /* Wait here until the control thread
155 * says we can release the write lock */
156 wait_event_interruptible(rwv->rw_waitq,
157 splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
158 atomic_read(&rwv->rw_release) ==
159 SPLAT_RWLOCK_RELEASE_WRITERS));
160 spin_lock(&rwv->rw_priv_lock);
161 atomic_inc(&rwv->rw_completed);
162 atomic_dec(&rwv->rw_acquired);
163 splat_vprint(rwv->rw_file, rwt->rwt_name,
164 "%s writer thread dropped rwlock with "
165 "%d holding lock and %d waiting\n",
166 name, atomic_read(&rwv->rw_acquired),
167 atomic_read(&rwv->rw_waiters));
168 spin_unlock(&rwv->rw_priv_lock);
169
170 /* Release the rwlock */
171 rw_exit(&rwv->rwl);
172 return 0;
173 }
174
175 int
176 splat_rwlock_test1_reader_thread(void *arg)
177 {
178 rw_thr_t *rwt = (rw_thr_t *)arg;
179 rw_priv_t *rwv = rwt->rwt_rwp;
180 uint8_t rnd = 0;
181 char name[16];
182
183 ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
184 snprintf(name, sizeof(name), "%s%d",
185 SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
186 daemonize(name);
187 get_random_bytes((void *)&rnd, 1);
188 splat_rwlock_sleep(rnd * HZ / 1000);
189
190 /* Don't try to take the rwlock until
191 * someone else has already acquired it */
192 wait_event_interruptible(rwv->rw_waitq,
193 splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
194 atomic_read(&rwv->rw_acquired) > 0));
195
196 spin_lock(&rwv->rw_priv_lock);
197 splat_vprint(rwv->rw_file, rwt->rwt_name,
198 "%s reader thread trying to acquire rwlock with "
199 "%d holding lock and %d waiting\n",
200 name, atomic_read(&rwv->rw_acquired),
201 atomic_read(&rwv->rw_waiters));
202 atomic_inc(&rwv->rw_waiters);
203 spin_unlock(&rwv->rw_priv_lock);
204
205 /* Take the rwlock for reading and
206 * release it when we are told to */
207 rw_enter(&rwv->rwl, RW_READER);
208
209 spin_lock(&rwv->rw_priv_lock);
210 atomic_dec(&rwv->rw_waiters);
211 atomic_inc(&rwv->rw_acquired);
212 splat_vprint(rwv->rw_file, rwt->rwt_name,
213 "%s reader thread acquired rwlock with "
214 "%d holding lock and %d waiting\n",
215 name, atomic_read(&rwv->rw_acquired),
216 atomic_read(&rwv->rw_waiters));
217 spin_unlock(&rwv->rw_priv_lock);
218
219 /* Wait here until the control thread
220 * says we can release the read lock */
221 wait_event_interruptible(rwv->rw_waitq,
222 splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
223 atomic_read(&rwv->rw_release) ==
224 SPLAT_RWLOCK_RELEASE_READERS));
225
226 spin_lock(&rwv->rw_priv_lock);
227 atomic_inc(&rwv->rw_completed);
228 atomic_dec(&rwv->rw_acquired);
229 splat_vprint(rwv->rw_file, rwt->rwt_name,
230 "%s reader thread dropped rwlock with "
231 "%d holding lock and %d waiting\n",
232 name, atomic_read(&rwv->rw_acquired),
233 atomic_read(&rwv->rw_waiters));
234 spin_unlock(&rwv->rw_priv_lock);
235
236 /* Release the rwlock */
237 rw_exit(&rwv->rwl);
238 return 0;
239 }
240
241 static int
242 splat_rwlock_test1(struct file *file, void *arg)
243 {
244 int i, count = 0, rc = 0;
245 long pids[SPLAT_RWLOCK_TEST_COUNT];
246 rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
247 rw_priv_t rwv;
248
249 /* Initialize private data including the rwlock */
250 splat_init_rw_priv(&rwv, file);
251
252 /* Create some threads; the exact number isn't important as long
253 * as we know how many we managed to create and should expect. */
254 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
255 rwt[i].rwt_rwp = &rwv;
256 rwt[i].rwt_id = i;
257 rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME;
258 rwt[i].rwt_rc = 0;
259
260 /* The first thread will be a writer */
261 if (i == 0) {
262 pids[i] = kernel_thread(splat_rwlock_test1_writer_thread,
263 &rwt[i], 0);
264 } else {
265 pids[i] = kernel_thread(splat_rwlock_test1_reader_thread,
266 &rwt[i], 0);
267 }
268
269 if (pids[i] >= 0) {
270 count++;
271 }
272 }
273
274 /* Once the writer has the lock, release the readers */
275 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, atomic_read(&rwv.rw_acquired) <= 0)) {
276 splat_rwlock_sleep(1 * HZ);
277 }
278 wake_up_interruptible(&rwv.rw_waitq);
279
280 /* Ensure that there is only 1 writer and all readers are waiting */
281 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
282 atomic_read(&rwv.rw_acquired) != 1 ||
283 atomic_read(&rwv.rw_waiters) !=
284 SPLAT_RWLOCK_TEST_COUNT - 1)) {
285
286 splat_rwlock_sleep(1 * HZ);
287 }
288 /* Release the writer */
289 spin_lock(&rwv.rw_priv_lock);
290 atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS);
291 spin_unlock(&rwv.rw_priv_lock);
292 wake_up_interruptible(&rwv.rw_waitq);
293
294 /* Now ensure that there are multiple reader threads holding the lock */
295 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
296 atomic_read(&rwv.rw_acquired) <= 1)) {
297 splat_rwlock_sleep(1 * HZ);
298 }
299 /* Release the readers */
300 spin_lock(&rwv.rw_priv_lock);
301 atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_READERS);
302 spin_unlock(&rwv.rw_priv_lock);
303 wake_up_interruptible(&rwv.rw_waitq);
304
305 /* Wait for the test to complete */
306 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
307 atomic_read(&rwv.rw_acquired) != 0 ||
308 atomic_read(&rwv.rw_waiters) != 0)) {
309 splat_rwlock_sleep(1 * HZ);
310
311 }
312
313 rw_destroy(&rwv.rwl);
314 return rc;
315 }
316
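/*
 * Test 2 overview: every thread started by splat_rwlock_test2() takes
 * the rwlock as a writer, simulates some work while holding it, and
 * records a failure in rwt_rc if rw_acquired is ever observed to be
 * greater than one, i.e. if two writers were inside the critical
 * section at the same time.  The control thread simply sums the
 * per-thread results to produce the test return code.
 */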
317 int
318 splat_rwlock_test2_writer_thread(void *arg)
319 {
320 rw_thr_t *rwt = (rw_thr_t *)arg;
321 rw_priv_t *rwv = rwt->rwt_rwp;
322 uint8_t rnd = 0;
323 char name[16];
324
325 ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
326 snprintf(name, sizeof(name), "%s%d",
327 SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
328 daemonize(name);
329 get_random_bytes((void *)&rnd, 1);
330 splat_rwlock_sleep(rnd * HZ / 1000);
331
332 /* Increment the waiters count here even though we are not
333 * quite about to call rw_enter(). That is not a big deal
334 * since it will more than likely be true by the time we
335 * simulate work later on */
336 spin_lock(&rwv->rw_priv_lock);
337 splat_vprint(rwv->rw_file, rwt->rwt_name,
338 "%s writer thread trying to acquire rwlock with "
339 "%d holding lock and %d waiting\n",
340 name, atomic_read(&rwv->rw_acquired),
341 atomic_read(&rwv->rw_waiters));
342 atomic_inc(&rwv->rw_waiters);
343 spin_unlock(&rwv->rw_priv_lock);
344
345 /* Wait here until the control thread
346 * says we can acquire the write lock */
347 wait_event_interruptible(rwv->rw_waitq,
348 splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
349 atomic_read(&rwv->rw_release) ==
350 SPLAT_RWLOCK_RELEASE_WRITERS));
351
352 /* Take the rwlock for writing */
353 rw_enter(&rwv->rwl, RW_WRITER);
354
355 spin_lock(&rwv->rw_priv_lock);
356 atomic_dec(&rwv->rw_waiters);
357 atomic_inc(&rwv->rw_acquired);
358 splat_vprint(rwv->rw_file, rwt->rwt_name,
359 "%s writer thread acquired rwlock with "
360 "%d holding lock and %d waiting\n",
361 name, atomic_read(&rwv->rw_acquired),
362 atomic_read(&rwv->rw_waiters));
363 spin_unlock(&rwv->rw_priv_lock);
364
365 /* Give up the processor for a bit to simulate
366 * doing some work while taking the write lock */
367 splat_rwlock_sleep(rnd * HZ / 1000);
368
369 /* Ensure that we are the only one writing */
370 if (atomic_read(&rwv->rw_acquired) > 1) {
371 rwt->rwt_rc = 1;
372 } else {
373 rwt->rwt_rc = 0;
374 }
375
376 spin_lock(&rwv->rw_priv_lock);
377 atomic_inc(&rwv->rw_completed);
378 atomic_dec(&rwv->rw_acquired);
379 splat_vprint(rwv->rw_file, rwt->rwt_name,
380 "%s writer thread dropped rwlock with "
381 "%d holding lock and %d waiting\n",
382 name, atomic_read(&rwv->rw_acquired),
383 atomic_read(&rwv->rw_waiters));
384 spin_unlock(&rwv->rw_priv_lock);
385
386 rw_exit(&rwv->rwl);
387
388 return 0;
389 }
390
391 static int
392 splat_rwlock_test2(struct file *file, void *arg)
393 {
394 int i, count = 0, rc = 0;
395 long pids[SPLAT_RWLOCK_TEST_COUNT];
396 rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
397 rw_priv_t rwv;
398
399 /* Initialize private data including the rwlock */
400 splat_init_rw_priv(&rwv, file);
401
402 /* Create some threads; the exact number isn't important as long
403 * as we know how many we managed to create and should expect. */
404 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
405 rwt[i].rwt_rwp = &rwv;
406 rwt[i].rwt_id = i;
407 rwt[i].rwt_name = SPLAT_RWLOCK_TEST2_NAME;
408 rwt[i].rwt_rc = 0;
409
410 /* All threads will be writers */
411 pids[i] = kernel_thread(splat_rwlock_test2_writer_thread,
412 &rwt[i], 0);
413
414 if (pids[i] >= 0) {
415 count++;
416 }
417 }
418
419 /* Wait for writers to get queued up */
420 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
421 atomic_read(&rwv.rw_waiters) < SPLAT_RWLOCK_TEST_COUNT)) {
422 splat_rwlock_sleep(1 * HZ);
423 }
424 /* Release the writers */
425 spin_lock(&rwv.rw_priv_lock);
426 atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS);
427 spin_unlock(&rwv.rw_priv_lock);
428 wake_up_interruptible(&rwv.rw_waitq);
429
430 /* Wait for the test to complete */
431 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
432 atomic_read(&rwv.rw_acquired) != 0 ||
433 atomic_read(&rwv.rw_waiters) != 0)) {
434 splat_rwlock_sleep(1 * HZ);
435 }
436
437 /* If any of the write threads ever acquired the lock
438 * while another thread had it, make sure we return
439 * an error */
440 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
441 if (rwt[i].rwt_rc) {
442 rc++;
443 }
444 }
445
446 rw_destroy(&rwv.rwl);
447 return rc;
448 }
449
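/*
 * Test 3 relies on the rw_owner() semantics exercised below: when the
 * lock is write held rw_owner() reports the owning thread, and when the
 * lock is unlocked or only read held there is no single owner so it
 * reports none.  A sketch of the property being checked:
 *
 *   rw_enter(&lock, RW_WRITER);
 *   ASSERT(rw_owner(&lock) == current);
 *   rw_exit(&lock);
 *   ASSERT(rw_owner(&lock) == NULL);
 */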
450 static int
451 splat_rwlock_test3(struct file *file, void *arg)
452 {
453 kthread_t *owner;
454 rw_priv_t rwv;
455 int rc = 0;
456
457 /* Initialize private data
458 * including the rwlock */
459 splat_init_rw_priv(&rwv, file);
460
461 /* Take the rwlock for writing */
462 rw_enter(&rwv.rwl, RW_WRITER);
463 owner = rw_owner(&rwv.rwl);
464 if (current != owner) {
465 splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should "
466 "be owned by pid %d but is owned by pid %d\n",
467 current->pid, owner ? owner->pid : -1);
468 rc = -EINVAL;
469 goto out;
470 }
471
472 /* Release the rwlock */
473 rw_exit(&rwv.rwl);
474 owner = rw_owner(&rwv.rwl);
475 if (owner) {
476 splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should not "
477 "be owned but is owned by pid %d\n", owner->pid);
478 rc = -EINVAL;
479 goto out;
480 }
481
482 /* Take the rwlock for reading.
483 * Should not have an owner */
484 rw_enter(&rwv.rwl, RW_READER);
485 owner = rw_owner(&rwv.rwl);
486 if (owner) {
487 splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should not "
488 "be owned but is owned by pid %d\n", owner->pid);
489 /* Release the rwlock */
490 rw_exit(&rwv.rwl);
491 rc = -EINVAL;
492 goto out;
493 }
494
495 /* Release the rwlock */
496 rw_exit(&rwv.rwl);
497
498 out:
499 rw_destroy(&rwv.rwl);
500 return rc;
501 }
502
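/*
 * Test 4 exercises rw_tryenter(), which attempts to take the lock
 * without blocking and returns non-zero only when the lock was actually
 * acquired.  Since the test 1 writer thread is re-used to hold the lock
 * exclusively, every reader's attempt below is expected to fail.  A
 * sketch of the non-blocking pattern:
 *
 *   if (rw_tryenter(&lock, RW_READER)) {
 *           ... lock held for reading, do work ...
 *           rw_exit(&lock);
 *   } else {
 *           ... lock was busy, nothing to release ...
 *   }
 */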
503 int
504 splat_rwlock_test4_reader_thread(void *arg)
505 {
506 rw_thr_t *rwt = (rw_thr_t *)arg;
507 rw_priv_t *rwv = rwt->rwt_rwp;
508 uint8_t rnd = 0;
509 char name[16];
510
511 ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
512 snprintf(name, sizeof(name), "%s%d",
513 SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
514 daemonize(name);
515 get_random_bytes((void *)&rnd, 1);
516 splat_rwlock_sleep(rnd * HZ / 1000);
517
518 /* Don't try to take the rwlock until
519 * someone else has already acquired it */
520 wait_event_interruptible(rwv->rw_waitq,
521 splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
522 atomic_read(&rwv->rw_acquired) > 0));
523
524 spin_lock(&rwv->rw_priv_lock);
525 splat_vprint(rwv->rw_file, rwt->rwt_name,
526 "%s reader thread trying to acquire rwlock with "
527 "%d holding lock and %d waiting\n",
528 name, atomic_read(&rwv->rw_acquired),
529 atomic_read(&rwv->rw_waiters));
530 spin_unlock(&rwv->rw_priv_lock);
531
532 /* Try to take the rwlock for reading without
533 * blocking; the writer should be holding it */
534 rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER);
535
536 /* If we acquired the lock here it is a
537 * failure since the writer should be
538 * holding the lock */
539 if (rwt->rwt_rc == 1) {
540 spin_lock(&rwv->rw_priv_lock);
541 atomic_inc(&rwv->rw_acquired);
542 splat_vprint(rwv->rw_file, rwt->rwt_name,
543 "%s reader thread acquired rwlock with "
544 "%d holding lock and %d waiting\n",
545 name, atomic_read(&rwv->rw_acquired),
546 atomic_read(&rwv->rw_waiters));
547 spin_unlock(&rwv->rw_priv_lock);
548
549 spin_lock(&rwv->rw_priv_lock);
550 atomic_dec(&rwv->rw_acquired);
551 splat_vprint(rwv->rw_file, rwt->rwt_name,
552 "%s reader thread dropped rwlock with "
553 "%d holding lock and %d waiting\n",
554 name, atomic_read(&rwv->rw_acquired),
555 atomic_read(&rwv->rw_waiters));
556 spin_unlock(&rwv->rw_priv_lock);
557
558 /* Release the rwlock */
559 rw_exit(&rwv->rwl);
560 }
561 /* Here we know we didn't block and didn't
562 * acquire the rwlock for reading */
563 else {
564 spin_lock(&rwv->rw_priv_lock);
565 atomic_inc(&rwv->rw_completed);
566 splat_vprint(rwv->rw_file, rwt->rwt_name,
567 "%s reader thread could not acquire rwlock with "
568 "%d holding lock and %d waiting\n",
569 name, atomic_read(&rwv->rw_acquired),
570 atomic_read(&rwv->rw_waiters));
571 spin_unlock(&rwv->rw_priv_lock);
572 }
573
574 return 0;
575 }
576
577 static int
578 splat_rwlock_test4(struct file *file, void *arg)
579 {
580 int i, count = 0, rc = 0;
581 long pids[SPLAT_RWLOCK_TEST_COUNT];
582 rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
583 rw_priv_t rwv;
584
585 /* Initialize private data
586 * including the rwlock */
587 splat_init_rw_priv(&rwv, file);
588
589 /* Create some threads; the exact number isn't important as long
590 * as we know how many we managed to create and should expect. */
591 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
592 rwt[i].rwt_rwp = &rwv;
593 rwt[i].rwt_id = i;
594 rwt[i].rwt_name = SPLAT_RWLOCK_TEST4_NAME;
595 rwt[i].rwt_rc = 0;
596
597 /* The first thread will be a writer */
598 if (i == 0) {
599 /* We can reuse the test1 writer thread here */
600 pids[i] = kernel_thread(splat_rwlock_test1_writer_thread,
601 &rwt[i], 0);
602 } else {
603 pids[i] = kernel_thread(splat_rwlock_test4_reader_thread,
604 &rwt[i], 0);
605 }
606
607 if (pids[i] >= 0) {
608 count++;
609 }
610 }
611
612 /* Once the writer has the lock, release the readers */
613 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
614 atomic_read(&rwv.rw_acquired) <= 0)) {
615 splat_rwlock_sleep(1 * HZ);
616 }
617 wake_up_interruptible(&rwv.rw_waitq);
618
619 /* Make sure that the reader threads complete */
620 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
621 atomic_read(&rwv.rw_completed) != SPLAT_RWLOCK_TEST_COUNT - 1)) {
622 splat_rwlock_sleep(1 * HZ);
623 }
624 /* Release the writer */
625 spin_lock(&rwv.rw_priv_lock);
626 atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS);
627 spin_unlock(&rwv.rw_priv_lock);
628 wake_up_interruptible(&rwv.rw_waitq);
629
630 /* Wait for the test to complete */
631 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
632 atomic_read(&rwv.rw_acquired) != 0 ||
633 atomic_read(&rwv.rw_waiters) != 0)) {
634 splat_rwlock_sleep(1 * HZ);
635 }
636
637 /* If any of the reader threads ever acquired the lock
638 * while another thread had it, make sure we return
639 * an error since the rw_tryenter() should have failed */
640 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
641 if (rwt[i].rwt_rc) {
642 rc++;
643 }
644 }
645
646 rw_destroy(&rwv.rwl);
647 return rc;
648 }
649
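/*
 * Test 5 exercises rw_downgrade(), which converts a held write lock
 * into a read lock without ever dropping it.  After the downgrade the
 * lock is read held, so rw_owner() should no longer report an owner and
 * a single rw_exit() releases the lock.  A sketch of the expected
 * sequence:
 *
 *   rw_enter(&lock, RW_WRITER);
 *   rw_downgrade(&lock);
 *   rw_exit(&lock);
 */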
650 static int
651 splat_rwlock_test5(struct file *file, void *arg)
652 {
653 kthread_t *owner;
654 rw_priv_t rwv;
655 int rc = 0;
656
657 /* Initialize private data
658 * including the rwlock */
659 splat_init_rw_priv(&rwv, file);
660
661 /* Take the rwlock for writing */
662 rw_enter(&rwv.rwl, RW_WRITER);
663 owner = rw_owner(&rwv.rwl);
664 if (current != owner) {
665 splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "rwlock should "
666 "be owned by pid %d but is owned by pid %d\n",
667 current->pid, owner ? owner->pid : -1);
668 rc = -EINVAL;
669 goto out;
670 }
671
672 /* Downgrade to a read lock and make
673 * sure that it worked properly */
674 rw_downgrade(&rwv.rwl);
675
676 owner = rw_owner(&rwv.rwl);
677 if (owner) {
678 splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "rwlock should not "
679 "be owned but is owned by pid %d\n", owner->pid);
680 /* Release the rwlock */
681 rw_exit(&rwv.rwl);
682 rc = -EINVAL;
683 goto out;
684 }
685
686 /* Release the rwlock */
687 rw_exit(&rwv.rwl);
688
689 out:
690 rw_destroy(&rwv.rwl);
691 return rc;
692 }
693
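/*
 * Test 6 exercises rw_tryupgrade(), which attempts to convert a held
 * read lock into a write lock without blocking and returns non-zero
 * only on success.  With other readers present the upgrade may fail,
 * but here this thread is the only holder so it is expected to succeed
 * and rw_owner() should then report this thread.  A sketch:
 *
 *   rw_enter(&lock, RW_READER);
 *   if (rw_tryupgrade(&lock)) {
 *           ... now held as RW_WRITER ...
 *   }
 *   rw_exit(&lock);
 */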
694 static int
695 splat_rwlock_test6(struct file *file, void *arg)
696 {
697 kthread_t *owner;
698 rw_priv_t rwv;
699 int rc = 0;
700
701 /* Initialize private data
702 * including the rwlock */
703 splat_init_rw_priv(&rwv, file);
704
705 /* Take the rwlock for reading */
706 rw_enter(&rwv.rwl, RW_READER);
707 owner = rw_owner(&rwv.rwl);
708 if (owner) {
709 splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should not "
710 "be owned but is owned by pid %d\n", owner->pid);
711 rc = -EINVAL;
712 goto out;
713 }
714
715 /* Try to upgrade to a write lock and
716 * make sure that it worked properly */
717 rc = !rw_tryupgrade(&rwv.rwl);
718
719 owner = rw_owner(&rwv.rwl);
720 if (rc || current != owner) {
721 splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should "
722 "be owned by pid %d but is owned by pid %d "
723 "trylock rc %d\n",
724 current->pid, owner ? owner->pid : -1, rc);
725 rc = -EINVAL;
726 goto out;
727 }
728
729 /* Release the rwlock */
730 rw_exit(&rwv.rwl);
731
732 out:
733 rw_destroy(&rwv.rwl);
734 return rc;
735 }
736
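/*
 * Registration follows the common splat pattern: splat_rwlock_init()
 * allocates the subsystem descriptor and registers each test with
 * SPLAT_TEST_INIT(), while splat_rwlock_fini() unwinds the list with
 * SPLAT_TEST_FINI() in reverse order.  As an illustration only, a
 * hypothetical seventh test would be wired up the same way (the TEST7
 * names below do not exist in this file):
 *
 *   #define SPLAT_RWLOCK_TEST7_ID   0x0707
 *   #define SPLAT_RWLOCK_TEST7_NAME "rwtest7"
 *
 *   SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST7_NAME, "...",
 *                   SPLAT_RWLOCK_TEST7_ID, splat_rwlock_test7);
 *   ...
 *   SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST7_ID);
 */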
737 splat_subsystem_t *
738 splat_rwlock_init(void)
739 {
740 splat_subsystem_t *sub;
741
742 sub = kmalloc(sizeof(*sub), GFP_KERNEL);
743 if (sub == NULL)
744 return NULL;
745
746 memset(sub, 0, sizeof(*sub));
747 strncpy(sub->desc.name, SPLAT_RWLOCK_NAME, SPLAT_NAME_SIZE);
748 strncpy(sub->desc.desc, SPLAT_RWLOCK_DESC, SPLAT_DESC_SIZE);
749 INIT_LIST_HEAD(&sub->subsystem_list);
750 INIT_LIST_HEAD(&sub->test_list);
751 spin_lock_init(&sub->test_lock);
752 sub->desc.id = SPLAT_SUBSYSTEM_RWLOCK;
753
754 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST1_NAME, SPLAT_RWLOCK_TEST1_DESC,
755 SPLAT_RWLOCK_TEST1_ID, splat_rwlock_test1);
756 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST2_NAME, SPLAT_RWLOCK_TEST2_DESC,
757 SPLAT_RWLOCK_TEST2_ID, splat_rwlock_test2);
758 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST3_NAME, SPLAT_RWLOCK_TEST3_DESC,
759 SPLAT_RWLOCK_TEST3_ID, splat_rwlock_test3);
760 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST4_NAME, SPLAT_RWLOCK_TEST4_DESC,
761 SPLAT_RWLOCK_TEST4_ID, splat_rwlock_test4);
762 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST5_NAME, SPLAT_RWLOCK_TEST5_DESC,
763 SPLAT_RWLOCK_TEST5_ID, splat_rwlock_test5);
764 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST6_NAME, SPLAT_RWLOCK_TEST6_DESC,
765 SPLAT_RWLOCK_TEST6_ID, splat_rwlock_test6);
766
767 return sub;
768 }
769
770 void
771 splat_rwlock_fini(splat_subsystem_t *sub)
772 {
773 ASSERT(sub);
774 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST6_ID);
775 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST5_ID);
776 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST4_ID);
777 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST3_ID);
778 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST2_ID);
779 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST1_ID);
780 kfree(sub);
781 }
782
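/*
 * The subsystem id returned here must match the SPLAT_SUBSYSTEM_RWLOCK
 * value registered in splat_rwlock_init() above; the generic splat
 * framework uses it to identify this set of tests.
 */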
783 int
784 splat_rwlock_id(void) {
785 return SPLAT_SUBSYSTEM_RWLOCK;
786 }