#include <splat-ctl.h>

#define KZT_SUBSYSTEM_RWLOCK            0x0700
#define KZT_RWLOCK_NAME                 "rwlock"
#define KZT_RWLOCK_DESC                 "Kernel RW Lock Tests"

#define KZT_RWLOCK_TEST1_ID             0x0701
#define KZT_RWLOCK_TEST1_NAME           "rwtest1"
#define KZT_RWLOCK_TEST1_DESC           "Multiple Readers One Writer"

#define KZT_RWLOCK_TEST2_ID             0x0702
#define KZT_RWLOCK_TEST2_NAME           "rwtest2"
#define KZT_RWLOCK_TEST2_DESC           "Multiple Writers"

#define KZT_RWLOCK_TEST3_ID             0x0703
#define KZT_RWLOCK_TEST3_NAME           "rwtest3"
#define KZT_RWLOCK_TEST3_DESC           "Owner Verification"

#define KZT_RWLOCK_TEST4_ID             0x0704
#define KZT_RWLOCK_TEST4_NAME           "rwtest4"
#define KZT_RWLOCK_TEST4_DESC           "Trylock Test"

#define KZT_RWLOCK_TEST5_ID             0x0705
#define KZT_RWLOCK_TEST5_NAME           "rwtest5"
#define KZT_RWLOCK_TEST5_DESC           "Write Downgrade Test"

#define KZT_RWLOCK_TEST6_ID             0x0706
#define KZT_RWLOCK_TEST6_NAME           "rwtest6"
#define KZT_RWLOCK_TEST6_DESC           "Read Upgrade Test"

#define KZT_RWLOCK_TEST_MAGIC           0x115599DDUL
#define KZT_RWLOCK_TEST_NAME            "rwlock_test"
#define KZT_RWLOCK_TEST_COUNT           8

#define KZT_RWLOCK_RELEASE_INIT         0
#define KZT_RWLOCK_RELEASE_WRITERS      1
#define KZT_RWLOCK_RELEASE_READERS      2

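/* Shared state for a single rwlock test run.  The counters are atomic;
 * the spinlock serializes counter updates with the status messages that
 * report them so the printed snapshots stay consistent. */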
typedef struct rw_priv {
        unsigned long rw_magic;
        struct file *rw_file;
        krwlock_t rwl;
        spinlock_t rw_priv_lock;
        wait_queue_head_t rw_waitq;
        atomic_t rw_completed;
        atomic_t rw_acquired;
        atomic_t rw_waiters;
        atomic_t rw_release;
} rw_priv_t;

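/* Per-thread argument: the thread's id, the test name it logs under, a
 * pointer back to the shared state, and the thread's result code. */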
typedef struct rw_thr {
        int rwt_id;
        const char *rwt_name;
        rw_priv_t *rwt_rwp;
        int rwt_rc;
} rw_thr_t;

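/* Put the current task to sleep, interruptibly, for up to 'delay' jiffies. */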
static inline void
kzt_rwlock_sleep(signed long delay)
{
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(delay);
}

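/* Evaluate 'test' while holding 'lock' and return the result; used as the
 * condition for wait_event_interruptible() and the polling loops below. */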
#define kzt_rwlock_lock_and_test(lock,test)     \
({                                              \
        int ret = 0;                            \
                                                \
        spin_lock(lock);                        \
        ret = (test) ? 1 : 0;                   \
        spin_unlock(lock);                      \
        ret;                                    \
})

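/* Initialize the shared test state: magic, log file, spinlock, wait queue,
 * counters, and the rwlock under test. */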
void kzt_init_rw_priv(rw_priv_t *rwv, struct file *file)
{
        rwv->rw_magic = KZT_RWLOCK_TEST_MAGIC;
        rwv->rw_file = file;
        spin_lock_init(&rwv->rw_priv_lock);
        init_waitqueue_head(&rwv->rw_waitq);
        atomic_set(&rwv->rw_completed, 0);
        atomic_set(&rwv->rw_acquired, 0);
        atomic_set(&rwv->rw_waiters, 0);
        atomic_set(&rwv->rw_release, KZT_RWLOCK_RELEASE_INIT);

        /* Initialize the read/write lock */
        rw_init(&rwv->rwl, KZT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
}

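/* Test 1 writer: take the rwlock as RW_WRITER and hold it until the
 * control thread sets rw_release to KZT_RWLOCK_RELEASE_WRITERS. */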
int
kzt_rwlock_test1_writer_thread(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwv = rwt->rwt_rwp;
        uint8_t rnd = 0;
        char name[16];

        ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
        snprintf(name, sizeof(name), "%s%d",
                 KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
        daemonize(name);
        get_random_bytes((void *)&rnd, 1);
        kzt_rwlock_sleep(rnd * HZ / 1000);

        spin_lock(&rwv->rw_priv_lock);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s writer thread trying to acquire rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        atomic_inc(&rwv->rw_waiters);
        spin_unlock(&rwv->rw_priv_lock);

        /* Take the semaphore for writing
         * release it when we are told to */
        rw_enter(&rwv->rwl, RW_WRITER);

        spin_lock(&rwv->rw_priv_lock);
        atomic_dec(&rwv->rw_waiters);
        atomic_inc(&rwv->rw_acquired);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s writer thread acquired rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Wait here until the control thread
         * says we can release the write lock */
        wait_event_interruptible(rwv->rw_waitq,
                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
                        atomic_read(&rwv->rw_release) ==
                        KZT_RWLOCK_RELEASE_WRITERS));
        spin_lock(&rwv->rw_priv_lock);
        atomic_inc(&rwv->rw_completed);
        atomic_dec(&rwv->rw_acquired);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s writer thread dropped rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Release the semaphore */
        rw_exit(&rwv->rwl);
        return 0;
}

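/* Test 1 reader: wait until the writer is holding the rwlock, block in
 * rw_enter(RW_READER) behind it, then hold the read lock until released. */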
int
kzt_rwlock_test1_reader_thread(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwv = rwt->rwt_rwp;
        uint8_t rnd = 0;
        char name[16];

        ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
        snprintf(name, sizeof(name), "%s%d",
                 KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
        daemonize(name);
        get_random_bytes((void *)&rnd, 1);
        kzt_rwlock_sleep(rnd * HZ / 1000);

        /* Don't try to take the semaphore until
         * someone else has already acquired it */
        wait_event_interruptible(rwv->rw_waitq,
                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
                        atomic_read(&rwv->rw_acquired) > 0));

        spin_lock(&rwv->rw_priv_lock);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s reader thread trying to acquire rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        atomic_inc(&rwv->rw_waiters);
        spin_unlock(&rwv->rw_priv_lock);

        /* Take the semaphore for reading
         * release it when we are told to */
        rw_enter(&rwv->rwl, RW_READER);

        spin_lock(&rwv->rw_priv_lock);
        atomic_dec(&rwv->rw_waiters);
        atomic_inc(&rwv->rw_acquired);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s reader thread acquired rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Wait here until the control thread
         * says we can release the read lock */
        wait_event_interruptible(rwv->rw_waitq,
                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
                        atomic_read(&rwv->rw_release) ==
                        KZT_RWLOCK_RELEASE_READERS));

        spin_lock(&rwv->rw_priv_lock);
        atomic_inc(&rwv->rw_completed);
        atomic_dec(&rwv->rw_acquired);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s reader thread dropped rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Release the semaphore */
        rw_exit(&rwv->rwl);
        return 0;
}

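/* Test 1 control: spawn one writer and KZT_RWLOCK_TEST_COUNT - 1 readers,
 * verify the readers all queue up behind the writer, then release the
 * writer followed by the readers. */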
static int
kzt_rwlock_test1(struct file *file, void *arg)
{
        int i, count = 0, rc = 0;
        long pids[KZT_RWLOCK_TEST_COUNT];
        rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
        rw_priv_t rwv;

        /* Initialize private data
         * including the rwlock */
        kzt_init_rw_priv(&rwv, file);

        /* Create some threads, the exact number isn't important just as
         * long as we know how many we managed to create and should expect. */
        for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
                rwt[i].rwt_rwp = &rwv;
                rwt[i].rwt_id = i;
                rwt[i].rwt_name = KZT_RWLOCK_TEST1_NAME;
                rwt[i].rwt_rc = 0;

                /* The first thread will be a writer */
                if (i == 0) {
                        pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
                                                &rwt[i], 0);
                } else {
                        pids[i] = kernel_thread(kzt_rwlock_test1_reader_thread,
                                                &rwt[i], 0);
                }

                if (pids[i] >= 0) {
                        count++;
                }
        }

        /* Once the writer has the lock, release the readers */
        while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
                        atomic_read(&rwv.rw_acquired) <= 0)) {
                kzt_rwlock_sleep(1 * HZ);
        }
        wake_up_interruptible(&rwv.rw_waitq);

        /* Ensure that there is only 1 writer and all readers are waiting */
        while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
                        atomic_read(&rwv.rw_acquired) != 1 ||
                        atomic_read(&rwv.rw_waiters) !=
                        KZT_RWLOCK_TEST_COUNT - 1)) {
                kzt_rwlock_sleep(1 * HZ);
        }
        /* Release the writer */
        spin_lock(&rwv.rw_priv_lock);
        atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
        spin_unlock(&rwv.rw_priv_lock);
        wake_up_interruptible(&rwv.rw_waitq);

        /* Now ensure that there are multiple reader threads holding the lock */
        while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
                        atomic_read(&rwv.rw_acquired) <= 1)) {
                kzt_rwlock_sleep(1 * HZ);
        }
        /* Release the readers */
        spin_lock(&rwv.rw_priv_lock);
        atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_READERS);
        spin_unlock(&rwv.rw_priv_lock);
        wake_up_interruptible(&rwv.rw_waitq);

        /* Wait for the test to complete */
        while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
                        atomic_read(&rwv.rw_acquired) != 0 ||
                        atomic_read(&rwv.rw_waiters) != 0)) {
                kzt_rwlock_sleep(1 * HZ);
        }

        rw_destroy(&rwv.rwl);
        return rc;
}

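/* Test 2 writer: wait for the go-ahead from the control thread, take the
 * rwlock as RW_WRITER, simulate some work, and flag an error if any other
 * thread held the lock at the same time. */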
int
kzt_rwlock_test2_writer_thread(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwv = rwt->rwt_rwp;
        uint8_t rnd = 0;
        char name[16];

        ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
        snprintf(name, sizeof(name), "%s%d",
                 KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
        daemonize(name);
        get_random_bytes((void *)&rnd, 1);
        kzt_rwlock_sleep(rnd * HZ / 1000);

        /* Increment the waiters count here even though we are not quite
         * about to call rw_enter().  Not a big deal, since it will more
         * than likely be true by the time we simulate work later on. */
        spin_lock(&rwv->rw_priv_lock);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s writer thread trying to acquire rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        atomic_inc(&rwv->rw_waiters);
        spin_unlock(&rwv->rw_priv_lock);

        /* Wait here until the control thread
         * says we can acquire the write lock */
        wait_event_interruptible(rwv->rw_waitq,
                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
                        atomic_read(&rwv->rw_release) ==
                        KZT_RWLOCK_RELEASE_WRITERS));

        /* Take the semaphore for writing */
        rw_enter(&rwv->rwl, RW_WRITER);

        spin_lock(&rwv->rw_priv_lock);
        atomic_dec(&rwv->rw_waiters);
        atomic_inc(&rwv->rw_acquired);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s writer thread acquired rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Give up the processor for a bit to simulate
         * doing some work while holding the write lock */
        kzt_rwlock_sleep(rnd * HZ / 1000);

        /* Ensure that we are the only one writing */
        if (atomic_read(&rwv->rw_acquired) > 1) {
                rwt->rwt_rc = 1;
        } else {
                rwt->rwt_rc = 0;
        }

        spin_lock(&rwv->rw_priv_lock);
        atomic_inc(&rwv->rw_completed);
        atomic_dec(&rwv->rw_acquired);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s writer thread dropped rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        rw_exit(&rwv->rwl);

        return 0;
}

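/* Test 2 control: spawn KZT_RWLOCK_TEST_COUNT writer threads, release them
 * all at once, and fail if any two of them ever held the lock together. */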
static int
kzt_rwlock_test2(struct file *file, void *arg)
{
        int i, count = 0, rc = 0;
        long pids[KZT_RWLOCK_TEST_COUNT];
        rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
        rw_priv_t rwv;

        /* Initialize private data
         * including the rwlock */
        kzt_init_rw_priv(&rwv, file);

        /* Create some threads, the exact number isn't important just as
         * long as we know how many we managed to create and should expect. */
        for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
                rwt[i].rwt_rwp = &rwv;
                rwt[i].rwt_id = i;
                rwt[i].rwt_name = KZT_RWLOCK_TEST2_NAME;
                rwt[i].rwt_rc = 0;

                /* Every thread will be a writer */
                pids[i] = kernel_thread(kzt_rwlock_test2_writer_thread,
                                        &rwt[i], 0);

                if (pids[i] >= 0) {
                        count++;
                }
        }

        /* Wait for writers to get queued up */
        while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
                        atomic_read(&rwv.rw_waiters) < KZT_RWLOCK_TEST_COUNT)) {
                kzt_rwlock_sleep(1 * HZ);
        }
        /* Release the writers */
        spin_lock(&rwv.rw_priv_lock);
        atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
        spin_unlock(&rwv.rw_priv_lock);
        wake_up_interruptible(&rwv.rw_waitq);

        /* Wait for the test to complete */
        while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
                        atomic_read(&rwv.rw_acquired) != 0 ||
                        atomic_read(&rwv.rw_waiters) != 0)) {
                kzt_rwlock_sleep(1 * HZ);
        }

        /* If any of the write threads ever acquired the lock
         * while another thread had it, make sure we return
         * an error */
        for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
                if (rwt[i].rwt_rc) {
                        rc++;
                }
        }

        rw_destroy(&rwv.rwl);
        return rc;
}

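/* Test 3: verify rw_owner() reporting.  A write holder should be reported
 * as the owner; an unlocked or read-held rwlock should report no owner. */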
static int
kzt_rwlock_test3(struct file *file, void *arg)
{
        kthread_t *owner;
        rw_priv_t rwv;
        int rc = 0;

        /* Initialize private data
         * including the rwlock */
        kzt_init_rw_priv(&rwv, file);

        /* Take the rwlock for writing */
        rw_enter(&rwv.rwl, RW_WRITER);
        owner = rw_owner(&rwv.rwl);
        if (current != owner) {
                kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should "
                           "be owned by pid %d but is owned by pid %d\n",
                           current->pid, owner ? owner->pid : -1);
                /* Release the rwlock before tearing it down */
                rw_exit(&rwv.rwl);
                rc = -EINVAL;
                goto out;
        }

        /* Release the rwlock */
        rw_exit(&rwv.rwl);
        owner = rw_owner(&rwv.rwl);
        if (owner) {
                kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
                           "be owned but is owned by pid %d\n", owner->pid);
                rc = -EINVAL;
                goto out;
        }

        /* Take the rwlock for reading.
         * Should not have an owner */
        rw_enter(&rwv.rwl, RW_READER);
        owner = rw_owner(&rwv.rwl);
        if (owner) {
                kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
                           "be owned but is owned by pid %d\n", owner->pid);
                /* Release the rwlock */
                rw_exit(&rwv.rwl);
                rc = -EINVAL;
                goto out;
        }

        /* Release the rwlock */
        rw_exit(&rwv.rwl);

out:
        rw_destroy(&rwv.rwl);
        return rc;
}

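/* Test 4 reader: once the writer is holding the rwlock, attempt a
 * non-blocking rw_tryenter(RW_READER).  A successful trylock is recorded
 * as a test failure, since the writer should have made it fail. */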
int
kzt_rwlock_test4_reader_thread(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwv = rwt->rwt_rwp;
        uint8_t rnd = 0;
        char name[16];

        ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
        snprintf(name, sizeof(name), "%s%d",
                 KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
        daemonize(name);
        get_random_bytes((void *)&rnd, 1);
        kzt_rwlock_sleep(rnd * HZ / 1000);

        /* Don't try to take the semaphore until
         * someone else has already acquired it */
        wait_event_interruptible(rwv->rw_waitq,
                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
                        atomic_read(&rwv->rw_acquired) > 0));

        spin_lock(&rwv->rw_priv_lock);
        kzt_vprint(rwv->rw_file, rwt->rwt_name,
                   "%s reader thread trying to acquire rwlock with "
                   "%d holding lock and %d waiting\n",
                   name, atomic_read(&rwv->rw_acquired),
                   atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Attempt to take the semaphore for reading;
         * this should fail while the writer holds it */
        rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER);

        /* If we acquired the lock this is a
         * failure since the writer should be
         * holding the lock */
        if (rwt->rwt_rc == 1) {
                spin_lock(&rwv->rw_priv_lock);
                atomic_inc(&rwv->rw_acquired);
                kzt_vprint(rwv->rw_file, rwt->rwt_name,
                           "%s reader thread acquired rwlock with "
                           "%d holding lock and %d waiting\n",
                           name, atomic_read(&rwv->rw_acquired),
                           atomic_read(&rwv->rw_waiters));
                spin_unlock(&rwv->rw_priv_lock);

                spin_lock(&rwv->rw_priv_lock);
                atomic_dec(&rwv->rw_acquired);
                kzt_vprint(rwv->rw_file, rwt->rwt_name,
                           "%s reader thread dropped rwlock with "
                           "%d holding lock and %d waiting\n",
                           name, atomic_read(&rwv->rw_acquired),
                           atomic_read(&rwv->rw_waiters));
                spin_unlock(&rwv->rw_priv_lock);

                /* Release the semaphore */
                rw_exit(&rwv->rwl);
        }
        /* Here we know we didn't block and didn't
         * acquire the rwlock for reading */
        else {
                spin_lock(&rwv->rw_priv_lock);
                atomic_inc(&rwv->rw_completed);
                kzt_vprint(rwv->rw_file, rwt->rwt_name,
                           "%s reader thread could not acquire rwlock with "
                           "%d holding lock and %d waiting\n",
                           name, atomic_read(&rwv->rw_acquired),
                           atomic_read(&rwv->rw_waiters));
                spin_unlock(&rwv->rw_priv_lock);
        }

        return 0;
}

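/* Test 4 control: reuse the test 1 writer to hold the rwlock, then let the
 * reader threads verify that rw_tryenter() fails while it is held. */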
static int
kzt_rwlock_test4(struct file *file, void *arg)
{
        int i, count = 0, rc = 0;
        long pids[KZT_RWLOCK_TEST_COUNT];
        rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
        rw_priv_t rwv;

        /* Initialize private data
         * including the rwlock */
        kzt_init_rw_priv(&rwv, file);

        /* Create some threads, the exact number isn't important just as
         * long as we know how many we managed to create and should expect. */
        for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
                rwt[i].rwt_rwp = &rwv;
                rwt[i].rwt_id = i;
                rwt[i].rwt_name = KZT_RWLOCK_TEST4_NAME;
                rwt[i].rwt_rc = 0;

                /* The first thread will be a writer */
                if (i == 0) {
                        /* We can reuse the test1 writer thread here */
                        pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
                                                &rwt[i], 0);
                } else {
                        pids[i] = kernel_thread(kzt_rwlock_test4_reader_thread,
                                                &rwt[i], 0);
                }

                if (pids[i] >= 0) {
                        count++;
                }
        }

        /* Once the writer has the lock, release the readers */
        while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
                        atomic_read(&rwv.rw_acquired) <= 0)) {
                kzt_rwlock_sleep(1 * HZ);
        }
        wake_up_interruptible(&rwv.rw_waitq);

        /* Make sure that the reader threads complete */
        while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
                        atomic_read(&rwv.rw_completed) !=
                        KZT_RWLOCK_TEST_COUNT - 1)) {
                kzt_rwlock_sleep(1 * HZ);
        }
        /* Release the writer */
        spin_lock(&rwv.rw_priv_lock);
        atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
        spin_unlock(&rwv.rw_priv_lock);
        wake_up_interruptible(&rwv.rw_waitq);

        /* Wait for the test to complete */
        while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
                        atomic_read(&rwv.rw_acquired) != 0 ||
                        atomic_read(&rwv.rw_waiters) != 0)) {
                kzt_rwlock_sleep(1 * HZ);
        }

        /* If any of the reader threads ever acquired the lock
         * while another thread had it, make sure we return
         * an error since the rw_tryenter() should have failed */
        for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
                if (rwt[i].rwt_rc) {
                        rc++;
                }
        }

        rw_destroy(&rwv.rwl);
        return rc;
}

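/* Test 5: take the rwlock as a writer, rw_downgrade() it to a read lock,
 * and confirm rw_owner() no longer reports an owner. */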
static int
kzt_rwlock_test5(struct file *file, void *arg)
{
        kthread_t *owner;
        rw_priv_t rwv;
        int rc = 0;

        /* Initialize private data
         * including the rwlock */
        kzt_init_rw_priv(&rwv, file);

        /* Take the rwlock for writing */
        rw_enter(&rwv.rwl, RW_WRITER);
        owner = rw_owner(&rwv.rwl);
        if (current != owner) {
                kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should "
                           "be owned by pid %d but is owned by pid %d\n",
                           current->pid, owner ? owner->pid : -1);
                /* Release the rwlock before tearing it down */
                rw_exit(&rwv.rwl);
                rc = -EINVAL;
                goto out;
        }

        /* Make sure that the downgrade
         * worked properly */
        rw_downgrade(&rwv.rwl);

        owner = rw_owner(&rwv.rwl);
        if (owner) {
                kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should not "
                           "be owned but is owned by pid %d\n", owner->pid);
                /* Release the rwlock */
                rw_exit(&rwv.rwl);
                rc = -EINVAL;
                goto out;
        }

        /* Release the rwlock */
        rw_exit(&rwv.rwl);

out:
        rw_destroy(&rwv.rwl);
        return rc;
}

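/* Test 6: take the rwlock as a reader, rw_tryupgrade() it to a write lock,
 * and confirm rw_owner() then reports the current task as the owner. */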
static int
kzt_rwlock_test6(struct file *file, void *arg)
{
        kthread_t *owner;
        rw_priv_t rwv;
        int rc = 0;

        /* Initialize private data
         * including the rwlock */
        kzt_init_rw_priv(&rwv, file);

        /* Take the rwlock for reading */
        rw_enter(&rwv.rwl, RW_READER);
        owner = rw_owner(&rwv.rwl);
        if (owner) {
                kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should not "
                           "be owned but is owned by pid %d\n", owner->pid);
                /* Release the rwlock before tearing it down */
                rw_exit(&rwv.rwl);
                rc = -EINVAL;
                goto out;
        }

        /* Make sure that the upgrade
         * worked properly */
        rc = !rw_tryupgrade(&rwv.rwl);

        owner = rw_owner(&rwv.rwl);
        if (rc || current != owner) {
                kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should "
                           "be owned by pid %d but is owned by pid %d "
                           "trylock rc %d\n",
                           current->pid, owner ? owner->pid : -1, rc);
                /* Still held as either a reader or a writer; release it */
                rw_exit(&rwv.rwl);
                rc = -EINVAL;
                goto out;
        }

        /* Release the rwlock */
        rw_exit(&rwv.rwl);

out:
        rw_destroy(&rwv.rwl);
        return rc;
}

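/* Allocate and populate the rwlock test subsystem, registering each of
 * the six tests above with the test framework. */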
kzt_subsystem_t *
kzt_rwlock_init(void)
{
        kzt_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, KZT_RWLOCK_NAME, KZT_NAME_SIZE);
        strncpy(sub->desc.desc, KZT_RWLOCK_DESC, KZT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = KZT_SUBSYSTEM_RWLOCK;

        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST1_NAME, KZT_RWLOCK_TEST1_DESC,
                      KZT_RWLOCK_TEST1_ID, kzt_rwlock_test1);
        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST2_NAME, KZT_RWLOCK_TEST2_DESC,
                      KZT_RWLOCK_TEST2_ID, kzt_rwlock_test2);
        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST3_NAME, KZT_RWLOCK_TEST3_DESC,
                      KZT_RWLOCK_TEST3_ID, kzt_rwlock_test3);
        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST4_NAME, KZT_RWLOCK_TEST4_DESC,
                      KZT_RWLOCK_TEST4_ID, kzt_rwlock_test4);
        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST5_NAME, KZT_RWLOCK_TEST5_DESC,
                      KZT_RWLOCK_TEST5_ID, kzt_rwlock_test5);
        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST6_NAME, KZT_RWLOCK_TEST6_DESC,
                      KZT_RWLOCK_TEST6_ID, kzt_rwlock_test6);

        return sub;
}

void
kzt_rwlock_fini(kzt_subsystem_t *sub)
{
        ASSERT(sub);
        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST6_ID);
        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST5_ID);
        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST4_ID);
        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST3_ID);
        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST2_ID);
        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST1_ID);
        kfree(sub);
}

int
kzt_rwlock_id(void)
{
        return KZT_SUBSYSTEM_RWLOCK;
}