/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Read/Writer Lock Tests.
\*****************************************************************************/
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/taskq.h>
#include <linux/delay.h>
#include <linux/mm_compat.h>
#include "splat-internal.h"

#define SPLAT_RWLOCK_NAME		"rwlock"
#define SPLAT_RWLOCK_DESC		"Kernel RW Lock Tests"

#define SPLAT_RWLOCK_TEST1_ID		0x0701
#define SPLAT_RWLOCK_TEST1_NAME		"N-rd/1-wr"
#define SPLAT_RWLOCK_TEST1_DESC		"Multiple readers one writer"

#define SPLAT_RWLOCK_TEST2_ID		0x0702
#define SPLAT_RWLOCK_TEST2_NAME		"0-rd/N-wr"
#define SPLAT_RWLOCK_TEST2_DESC		"Multiple writers"

#define SPLAT_RWLOCK_TEST3_ID		0x0703
#define SPLAT_RWLOCK_TEST3_NAME		"held"
#define SPLAT_RWLOCK_TEST3_DESC		"RW_{LOCK|READ|WRITE}_HELD"

#define SPLAT_RWLOCK_TEST4_ID		0x0704
#define SPLAT_RWLOCK_TEST4_NAME		"tryenter"
#define SPLAT_RWLOCK_TEST4_DESC		"Tryenter"

#define SPLAT_RWLOCK_TEST5_ID		0x0705
#define SPLAT_RWLOCK_TEST5_NAME		"rw_downgrade"
#define SPLAT_RWLOCK_TEST5_DESC		"Write downgrade"

#define SPLAT_RWLOCK_TEST6_ID		0x0706
#define SPLAT_RWLOCK_TEST6_NAME		"rw_tryupgrade-1"
#define SPLAT_RWLOCK_TEST6_DESC		"rwsem->count value"

#define SPLAT_RWLOCK_TEST7_ID		0x0707
#define SPLAT_RWLOCK_TEST7_NAME		"rw_tryupgrade-2"
#define SPLAT_RWLOCK_TEST7_DESC		"Read upgrade"

#define SPLAT_RWLOCK_TEST_MAGIC		0x115599DDUL
#define SPLAT_RWLOCK_TEST_NAME		"rwlock_test"
#define SPLAT_RWLOCK_TEST_TASKQ		"rwlock_taskq"
#define SPLAT_RWLOCK_TEST_COUNT		8

#define SPLAT_RWLOCK_RELEASE_INIT	0
#define SPLAT_RWLOCK_RELEASE_WR		1
#define SPLAT_RWLOCK_RELEASE_RD		2

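/*
 * Shared state for the rwlock tests.  One rw_priv_t is allocated per test
 * run; the spinlock protects the holder, waiter, and completion counters,
 * and the wait queue coordinates the worker threads with the controlling
 * test thread.
 */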
typedef struct rw_priv {
	unsigned long rw_magic;
	struct file *rw_file;
	krwlock_t rw_rwlock;
	spinlock_t rw_lock;
	wait_queue_head_t rw_waitq;
	int rw_completed;
	int rw_holders;
	int rw_waiters;
	int rw_release;
	int rw_rc;
	krw_t rw_type;
} rw_priv_t;

typedef struct rw_thr {
	const char *rwt_name;
	rw_priv_t *rwt_rwp;
	struct task_struct *rwt_thread;
} rw_thr_t;

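/* Initialize the shared test state and the rwlock under test. */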
void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
{
	rwp->rw_magic = SPLAT_RWLOCK_TEST_MAGIC;
	rwp->rw_file = file;
	rw_init(&rwp->rw_rwlock, SPLAT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
	spin_lock_init(&rwp->rw_lock);
	init_waitqueue_head(&rwp->rw_waitq);
	rwp->rw_completed = 0;
	rwp->rw_holders = 0;
	rwp->rw_waiters = 0;
	rwp->rw_release = SPLAT_RWLOCK_RELEASE_INIT;
	rwp->rw_rc = 0;
	rwp->rw_type = 0;
}

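/*
 * Writer thread for test 1.  After a short random delay it takes the
 * rwlock as RW_WRITER, updates the shared counters, and holds the lock
 * until the control thread signals SPLAT_RWLOCK_RELEASE_WR.
 */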
static int
splat_rwlock_wr_thr(void *arg)
{
	rw_thr_t *rwt = (rw_thr_t *)arg;
	rw_priv_t *rwp = rwt->rwt_rwp;
	uint8_t rnd;

	ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

	get_random_bytes((void *)&rnd, 1);
	msleep((unsigned int)rnd);

	splat_vprint(rwp->rw_file, rwt->rwt_name,
	    "%s trying to acquire rwlock (%d holding/%d waiting)\n",
	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
	spin_lock(&rwp->rw_lock);
	rwp->rw_waiters++;
	spin_unlock(&rwp->rw_lock);
	rw_enter(&rwp->rw_rwlock, RW_WRITER);

	spin_lock(&rwp->rw_lock);
	rwp->rw_waiters--;
	rwp->rw_holders++;
	spin_unlock(&rwp->rw_lock);
	splat_vprint(rwp->rw_file, rwt->rwt_name,
	    "%s acquired rwlock (%d holding/%d waiting)\n",
	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);

	/* Wait for control thread to signal we can release the write lock */
	wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
	    rwp->rw_release == SPLAT_RWLOCK_RELEASE_WR));

	spin_lock(&rwp->rw_lock);
	rwp->rw_completed++;
	rwp->rw_holders--;
	spin_unlock(&rwp->rw_lock);
	splat_vprint(rwp->rw_file, rwt->rwt_name,
	    "%s dropped rwlock (%d holding/%d waiting)\n",
	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);

	rw_exit(&rwp->rw_rwlock);

	return 0;
}

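/*
 * Reader thread for test 1.  It waits until the writer holds the lock,
 * then blocks in rw_enter(RW_READER) and holds the lock until the control
 * thread signals SPLAT_RWLOCK_RELEASE_RD.
 */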
static int
splat_rwlock_rd_thr(void *arg)
{
	rw_thr_t *rwt = (rw_thr_t *)arg;
	rw_priv_t *rwp = rwt->rwt_rwp;
	uint8_t rnd;

	ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

	get_random_bytes((void *)&rnd, 1);
	msleep((unsigned int)rnd);

	/* Don't try to take the semaphore until after someone has it */
	wait_event_interruptible(rwp->rw_waitq,
	    splat_locked_test(&rwp->rw_lock, rwp->rw_holders > 0));

	splat_vprint(rwp->rw_file, rwt->rwt_name,
	    "%s trying to acquire rwlock (%d holding/%d waiting)\n",
	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
	spin_lock(&rwp->rw_lock);
	rwp->rw_waiters++;
	spin_unlock(&rwp->rw_lock);
	rw_enter(&rwp->rw_rwlock, RW_READER);

	spin_lock(&rwp->rw_lock);
	rwp->rw_waiters--;
	rwp->rw_holders++;
	spin_unlock(&rwp->rw_lock);
	splat_vprint(rwp->rw_file, rwt->rwt_name,
	    "%s acquired rwlock (%d holding/%d waiting)\n",
	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);

	/* Wait for control thread to signal we can release the read lock */
	wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
	    rwp->rw_release == SPLAT_RWLOCK_RELEASE_RD));

	spin_lock(&rwp->rw_lock);
	rwp->rw_completed++;
	rwp->rw_holders--;
	spin_unlock(&rwp->rw_lock);
	splat_vprint(rwp->rw_file, rwt->rwt_name,
	    "%s dropped rwlock (%d holding/%d waiting)\n",
	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);

	rw_exit(&rwp->rw_rwlock);

	return 0;
}

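/*
 * Test 1: one writer and N-1 readers.  Verify that while the writer holds
 * the lock all readers remain waiters, and that once the writer drops the
 * lock every reader is able to hold it concurrently.
 */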
static int
splat_rwlock_test1(struct file *file, void *arg)
{
	int i, count = 0, rc = 0;
	rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
	rw_priv_t *rwp;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	/* Create some threads; the exact number isn't important just as
	 * long as we know how many we managed to create and should expect. */
	for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
		rwt[i].rwt_rwp = rwp;
		rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME;

		/* The first thread will be the writer */
		if (i == 0)
			rwt[i].rwt_thread = spl_kthread_create(splat_rwlock_wr_thr,
			    &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);
		else
			rwt[i].rwt_thread = spl_kthread_create(splat_rwlock_rd_thr,
			    &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);

		if (!IS_ERR(rwt[i].rwt_thread)) {
			wake_up_process(rwt[i].rwt_thread);
			count++;
		}
	}

	/* Wait for the writer */
	while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders == 0)) {
		wake_up_interruptible(&rwp->rw_waitq);
		msleep(100);
	}

	/* Wait for 'count-1' readers */
	while (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters < count - 1)) {
		wake_up_interruptible(&rwp->rw_waitq);
		msleep(100);
	}

	/* Verify there is only one lock holder */
	if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders) != 1) {
		splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only 1 holder "
		    "expected for rwlock (%d holding/%d waiting)\n",
		    rwp->rw_holders, rwp->rw_waiters);
		rc = -EINVAL;
	}

	/* Verify 'count-1' readers */
	if (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters != count - 1)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d waiters "
		    "expected for rwlock (%d holding/%d waiting)\n",
		    count - 1, rwp->rw_holders, rwp->rw_waiters);
		rc = -EINVAL;
	}

	/* Signal the writer to release, allowing the readers to acquire */
	spin_lock(&rwp->rw_lock);
	rwp->rw_release = SPLAT_RWLOCK_RELEASE_WR;
	wake_up_interruptible(&rwp->rw_waitq);
	spin_unlock(&rwp->rw_lock);

	/* Wait for 'count-1' readers to hold the lock */
	while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders < count - 1)) {
		wake_up_interruptible(&rwp->rw_waitq);
		msleep(100);
	}

	/* Verify there are 'count-1' readers */
	if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders != count - 1)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d holders "
		    "expected for rwlock (%d holding/%d waiting)\n",
		    count - 1, rwp->rw_holders, rwp->rw_waiters);
		rc = -EINVAL;
	}

	/* Release 'count-1' readers */
	spin_lock(&rwp->rw_lock);
	rwp->rw_release = SPLAT_RWLOCK_RELEASE_RD;
	wake_up_interruptible(&rwp->rw_waitq);
	spin_unlock(&rwp->rw_lock);

	/* Wait for the test to complete */
	while (splat_locked_test(&rwp->rw_lock,
	    rwp->rw_holders > 0 || rwp->rw_waiters > 0))
		msleep(100);

	rw_destroy(&(rwp->rw_rwlock));
	kfree(rwp);

	return rc;
}

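/*
 * Test 2 task function.  Each task takes the rwlock as RW_WRITER,
 * snapshots the shared counter, sleeps briefly, then verifies the counter
 * was not modified by a racing task before incrementing it.
 */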
static void
splat_rwlock_test2_func(void *arg)
{
	rw_priv_t *rwp = (rw_priv_t *)arg;
	int rc;
	ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

	/* Read the value before sleeping and write it after we wake up to
	 * maximize the chance of a race if rwlocks are not working properly */
	rw_enter(&rwp->rw_rwlock, RW_WRITER);
	rc = rwp->rw_rc;
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ / 100);	/* 1/100 of a second */
	VERIFY(rwp->rw_rc == rc);
	rwp->rw_rc = rc + 1;
	rw_exit(&rwp->rw_rwlock);
}

static int
splat_rwlock_test2(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	taskq_t *tq;
	int i, rc = 0, tq_count = 256;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	/* Create several threads allowing tasks to race with each other */
	tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, num_online_cpus(),
	    defclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
	if (tq == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Schedule N work items to the work queue each of which enters the
	 * writer rwlock, sleeps briefly, then exits the writer rwlock.  On a
	 * multiprocessor box these work items will be handled by all available
	 * CPUs.  The task function checks to ensure the tracked shared variable
	 * is always only incremented by one.  Additionally, the rwlock itself
	 * is instrumented such that if any two processors are in the
	 * critical region at the same time the system will panic.  If the
	 * rwlock is implemented correctly this will never happen; that's a pass.
	 */
	for (i = 0; i < tq_count; i++) {
		if (!taskq_dispatch(tq, splat_rwlock_test2_func, rwp, TQ_SLEEP)) {
			splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME,
			    "Failed to queue task %d\n", i);
			rc = -EINVAL;
		}
	}

	taskq_wait(tq);

	if (rwp->rw_rc == tq_count) {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
		    "correctly entered/exited the rwlock %d times\n",
		    num_online_cpus(), rwp->rw_rc);
	} else {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
		    "only processed %d/%d write rwlock work items\n",
		    num_online_cpus(), rwp->rw_rc, tq_count);
		rc = -EINVAL;
	}

	taskq_destroy(tq);
	rw_destroy(&(rwp->rw_rwlock));
out:
	kfree(rwp);
	return rc;
}

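/*
 * Exercise one of the RW_*_HELD() predicates with the lock held as
 * RW_READER, not held, held as RW_WRITER, and not held again, comparing
 * each result against the expected value supplied by the caller.
 */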
#define splat_rwlock_test3_helper(rwp,rex1,rex2,wex1,wex2,held_func,rc) \
do {									\
	int result, _rc1_, _rc2_, _rc3_, _rc4_;				\
									\
	rc = 0;								\
	rw_enter(&(rwp)->rw_rwlock, RW_READER);				\
	_rc1_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex1);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
	    " returned %d (expected %d) when RW_READER\n",		\
	    _rc1_ ? "Fail " : "", result, rex1);			\
	rw_exit(&(rwp)->rw_rwlock);					\
	_rc2_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex2);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
	    " returned %d (expected %d) when !RW_READER\n",		\
	    _rc2_ ? "Fail " : "", result, rex2);			\
									\
	rw_enter(&(rwp)->rw_rwlock, RW_WRITER);				\
	_rc3_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex1);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
	    " returned %d (expected %d) when RW_WRITER\n",		\
	    _rc3_ ? "Fail " : "", result, wex1);			\
	rw_exit(&(rwp)->rw_rwlock);					\
	_rc4_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex2);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
	    " returned %d (expected %d) when !RW_WRITER\n",		\
	    _rc4_ ? "Fail " : "", result, wex2);			\
									\
	rc = ((_rc1_ || _rc2_ || _rc3_ || _rc4_) ? -EINVAL : 0);	\
} while (0)

static int
splat_rwlock_test3(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	int rc1, rc2, rc3;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	splat_rwlock_test3_helper(rwp, 1, 0, 1, 0, RW_LOCK_HELD, rc1);
	splat_rwlock_test3_helper(rwp, 1, 0, 0, 0, RW_READ_HELD, rc2);
	splat_rwlock_test3_helper(rwp, 0, 0, 1, 0, RW_WRITE_HELD, rc3);

	rw_destroy(&rwp->rw_rwlock);
	kfree(rwp);

	return ((rc1 || rc2 || rc3) ? -EINVAL : 0);
}

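/*
 * Test 4 task function.  Attempt a non-blocking rw_tryenter() of the
 * requested type and record whether it succeeded or returned busy.
 */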
static void
splat_rwlock_test4_func(void *arg)
{
	rw_priv_t *rwp = (rw_priv_t *)arg;
	ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

	if (rw_tryenter(&rwp->rw_rwlock, rwp->rw_type)) {
		rwp->rw_rc = 0;
		rw_exit(&rwp->rw_rwlock);
	} else {
		rwp->rw_rc = -EBUSY;
	}
}

static char *
splat_rwlock_test4_name(krw_t type)
{
	switch (type) {
	case RW_NONE: return "RW_NONE";
	case RW_WRITER: return "RW_WRITER";
	case RW_READER: return "RW_READER";
	}

	return NULL;
}

static int
splat_rwlock_test4_type(taskq_t *tq, rw_priv_t *rwp, int expected_rc,
    krw_t holder_type, krw_t try_type)
{
	int id, rc = 0;

	/* Schedule a task function which will try to acquire the rwlock
	 * using type try_type while the rwlock is being held as holder_type.
	 * The result must match expected_rc for the test to pass */
	rwp->rw_rc = -EINVAL;
	rwp->rw_type = try_type;

	if (holder_type == RW_WRITER || holder_type == RW_READER)
		rw_enter(&rwp->rw_rwlock, holder_type);

	id = taskq_dispatch(tq, splat_rwlock_test4_func, rwp, TQ_SLEEP);
	if (id == 0) {
		splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME, "%s",
		    "taskq_dispatch() failed\n");
		rc = -EINVAL;
		goto out;
	}

	taskq_wait_id(tq, id);

	if (rwp->rw_rc != expected_rc)
		rc = -EINVAL;

	splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME,
	    "%srw_tryenter(%s) returned %d (expected %d) when %s\n",
	    rc ? "Fail " : "", splat_rwlock_test4_name(try_type),
	    rwp->rw_rc, expected_rc,
	    splat_rwlock_test4_name(holder_type));
out:
	if (holder_type == RW_WRITER || holder_type == RW_READER)
		rw_exit(&rwp->rw_rwlock);

	return rc;
}

static int
splat_rwlock_test4(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	taskq_t *tq;
	int rc = 0, rc1, rc2, rc3, rc4, rc5, rc6;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	/* Initialize the rwlock first so the error path below may safely
	 * destroy it even when taskq_create() fails. */
	splat_init_rw_priv(rwp, file);

	tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, 1, defclsyspri,
	    50, INT_MAX, TASKQ_PREPOPULATE);
	if (tq == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/* Validate all combinations of rw_tryenter() contention */
	rc1 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_WRITER);
	rc2 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_READER);
	rc3 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_WRITER);
	rc4 = splat_rwlock_test4_type(tq, rwp, 0, RW_READER, RW_READER);
	rc5 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_WRITER);
	rc6 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_READER);

	if (rc1 || rc2 || rc3 || rc4 || rc5 || rc6)
		rc = -EINVAL;

	taskq_destroy(tq);
out:
	rw_destroy(&(rwp->rw_rwlock));
	kfree(rwp);

	return rc;
}

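/*
 * Test 5: take the lock as a writer, rw_downgrade() it, and verify the
 * lock is then reported as read held rather than write held.
 */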
static int
splat_rwlock_test5(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	int rc = -EINVAL;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	rw_enter(&rwp->rw_rwlock, RW_WRITER);
	if (!RW_WRITE_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
		    "rwlock should be write locked: %d\n",
		    RW_WRITE_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rw_downgrade(&rwp->rw_rwlock);
	if (!RW_READ_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
		    "rwlock should be read locked: %d\n",
		    RW_READ_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "%s",
	    "rwlock properly downgraded\n");
out:
	rw_exit(&rwp->rw_rwlock);
	rw_destroy(&rwp->rw_rwlock);
	kfree(rwp);

	return rc;
}

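/*
 * Test 6: verify that the raw rwsem->count value observed for a single
 * reader and for a single writer matches the value the SPL assumes.
 */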
static int
splat_rwlock_test6(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	int rc;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	rw_enter(&rwp->rw_rwlock, RW_READER);
	if (RWSEM_COUNT(SEM(&rwp->rw_rwlock)) !=
	    SPL_RWSEM_SINGLE_READER_VALUE) {
		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
		    "Expected single reader rwsem->count "
		    "to be %ld, but it is %ld\n",
		    SPL_RWSEM_SINGLE_READER_VALUE,
		    RWSEM_COUNT(SEM(&rwp->rw_rwlock)));
		rc = -ENOLCK;
		goto out;
	}
	rw_exit(&rwp->rw_rwlock);

	rw_enter(&rwp->rw_rwlock, RW_WRITER);
	if (RWSEM_COUNT(SEM(&rwp->rw_rwlock)) !=
	    SPL_RWSEM_SINGLE_WRITER_VALUE) {
		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
		    "Expected single writer rwsem->count "
		    "to be %ld, but it is %ld\n",
		    SPL_RWSEM_SINGLE_WRITER_VALUE,
		    RWSEM_COUNT(SEM(&rwp->rw_rwlock)));
		rc = -ENOLCK;
		goto out;
	}
	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
	    "rwsem->count values match what was assumed\n");
out:
	rw_exit(&rwp->rw_rwlock);
	rw_destroy(&rwp->rw_rwlock);
	kfree(rwp);

	return rc;
}

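/*
 * Test 7: take the lock as the sole reader and verify rw_tryupgrade()
 * succeeds, leaving the lock write held with no remaining readers.
 */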
static int
splat_rwlock_test7(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	int rc;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	rw_enter(&rwp->rw_rwlock, RW_READER);
	if (!RW_READ_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST7_NAME,
		    "rwlock should be read locked: %d\n",
		    RW_READ_HELD(&rwp->rw_rwlock));
		rc = -ENOLCK;
		goto out;
	}

	/* With one reader upgrade should never fail. */
	rc = rw_tryupgrade(&rwp->rw_rwlock);
	if (!rc) {
		splat_vprint(file, SPLAT_RWLOCK_TEST7_NAME,
		    "rwlock failed upgrade from reader: %d\n",
		    RW_READ_HELD(&rwp->rw_rwlock));
		rc = -ENOLCK;
		goto out;
	}

	if (RW_READ_HELD(&rwp->rw_rwlock) || !RW_WRITE_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST7_NAME, "rwlock should "
		    "have 0 (not %d) reader and 1 (not %d) writer\n",
		    RW_READ_HELD(&rwp->rw_rwlock),
		    RW_WRITE_HELD(&rwp->rw_rwlock));
		rc = -ENOLCK;
		goto out;
	}

	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST7_NAME, "%s",
	    "rwlock properly upgraded\n");
out:
	rw_exit(&rwp->rw_rwlock);
	rw_destroy(&rwp->rw_rwlock);
	kfree(rwp);

	return rc;
}

splat_subsystem_t *
splat_rwlock_init(void)
{
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	strncpy(sub->desc.name, SPLAT_RWLOCK_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_RWLOCK_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_RWLOCK;

	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST1_NAME, SPLAT_RWLOCK_TEST1_DESC,
	    SPLAT_RWLOCK_TEST1_ID, splat_rwlock_test1);
	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST2_NAME, SPLAT_RWLOCK_TEST2_DESC,
	    SPLAT_RWLOCK_TEST2_ID, splat_rwlock_test2);
	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST3_NAME, SPLAT_RWLOCK_TEST3_DESC,
	    SPLAT_RWLOCK_TEST3_ID, splat_rwlock_test3);
	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST4_NAME, SPLAT_RWLOCK_TEST4_DESC,
	    SPLAT_RWLOCK_TEST4_ID, splat_rwlock_test4);
	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST5_NAME, SPLAT_RWLOCK_TEST5_DESC,
	    SPLAT_RWLOCK_TEST5_ID, splat_rwlock_test5);
	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST6_NAME, SPLAT_RWLOCK_TEST6_DESC,
	    SPLAT_RWLOCK_TEST6_ID, splat_rwlock_test6);
	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST7_NAME, SPLAT_RWLOCK_TEST7_DESC,
	    SPLAT_RWLOCK_TEST7_ID, splat_rwlock_test7);

	return sub;
}

void
splat_rwlock_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST7_ID);
	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST6_ID);
	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST1_ID);
	kfree(sub);
}

int
splat_rwlock_id(void)
{
	return SPLAT_SUBSYSTEM_RWLOCK;
}