/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer Tests (SPLAT) Read/Writer Lock Tests.
\*****************************************************************************/

#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/taskq.h>
#include <linux/delay.h>
#include <linux/mm_compat.h>
#include "splat-internal.h"

#define SPLAT_RWLOCK_NAME		"rwlock"
#define SPLAT_RWLOCK_DESC		"Kernel RW Lock Tests"

#define SPLAT_RWLOCK_TEST1_ID		0x0701
#define SPLAT_RWLOCK_TEST1_NAME		"N-rd/1-wr"
#define SPLAT_RWLOCK_TEST1_DESC		"Multiple readers one writer"

#define SPLAT_RWLOCK_TEST2_ID		0x0702
#define SPLAT_RWLOCK_TEST2_NAME		"0-rd/N-wr"
#define SPLAT_RWLOCK_TEST2_DESC		"Multiple writers"

#define SPLAT_RWLOCK_TEST3_ID		0x0703
#define SPLAT_RWLOCK_TEST3_NAME		"held"
#define SPLAT_RWLOCK_TEST3_DESC		"RW_{LOCK|READ|WRITE}_HELD"

#define SPLAT_RWLOCK_TEST4_ID		0x0704
#define SPLAT_RWLOCK_TEST4_NAME		"tryenter"
#define SPLAT_RWLOCK_TEST4_DESC		"Tryenter"

#define SPLAT_RWLOCK_TEST5_ID		0x0705
#define SPLAT_RWLOCK_TEST5_NAME		"rw_downgrade"
#define SPLAT_RWLOCK_TEST5_DESC		"Write downgrade"

#define SPLAT_RWLOCK_TEST6_ID		0x0706
#define SPLAT_RWLOCK_TEST6_NAME		"rw_tryupgrade-1"
#define SPLAT_RWLOCK_TEST6_DESC		"rwsem->count value"

#define SPLAT_RWLOCK_TEST7_ID		0x0707
#define SPLAT_RWLOCK_TEST7_NAME		"rw_tryupgrade-2"
#define SPLAT_RWLOCK_TEST7_DESC		"Read upgrade"

#define SPLAT_RWLOCK_TEST_MAGIC		0x115599DDUL
#define SPLAT_RWLOCK_TEST_NAME		"rwlock_test"
#define SPLAT_RWLOCK_TEST_TASKQ		"rwlock_taskq"
#define SPLAT_RWLOCK_TEST_COUNT		8

#define SPLAT_RWLOCK_RELEASE_INIT	0
#define SPLAT_RWLOCK_RELEASE_WR		1
#define SPLAT_RWLOCK_RELEASE_RD		2

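/*
 * Shared test state passed to every worker thread and taskq function.
 * The kernel rwlock under test (rw_rwlock) is separate from the spinlock
 * (rw_lock), which only protects the bookkeeping counters below; the wait
 * queue (rw_waitq) is used by the control logic to signal the workers.
 */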
typedef struct rw_priv {
        unsigned long rw_magic;
        struct file *rw_file;
        krwlock_t rw_rwlock;
        spinlock_t rw_lock;
        spl_wait_queue_head_t rw_waitq;
        int rw_completed;
        int rw_holders;
        int rw_waiters;
        int rw_release;
        int rw_rc;
        krw_t rw_type;
} rw_priv_t;

typedef struct rw_thr {
        const char *rwt_name;
        rw_priv_t *rwt_rwp;
        struct task_struct *rwt_thread;
} rw_thr_t;

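/* Initialize the shared test state to a known starting point. */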
void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
{
        rwp->rw_magic = SPLAT_RWLOCK_TEST_MAGIC;
        rwp->rw_file = file;
        rw_init(&rwp->rw_rwlock, SPLAT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
        spin_lock_init(&rwp->rw_lock);
        init_waitqueue_head(&rwp->rw_waitq);
        rwp->rw_completed = 0;
        rwp->rw_holders = 0;
        rwp->rw_waiters = 0;
        rwp->rw_release = SPLAT_RWLOCK_RELEASE_INIT;
        rwp->rw_rc = 0;
        rwp->rw_type = 0;
}

#if defined(CONFIG_PREEMPT_RT_FULL)
static int
splat_rwlock_test1(struct file *file, void *arg)
{
        /*
         * This test cannot pass on PREEMPT_RT_FULL kernels because they
         * only allow a single thread to hold the lock at a time, so it
         * is skipped and reported as a success.
         */
        return 0;
}
#else
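/*
 * Writer thread: announce itself as a waiter, take the rwlock as
 * RW_WRITER, then hold it until the control logic signals
 * SPLAT_RWLOCK_RELEASE_WR through the wait queue.
 */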
static int
splat_rwlock_wr_thr(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwp = rwt->rwt_rwp;
        uint8_t rnd;

        ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

        get_random_bytes((void *)&rnd, 1);
        msleep((unsigned int)rnd);

        splat_vprint(rwp->rw_file, rwt->rwt_name,
            "%s trying to acquire rwlock (%d holding/%d waiting)\n",
            rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
        spin_lock(&rwp->rw_lock);
        rwp->rw_waiters++;
        spin_unlock(&rwp->rw_lock);
        rw_enter(&rwp->rw_rwlock, RW_WRITER);

        spin_lock(&rwp->rw_lock);
        rwp->rw_waiters--;
        rwp->rw_holders++;
        spin_unlock(&rwp->rw_lock);
        splat_vprint(rwp->rw_file, rwt->rwt_name,
            "%s acquired rwlock (%d holding/%d waiting)\n",
            rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);

        /* Wait for control thread to signal we can release the write lock */
        wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
            rwp->rw_release == SPLAT_RWLOCK_RELEASE_WR));

        spin_lock(&rwp->rw_lock);
        rwp->rw_completed++;
        rwp->rw_holders--;
        spin_unlock(&rwp->rw_lock);
        splat_vprint(rwp->rw_file, rwt->rwt_name,
            "%s dropped rwlock (%d holding/%d waiting)\n",
            rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);

        rw_exit(&rwp->rw_rwlock);

        return 0;
}

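/*
 * Reader thread: wait until some thread already holds the rwlock, then
 * take it as RW_READER and hold it until the control logic signals
 * SPLAT_RWLOCK_RELEASE_RD.
 */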
static int
splat_rwlock_rd_thr(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwp = rwt->rwt_rwp;
        uint8_t rnd;

        ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

        get_random_bytes((void *)&rnd, 1);
        msleep((unsigned int)rnd);

        /* Don't try to take the semaphore until after someone has it */
        wait_event_interruptible(rwp->rw_waitq,
            splat_locked_test(&rwp->rw_lock, rwp->rw_holders > 0));

        splat_vprint(rwp->rw_file, rwt->rwt_name,
            "%s trying to acquire rwlock (%d holding/%d waiting)\n",
            rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
        spin_lock(&rwp->rw_lock);
        rwp->rw_waiters++;
        spin_unlock(&rwp->rw_lock);
        rw_enter(&rwp->rw_rwlock, RW_READER);

        spin_lock(&rwp->rw_lock);
        rwp->rw_waiters--;
        rwp->rw_holders++;
        spin_unlock(&rwp->rw_lock);
        splat_vprint(rwp->rw_file, rwt->rwt_name,
            "%s acquired rwlock (%d holding/%d waiting)\n",
            rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);

        /* Wait for control thread to signal we can release the read lock */
        wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
            rwp->rw_release == SPLAT_RWLOCK_RELEASE_RD));

        spin_lock(&rwp->rw_lock);
        rwp->rw_completed++;
        rwp->rw_holders--;
        spin_unlock(&rwp->rw_lock);
        splat_vprint(rwp->rw_file, rwt->rwt_name,
            "%s dropped rwlock (%d holding/%d waiting)\n",
            rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);

        rw_exit(&rwp->rw_rwlock);

        return 0;
}

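/*
 * Test 1: start one writer and several reader threads.  The test function
 * acts as the control thread: it verifies the writer holds the lock alone
 * while the readers wait, releases the writer, verifies all readers then
 * hold the lock concurrently, and finally releases the readers.
 */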
static int
splat_rwlock_test1(struct file *file, void *arg)
{
        int i, count = 0, rc = 0;
        rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
        rw_priv_t *rwp;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        /* Create some threads; the exact number isn't important as long
         * as we know how many we managed to create and should expect. */
        for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
                rwt[i].rwt_rwp = rwp;
                rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME;

                /* The first thread will be the writer */
                if (i == 0)
                        rwt[i].rwt_thread = spl_kthread_create(splat_rwlock_wr_thr,
                            &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);
                else
                        rwt[i].rwt_thread = spl_kthread_create(splat_rwlock_rd_thr,
                            &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);

                if (!IS_ERR(rwt[i].rwt_thread)) {
                        wake_up_process(rwt[i].rwt_thread);
                        count++;
                }
        }

        /* Wait for the writer */
        while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders == 0)) {
                wake_up_interruptible(&rwp->rw_waitq);
                msleep(100);
        }

        /* Wait for 'count-1' readers */
        while (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters < count - 1)) {
                wake_up_interruptible(&rwp->rw_waitq);
                msleep(100);
        }

        /* Verify there is only one lock holder */
        if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders) != 1) {
                splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only 1 holder "
                    "expected for rwlock (%d holding/%d waiting)\n",
                    rwp->rw_holders, rwp->rw_waiters);
                rc = -EINVAL;
        }

        /* Verify 'count-1' readers */
        if (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters != count - 1)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d waiters "
                    "expected for rwlock (%d holding/%d waiting)\n",
                    count - 1, rwp->rw_holders, rwp->rw_waiters);
                rc = -EINVAL;
        }

        /* Signal the writer to release, allowing the readers to acquire */
        spin_lock(&rwp->rw_lock);
        rwp->rw_release = SPLAT_RWLOCK_RELEASE_WR;
        wake_up_interruptible(&rwp->rw_waitq);
        spin_unlock(&rwp->rw_lock);

        /* Wait for 'count-1' readers to hold the lock */
        while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders < count - 1)) {
                wake_up_interruptible(&rwp->rw_waitq);
                msleep(100);
        }

        /* Verify there are 'count-1' readers */
        if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders != count - 1)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d holders "
                    "expected for rwlock (%d holding/%d waiting)\n",
                    count - 1, rwp->rw_holders, rwp->rw_waiters);
                rc = -EINVAL;
        }

        /* Release 'count-1' readers */
        spin_lock(&rwp->rw_lock);
        rwp->rw_release = SPLAT_RWLOCK_RELEASE_RD;
        wake_up_interruptible(&rwp->rw_waitq);
        spin_unlock(&rwp->rw_lock);

        /* Wait for the test to complete */
        while (splat_locked_test(&rwp->rw_lock,
            rwp->rw_holders > 0 || rwp->rw_waiters > 0))
                msleep(100);

        rw_destroy(&(rwp->rw_rwlock));
        kfree(rwp);

        return rc;
}
#endif

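/*
 * Each taskq work item enters the rwlock as a writer, sleeps briefly,
 * then increments a shared counter.  If the rwlock ever admitted two
 * writers to the critical section at once, the VERIFY() below would trip.
 */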
static void
splat_rwlock_test2_func(void *arg)
{
        rw_priv_t *rwp = (rw_priv_t *)arg;
        int rc;
        ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

        /* Read the value before sleeping and write it after we wake up to
         * maximize the chance of a race if rwlocks are not working properly */
        rw_enter(&rwp->rw_rwlock, RW_WRITER);
        rc = rwp->rw_rc;
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(HZ / 100);	/* 1/100 of a second */
        VERIFY(rwp->rw_rc == rc);
        rwp->rw_rc = rc + 1;
        rw_exit(&rwp->rw_rwlock);
}

static int
splat_rwlock_test2(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        taskq_t *tq;
        int i, rc = 0, tq_count = 256;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        /* Create several threads allowing tasks to race with each other */
        tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, num_online_cpus(),
            defclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
        if (tq == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        /*
         * Schedule N work items to the task queue, each of which enters
         * the rwlock as a writer, sleeps briefly, then exits the rwlock.
         * On a multiprocessor box these work items will be handled by all
         * available CPUs.  The task function checks that the tracked
         * shared variable is only ever incremented by one.  Additionally,
         * the rwlock itself is instrumented such that if any two
         * processors are in the critical region at the same time the
         * system will panic.  If the rwlock is implemented correctly
         * this will never happen, and that's a pass.
         */
        for (i = 0; i < tq_count; i++) {
                if (taskq_dispatch(tq, splat_rwlock_test2_func, rwp,
                    TQ_SLEEP) == TASKQID_INVALID) {
                        splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME,
                            "Failed to queue task %d\n", i);
                        rc = -EINVAL;
                }
        }

        taskq_wait(tq);

        if (rwp->rw_rc == tq_count) {
                splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
                    "correctly entered/exited the rwlock %d times\n",
                    num_online_cpus(), rwp->rw_rc);
        } else {
                splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
                    "only processed %d/%d w rwlock work items\n",
                    num_online_cpus(), rwp->rw_rc, tq_count);
                rc = -EINVAL;
        }

        taskq_destroy(tq);
        rw_destroy(&(rwp->rw_rwlock));
out:
        kfree(rwp);
        return rc;
}

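/*
 * Exercise one RW_*_HELD() predicate: take the rwlock as RW_READER and
 * then as RW_WRITER, calling held_func() both while held and after
 * release, and compare each result against the expected values (rex1/rex2
 * for the reader cases, wex1/wex2 for the writer cases).  rc is set to
 * -EINVAL if any of the four checks fail.
 */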
#define splat_rwlock_test3_helper(rwp,rex1,rex2,wex1,wex2,held_func,rc) \
do { \
        int result, _rc1_, _rc2_, _rc3_, _rc4_; \
 \
        rc = 0; \
        rw_enter(&(rwp)->rw_rwlock, RW_READER); \
        _rc1_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex1); \
        splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func \
            " returned %d (expected %d) when RW_READER\n", \
            _rc1_ ? "Fail " : "", result, rex1); \
        rw_exit(&(rwp)->rw_rwlock); \
        _rc2_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex2); \
        splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func \
            " returned %d (expected %d) when !RW_READER\n", \
            _rc2_ ? "Fail " : "", result, rex2); \
 \
        rw_enter(&(rwp)->rw_rwlock, RW_WRITER); \
        _rc3_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex1); \
        splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func \
            " returned %d (expected %d) when RW_WRITER\n", \
            _rc3_ ? "Fail " : "", result, wex1); \
        rw_exit(&(rwp)->rw_rwlock); \
        _rc4_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex2); \
        splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func \
            " returned %d (expected %d) when !RW_WRITER\n", \
            _rc4_ ? "Fail " : "", result, wex2); \
 \
        rc = ((_rc1_ || _rc2_ || _rc3_ || _rc4_) ? -EINVAL : 0); \
} while (0)

static int
splat_rwlock_test3(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        int rc1, rc2, rc3;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        splat_rwlock_test3_helper(rwp, 1, 0, 1, 0, RW_LOCK_HELD, rc1);
        splat_rwlock_test3_helper(rwp, 1, 0, 0, 0, RW_READ_HELD, rc2);
        splat_rwlock_test3_helper(rwp, 0, 0, 1, 0, RW_WRITE_HELD, rc3);

        rw_destroy(&rwp->rw_rwlock);
        kfree(rwp);

        return ((rc1 || rc2 || rc3) ? -EINVAL : 0);
}

static void
splat_rwlock_test4_func(void *arg)
{
        rw_priv_t *rwp = (rw_priv_t *)arg;
        ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

        if (rw_tryenter(&rwp->rw_rwlock, rwp->rw_type)) {
                rwp->rw_rc = 0;
                rw_exit(&rwp->rw_rwlock);
        } else {
                rwp->rw_rc = -EBUSY;
        }
}

static char *
splat_rwlock_test4_name(krw_t type)
{
        switch (type) {
        case RW_NONE: return "RW_NONE";
        case RW_WRITER: return "RW_WRITER";
        case RW_READER: return "RW_READER";
        }

        return NULL;
}

static int
splat_rwlock_test4_type(taskq_t *tq, rw_priv_t *rwp, int expected_rc,
    krw_t holder_type, krw_t try_type)
{
        int id, rc = 0;

        /* Schedule a task function which will try to acquire the rwlock
         * using type try_type while the rwlock is being held as holder_type.
         * The result must match expected_rc for the test to pass. */
        rwp->rw_rc = -EINVAL;
        rwp->rw_type = try_type;

        if (holder_type == RW_WRITER || holder_type == RW_READER)
                rw_enter(&rwp->rw_rwlock, holder_type);

        id = taskq_dispatch(tq, splat_rwlock_test4_func, rwp, TQ_SLEEP);
        if (id == TASKQID_INVALID) {
                splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME, "%s",
                    "taskq_dispatch() failed\n");
                rc = -EINVAL;
                goto out;
        }

        taskq_wait_id(tq, id);

        if (rwp->rw_rc != expected_rc)
                rc = -EINVAL;

        splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME,
            "%srw_tryenter(%s) returned %d (expected %d) when %s\n",
            rc ? "Fail " : "", splat_rwlock_test4_name(try_type),
            rwp->rw_rc, expected_rc,
            splat_rwlock_test4_name(holder_type));
out:
        if (holder_type == RW_WRITER || holder_type == RW_READER)
                rw_exit(&rwp->rw_rwlock);

        return rc;
}

static int
splat_rwlock_test4(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        taskq_t *tq;
        int rc = 0, rc1, rc2, rc3, rc4, rc5, rc6;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, 1, defclsyspri,
            50, INT_MAX, TASKQ_PREPOPULATE);
        if (tq == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        splat_init_rw_priv(rwp, file);

        /*
         * Validate all combinations of rw_tryenter() contention.
         *
         * The concurrent reader test is modified for PREEMPT_RT_FULL
         * kernels which do not permit concurrent read locks to be taken
         * from different threads.  The same thread is allowed to take
         * the read lock multiple times.
         */
        rc1 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_WRITER);
        rc2 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_READER);
        rc3 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_WRITER);
#if defined(CONFIG_PREEMPT_RT_FULL)
        rc4 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_READER);
#else
        rc4 = splat_rwlock_test4_type(tq, rwp, 0, RW_READER, RW_READER);
#endif
        rc5 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_WRITER);
        rc6 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_READER);

        if (rc1 || rc2 || rc3 || rc4 || rc5 || rc6)
                rc = -EINVAL;

        taskq_destroy(tq);
out:
        rw_destroy(&(rwp->rw_rwlock));
        kfree(rwp);

        return rc;
}

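/*
 * Test 5: take the rwlock as a writer, rw_downgrade() it, and verify the
 * lock is then reported as read held rather than write held.
 */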
static int
splat_rwlock_test5(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        int rc = -EINVAL;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        rw_enter(&rwp->rw_rwlock, RW_WRITER);
        if (!RW_WRITE_HELD(&rwp->rw_rwlock)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
                    "rwlock should be write lock: %d\n",
                    RW_WRITE_HELD(&rwp->rw_rwlock));
                goto out;
        }

        rw_downgrade(&rwp->rw_rwlock);
        if (!RW_READ_HELD(&rwp->rw_rwlock)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
                    "rwlock should be read lock: %d\n",
                    RW_READ_HELD(&rwp->rw_rwlock));
                goto out;
        }

        rc = 0;
        splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "%s",
            "rwlock properly downgraded\n");
out:
        rw_exit(&rwp->rw_rwlock);
        rw_destroy(&rwp->rw_rwlock);
        kfree(rwp);

        return rc;
}

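/*
 * Test 6: verify the raw rwsem->count value matches the expected
 * single-reader and single-writer constants for this kernel.
 */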
static int
splat_rwlock_test6(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        int rc;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        rw_enter(&rwp->rw_rwlock, RW_READER);
        if (RWSEM_COUNT(SEM(&rwp->rw_rwlock)) !=
            SPL_RWSEM_SINGLE_READER_VALUE) {
                splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
                    "We assumed single reader rwsem->count "
                    "should be %ld, but is %ld\n",
                    (long int)SPL_RWSEM_SINGLE_READER_VALUE,
                    (long int)RWSEM_COUNT(SEM(&rwp->rw_rwlock)));
                rc = -ENOLCK;
                goto out;
        }
        rw_exit(&rwp->rw_rwlock);

        rw_enter(&rwp->rw_rwlock, RW_WRITER);
        if (RWSEM_COUNT(SEM(&rwp->rw_rwlock)) !=
            SPL_RWSEM_SINGLE_WRITER_VALUE) {
                splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
                    "We assumed single writer rwsem->count "
                    "should be %ld, but is %ld\n",
                    (long int)SPL_RWSEM_SINGLE_WRITER_VALUE,
                    (long int)RWSEM_COUNT(SEM(&rwp->rw_rwlock)));
                rc = -ENOLCK;
                goto out;
        }
        rc = 0;
        splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
            "rwsem->count same as we assumed\n");
out:
        rw_exit(&rwp->rw_rwlock);
        rw_destroy(&rwp->rw_rwlock);
        kfree(rwp);

        return rc;
}

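/*
 * Test 7: take the rwlock as the only reader and verify rw_tryupgrade()
 * promotes it to a write lock.
 */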
static int
splat_rwlock_test7(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        int rc;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        rw_enter(&rwp->rw_rwlock, RW_READER);
        if (!RW_READ_HELD(&rwp->rw_rwlock)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST7_NAME,
                    "rwlock should be read lock: %d\n",
                    RW_READ_HELD(&rwp->rw_rwlock));
                rc = -ENOLCK;
                goto out;
        }

        /* With one reader upgrade should never fail. */
        rc = rw_tryupgrade(&rwp->rw_rwlock);
        if (!rc) {
                splat_vprint(file, SPLAT_RWLOCK_TEST7_NAME,
                    "rwlock failed upgrade from reader: %d\n",
                    RW_READ_HELD(&rwp->rw_rwlock));
                rc = -ENOLCK;
                goto out;
        }

        if (RW_READ_HELD(&rwp->rw_rwlock) || !RW_WRITE_HELD(&rwp->rw_rwlock)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST7_NAME, "rwlock should "
                    "have 0 (not %d) reader and 1 (not %d) writer\n",
                    RW_READ_HELD(&rwp->rw_rwlock),
                    RW_WRITE_HELD(&rwp->rw_rwlock));
                rc = -ENOLCK;
                goto out;
        }

        rc = 0;
        splat_vprint(file, SPLAT_RWLOCK_TEST7_NAME, "%s",
            "rwlock properly upgraded\n");
out:
        rw_exit(&rwp->rw_rwlock);
        rw_destroy(&rwp->rw_rwlock);
        kfree(rwp);

        return rc;
}

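/* Register the rwlock test subsystem and its seven tests with SPLAT. */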
splat_subsystem_t *
splat_rwlock_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_RWLOCK_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_RWLOCK_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_RWLOCK;

        splat_test_init(sub, SPLAT_RWLOCK_TEST1_NAME, SPLAT_RWLOCK_TEST1_DESC,
            SPLAT_RWLOCK_TEST1_ID, splat_rwlock_test1);
        splat_test_init(sub, SPLAT_RWLOCK_TEST2_NAME, SPLAT_RWLOCK_TEST2_DESC,
            SPLAT_RWLOCK_TEST2_ID, splat_rwlock_test2);
        splat_test_init(sub, SPLAT_RWLOCK_TEST3_NAME, SPLAT_RWLOCK_TEST3_DESC,
            SPLAT_RWLOCK_TEST3_ID, splat_rwlock_test3);
        splat_test_init(sub, SPLAT_RWLOCK_TEST4_NAME, SPLAT_RWLOCK_TEST4_DESC,
            SPLAT_RWLOCK_TEST4_ID, splat_rwlock_test4);
        splat_test_init(sub, SPLAT_RWLOCK_TEST5_NAME, SPLAT_RWLOCK_TEST5_DESC,
            SPLAT_RWLOCK_TEST5_ID, splat_rwlock_test5);
        splat_test_init(sub, SPLAT_RWLOCK_TEST6_NAME, SPLAT_RWLOCK_TEST6_DESC,
            SPLAT_RWLOCK_TEST6_ID, splat_rwlock_test6);
        splat_test_init(sub, SPLAT_RWLOCK_TEST7_NAME, SPLAT_RWLOCK_TEST7_DESC,
            SPLAT_RWLOCK_TEST7_ID, splat_rwlock_test7);

        return sub;
}

void
splat_rwlock_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        splat_test_fini(sub, SPLAT_RWLOCK_TEST7_ID);
        splat_test_fini(sub, SPLAT_RWLOCK_TEST6_ID);
        splat_test_fini(sub, SPLAT_RWLOCK_TEST5_ID);
        splat_test_fini(sub, SPLAT_RWLOCK_TEST4_ID);
        splat_test_fini(sub, SPLAT_RWLOCK_TEST3_ID);
        splat_test_fini(sub, SPLAT_RWLOCK_TEST2_ID);
        splat_test_fini(sub, SPLAT_RWLOCK_TEST1_ID);
        kfree(sub);
}

int
splat_rwlock_id(void) {
        return SPLAT_SUBSYSTEM_RWLOCK;
}