]> git.proxmox.com Git - mirror_spl-debian.git/blob - module/splat/splat-rwlock.c
Imported Upstream version 0.6.3+git20140731
[mirror_spl-debian.git] / module / splat / splat-rwlock.c
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Read/Writer Lock Tests.
25 \*****************************************************************************/
26
27 #include <sys/rwlock.h>
28 #include <sys/taskq.h>
29 #include <sys/random.h>
30 #include "splat-internal.h"
31
32 #define SPLAT_RWLOCK_NAME "rwlock"
33 #define SPLAT_RWLOCK_DESC "Kernel RW Lock Tests"
34
35 #define SPLAT_RWLOCK_TEST1_ID 0x0701
36 #define SPLAT_RWLOCK_TEST1_NAME "N-rd/1-wr"
37 #define SPLAT_RWLOCK_TEST1_DESC "Multiple readers one writer"
38
39 #define SPLAT_RWLOCK_TEST2_ID 0x0702
40 #define SPLAT_RWLOCK_TEST2_NAME "0-rd/N-wr"
41 #define SPLAT_RWLOCK_TEST2_DESC "Multiple writers"
42
43 #define SPLAT_RWLOCK_TEST3_ID 0x0703
44 #define SPLAT_RWLOCK_TEST3_NAME "held"
45 #define SPLAT_RWLOCK_TEST3_DESC "RW_{LOCK|READ|WRITE}_HELD"
46
47 #define SPLAT_RWLOCK_TEST4_ID 0x0704
48 #define SPLAT_RWLOCK_TEST4_NAME "tryenter"
49 #define SPLAT_RWLOCK_TEST4_DESC "Tryenter"
50
51 #define SPLAT_RWLOCK_TEST5_ID 0x0705
52 #define SPLAT_RWLOCK_TEST5_NAME "rw_downgrade"
53 #define SPLAT_RWLOCK_TEST5_DESC "Write downgrade"
54
55 #define SPLAT_RWLOCK_TEST6_ID 0x0706
56 #define SPLAT_RWLOCK_TEST6_NAME "rw_tryupgrade"
57 #define SPLAT_RWLOCK_TEST6_DESC "Read upgrade"
58
59 #define SPLAT_RWLOCK_TEST_MAGIC 0x115599DDUL
60 #define SPLAT_RWLOCK_TEST_NAME "rwlock_test"
61 #define SPLAT_RWLOCK_TEST_TASKQ "rwlock_taskq"
62 #define SPLAT_RWLOCK_TEST_COUNT 8
63
64 #define SPLAT_RWLOCK_RELEASE_INIT 0
65 #define SPLAT_RWLOCK_RELEASE_WR 1
66 #define SPLAT_RWLOCK_RELEASE_RD 2
67
68 typedef struct rw_priv {
69 unsigned long rw_magic;
70 struct file *rw_file;
71 krwlock_t rw_rwlock;
72 spinlock_t rw_lock;
73 wait_queue_head_t rw_waitq;
74 int rw_completed;
75 int rw_holders;
76 int rw_waiters;
77 int rw_release;
78 int rw_rc;
79 krw_t rw_type;
80 } rw_priv_t;
81
82 typedef struct rw_thr {
83 const char *rwt_name;
84 rw_priv_t *rwt_rwp;
85 struct task_struct *rwt_thread;
86 } rw_thr_t;
87
88 void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
89 {
90 rwp->rw_magic = SPLAT_RWLOCK_TEST_MAGIC;
91 rwp->rw_file = file;
92 rw_init(&rwp->rw_rwlock, SPLAT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
93 spin_lock_init(&rwp->rw_lock);
94 init_waitqueue_head(&rwp->rw_waitq);
95 rwp->rw_completed = 0;
96 rwp->rw_holders = 0;
97 rwp->rw_waiters = 0;
98 rwp->rw_release = SPLAT_RWLOCK_RELEASE_INIT;
99 rwp->rw_rc = 0;
100 rwp->rw_type = 0;
101 }
102
103 static int
104 splat_rwlock_wr_thr(void *arg)
105 {
106 rw_thr_t *rwt = (rw_thr_t *)arg;
107 rw_priv_t *rwp = rwt->rwt_rwp;
108 uint8_t rnd;
109
110 ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
111
112 get_random_bytes((void *)&rnd, 1);
113 msleep((unsigned int)rnd);
114
115 splat_vprint(rwp->rw_file, rwt->rwt_name,
116 "%s trying to acquire rwlock (%d holding/%d waiting)\n",
117 rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
118 spin_lock(&rwp->rw_lock);
119 rwp->rw_waiters++;
120 spin_unlock(&rwp->rw_lock);
121 rw_enter(&rwp->rw_rwlock, RW_WRITER);
122
123 spin_lock(&rwp->rw_lock);
124 rwp->rw_waiters--;
125 rwp->rw_holders++;
126 spin_unlock(&rwp->rw_lock);
127 splat_vprint(rwp->rw_file, rwt->rwt_name,
128 "%s acquired rwlock (%d holding/%d waiting)\n",
129 rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
130
131 /* Wait for control thread to signal we can release the write lock */
132 wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
133 rwp->rw_release == SPLAT_RWLOCK_RELEASE_WR));
134
135 spin_lock(&rwp->rw_lock);
136 rwp->rw_completed++;
137 rwp->rw_holders--;
138 spin_unlock(&rwp->rw_lock);
139 splat_vprint(rwp->rw_file, rwt->rwt_name,
140 "%s dropped rwlock (%d holding/%d waiting)\n",
141 rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
142
143 rw_exit(&rwp->rw_rwlock);
144
145 return 0;
146 }
147
148 static int
149 splat_rwlock_rd_thr(void *arg)
150 {
151 rw_thr_t *rwt = (rw_thr_t *)arg;
152 rw_priv_t *rwp = rwt->rwt_rwp;
153 uint8_t rnd;
154
155 ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
156
157 get_random_bytes((void *)&rnd, 1);
158 msleep((unsigned int)rnd);
159
160 /* Don't try and take the semaphore until after someone has it */
161 wait_event_interruptible(rwp->rw_waitq,
162 splat_locked_test(&rwp->rw_lock, rwp->rw_holders > 0));
163
164 splat_vprint(rwp->rw_file, rwt->rwt_name,
165 "%s trying to acquire rwlock (%d holding/%d waiting)\n",
166 rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
167 spin_lock(&rwp->rw_lock);
168 rwp->rw_waiters++;
169 spin_unlock(&rwp->rw_lock);
170 rw_enter(&rwp->rw_rwlock, RW_READER);
171
172 spin_lock(&rwp->rw_lock);
173 rwp->rw_waiters--;
174 rwp->rw_holders++;
175 spin_unlock(&rwp->rw_lock);
176 splat_vprint(rwp->rw_file, rwt->rwt_name,
177 "%s acquired rwlock (%d holding/%d waiting)\n",
178 rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
179
180 /* Wait for control thread to signal we can release the read lock */
181 wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
182 rwp->rw_release == SPLAT_RWLOCK_RELEASE_RD));
183
184 spin_lock(&rwp->rw_lock);
185 rwp->rw_completed++;
186 rwp->rw_holders--;
187 spin_unlock(&rwp->rw_lock);
188 splat_vprint(rwp->rw_file, rwt->rwt_name,
189 "%s dropped rwlock (%d holding/%d waiting)\n",
190 rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
191
192 rw_exit(&rwp->rw_rwlock);
193
194 return 0;
195 }
196
197 static int
198 splat_rwlock_test1(struct file *file, void *arg)
199 {
200 int i, count = 0, rc = 0;
201 rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
202 rw_priv_t *rwp;
203
204 rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
205 if (rwp == NULL)
206 return -ENOMEM;
207
208 splat_init_rw_priv(rwp, file);
209
210 /* Create some threads, the exact number isn't important just as
211 * long as we know how many we managed to create and should expect. */
212 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
213 rwt[i].rwt_rwp = rwp;
214 rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME;
215
216 /* The first thread will be the writer */
217 if (i == 0)
218 rwt[i].rwt_thread = spl_kthread_create(splat_rwlock_wr_thr,
219 &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);
220 else
221 rwt[i].rwt_thread = spl_kthread_create(splat_rwlock_rd_thr,
222 &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);
223
224 if (!IS_ERR(rwt[i].rwt_thread)) {
225 wake_up_process(rwt[i].rwt_thread);
226 count++;
227 }
228 }
229
230 /* Wait for the writer */
231 while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders == 0)) {
232 wake_up_interruptible(&rwp->rw_waitq);
233 msleep(100);
234 }
235
236 /* Wait for 'count-1' readers */
237 while (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters < count - 1)) {
238 wake_up_interruptible(&rwp->rw_waitq);
239 msleep(100);
240 }
241
242 /* Verify there is only one lock holder */
243 if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders) != 1) {
244 splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only 1 holder "
245 "expected for rwlock (%d holding/%d waiting)\n",
246 rwp->rw_holders, rwp->rw_waiters);
247 rc = -EINVAL;
248 }
249
250 /* Verify 'count-1' readers */
251 if (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters != count - 1)) {
252 splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d waiters "
253 "expected for rwlock (%d holding/%d waiting)\n",
254 count - 1, rwp->rw_holders, rwp->rw_waiters);
255 rc = -EINVAL;
256 }
257
258 /* Signal the writer to release, allows readers to acquire */
259 spin_lock(&rwp->rw_lock);
260 rwp->rw_release = SPLAT_RWLOCK_RELEASE_WR;
261 wake_up_interruptible(&rwp->rw_waitq);
262 spin_unlock(&rwp->rw_lock);
263
264 /* Wait for 'count-1' readers to hold the lock */
265 while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders < count - 1)) {
266 wake_up_interruptible(&rwp->rw_waitq);
267 msleep(100);
268 }
269
270 /* Verify there are 'count-1' readers */
271 if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders != count - 1)) {
272 splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d holders "
273 "expected for rwlock (%d holding/%d waiting)\n",
274 count - 1, rwp->rw_holders, rwp->rw_waiters);
275 rc = -EINVAL;
276 }
277
278 /* Release 'count-1' readers */
279 spin_lock(&rwp->rw_lock);
280 rwp->rw_release = SPLAT_RWLOCK_RELEASE_RD;
281 wake_up_interruptible(&rwp->rw_waitq);
282 spin_unlock(&rwp->rw_lock);
283
284 /* Wait for the test to complete */
285 while (splat_locked_test(&rwp->rw_lock,
286 rwp->rw_holders>0 || rwp->rw_waiters>0))
287 msleep(100);
288
289 rw_destroy(&(rwp->rw_rwlock));
290 kfree(rwp);
291
292 return rc;
293 }
294
295 static void
296 splat_rwlock_test2_func(void *arg)
297 {
298 rw_priv_t *rwp = (rw_priv_t *)arg;
299 int rc;
300 ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
301
302 /* Read the value before sleeping and write it after we wake up to
303 * maximize the chance of a race if rwlocks are not working properly */
304 rw_enter(&rwp->rw_rwlock, RW_WRITER);
305 rc = rwp->rw_rc;
306 set_current_state(TASK_INTERRUPTIBLE);
307 schedule_timeout(HZ / 100); /* 1/100 of a second */
308 VERIFY(rwp->rw_rc == rc);
309 rwp->rw_rc = rc + 1;
310 rw_exit(&rwp->rw_rwlock);
311 }
312
313 static int
314 splat_rwlock_test2(struct file *file, void *arg)
315 {
316 rw_priv_t *rwp;
317 taskq_t *tq;
318 int i, rc = 0, tq_count = 256;
319
320 rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
321 if (rwp == NULL)
322 return -ENOMEM;
323
324 splat_init_rw_priv(rwp, file);
325
326 /* Create several threads allowing tasks to race with each other */
327 tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, num_online_cpus(),
328 maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
329 if (tq == NULL) {
330 rc = -ENOMEM;
331 goto out;
332 }
333
334 /*
335 * Schedule N work items to the work queue each of which enters the
336 * writer rwlock, sleeps briefly, then exits the writer rwlock. On a
337 * multiprocessor box these work items will be handled by all available
338 * CPUs. The task function checks to ensure the tracked shared variable
339 * is always only incremented by one. Additionally, the rwlock itself
340 * is instrumented such that if any two processors are in the
341 * critical region at the same time the system will panic. If the
342 * rwlock is implemented right this will never happy, that's a pass.
343 */
344 for (i = 0; i < tq_count; i++) {
345 if (!taskq_dispatch(tq,splat_rwlock_test2_func,rwp,TQ_SLEEP)) {
346 splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME,
347 "Failed to queue task %d\n", i);
348 rc = -EINVAL;
349 }
350 }
351
352 taskq_wait(tq);
353
354 if (rwp->rw_rc == tq_count) {
355 splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
356 "correctly entered/exited the rwlock %d times\n",
357 num_online_cpus(), rwp->rw_rc);
358 } else {
359 splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
360 "only processed %d/%d w rwlock work items\n",
361 num_online_cpus(), rwp->rw_rc, tq_count);
362 rc = -EINVAL;
363 }
364
365 taskq_destroy(tq);
366 rw_destroy(&(rwp->rw_rwlock));
367 out:
368 kfree(rwp);
369 return rc;
370 }
371
/*
 * Exercise one RW_*_HELD() predicate in all four lock states: while
 * read held, after read release, while write held, and after write
 * release.  'rc' receives -EINVAL if any observed result differs from
 * the expected rex1/rex2/wex1/wex2 values, otherwise 0.
 *
 * Fix: the macro previously ended with "while(0);" — the trailing
 * semicolon defeats the do/while(0) idiom by emitting a stray empty
 * statement and breaking use inside an unbraced if/else; the caller
 * now supplies the terminating semicolon as usual.
 */
#define splat_rwlock_test3_helper(rwp,rex1,rex2,wex1,wex2,held_func,rc)	\
do {									\
	int result, _rc1_, _rc2_, _rc3_, _rc4_;				\
									\
	rc = 0;								\
	rw_enter(&(rwp)->rw_rwlock, RW_READER);				\
	_rc1_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex1);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
	    " returned %d (expected %d) when RW_READER\n",		\
	    _rc1_ ? "Fail " : "", result, rex1);			\
	rw_exit(&(rwp)->rw_rwlock);					\
	_rc2_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex2);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
	    " returned %d (expected %d) when !RW_READER\n",		\
	    _rc2_ ? "Fail " : "", result, rex2);			\
									\
	rw_enter(&(rwp)->rw_rwlock, RW_WRITER);				\
	_rc3_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex1);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
	    " returned %d (expected %d) when RW_WRITER\n",		\
	    _rc3_ ? "Fail " : "", result, wex1);			\
	rw_exit(&(rwp)->rw_rwlock);					\
	_rc4_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex2);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
	    " returned %d (expected %d) when !RW_WRITER\n",		\
	    _rc4_ ? "Fail " : "", result, wex2);			\
									\
	rc = ((_rc1_ || _rc2_ || _rc3_ || _rc4_) ? -EINVAL : 0);	\
} while (0)
401
402 static int
403 splat_rwlock_test3(struct file *file, void *arg)
404 {
405 rw_priv_t *rwp;
406 int rc1, rc2, rc3;
407
408 rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
409 if (rwp == NULL)
410 return -ENOMEM;
411
412 splat_init_rw_priv(rwp, file);
413
414 splat_rwlock_test3_helper(rwp, 1, 0, 1, 0, RW_LOCK_HELD, rc1);
415 splat_rwlock_test3_helper(rwp, 1, 0, 0, 0, RW_READ_HELD, rc2);
416 splat_rwlock_test3_helper(rwp, 0, 0, 1, 0, RW_WRITE_HELD, rc3);
417
418 rw_destroy(&rwp->rw_rwlock);
419 kfree(rwp);
420
421 return ((rc1 || rc2 || rc3) ? -EINVAL : 0);
422 }
423
424 static void
425 splat_rwlock_test4_func(void *arg)
426 {
427 rw_priv_t *rwp = (rw_priv_t *)arg;
428 ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
429
430 if (rw_tryenter(&rwp->rw_rwlock, rwp->rw_type)) {
431 rwp->rw_rc = 0;
432 rw_exit(&rwp->rw_rwlock);
433 } else {
434 rwp->rw_rc = -EBUSY;
435 }
436 }
437
438 static char *
439 splat_rwlock_test4_name(krw_t type)
440 {
441 switch (type) {
442 case RW_NONE: return "RW_NONE";
443 case RW_WRITER: return "RW_WRITER";
444 case RW_READER: return "RW_READER";
445 }
446
447 return NULL;
448 }
449
450 static int
451 splat_rwlock_test4_type(taskq_t *tq, rw_priv_t *rwp, int expected_rc,
452 krw_t holder_type, krw_t try_type)
453 {
454 int id, rc = 0;
455
456 /* Schedule a task function which will try and acquire the rwlock
457 * using type try_type while the rwlock is being held as holder_type.
458 * The result must match expected_rc for the test to pass */
459 rwp->rw_rc = -EINVAL;
460 rwp->rw_type = try_type;
461
462 if (holder_type == RW_WRITER || holder_type == RW_READER)
463 rw_enter(&rwp->rw_rwlock, holder_type);
464
465 id = taskq_dispatch(tq, splat_rwlock_test4_func, rwp, TQ_SLEEP);
466 if (id == 0) {
467 splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME, "%s",
468 "taskq_dispatch() failed\n");
469 rc = -EINVAL;
470 goto out;
471 }
472
473 taskq_wait_id(tq, id);
474
475 if (rwp->rw_rc != expected_rc)
476 rc = -EINVAL;
477
478 splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME,
479 "%srw_tryenter(%s) returned %d (expected %d) when %s\n",
480 rc ? "Fail " : "", splat_rwlock_test4_name(try_type),
481 rwp->rw_rc, expected_rc,
482 splat_rwlock_test4_name(holder_type));
483 out:
484 if (holder_type == RW_WRITER || holder_type == RW_READER)
485 rw_exit(&rwp->rw_rwlock);
486
487 return rc;
488 }
489
490 static int
491 splat_rwlock_test4(struct file *file, void *arg)
492 {
493 rw_priv_t *rwp;
494 taskq_t *tq;
495 int rc = 0, rc1, rc2, rc3, rc4, rc5, rc6;
496
497 rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
498 if (rwp == NULL)
499 return -ENOMEM;
500
501 tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, 1, maxclsyspri,
502 50, INT_MAX, TASKQ_PREPOPULATE);
503 if (tq == NULL) {
504 rc = -ENOMEM;
505 goto out;
506 }
507
508 splat_init_rw_priv(rwp, file);
509
510 /* Validate all combinations of rw_tryenter() contention */
511 rc1 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_WRITER);
512 rc2 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_READER);
513 rc3 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_WRITER);
514 rc4 = splat_rwlock_test4_type(tq, rwp, 0, RW_READER, RW_READER);
515 rc5 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_WRITER);
516 rc6 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_READER);
517
518 if (rc1 || rc2 || rc3 || rc4 || rc5 || rc6)
519 rc = -EINVAL;
520
521 taskq_destroy(tq);
522 out:
523 rw_destroy(&(rwp->rw_rwlock));
524 kfree(rwp);
525
526 return rc;
527 }
528
529 static int
530 splat_rwlock_test5(struct file *file, void *arg)
531 {
532 rw_priv_t *rwp;
533 int rc = -EINVAL;
534
535 rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
536 if (rwp == NULL)
537 return -ENOMEM;
538
539 splat_init_rw_priv(rwp, file);
540
541 rw_enter(&rwp->rw_rwlock, RW_WRITER);
542 if (!RW_WRITE_HELD(&rwp->rw_rwlock)) {
543 splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
544 "rwlock should be write lock: %d\n",
545 RW_WRITE_HELD(&rwp->rw_rwlock));
546 goto out;
547 }
548
549 rw_downgrade(&rwp->rw_rwlock);
550 if (!RW_READ_HELD(&rwp->rw_rwlock)) {
551 splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
552 "rwlock should be read lock: %d\n",
553 RW_READ_HELD(&rwp->rw_rwlock));
554 goto out;
555 }
556
557 rc = 0;
558 splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "%s",
559 "rwlock properly downgraded\n");
560 out:
561 rw_exit(&rwp->rw_rwlock);
562 rw_destroy(&rwp->rw_rwlock);
563 kfree(rwp);
564
565 return rc;
566 }
567
568 static int
569 splat_rwlock_test6(struct file *file, void *arg)
570 {
571 rw_priv_t *rwp;
572 int rc;
573
574 rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
575 if (rwp == NULL)
576 return -ENOMEM;
577
578 splat_init_rw_priv(rwp, file);
579
580 rw_enter(&rwp->rw_rwlock, RW_READER);
581 if (!RW_READ_HELD(&rwp->rw_rwlock)) {
582 splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
583 "rwlock should be read lock: %d\n",
584 RW_READ_HELD(&rwp->rw_rwlock));
585 rc = -ENOLCK;
586 goto out;
587 }
588
589 #if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
590 /* With one reader upgrade should never fail. */
591 rc = rw_tryupgrade(&rwp->rw_rwlock);
592 if (!rc) {
593 splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
594 "rwlock failed upgrade from reader: %d\n",
595 RW_READ_HELD(&rwp->rw_rwlock));
596 rc = -ENOLCK;
597 goto out;
598 }
599
600 if (RW_READ_HELD(&rwp->rw_rwlock) || !RW_WRITE_HELD(&rwp->rw_rwlock)) {
601 splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should "
602 "have 0 (not %d) reader and 1 (not %d) writer\n",
603 RW_READ_HELD(&rwp->rw_rwlock),
604 RW_WRITE_HELD(&rwp->rw_rwlock));
605 goto out;
606 }
607
608 rc = 0;
609 splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
610 "rwlock properly upgraded\n");
611 #else
612 rc = 0;
613 splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
614 "rw_tryupgrade() is disabled for this arch\n");
615 #endif
616 out:
617 rw_exit(&rwp->rw_rwlock);
618 rw_destroy(&rwp->rw_rwlock);
619 kfree(rwp);
620
621 return rc;
622 }
623
624 splat_subsystem_t *
625 splat_rwlock_init(void)
626 {
627 splat_subsystem_t *sub;
628
629 sub = kmalloc(sizeof(*sub), GFP_KERNEL);
630 if (sub == NULL)
631 return NULL;
632
633 memset(sub, 0, sizeof(*sub));
634 strncpy(sub->desc.name, SPLAT_RWLOCK_NAME, SPLAT_NAME_SIZE);
635 strncpy(sub->desc.desc, SPLAT_RWLOCK_DESC, SPLAT_DESC_SIZE);
636 INIT_LIST_HEAD(&sub->subsystem_list);
637 INIT_LIST_HEAD(&sub->test_list);
638 spin_lock_init(&sub->test_lock);
639 sub->desc.id = SPLAT_SUBSYSTEM_RWLOCK;
640
641 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST1_NAME, SPLAT_RWLOCK_TEST1_DESC,
642 SPLAT_RWLOCK_TEST1_ID, splat_rwlock_test1);
643 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST2_NAME, SPLAT_RWLOCK_TEST2_DESC,
644 SPLAT_RWLOCK_TEST2_ID, splat_rwlock_test2);
645 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST3_NAME, SPLAT_RWLOCK_TEST3_DESC,
646 SPLAT_RWLOCK_TEST3_ID, splat_rwlock_test3);
647 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST4_NAME, SPLAT_RWLOCK_TEST4_DESC,
648 SPLAT_RWLOCK_TEST4_ID, splat_rwlock_test4);
649 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST5_NAME, SPLAT_RWLOCK_TEST5_DESC,
650 SPLAT_RWLOCK_TEST5_ID, splat_rwlock_test5);
651 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST6_NAME, SPLAT_RWLOCK_TEST6_DESC,
652 SPLAT_RWLOCK_TEST6_ID, splat_rwlock_test6);
653
654 return sub;
655 }
656
657 void
658 splat_rwlock_fini(splat_subsystem_t *sub)
659 {
660 ASSERT(sub);
661 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST6_ID);
662 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST5_ID);
663 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST4_ID);
664 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST3_ID);
665 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST2_ID);
666 SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST1_ID);
667 kfree(sub);
668 }
669
670 int
671 splat_rwlock_id(void) {
672 return SPLAT_SUBSYSTEM_RWLOCK;
673 }