/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer Tests (SPLAT) Reader/Writer Lock Tests.
\*****************************************************************************/

#include <sys/rwlock.h>
#include <sys/taskq.h>
#include <sys/random.h>
#include "splat-internal.h"

#define SPLAT_RWLOCK_NAME               "rwlock"
#define SPLAT_RWLOCK_DESC               "Kernel RW Lock Tests"

#define SPLAT_RWLOCK_TEST1_ID           0x0701
#define SPLAT_RWLOCK_TEST1_NAME         "N-rd/1-wr"
#define SPLAT_RWLOCK_TEST1_DESC         "Multiple readers one writer"

#define SPLAT_RWLOCK_TEST2_ID           0x0702
#define SPLAT_RWLOCK_TEST2_NAME         "0-rd/N-wr"
#define SPLAT_RWLOCK_TEST2_DESC         "Multiple writers"

#define SPLAT_RWLOCK_TEST3_ID           0x0703
#define SPLAT_RWLOCK_TEST3_NAME         "held"
#define SPLAT_RWLOCK_TEST3_DESC         "RW_{LOCK|READ|WRITE}_HELD"

#define SPLAT_RWLOCK_TEST4_ID           0x0704
#define SPLAT_RWLOCK_TEST4_NAME         "tryenter"
#define SPLAT_RWLOCK_TEST4_DESC         "Tryenter"

#define SPLAT_RWLOCK_TEST5_ID           0x0705
#define SPLAT_RWLOCK_TEST5_NAME         "rw_downgrade"
#define SPLAT_RWLOCK_TEST5_DESC         "Write downgrade"

#define SPLAT_RWLOCK_TEST6_ID           0x0706
#define SPLAT_RWLOCK_TEST6_NAME         "rw_tryupgrade"
#define SPLAT_RWLOCK_TEST6_DESC         "Read upgrade"

#define SPLAT_RWLOCK_TEST_MAGIC         0x115599DDUL
#define SPLAT_RWLOCK_TEST_NAME          "rwlock_test"
#define SPLAT_RWLOCK_TEST_TASKQ         "rwlock_taskq"
#define SPLAT_RWLOCK_TEST_COUNT         8

#define SPLAT_RWLOCK_RELEASE_INIT       0
#define SPLAT_RWLOCK_RELEASE_WR         1
#define SPLAT_RWLOCK_RELEASE_RD         2

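/*
 * Shared state for the rwlock tests.  A single rw_priv_t is allocated per
 * test run; the embedded spinlock protects the holder/waiter counters and
 * the release flag, while the wait queue is used to coordinate the control
 * thread with the reader and writer threads.
 */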
typedef struct rw_priv {
        unsigned long rw_magic;
        struct file *rw_file;
        krwlock_t rw_rwlock;
        spinlock_t rw_lock;
        wait_queue_head_t rw_waitq;
        int rw_completed;
        int rw_holders;
        int rw_waiters;
        int rw_release;
        int rw_rc;
        krw_t rw_type;
} rw_priv_t;

typedef struct rw_thr {
        const char *rwt_name;
        rw_priv_t *rwt_rwp;
        int rwt_id;
} rw_thr_t;

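/* Initialize the shared test state and the rwlock under test. */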
void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
{
        rwp->rw_magic = SPLAT_RWLOCK_TEST_MAGIC;
        rwp->rw_file = file;
        rw_init(&rwp->rw_rwlock, SPLAT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
        spin_lock_init(&rwp->rw_lock);
        init_waitqueue_head(&rwp->rw_waitq);
        rwp->rw_completed = 0;
        rwp->rw_holders = 0;
        rwp->rw_waiters = 0;
        rwp->rw_release = SPLAT_RWLOCK_RELEASE_INIT;
        rwp->rw_rc = 0;
        rwp->rw_type = 0;
}

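/*
 * Writer thread for test 1: take the rwlock as RW_WRITER after a short
 * random delay, update the holder/waiter counters under the spinlock, and
 * hold the lock until the control thread sets rw_release to
 * SPLAT_RWLOCK_RELEASE_WR.
 */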
static int
splat_rwlock_wr_thr(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwp = rwt->rwt_rwp;
        uint8_t rnd;
        char name[16];

        ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
        snprintf(name, sizeof(name), "rwlock_wr_thr%d", rwt->rwt_id);
        daemonize(name);
        get_random_bytes((void *)&rnd, 1);
        msleep((unsigned int)rnd);

        splat_vprint(rwp->rw_file, rwt->rwt_name,
                     "%s trying to acquire rwlock (%d holding/%d waiting)\n",
                     name, rwp->rw_holders, rwp->rw_waiters);
        spin_lock(&rwp->rw_lock);
        rwp->rw_waiters++;
        spin_unlock(&rwp->rw_lock);
        rw_enter(&rwp->rw_rwlock, RW_WRITER);

        spin_lock(&rwp->rw_lock);
        rwp->rw_waiters--;
        rwp->rw_holders++;
        spin_unlock(&rwp->rw_lock);
        splat_vprint(rwp->rw_file, rwt->rwt_name,
                     "%s acquired rwlock (%d holding/%d waiting)\n",
                     name, rwp->rw_holders, rwp->rw_waiters);

        /* Wait for control thread to signal we can release the write lock */
        wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
                                 rwp->rw_release == SPLAT_RWLOCK_RELEASE_WR));

        spin_lock(&rwp->rw_lock);
        rwp->rw_completed++;
        rwp->rw_holders--;
        spin_unlock(&rwp->rw_lock);
        splat_vprint(rwp->rw_file, rwt->rwt_name,
                     "%s dropped rwlock (%d holding/%d waiting)\n",
                     name, rwp->rw_holders, rwp->rw_waiters);

        rw_exit(&rwp->rw_rwlock);

        return 0;
}

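/*
 * Reader thread for test 1: wait until the writer holds the lock, then
 * block in rw_enter(RW_READER) until the writer drops it.  The read lock is
 * held until the control thread sets rw_release to SPLAT_RWLOCK_RELEASE_RD.
 */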
static int
splat_rwlock_rd_thr(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwp = rwt->rwt_rwp;
        uint8_t rnd;
        char name[16];

        ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
        snprintf(name, sizeof(name), "rwlock_rd_thr%d", rwt->rwt_id);
        daemonize(name);
        get_random_bytes((void *)&rnd, 1);
        msleep((unsigned int)rnd);

        /* Don't try to take the rwlock until after someone else holds it */
        wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
                                 rwp->rw_holders > 0));

        splat_vprint(rwp->rw_file, rwt->rwt_name,
                     "%s trying to acquire rwlock (%d holding/%d waiting)\n",
                     name, rwp->rw_holders, rwp->rw_waiters);
        spin_lock(&rwp->rw_lock);
        rwp->rw_waiters++;
        spin_unlock(&rwp->rw_lock);
        rw_enter(&rwp->rw_rwlock, RW_READER);

        spin_lock(&rwp->rw_lock);
        rwp->rw_waiters--;
        rwp->rw_holders++;
        spin_unlock(&rwp->rw_lock);
        splat_vprint(rwp->rw_file, rwt->rwt_name,
                     "%s acquired rwlock (%d holding/%d waiting)\n",
                     name, rwp->rw_holders, rwp->rw_waiters);

        /* Wait for control thread to signal we can release the read lock */
        wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
                                 rwp->rw_release == SPLAT_RWLOCK_RELEASE_RD));

        spin_lock(&rwp->rw_lock);
        rwp->rw_completed++;
        rwp->rw_holders--;
        spin_unlock(&rwp->rw_lock);
        splat_vprint(rwp->rw_file, rwt->rwt_name,
                     "%s dropped rwlock (%d holding/%d waiting)\n",
                     name, rwp->rw_holders, rwp->rw_waiters);

        rw_exit(&rwp->rw_rwlock);

        return 0;
}

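/*
 * Test 1: N-rd/1-wr.  Spawn one writer and several reader threads.  The
 * writer must acquire the lock first and hold out all of the readers; once
 * the writer releases, every reader should hold the lock concurrently.
 * The holder/waiter counts are verified at each stage.
 */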
static int
splat_rwlock_test1(struct file *file, void *arg)
{
        int i, count = 0, rc = 0;
        long pids[SPLAT_RWLOCK_TEST_COUNT];
        rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
        rw_priv_t *rwp;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        /* Create some threads; the exact number isn't important as long as
         * we know how many we managed to create and should expect. */
        for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
                rwt[i].rwt_rwp = rwp;
                rwt[i].rwt_id = i;
                rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME;

                /* The first thread will be the writer */
                if (i == 0)
                        pids[i] = kernel_thread(splat_rwlock_wr_thr, &rwt[i], 0);
                else
                        pids[i] = kernel_thread(splat_rwlock_rd_thr, &rwt[i], 0);

                if (pids[i] >= 0)
                        count++;
        }

        /* Wait for the writer */
        while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders == 0)) {
                wake_up_interruptible(&rwp->rw_waitq);
                msleep(100);
        }

        /* Wait for 'count-1' readers */
        while (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters < count - 1)) {
                wake_up_interruptible(&rwp->rw_waitq);
                msleep(100);
        }

        /* Verify there is only one lock holder */
        if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders) != 1) {
                splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only 1 holder "
                             "expected for rwlock (%d holding/%d waiting)\n",
                             rwp->rw_holders, rwp->rw_waiters);
                rc = -EINVAL;
        }

        /* Verify 'count-1' readers */
        if (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters != count - 1)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d waiters "
                             "expected for rwlock (%d holding/%d waiting)\n",
                             count - 1, rwp->rw_holders, rwp->rw_waiters);
                rc = -EINVAL;
        }

        /* Signal the writer to release, allows readers to acquire */
        spin_lock(&rwp->rw_lock);
        rwp->rw_release = SPLAT_RWLOCK_RELEASE_WR;
        wake_up_interruptible(&rwp->rw_waitq);
        spin_unlock(&rwp->rw_lock);

        /* Wait for 'count-1' readers to hold the lock */
        while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders < count - 1)) {
                wake_up_interruptible(&rwp->rw_waitq);
                msleep(100);
        }

        /* Verify there are 'count-1' readers */
        if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders != count - 1)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d holders "
                             "expected for rwlock (%d holding/%d waiting)\n",
                             count - 1, rwp->rw_holders, rwp->rw_waiters);
                rc = -EINVAL;
        }

        /* Release 'count-1' readers */
        spin_lock(&rwp->rw_lock);
        rwp->rw_release = SPLAT_RWLOCK_RELEASE_RD;
        wake_up_interruptible(&rwp->rw_waitq);
        spin_unlock(&rwp->rw_lock);

        /* Wait for the test to complete */
        while (splat_locked_test(&rwp->rw_lock,
                                 rwp->rw_holders > 0 || rwp->rw_waiters > 0))
                msleep(100);

        rw_destroy(&(rwp->rw_rwlock));
        kfree(rwp);

        return rc;
}

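/*
 * Task function for test 2: each dispatched task takes the write lock,
 * samples the shared counter, sleeps briefly while still holding the lock,
 * verifies the counter is unchanged, and then increments it.  Any overlap
 * between writers would trip the VERIFY().
 */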
static void
splat_rwlock_test2_func(void *arg)
{
        rw_priv_t *rwp = (rw_priv_t *)arg;
        int rc;
        ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

        /* Read the value before sleeping and write it after we wake up to
         * maximize the chance of a race if rwlocks are not working properly */
        rw_enter(&rwp->rw_rwlock, RW_WRITER);
        rc = rwp->rw_rc;
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(HZ / 100);  /* 1/100 of a second */
        VERIFY(rwp->rw_rc == rc);
        rwp->rw_rc = rc + 1;
        rw_exit(&rwp->rw_rwlock);
}

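/*
 * Test 2: 0-rd/N-wr.  Dispatch many write-lock work items to a taskq sized
 * to the number of online CPUs and check that the shared counter reaches
 * exactly the number of dispatched items.
 */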
static int
splat_rwlock_test2(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        taskq_t *tq;
        int i, rc = 0, tq_count = 256;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        /* Create several threads allowing tasks to race with each other */
        tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, num_online_cpus(),
                          maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
        if (tq == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        /*
         * Schedule N work items to the work queue each of which enters the
         * writer rwlock, sleeps briefly, then exits the writer rwlock.  On a
         * multiprocessor box these work items will be handled by all available
         * CPUs.  The task function checks to ensure the tracked shared variable
         * is always only incremented by one.  Additionally, the rwlock itself
         * is instrumented such that if any two processors are in the
         * critical region at the same time the system will panic.  If the
         * rwlock is implemented correctly this will never happen, and that
         * is a pass.
         */
        for (i = 0; i < tq_count; i++) {
                if (!taskq_dispatch(tq, splat_rwlock_test2_func, rwp, TQ_SLEEP)) {
                        splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME,
                                     "Failed to queue task %d\n", i);
                        rc = -EINVAL;
                }
        }

        taskq_wait(tq);

        if (rwp->rw_rc == tq_count) {
                splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
                             "correctly entered/exited the rwlock %d times\n",
                             num_online_cpus(), rwp->rw_rc);
        } else {
                splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
                             "only processed %d/%d write rwlock work items\n",
                             num_online_cpus(), rwp->rw_rc, tq_count);
                rc = -EINVAL;
        }

        taskq_destroy(tq);
        rw_destroy(&(rwp->rw_rwlock));
out:
        kfree(rwp);
        return rc;
}

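/*
 * Helper for test 3: exercise a RW_*_HELD() predicate while the lock is
 * held as a reader, not held, held as a writer, and not held again, and
 * compare each result with the expected value (rex1/rex2/wex1/wex2).
 */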
#define splat_rwlock_test3_helper(rwp,rex1,rex2,wex1,wex2,held_func,rc)       \
do {                                                                          \
        int result, _rc1_, _rc2_, _rc3_, _rc4_;                               \
                                                                              \
        rc = 0;                                                               \
        rw_enter(&(rwp)->rw_rwlock, RW_READER);                               \
        _rc1_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex1);            \
        splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func           \
                     " returned %d (expected %d) when RW_READER\n",           \
                     _rc1_ ? "Fail " : "", result, rex1);                     \
        rw_exit(&(rwp)->rw_rwlock);                                           \
        _rc2_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex2);            \
        splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func           \
                     " returned %d (expected %d) when !RW_READER\n",          \
                     _rc2_ ? "Fail " : "", result, rex2);                     \
                                                                              \
        rw_enter(&(rwp)->rw_rwlock, RW_WRITER);                               \
        _rc3_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex1);            \
        splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func           \
                     " returned %d (expected %d) when RW_WRITER\n",           \
                     _rc3_ ? "Fail " : "", result, wex1);                     \
        rw_exit(&(rwp)->rw_rwlock);                                           \
        _rc4_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex2);            \
        splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func           \
                     " returned %d (expected %d) when !RW_WRITER\n",          \
                     _rc4_ ? "Fail " : "", result, wex2);                     \
                                                                              \
        rc = ((_rc1_ || _rc2_ || _rc3_ || _rc4_) ? -EINVAL : 0);              \
} while (0)

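/*
 * Test 3: verify RW_LOCK_HELD(), RW_READ_HELD() and RW_WRITE_HELD() report
 * the correct state for reader-held, writer-held, and unheld locks.
 */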
static int
splat_rwlock_test3(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        int rc1, rc2, rc3;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        splat_rwlock_test3_helper(rwp, 1, 0, 1, 0, RW_LOCK_HELD, rc1);
        splat_rwlock_test3_helper(rwp, 1, 0, 0, 0, RW_READ_HELD, rc2);
        splat_rwlock_test3_helper(rwp, 0, 0, 1, 0, RW_WRITE_HELD, rc3);

        rw_destroy(&rwp->rw_rwlock);
        kfree(rwp);

        return ((rc1 || rc2 || rc3) ? -EINVAL : 0);
}

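/*
 * Task function for test 4: attempt a single rw_tryenter() of the requested
 * type and record 0 on success or -EBUSY on failure in rw_rc.
 */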
static void
splat_rwlock_test4_func(void *arg)
{
        rw_priv_t *rwp = (rw_priv_t *)arg;
        ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

        if (rw_tryenter(&rwp->rw_rwlock, rwp->rw_type)) {
                rwp->rw_rc = 0;
                rw_exit(&rwp->rw_rwlock);
        } else {
                rwp->rw_rc = -EBUSY;
        }
}

static char *
splat_rwlock_test4_name(krw_t type)
{
        switch (type) {
        case RW_NONE:   return "RW_NONE";
        case RW_WRITER: return "RW_WRITER";
        case RW_READER: return "RW_READER";
        }

        return NULL;
}

static int
splat_rwlock_test4_type(taskq_t *tq, rw_priv_t *rwp, int expected_rc,
                        krw_t holder_type, krw_t try_type)
{
        int id, rc = 0;

        /* Schedule a task function which will try to acquire the rwlock
         * using type try_type while the rwlock is being held as holder_type.
         * The result must match expected_rc for the test to pass */
        rwp->rw_rc = -EINVAL;
        rwp->rw_type = try_type;

        if (holder_type == RW_WRITER || holder_type == RW_READER)
                rw_enter(&rwp->rw_rwlock, holder_type);

        id = taskq_dispatch(tq, splat_rwlock_test4_func, rwp, TQ_SLEEP);
        if (id == 0) {
                splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME, "%s",
                             "taskq_dispatch() failed\n");
                rc = -EINVAL;
                goto out;
        }

        taskq_wait_id(tq, id);

        if (rwp->rw_rc != expected_rc)
                rc = -EINVAL;

        splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME,
                     "%srw_tryenter(%s) returned %d (expected %d) when %s\n",
                     rc ? "Fail " : "", splat_rwlock_test4_name(try_type),
                     rwp->rw_rc, expected_rc,
                     splat_rwlock_test4_name(holder_type));
out:
        if (holder_type == RW_WRITER || holder_type == RW_READER)
                rw_exit(&rwp->rw_rwlock);

        return rc;
}

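/*
 * Test 4: verify rw_tryenter() semantics for every combination of holder
 * and try type: it must fail whenever a writer is involved on either side,
 * succeed for reader-on-reader, and succeed when the lock is free.
 */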
static int
splat_rwlock_test4(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        taskq_t *tq;
        int rc = 0, rc1, rc2, rc3, rc4, rc5, rc6;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        /* Initialize the rwlock before creating the taskq so the out: path
         * can always safely destroy it. */
        splat_init_rw_priv(rwp, file);

        tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, 1, maxclsyspri,
                          50, INT_MAX, TASKQ_PREPOPULATE);
        if (tq == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        /* Validate all combinations of rw_tryenter() contention */
        rc1 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_WRITER);
        rc2 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_READER);
        rc3 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_WRITER);
        rc4 = splat_rwlock_test4_type(tq, rwp, 0, RW_READER, RW_READER);
        rc5 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_WRITER);
        rc6 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_READER);

        if (rc1 || rc2 || rc3 || rc4 || rc5 || rc6)
                rc = -EINVAL;

        taskq_destroy(tq);
out:
        rw_destroy(&(rwp->rw_rwlock));
        kfree(rwp);

        return rc;
}

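/*
 * Test 5: take the lock as a writer, rw_downgrade() it, and confirm the
 * lock is then read-held rather than write-held.
 */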
static int
splat_rwlock_test5(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        int rc = -EINVAL;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        rw_enter(&rwp->rw_rwlock, RW_WRITER);
        if (!RW_WRITE_HELD(&rwp->rw_rwlock)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
                             "rwlock should be write lock: %d\n",
                             RW_WRITE_HELD(&rwp->rw_rwlock));
                goto out;
        }

        rw_downgrade(&rwp->rw_rwlock);
        if (!RW_READ_HELD(&rwp->rw_rwlock)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
                             "rwlock should be read lock: %d\n",
                             RW_READ_HELD(&rwp->rw_rwlock));
                goto out;
        }

        rc = 0;
        splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "%s",
                     "rwlock properly downgraded\n");
out:
        rw_exit(&rwp->rw_rwlock);
        rw_destroy(&rwp->rw_rwlock);
        kfree(rwp);

        return rc;
}

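/*
 * Test 6: take the lock as a reader and rw_tryupgrade() it to a writer.
 * With a single reader the upgrade must succeed; on kernels not using the
 * generic rwsem spinlock implementation the upgrade path is unavailable
 * and the test is skipped.
 */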
static int
splat_rwlock_test6(struct file *file, void *arg)
{
        rw_priv_t *rwp;
        int rc;

        rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
        if (rwp == NULL)
                return -ENOMEM;

        splat_init_rw_priv(rwp, file);

        rw_enter(&rwp->rw_rwlock, RW_READER);
        if (!RW_READ_HELD(&rwp->rw_rwlock)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
                             "rwlock should be read lock: %d\n",
                             RW_READ_HELD(&rwp->rw_rwlock));
                rc = -ENOLCK;
                goto out;
        }

#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
        /* With one reader upgrade should never fail. */
        rc = rw_tryupgrade(&rwp->rw_rwlock);
        if (!rc) {
                splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
                             "rwlock failed upgrade from reader: %d\n",
                             RW_READ_HELD(&rwp->rw_rwlock));
                rc = -ENOLCK;
                goto out;
        }

        if (RW_READ_HELD(&rwp->rw_rwlock) || !RW_WRITE_HELD(&rwp->rw_rwlock)) {
                splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should "
                             "have 0 (not %d) reader and 1 (not %d) writer\n",
                             RW_READ_HELD(&rwp->rw_rwlock),
                             RW_WRITE_HELD(&rwp->rw_rwlock));
                rc = -ENOLCK;
                goto out;
        }

        rc = 0;
        splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
                     "rwlock properly upgraded\n");
#else
        rc = 0;
        splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
                     "rw_tryupgrade() is disabled for this arch\n");
#endif
out:
        rw_exit(&rwp->rw_rwlock);
        rw_destroy(&rwp->rw_rwlock);
        kfree(rwp);

        return rc;
}

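/*
 * Construct the rwlock test subsystem descriptor and attach all six tests.
 */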
splat_subsystem_t *
splat_rwlock_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_RWLOCK_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_RWLOCK_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_RWLOCK;

        SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST1_NAME, SPLAT_RWLOCK_TEST1_DESC,
                        SPLAT_RWLOCK_TEST1_ID, splat_rwlock_test1);
        SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST2_NAME, SPLAT_RWLOCK_TEST2_DESC,
                        SPLAT_RWLOCK_TEST2_ID, splat_rwlock_test2);
        SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST3_NAME, SPLAT_RWLOCK_TEST3_DESC,
                        SPLAT_RWLOCK_TEST3_ID, splat_rwlock_test3);
        SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST4_NAME, SPLAT_RWLOCK_TEST4_DESC,
                        SPLAT_RWLOCK_TEST4_ID, splat_rwlock_test4);
        SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST5_NAME, SPLAT_RWLOCK_TEST5_DESC,
                        SPLAT_RWLOCK_TEST5_ID, splat_rwlock_test5);
        SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST6_NAME, SPLAT_RWLOCK_TEST6_DESC,
                        SPLAT_RWLOCK_TEST6_ID, splat_rwlock_test6);

        return sub;
}

void
splat_rwlock_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST1_ID);
        kfree(sub);
}

int
splat_rwlock_id(void)
{
        return SPLAT_SUBSYSTEM_RWLOCK;
}