1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Read/Writer Lock Tests.
25 \*****************************************************************************/
27 #include <sys/random.h>
28 #include <sys/rwlock.h>
29 #include <sys/taskq.h>
30 #include <linux/delay.h>
31 #include <linux/mm_compat.h>
32 #include "splat-internal.h"
34 #define SPLAT_RWLOCK_NAME "rwlock"
35 #define SPLAT_RWLOCK_DESC "Kernel RW Lock Tests"
37 #define SPLAT_RWLOCK_TEST1_ID 0x0701
38 #define SPLAT_RWLOCK_TEST1_NAME "N-rd/1-wr"
39 #define SPLAT_RWLOCK_TEST1_DESC "Multiple readers one writer"
41 #define SPLAT_RWLOCK_TEST2_ID 0x0702
42 #define SPLAT_RWLOCK_TEST2_NAME "0-rd/N-wr"
43 #define SPLAT_RWLOCK_TEST2_DESC "Multiple writers"
45 #define SPLAT_RWLOCK_TEST3_ID 0x0703
46 #define SPLAT_RWLOCK_TEST3_NAME "held"
47 #define SPLAT_RWLOCK_TEST3_DESC "RW_{LOCK|READ|WRITE}_HELD"
49 #define SPLAT_RWLOCK_TEST4_ID 0x0704
50 #define SPLAT_RWLOCK_TEST4_NAME "tryenter"
51 #define SPLAT_RWLOCK_TEST4_DESC "Tryenter"
53 #define SPLAT_RWLOCK_TEST5_ID 0x0705
54 #define SPLAT_RWLOCK_TEST5_NAME "rw_downgrade"
55 #define SPLAT_RWLOCK_TEST5_DESC "Write downgrade"
57 #define SPLAT_RWLOCK_TEST6_ID 0x0706
58 #define SPLAT_RWLOCK_TEST6_NAME "rw_tryupgrade-1"
59 #define SPLAT_RWLOCK_TEST6_DESC "rwsem->count value"
61 #define SPLAT_RWLOCK_TEST7_ID 0x0707
62 #define SPLAT_RWLOCK_TEST7_NAME "rw_tryupgrade-2"
63 #define SPLAT_RWLOCK_TEST7_DESC "Read upgrade"
65 #define SPLAT_RWLOCK_TEST_MAGIC 0x115599DDUL
66 #define SPLAT_RWLOCK_TEST_NAME "rwlock_test"
67 #define SPLAT_RWLOCK_TEST_TASKQ "rwlock_taskq"
68 #define SPLAT_RWLOCK_TEST_COUNT 8
70 #define SPLAT_RWLOCK_RELEASE_INIT 0
71 #define SPLAT_RWLOCK_RELEASE_WR 1
72 #define SPLAT_RWLOCK_RELEASE_RD 2
74 typedef struct rw_priv
{
75 unsigned long rw_magic
;
79 spl_wait_queue_head_t rw_waitq
;
88 typedef struct rw_thr
{
91 struct task_struct
*rwt_thread
;
94 void splat_init_rw_priv(rw_priv_t
*rwp
, struct file
*file
)
96 rwp
->rw_magic
= SPLAT_RWLOCK_TEST_MAGIC
;
98 rw_init(&rwp
->rw_rwlock
, SPLAT_RWLOCK_TEST_NAME
, RW_DEFAULT
, NULL
);
99 spin_lock_init(&rwp
->rw_lock
);
100 init_waitqueue_head(&rwp
->rw_waitq
);
101 rwp
->rw_completed
= 0;
104 rwp
->rw_release
= SPLAT_RWLOCK_RELEASE_INIT
;
109 #if defined(CONFIG_PREEMPT_RT_FULL)
111 splat_rwlock_test1(struct file
*file
, void *arg
)
114 * This test will never succeed on PREEMPT_RT_FULL because these
115 * kernels only allow a single thread to hold the lock.
121 splat_rwlock_wr_thr(void *arg
)
123 rw_thr_t
*rwt
= (rw_thr_t
*)arg
;
124 rw_priv_t
*rwp
= rwt
->rwt_rwp
;
127 ASSERT(rwp
->rw_magic
== SPLAT_RWLOCK_TEST_MAGIC
);
129 get_random_bytes((void *)&rnd
, 1);
130 msleep((unsigned int)rnd
);
132 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
133 "%s trying to acquire rwlock (%d holding/%d waiting)\n",
134 rwt
->rwt_thread
->comm
, rwp
->rw_holders
, rwp
->rw_waiters
);
135 spin_lock(&rwp
->rw_lock
);
137 spin_unlock(&rwp
->rw_lock
);
138 rw_enter(&rwp
->rw_rwlock
, RW_WRITER
);
140 spin_lock(&rwp
->rw_lock
);
143 spin_unlock(&rwp
->rw_lock
);
144 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
145 "%s acquired rwlock (%d holding/%d waiting)\n",
146 rwt
->rwt_thread
->comm
, rwp
->rw_holders
, rwp
->rw_waiters
);
148 /* Wait for control thread to signal we can release the write lock */
149 wait_event_interruptible(rwp
->rw_waitq
, splat_locked_test(&rwp
->rw_lock
,
150 rwp
->rw_release
== SPLAT_RWLOCK_RELEASE_WR
));
152 spin_lock(&rwp
->rw_lock
);
155 spin_unlock(&rwp
->rw_lock
);
156 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
157 "%s dropped rwlock (%d holding/%d waiting)\n",
158 rwt
->rwt_thread
->comm
, rwp
->rw_holders
, rwp
->rw_waiters
);
160 rw_exit(&rwp
->rw_rwlock
);
166 splat_rwlock_rd_thr(void *arg
)
168 rw_thr_t
*rwt
= (rw_thr_t
*)arg
;
169 rw_priv_t
*rwp
= rwt
->rwt_rwp
;
172 ASSERT(rwp
->rw_magic
== SPLAT_RWLOCK_TEST_MAGIC
);
174 get_random_bytes((void *)&rnd
, 1);
175 msleep((unsigned int)rnd
);
177 /* Don't try and take the semaphore until after someone has it */
178 wait_event_interruptible(rwp
->rw_waitq
,
179 splat_locked_test(&rwp
->rw_lock
, rwp
->rw_holders
> 0));
181 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
182 "%s trying to acquire rwlock (%d holding/%d waiting)\n",
183 rwt
->rwt_thread
->comm
, rwp
->rw_holders
, rwp
->rw_waiters
);
184 spin_lock(&rwp
->rw_lock
);
186 spin_unlock(&rwp
->rw_lock
);
187 rw_enter(&rwp
->rw_rwlock
, RW_READER
);
189 spin_lock(&rwp
->rw_lock
);
192 spin_unlock(&rwp
->rw_lock
);
193 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
194 "%s acquired rwlock (%d holding/%d waiting)\n",
195 rwt
->rwt_thread
->comm
, rwp
->rw_holders
, rwp
->rw_waiters
);
197 /* Wait for control thread to signal we can release the read lock */
198 wait_event_interruptible(rwp
->rw_waitq
, splat_locked_test(&rwp
->rw_lock
,
199 rwp
->rw_release
== SPLAT_RWLOCK_RELEASE_RD
));
201 spin_lock(&rwp
->rw_lock
);
204 spin_unlock(&rwp
->rw_lock
);
205 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
206 "%s dropped rwlock (%d holding/%d waiting)\n",
207 rwt
->rwt_thread
->comm
, rwp
->rw_holders
, rwp
->rw_waiters
);
209 rw_exit(&rwp
->rw_rwlock
);
215 splat_rwlock_test1(struct file
*file
, void *arg
)
217 int i
, count
= 0, rc
= 0;
218 rw_thr_t rwt
[SPLAT_RWLOCK_TEST_COUNT
];
221 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
225 splat_init_rw_priv(rwp
, file
);
227 /* Create some threads, the exact number isn't important just as
228 * long as we know how many we managed to create and should expect. */
229 for (i
= 0; i
< SPLAT_RWLOCK_TEST_COUNT
; i
++) {
230 rwt
[i
].rwt_rwp
= rwp
;
231 rwt
[i
].rwt_name
= SPLAT_RWLOCK_TEST1_NAME
;
233 /* The first thread will be the writer */
235 rwt
[i
].rwt_thread
= spl_kthread_create(splat_rwlock_wr_thr
,
236 &rwt
[i
], "%s/%d", SPLAT_RWLOCK_TEST_NAME
, i
);
238 rwt
[i
].rwt_thread
= spl_kthread_create(splat_rwlock_rd_thr
,
239 &rwt
[i
], "%s/%d", SPLAT_RWLOCK_TEST_NAME
, i
);
241 if (!IS_ERR(rwt
[i
].rwt_thread
)) {
242 wake_up_process(rwt
[i
].rwt_thread
);
247 /* Wait for the writer */
248 while (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_holders
== 0)) {
249 wake_up_interruptible(&rwp
->rw_waitq
);
253 /* Wait for 'count-1' readers */
254 while (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_waiters
< count
- 1)) {
255 wake_up_interruptible(&rwp
->rw_waitq
);
259 /* Verify there is only one lock holder */
260 if (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_holders
) != 1) {
261 splat_vprint(file
, SPLAT_RWLOCK_TEST1_NAME
, "Only 1 holder "
262 "expected for rwlock (%d holding/%d waiting)\n",
263 rwp
->rw_holders
, rwp
->rw_waiters
);
267 /* Verify 'count-1' readers */
268 if (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_waiters
!= count
- 1)) {
269 splat_vprint(file
, SPLAT_RWLOCK_TEST1_NAME
, "Only %d waiters "
270 "expected for rwlock (%d holding/%d waiting)\n",
271 count
- 1, rwp
->rw_holders
, rwp
->rw_waiters
);
275 /* Signal the writer to release, allows readers to acquire */
276 spin_lock(&rwp
->rw_lock
);
277 rwp
->rw_release
= SPLAT_RWLOCK_RELEASE_WR
;
278 wake_up_interruptible(&rwp
->rw_waitq
);
279 spin_unlock(&rwp
->rw_lock
);
281 /* Wait for 'count-1' readers to hold the lock */
282 while (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_holders
< count
- 1)) {
283 wake_up_interruptible(&rwp
->rw_waitq
);
287 /* Verify there are 'count-1' readers */
288 if (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_holders
!= count
- 1)) {
289 splat_vprint(file
, SPLAT_RWLOCK_TEST1_NAME
, "Only %d holders "
290 "expected for rwlock (%d holding/%d waiting)\n",
291 count
- 1, rwp
->rw_holders
, rwp
->rw_waiters
);
295 /* Release 'count-1' readers */
296 spin_lock(&rwp
->rw_lock
);
297 rwp
->rw_release
= SPLAT_RWLOCK_RELEASE_RD
;
298 wake_up_interruptible(&rwp
->rw_waitq
);
299 spin_unlock(&rwp
->rw_lock
);
301 /* Wait for the test to complete */
302 while (splat_locked_test(&rwp
->rw_lock
,
303 rwp
->rw_holders
>0 || rwp
->rw_waiters
>0))
306 rw_destroy(&(rwp
->rw_rwlock
));
314 splat_rwlock_test2_func(void *arg
)
316 rw_priv_t
*rwp
= (rw_priv_t
*)arg
;
318 ASSERT(rwp
->rw_magic
== SPLAT_RWLOCK_TEST_MAGIC
);
320 /* Read the value before sleeping and write it after we wake up to
321 * maximize the chance of a race if rwlocks are not working properly */
322 rw_enter(&rwp
->rw_rwlock
, RW_WRITER
);
324 set_current_state(TASK_INTERRUPTIBLE
);
325 schedule_timeout(HZ
/ 100); /* 1/100 of a second */
326 VERIFY(rwp
->rw_rc
== rc
);
328 rw_exit(&rwp
->rw_rwlock
);
332 splat_rwlock_test2(struct file
*file
, void *arg
)
336 int i
, rc
= 0, tq_count
= 256;
338 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
342 splat_init_rw_priv(rwp
, file
);
344 /* Create several threads allowing tasks to race with each other */
345 tq
= taskq_create(SPLAT_RWLOCK_TEST_TASKQ
, num_online_cpus(),
346 defclsyspri
, 50, INT_MAX
, TASKQ_PREPOPULATE
);
353 * Schedule N work items to the work queue each of which enters the
354 * writer rwlock, sleeps briefly, then exits the writer rwlock. On a
355 * multiprocessor box these work items will be handled by all available
356 * CPUs. The task function checks to ensure the tracked shared variable
357 * is always only incremented by one. Additionally, the rwlock itself
358 * is instrumented such that if any two processors are in the
359 * critical region at the same time the system will panic. If the
360 * rwlock is implemented right this will never happy, that's a pass.
362 for (i
= 0; i
< tq_count
; i
++) {
363 if (taskq_dispatch(tq
, splat_rwlock_test2_func
, rwp
,
364 TQ_SLEEP
) == TASKQID_INVALID
) {
365 splat_vprint(file
, SPLAT_RWLOCK_TEST2_NAME
,
366 "Failed to queue task %d\n", i
);
373 if (rwp
->rw_rc
== tq_count
) {
374 splat_vprint(file
, SPLAT_RWLOCK_TEST2_NAME
, "%d racing threads "
375 "correctly entered/exited the rwlock %d times\n",
376 num_online_cpus(), rwp
->rw_rc
);
378 splat_vprint(file
, SPLAT_RWLOCK_TEST2_NAME
, "%d racing threads "
379 "only processed %d/%d w rwlock work items\n",
380 num_online_cpus(), rwp
->rw_rc
, tq_count
);
385 rw_destroy(&(rwp
->rw_rwlock
));
/*
 * Exercise one RW_*_HELD() predicate in all four states (reader held,
 * reader released, writer held, writer released) and set rc to -EINVAL if
 * any observed result differs from the expected rex1/rex2/wex1/wex2 values.
 * Wrapped in do { } while(0) so it expands safely as a single statement.
 */
#define splat_rwlock_test3_helper(rwp,rex1,rex2,wex1,wex2,held_func,rc)	\
do {									\
	int result, _rc1_, _rc2_, _rc3_, _rc4_;				\
									\
	rw_enter(&(rwp)->rw_rwlock, RW_READER);				\
	_rc1_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex1);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
		     " returned %d (expected %d) when RW_READER\n",	\
		     _rc1_ ? "Fail " : "", result, rex1);		\
	rw_exit(&(rwp)->rw_rwlock);					\
	_rc2_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex2);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
		     " returned %d (expected %d) when !RW_READER\n",	\
		     _rc2_ ? "Fail " : "", result, rex2);		\
									\
	rw_enter(&(rwp)->rw_rwlock, RW_WRITER);				\
	_rc3_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex1);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
		     " returned %d (expected %d) when RW_WRITER\n",	\
		     _rc3_ ? "Fail " : "", result, wex1);		\
	rw_exit(&(rwp)->rw_rwlock);					\
	_rc4_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex2);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
		     " returned %d (expected %d) when !RW_WRITER\n",	\
		     _rc4_ ? "Fail " : "", result, wex2);		\
									\
	rc = ((_rc1_ || _rc2_ || _rc3_ || _rc4_) ? -EINVAL : 0);	\
} while (0)
422 splat_rwlock_test3(struct file
*file
, void *arg
)
427 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
431 splat_init_rw_priv(rwp
, file
);
433 splat_rwlock_test3_helper(rwp
, 1, 0, 1, 0, RW_LOCK_HELD
, rc1
);
434 splat_rwlock_test3_helper(rwp
, 1, 0, 0, 0, RW_READ_HELD
, rc2
);
435 splat_rwlock_test3_helper(rwp
, 0, 0, 1, 0, RW_WRITE_HELD
, rc3
);
437 rw_destroy(&rwp
->rw_rwlock
);
440 return ((rc1
|| rc2
|| rc3
) ? -EINVAL
: 0);
444 splat_rwlock_test4_func(void *arg
)
446 rw_priv_t
*rwp
= (rw_priv_t
*)arg
;
447 ASSERT(rwp
->rw_magic
== SPLAT_RWLOCK_TEST_MAGIC
);
449 if (rw_tryenter(&rwp
->rw_rwlock
, rwp
->rw_type
)) {
451 rw_exit(&rwp
->rw_rwlock
);
458 splat_rwlock_test4_name(krw_t type
)
461 case RW_NONE
: return "RW_NONE";
462 case RW_WRITER
: return "RW_WRITER";
463 case RW_READER
: return "RW_READER";
470 splat_rwlock_test4_type(taskq_t
*tq
, rw_priv_t
*rwp
, int expected_rc
,
471 krw_t holder_type
, krw_t try_type
)
475 /* Schedule a task function which will try and acquire the rwlock
476 * using type try_type while the rwlock is being held as holder_type.
477 * The result must match expected_rc for the test to pass */
478 rwp
->rw_rc
= -EINVAL
;
479 rwp
->rw_type
= try_type
;
481 if (holder_type
== RW_WRITER
|| holder_type
== RW_READER
)
482 rw_enter(&rwp
->rw_rwlock
, holder_type
);
484 id
= taskq_dispatch(tq
, splat_rwlock_test4_func
, rwp
, TQ_SLEEP
);
485 if (id
== TASKQID_INVALID
) {
486 splat_vprint(rwp
->rw_file
, SPLAT_RWLOCK_TEST4_NAME
, "%s",
487 "taskq_dispatch() failed\n");
492 taskq_wait_id(tq
, id
);
494 if (rwp
->rw_rc
!= expected_rc
)
497 splat_vprint(rwp
->rw_file
, SPLAT_RWLOCK_TEST4_NAME
,
498 "%srw_tryenter(%s) returned %d (expected %d) when %s\n",
499 rc
? "Fail " : "", splat_rwlock_test4_name(try_type
),
500 rwp
->rw_rc
, expected_rc
,
501 splat_rwlock_test4_name(holder_type
));
503 if (holder_type
== RW_WRITER
|| holder_type
== RW_READER
)
504 rw_exit(&rwp
->rw_rwlock
);
510 splat_rwlock_test4(struct file
*file
, void *arg
)
514 int rc
= 0, rc1
, rc2
, rc3
, rc4
, rc5
, rc6
;
516 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
520 tq
= taskq_create(SPLAT_RWLOCK_TEST_TASKQ
, 1, defclsyspri
,
521 50, INT_MAX
, TASKQ_PREPOPULATE
);
527 splat_init_rw_priv(rwp
, file
);
530 * Validate all combinations of rw_tryenter() contention.
532 * The concurrent reader test is modified for PREEMPT_RT_FULL
533 * kernels which do not permit concurrent read locks to be taken
534 * from different threads. The same thread is allowed to take
535 * the read lock multiple times.
537 rc1
= splat_rwlock_test4_type(tq
, rwp
, -EBUSY
, RW_WRITER
, RW_WRITER
);
538 rc2
= splat_rwlock_test4_type(tq
, rwp
, -EBUSY
, RW_WRITER
, RW_READER
);
539 rc3
= splat_rwlock_test4_type(tq
, rwp
, -EBUSY
, RW_READER
, RW_WRITER
);
540 #if defined(CONFIG_PREEMPT_RT_FULL)
541 rc4
= splat_rwlock_test4_type(tq
, rwp
, -EBUSY
, RW_READER
, RW_READER
);
543 rc4
= splat_rwlock_test4_type(tq
, rwp
, 0, RW_READER
, RW_READER
);
545 rc5
= splat_rwlock_test4_type(tq
, rwp
, 0, RW_NONE
, RW_WRITER
);
546 rc6
= splat_rwlock_test4_type(tq
, rwp
, 0, RW_NONE
, RW_READER
);
548 if (rc1
|| rc2
|| rc3
|| rc4
|| rc5
|| rc6
)
553 rw_destroy(&(rwp
->rw_rwlock
));
560 splat_rwlock_test5(struct file
*file
, void *arg
)
565 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
569 splat_init_rw_priv(rwp
, file
);
571 rw_enter(&rwp
->rw_rwlock
, RW_WRITER
);
572 if (!RW_WRITE_HELD(&rwp
->rw_rwlock
)) {
573 splat_vprint(file
, SPLAT_RWLOCK_TEST5_NAME
,
574 "rwlock should be write lock: %d\n",
575 RW_WRITE_HELD(&rwp
->rw_rwlock
));
579 rw_downgrade(&rwp
->rw_rwlock
);
580 if (!RW_READ_HELD(&rwp
->rw_rwlock
)) {
581 splat_vprint(file
, SPLAT_RWLOCK_TEST5_NAME
,
582 "rwlock should be read lock: %d\n",
583 RW_READ_HELD(&rwp
->rw_rwlock
));
588 splat_vprint(file
, SPLAT_RWLOCK_TEST5_NAME
, "%s",
589 "rwlock properly downgraded\n");
591 rw_exit(&rwp
->rw_rwlock
);
592 rw_destroy(&rwp
->rw_rwlock
);
599 splat_rwlock_test6(struct file
*file
, void *arg
)
604 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
608 splat_init_rw_priv(rwp
, file
);
610 rw_enter(&rwp
->rw_rwlock
, RW_READER
);
611 if (RWSEM_COUNT(SEM(&rwp
->rw_rwlock
)) !=
612 SPL_RWSEM_SINGLE_READER_VALUE
) {
613 splat_vprint(file
, SPLAT_RWLOCK_TEST6_NAME
,
614 "We assumed single reader rwsem->count "
615 "should be %ld, but is %ld\n",
616 (long int)SPL_RWSEM_SINGLE_READER_VALUE
,
617 (long int)RWSEM_COUNT(SEM(&rwp
->rw_rwlock
)));
621 rw_exit(&rwp
->rw_rwlock
);
623 rw_enter(&rwp
->rw_rwlock
, RW_WRITER
);
624 if (RWSEM_COUNT(SEM(&rwp
->rw_rwlock
)) !=
625 SPL_RWSEM_SINGLE_WRITER_VALUE
) {
626 splat_vprint(file
, SPLAT_RWLOCK_TEST6_NAME
,
627 "We assumed single writer rwsem->count "
628 "should be %ld, but is %ld\n",
629 (long int)SPL_RWSEM_SINGLE_WRITER_VALUE
,
630 (long int)RWSEM_COUNT(SEM(&rwp
->rw_rwlock
)));
635 splat_vprint(file
, SPLAT_RWLOCK_TEST6_NAME
, "%s",
636 "rwsem->count same as we assumed\n");
638 rw_exit(&rwp
->rw_rwlock
);
639 rw_destroy(&rwp
->rw_rwlock
);
646 splat_rwlock_test7(struct file
*file
, void *arg
)
651 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
655 splat_init_rw_priv(rwp
, file
);
657 rw_enter(&rwp
->rw_rwlock
, RW_READER
);
658 if (!RW_READ_HELD(&rwp
->rw_rwlock
)) {
659 splat_vprint(file
, SPLAT_RWLOCK_TEST7_NAME
,
660 "rwlock should be read lock: %d\n",
661 RW_READ_HELD(&rwp
->rw_rwlock
));
666 /* With one reader upgrade should never fail. */
667 rc
= rw_tryupgrade(&rwp
->rw_rwlock
);
669 splat_vprint(file
, SPLAT_RWLOCK_TEST7_NAME
,
670 "rwlock failed upgrade from reader: %d\n",
671 RW_READ_HELD(&rwp
->rw_rwlock
));
676 if (RW_READ_HELD(&rwp
->rw_rwlock
) || !RW_WRITE_HELD(&rwp
->rw_rwlock
)) {
677 splat_vprint(file
, SPLAT_RWLOCK_TEST7_NAME
, "rwlock should "
678 "have 0 (not %d) reader and 1 (not %d) writer\n",
679 RW_READ_HELD(&rwp
->rw_rwlock
),
680 RW_WRITE_HELD(&rwp
->rw_rwlock
));
685 splat_vprint(file
, SPLAT_RWLOCK_TEST7_NAME
, "%s",
686 "rwlock properly upgraded\n");
688 rw_exit(&rwp
->rw_rwlock
);
689 rw_destroy(&rwp
->rw_rwlock
);
696 splat_rwlock_init(void)
698 splat_subsystem_t
*sub
;
700 sub
= kmalloc(sizeof(*sub
), GFP_KERNEL
);
704 memset(sub
, 0, sizeof(*sub
));
705 strncpy(sub
->desc
.name
, SPLAT_RWLOCK_NAME
, SPLAT_NAME_SIZE
);
706 strncpy(sub
->desc
.desc
, SPLAT_RWLOCK_DESC
, SPLAT_DESC_SIZE
);
707 INIT_LIST_HEAD(&sub
->subsystem_list
);
708 INIT_LIST_HEAD(&sub
->test_list
);
709 spin_lock_init(&sub
->test_lock
);
710 sub
->desc
.id
= SPLAT_SUBSYSTEM_RWLOCK
;
712 splat_test_init(sub
, SPLAT_RWLOCK_TEST1_NAME
, SPLAT_RWLOCK_TEST1_DESC
,
713 SPLAT_RWLOCK_TEST1_ID
, splat_rwlock_test1
);
714 splat_test_init(sub
, SPLAT_RWLOCK_TEST2_NAME
, SPLAT_RWLOCK_TEST2_DESC
,
715 SPLAT_RWLOCK_TEST2_ID
, splat_rwlock_test2
);
716 splat_test_init(sub
, SPLAT_RWLOCK_TEST3_NAME
, SPLAT_RWLOCK_TEST3_DESC
,
717 SPLAT_RWLOCK_TEST3_ID
, splat_rwlock_test3
);
718 splat_test_init(sub
, SPLAT_RWLOCK_TEST4_NAME
, SPLAT_RWLOCK_TEST4_DESC
,
719 SPLAT_RWLOCK_TEST4_ID
, splat_rwlock_test4
);
720 splat_test_init(sub
, SPLAT_RWLOCK_TEST5_NAME
, SPLAT_RWLOCK_TEST5_DESC
,
721 SPLAT_RWLOCK_TEST5_ID
, splat_rwlock_test5
);
722 splat_test_init(sub
, SPLAT_RWLOCK_TEST6_NAME
, SPLAT_RWLOCK_TEST6_DESC
,
723 SPLAT_RWLOCK_TEST6_ID
, splat_rwlock_test6
);
724 splat_test_init(sub
, SPLAT_RWLOCK_TEST7_NAME
, SPLAT_RWLOCK_TEST7_DESC
,
725 SPLAT_RWLOCK_TEST7_ID
, splat_rwlock_test7
);
731 splat_rwlock_fini(splat_subsystem_t
*sub
)
734 splat_test_fini(sub
, SPLAT_RWLOCK_TEST7_ID
);
735 splat_test_fini(sub
, SPLAT_RWLOCK_TEST6_ID
);
736 splat_test_fini(sub
, SPLAT_RWLOCK_TEST5_ID
);
737 splat_test_fini(sub
, SPLAT_RWLOCK_TEST4_ID
);
738 splat_test_fini(sub
, SPLAT_RWLOCK_TEST3_ID
);
739 splat_test_fini(sub
, SPLAT_RWLOCK_TEST2_ID
);
740 splat_test_fini(sub
, SPLAT_RWLOCK_TEST1_ID
);
745 splat_rwlock_id(void) {
746 return SPLAT_SUBSYSTEM_RWLOCK
;