1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Read/Writer Lock Tests.
25 \*****************************************************************************/
27 #include <sys/rwlock.h>
28 #include <sys/taskq.h>
29 #include <sys/random.h>
30 #include "splat-internal.h"
32 #define SPLAT_RWLOCK_NAME "rwlock"
33 #define SPLAT_RWLOCK_DESC "Kernel RW Lock Tests"
35 #define SPLAT_RWLOCK_TEST1_ID 0x0701
36 #define SPLAT_RWLOCK_TEST1_NAME "N-rd/1-wr"
37 #define SPLAT_RWLOCK_TEST1_DESC "Multiple readers one writer"
39 #define SPLAT_RWLOCK_TEST2_ID 0x0702
40 #define SPLAT_RWLOCK_TEST2_NAME "0-rd/N-wr"
41 #define SPLAT_RWLOCK_TEST2_DESC "Multiple writers"
43 #define SPLAT_RWLOCK_TEST3_ID 0x0703
44 #define SPLAT_RWLOCK_TEST3_NAME "held"
45 #define SPLAT_RWLOCK_TEST3_DESC "RW_{LOCK|READ|WRITE}_HELD"
47 #define SPLAT_RWLOCK_TEST4_ID 0x0704
48 #define SPLAT_RWLOCK_TEST4_NAME "tryenter"
49 #define SPLAT_RWLOCK_TEST4_DESC "Tryenter"
51 #define SPLAT_RWLOCK_TEST5_ID 0x0705
52 #define SPLAT_RWLOCK_TEST5_NAME "rw_downgrade"
53 #define SPLAT_RWLOCK_TEST5_DESC "Write downgrade"
55 #define SPLAT_RWLOCK_TEST6_ID 0x0706
56 #define SPLAT_RWLOCK_TEST6_NAME "rw_tryupgrade"
57 #define SPLAT_RWLOCK_TEST6_DESC "Read upgrade"
59 #define SPLAT_RWLOCK_TEST_MAGIC 0x115599DDUL
60 #define SPLAT_RWLOCK_TEST_NAME "rwlock_test"
61 #define SPLAT_RWLOCK_TEST_TASKQ "rwlock_taskq"
62 #define SPLAT_RWLOCK_TEST_COUNT 8
64 #define SPLAT_RWLOCK_RELEASE_INIT 0
65 #define SPLAT_RWLOCK_RELEASE_WR 1
66 #define SPLAT_RWLOCK_RELEASE_RD 2
68 typedef struct rw_priv
{
69 unsigned long rw_magic
;
73 wait_queue_head_t rw_waitq
;
82 typedef struct rw_thr
{
88 void splat_init_rw_priv(rw_priv_t
*rwp
, struct file
*file
)
90 rwp
->rw_magic
= SPLAT_RWLOCK_TEST_MAGIC
;
92 rw_init(&rwp
->rw_rwlock
, SPLAT_RWLOCK_TEST_NAME
, RW_DEFAULT
, NULL
);
93 spin_lock_init(&rwp
->rw_lock
);
94 init_waitqueue_head(&rwp
->rw_waitq
);
95 rwp
->rw_completed
= 0;
98 rwp
->rw_release
= SPLAT_RWLOCK_RELEASE_INIT
;
104 splat_rwlock_wr_thr(void *arg
)
106 rw_thr_t
*rwt
= (rw_thr_t
*)arg
;
107 rw_priv_t
*rwp
= rwt
->rwt_rwp
;
111 ASSERT(rwp
->rw_magic
== SPLAT_RWLOCK_TEST_MAGIC
);
112 snprintf(name
, sizeof(name
), "rwlock_wr_thr%d", rwt
->rwt_id
);
114 get_random_bytes((void *)&rnd
, 1);
115 msleep((unsigned int)rnd
);
117 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
118 "%s trying to acquire rwlock (%d holding/%d waiting)\n",
119 name
, rwp
->rw_holders
, rwp
->rw_waiters
);
120 spin_lock(&rwp
->rw_lock
);
122 spin_unlock(&rwp
->rw_lock
);
123 rw_enter(&rwp
->rw_rwlock
, RW_WRITER
);
125 spin_lock(&rwp
->rw_lock
);
128 spin_unlock(&rwp
->rw_lock
);
129 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
130 "%s acquired rwlock (%d holding/%d waiting)\n",
131 name
, rwp
->rw_holders
, rwp
->rw_waiters
);
133 /* Wait for control thread to signal we can release the write lock */
134 wait_event_interruptible(rwp
->rw_waitq
, splat_locked_test(&rwp
->rw_lock
,
135 rwp
->rw_release
== SPLAT_RWLOCK_RELEASE_WR
));
137 spin_lock(&rwp
->rw_lock
);
140 spin_unlock(&rwp
->rw_lock
);
141 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
142 "%s dropped rwlock (%d holding/%d waiting)\n",
143 name
, rwp
->rw_holders
, rwp
->rw_waiters
);
145 rw_exit(&rwp
->rw_rwlock
);
151 splat_rwlock_rd_thr(void *arg
)
153 rw_thr_t
*rwt
= (rw_thr_t
*)arg
;
154 rw_priv_t
*rwp
= rwt
->rwt_rwp
;
158 ASSERT(rwp
->rw_magic
== SPLAT_RWLOCK_TEST_MAGIC
);
159 snprintf(name
, sizeof(name
), "rwlock_rd_thr%d", rwt
->rwt_id
);
161 get_random_bytes((void *)&rnd
, 1);
162 msleep((unsigned int)rnd
);
164 /* Don't try and take the semaphore until after someone has it */
165 wait_event_interruptible(rwp
->rw_waitq
, splat_locked_test(&rwp
->rw_lock
,
166 rwp
->rw_holders
> 0));
168 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
169 "%s trying to acquire rwlock (%d holding/%d waiting)\n",
170 name
, rwp
->rw_holders
, rwp
->rw_waiters
);
171 spin_lock(&rwp
->rw_lock
);
173 spin_unlock(&rwp
->rw_lock
);
174 rw_enter(&rwp
->rw_rwlock
, RW_READER
);
176 spin_lock(&rwp
->rw_lock
);
179 spin_unlock(&rwp
->rw_lock
);
180 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
181 "%s acquired rwlock (%d holding/%d waiting)\n",
182 name
, rwp
->rw_holders
, rwp
->rw_waiters
);
184 /* Wait for control thread to signal we can release the read lock */
185 wait_event_interruptible(rwp
->rw_waitq
, splat_locked_test(&rwp
->rw_lock
,
186 rwp
->rw_release
== SPLAT_RWLOCK_RELEASE_RD
));
188 spin_lock(&rwp
->rw_lock
);
191 spin_unlock(&rwp
->rw_lock
);
192 splat_vprint(rwp
->rw_file
, rwt
->rwt_name
,
193 "%s dropped rwlock (%d holding/%d waiting)\n",
194 name
, rwp
->rw_holders
, rwp
->rw_waiters
);
196 rw_exit(&rwp
->rw_rwlock
);
202 splat_rwlock_test1(struct file
*file
, void *arg
)
204 int i
, count
= 0, rc
= 0;
205 long pids
[SPLAT_RWLOCK_TEST_COUNT
];
206 rw_thr_t rwt
[SPLAT_RWLOCK_TEST_COUNT
];
209 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
213 splat_init_rw_priv(rwp
, file
);
215 /* Create some threads, the exact number isn't important just as
216 * long as we know how many we managed to create and should expect. */
220 for (i
= 0; i
< SPLAT_RWLOCK_TEST_COUNT
; i
++) {
221 rwt
[i
].rwt_rwp
= rwp
;
223 rwt
[i
].rwt_name
= SPLAT_RWLOCK_TEST1_NAME
;
225 /* The first thread will be the writer */
227 pids
[i
] = kernel_thread(splat_rwlock_wr_thr
, &rwt
[i
], 0);
229 pids
[i
] = kernel_thread(splat_rwlock_rd_thr
, &rwt
[i
], 0);
235 /* Wait for the writer */
236 while (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_holders
== 0)) {
237 wake_up_interruptible(&rwp
->rw_waitq
);
241 /* Wait for 'count-1' readers */
242 while (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_waiters
< count
- 1)) {
243 wake_up_interruptible(&rwp
->rw_waitq
);
247 /* Verify there is only one lock holder */
248 if (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_holders
) != 1) {
249 splat_vprint(file
, SPLAT_RWLOCK_TEST1_NAME
, "Only 1 holder "
250 "expected for rwlock (%d holding/%d waiting)\n",
251 rwp
->rw_holders
, rwp
->rw_waiters
);
255 /* Verify 'count-1' readers */
256 if (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_waiters
!= count
- 1)) {
257 splat_vprint(file
, SPLAT_RWLOCK_TEST1_NAME
, "Only %d waiters "
258 "expected for rwlock (%d holding/%d waiting)\n",
259 count
- 1, rwp
->rw_holders
, rwp
->rw_waiters
);
263 /* Signal the writer to release, allows readers to acquire */
264 spin_lock(&rwp
->rw_lock
);
265 rwp
->rw_release
= SPLAT_RWLOCK_RELEASE_WR
;
266 wake_up_interruptible(&rwp
->rw_waitq
);
267 spin_unlock(&rwp
->rw_lock
);
269 /* Wait for 'count-1' readers to hold the lock */
270 while (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_holders
< count
- 1)) {
271 wake_up_interruptible(&rwp
->rw_waitq
);
275 /* Verify there are 'count-1' readers */
276 if (splat_locked_test(&rwp
->rw_lock
, rwp
->rw_holders
!= count
- 1)) {
277 splat_vprint(file
, SPLAT_RWLOCK_TEST1_NAME
, "Only %d holders "
278 "expected for rwlock (%d holding/%d waiting)\n",
279 count
- 1, rwp
->rw_holders
, rwp
->rw_waiters
);
283 /* Release 'count-1' readers */
284 spin_lock(&rwp
->rw_lock
);
285 rwp
->rw_release
= SPLAT_RWLOCK_RELEASE_RD
;
286 wake_up_interruptible(&rwp
->rw_waitq
);
287 spin_unlock(&rwp
->rw_lock
);
289 /* Wait for the test to complete */
290 while (splat_locked_test(&rwp
->rw_lock
,
291 rwp
->rw_holders
>0 || rwp
->rw_waiters
>0))
294 rw_destroy(&(rwp
->rw_rwlock
));
301 splat_rwlock_test2_func(void *arg
)
303 rw_priv_t
*rwp
= (rw_priv_t
*)arg
;
305 ASSERT(rwp
->rw_magic
== SPLAT_RWLOCK_TEST_MAGIC
);
307 /* Read the value before sleeping and write it after we wake up to
308 * maximize the chance of a race if rwlocks are not working properly */
309 rw_enter(&rwp
->rw_rwlock
, RW_WRITER
);
311 set_current_state(TASK_INTERRUPTIBLE
);
312 schedule_timeout(HZ
/ 100); /* 1/100 of a second */
313 VERIFY(rwp
->rw_rc
== rc
);
315 rw_exit(&rwp
->rw_rwlock
);
319 splat_rwlock_test2(struct file
*file
, void *arg
)
323 int i
, rc
= 0, tq_count
= 256;
325 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
329 splat_init_rw_priv(rwp
, file
);
331 /* Create several threads allowing tasks to race with each other */
332 tq
= taskq_create(SPLAT_RWLOCK_TEST_TASKQ
, num_online_cpus(),
333 maxclsyspri
, 50, INT_MAX
, TASKQ_PREPOPULATE
);
340 * Schedule N work items to the work queue each of which enters the
341 * writer rwlock, sleeps briefly, then exits the writer rwlock. On a
342 * multiprocessor box these work items will be handled by all available
343 * CPUs. The task function checks to ensure the tracked shared variable
344 * is always only incremented by one. Additionally, the rwlock itself
345 * is instrumented such that if any two processors are in the
346 * critical region at the same time the system will panic. If the
347 * rwlock is implemented right this will never happy, that's a pass.
349 for (i
= 0; i
< tq_count
; i
++) {
350 if (!taskq_dispatch(tq
,splat_rwlock_test2_func
,rwp
,TQ_SLEEP
)) {
351 splat_vprint(file
, SPLAT_RWLOCK_TEST2_NAME
,
352 "Failed to queue task %d\n", i
);
359 if (rwp
->rw_rc
== tq_count
) {
360 splat_vprint(file
, SPLAT_RWLOCK_TEST2_NAME
, "%d racing threads "
361 "correctly entered/exited the rwlock %d times\n",
362 num_online_cpus(), rwp
->rw_rc
);
364 splat_vprint(file
, SPLAT_RWLOCK_TEST2_NAME
, "%d racing threads "
365 "only processed %d/%d w rwlock work items\n",
366 num_online_cpus(), rwp
->rw_rc
, tq_count
);
371 rw_destroy(&(rwp
->rw_rwlock
));
/*
 * Exercise one RW_*_HELD predicate in all four lock states (reader held,
 * not held, writer held, not held) and set 'rc' to 0 if every result
 * matched its expectation (rex1/rex2/wex1/wex2), -EINVAL otherwise.
 * Wrapped in do { } while (0) so it behaves as a single statement.
 */
#define splat_rwlock_test3_helper(rwp,rex1,rex2,wex1,wex2,held_func,rc)	\
do {									\
	int result, _rc1_, _rc2_, _rc3_, _rc4_;				\
									\
	rw_enter(&(rwp)->rw_rwlock, RW_READER);				\
	_rc1_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex1);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
		     " returned %d (expected %d) when RW_READER\n",	\
		     _rc1_ ? "Fail " : "", result, rex1);		\
	rw_exit(&(rwp)->rw_rwlock);					\
	_rc2_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex2);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
		     " returned %d (expected %d) when !RW_READER\n",	\
		     _rc2_ ? "Fail " : "", result, rex2);		\
									\
	rw_enter(&(rwp)->rw_rwlock, RW_WRITER);				\
	_rc3_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex1);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
		     " returned %d (expected %d) when RW_WRITER\n",	\
		     _rc3_ ? "Fail " : "", result, wex1);		\
	rw_exit(&(rwp)->rw_rwlock);					\
	_rc4_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex2);	\
	splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func	\
		     " returned %d (expected %d) when !RW_WRITER\n",	\
		     _rc4_ ? "Fail " : "", result, wex2);		\
									\
	rc = ((_rc1_ || _rc2_ || _rc3_ || _rc4_) ? -EINVAL : 0);	\
} while (0)
408 splat_rwlock_test3(struct file
*file
, void *arg
)
413 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
417 splat_init_rw_priv(rwp
, file
);
419 splat_rwlock_test3_helper(rwp
, 1, 0, 1, 0, RW_LOCK_HELD
, rc1
);
420 splat_rwlock_test3_helper(rwp
, 1, 0, 0, 0, RW_READ_HELD
, rc2
);
421 splat_rwlock_test3_helper(rwp
, 0, 0, 1, 0, RW_WRITE_HELD
, rc3
);
423 rw_destroy(&rwp
->rw_rwlock
);
426 return ((rc1
|| rc2
|| rc3
) ? -EINVAL
: 0);
430 splat_rwlock_test4_func(void *arg
)
432 rw_priv_t
*rwp
= (rw_priv_t
*)arg
;
433 ASSERT(rwp
->rw_magic
== SPLAT_RWLOCK_TEST_MAGIC
);
435 if (rw_tryenter(&rwp
->rw_rwlock
, rwp
->rw_type
)) {
437 rw_exit(&rwp
->rw_rwlock
);
444 splat_rwlock_test4_name(krw_t type
)
447 case RW_NONE
: return "RW_NONE";
448 case RW_WRITER
: return "RW_WRITER";
449 case RW_READER
: return "RW_READER";
456 splat_rwlock_test4_type(taskq_t
*tq
, rw_priv_t
*rwp
, int expected_rc
,
457 krw_t holder_type
, krw_t try_type
)
461 /* Schedule a task function which will try and acquire the rwlock
462 * using type try_type while the rwlock is being held as holder_type.
463 * The result must match expected_rc for the test to pass */
464 rwp
->rw_rc
= -EINVAL
;
465 rwp
->rw_type
= try_type
;
467 if (holder_type
== RW_WRITER
|| holder_type
== RW_READER
)
468 rw_enter(&rwp
->rw_rwlock
, holder_type
);
470 id
= taskq_dispatch(tq
, splat_rwlock_test4_func
, rwp
, TQ_SLEEP
);
472 splat_vprint(rwp
->rw_file
, SPLAT_RWLOCK_TEST4_NAME
, "%s",
473 "taskq_dispatch() failed\n");
478 taskq_wait_id(tq
, id
);
480 if (rwp
->rw_rc
!= expected_rc
)
483 splat_vprint(rwp
->rw_file
, SPLAT_RWLOCK_TEST4_NAME
,
484 "%srw_tryenter(%s) returned %d (expected %d) when %s\n",
485 rc
? "Fail " : "", splat_rwlock_test4_name(try_type
),
486 rwp
->rw_rc
, expected_rc
,
487 splat_rwlock_test4_name(holder_type
));
489 if (holder_type
== RW_WRITER
|| holder_type
== RW_READER
)
490 rw_exit(&rwp
->rw_rwlock
);
496 splat_rwlock_test4(struct file
*file
, void *arg
)
500 int rc
= 0, rc1
, rc2
, rc3
, rc4
, rc5
, rc6
;
502 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
506 tq
= taskq_create(SPLAT_RWLOCK_TEST_TASKQ
, 1, maxclsyspri
,
507 50, INT_MAX
, TASKQ_PREPOPULATE
);
513 splat_init_rw_priv(rwp
, file
);
515 /* Validate all combinations of rw_tryenter() contention */
516 rc1
= splat_rwlock_test4_type(tq
, rwp
, -EBUSY
, RW_WRITER
, RW_WRITER
);
517 rc2
= splat_rwlock_test4_type(tq
, rwp
, -EBUSY
, RW_WRITER
, RW_READER
);
518 rc3
= splat_rwlock_test4_type(tq
, rwp
, -EBUSY
, RW_READER
, RW_WRITER
);
519 rc4
= splat_rwlock_test4_type(tq
, rwp
, 0, RW_READER
, RW_READER
);
520 rc5
= splat_rwlock_test4_type(tq
, rwp
, 0, RW_NONE
, RW_WRITER
);
521 rc6
= splat_rwlock_test4_type(tq
, rwp
, 0, RW_NONE
, RW_READER
);
523 if (rc1
|| rc2
|| rc3
|| rc4
|| rc5
|| rc6
)
528 rw_destroy(&(rwp
->rw_rwlock
));
535 splat_rwlock_test5(struct file
*file
, void *arg
)
540 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
544 splat_init_rw_priv(rwp
, file
);
546 rw_enter(&rwp
->rw_rwlock
, RW_WRITER
);
547 if (!RW_WRITE_HELD(&rwp
->rw_rwlock
)) {
548 splat_vprint(file
, SPLAT_RWLOCK_TEST5_NAME
,
549 "rwlock should be write lock: %d\n",
550 RW_WRITE_HELD(&rwp
->rw_rwlock
));
554 rw_downgrade(&rwp
->rw_rwlock
);
555 if (!RW_READ_HELD(&rwp
->rw_rwlock
)) {
556 splat_vprint(file
, SPLAT_RWLOCK_TEST5_NAME
,
557 "rwlock should be read lock: %d\n",
558 RW_READ_HELD(&rwp
->rw_rwlock
));
563 splat_vprint(file
, SPLAT_RWLOCK_TEST5_NAME
, "%s",
564 "rwlock properly downgraded\n");
566 rw_exit(&rwp
->rw_rwlock
);
567 rw_destroy(&rwp
->rw_rwlock
);
574 splat_rwlock_test6(struct file
*file
, void *arg
)
579 rwp
= (rw_priv_t
*)kmalloc(sizeof(*rwp
), GFP_KERNEL
);
583 splat_init_rw_priv(rwp
, file
);
585 rw_enter(&rwp
->rw_rwlock
, RW_READER
);
586 if (!RW_READ_HELD(&rwp
->rw_rwlock
)) {
587 splat_vprint(file
, SPLAT_RWLOCK_TEST6_NAME
,
588 "rwlock should be read lock: %d\n",
589 RW_READ_HELD(&rwp
->rw_rwlock
));
594 #if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
595 /* With one reader upgrade should never fail. */
596 rc
= rw_tryupgrade(&rwp
->rw_rwlock
);
598 splat_vprint(file
, SPLAT_RWLOCK_TEST6_NAME
,
599 "rwlock failed upgrade from reader: %d\n",
600 RW_READ_HELD(&rwp
->rw_rwlock
));
605 if (RW_READ_HELD(&rwp
->rw_rwlock
) || !RW_WRITE_HELD(&rwp
->rw_rwlock
)) {
606 splat_vprint(file
, SPLAT_RWLOCK_TEST6_NAME
, "rwlock should "
607 "have 0 (not %d) reader and 1 (not %d) writer\n",
608 RW_READ_HELD(&rwp
->rw_rwlock
),
609 RW_WRITE_HELD(&rwp
->rw_rwlock
));
614 splat_vprint(file
, SPLAT_RWLOCK_TEST6_NAME
, "%s",
615 "rwlock properly upgraded\n");
618 splat_vprint(file
, SPLAT_RWLOCK_TEST6_NAME
, "%s",
619 "rw_tryupgrade() is disabled for this arch\n");
622 rw_exit(&rwp
->rw_rwlock
);
623 rw_destroy(&rwp
->rw_rwlock
);
630 splat_rwlock_init(void)
632 splat_subsystem_t
*sub
;
634 sub
= kmalloc(sizeof(*sub
), GFP_KERNEL
);
638 memset(sub
, 0, sizeof(*sub
));
639 strncpy(sub
->desc
.name
, SPLAT_RWLOCK_NAME
, SPLAT_NAME_SIZE
);
640 strncpy(sub
->desc
.desc
, SPLAT_RWLOCK_DESC
, SPLAT_DESC_SIZE
);
641 INIT_LIST_HEAD(&sub
->subsystem_list
);
642 INIT_LIST_HEAD(&sub
->test_list
);
643 spin_lock_init(&sub
->test_lock
);
644 sub
->desc
.id
= SPLAT_SUBSYSTEM_RWLOCK
;
646 SPLAT_TEST_INIT(sub
, SPLAT_RWLOCK_TEST1_NAME
, SPLAT_RWLOCK_TEST1_DESC
,
647 SPLAT_RWLOCK_TEST1_ID
, splat_rwlock_test1
);
648 SPLAT_TEST_INIT(sub
, SPLAT_RWLOCK_TEST2_NAME
, SPLAT_RWLOCK_TEST2_DESC
,
649 SPLAT_RWLOCK_TEST2_ID
, splat_rwlock_test2
);
650 SPLAT_TEST_INIT(sub
, SPLAT_RWLOCK_TEST3_NAME
, SPLAT_RWLOCK_TEST3_DESC
,
651 SPLAT_RWLOCK_TEST3_ID
, splat_rwlock_test3
);
652 SPLAT_TEST_INIT(sub
, SPLAT_RWLOCK_TEST4_NAME
, SPLAT_RWLOCK_TEST4_DESC
,
653 SPLAT_RWLOCK_TEST4_ID
, splat_rwlock_test4
);
654 SPLAT_TEST_INIT(sub
, SPLAT_RWLOCK_TEST5_NAME
, SPLAT_RWLOCK_TEST5_DESC
,
655 SPLAT_RWLOCK_TEST5_ID
, splat_rwlock_test5
);
656 SPLAT_TEST_INIT(sub
, SPLAT_RWLOCK_TEST6_NAME
, SPLAT_RWLOCK_TEST6_DESC
,
657 SPLAT_RWLOCK_TEST6_ID
, splat_rwlock_test6
);
663 splat_rwlock_fini(splat_subsystem_t
*sub
)
666 SPLAT_TEST_FINI(sub
, SPLAT_RWLOCK_TEST6_ID
);
667 SPLAT_TEST_FINI(sub
, SPLAT_RWLOCK_TEST5_ID
);
668 SPLAT_TEST_FINI(sub
, SPLAT_RWLOCK_TEST4_ID
);
669 SPLAT_TEST_FINI(sub
, SPLAT_RWLOCK_TEST3_ID
);
670 SPLAT_TEST_FINI(sub
, SPLAT_RWLOCK_TEST2_ID
);
671 SPLAT_TEST_FINI(sub
, SPLAT_RWLOCK_TEST1_ID
);
676 splat_rwlock_id(void) {
677 return SPLAT_SUBSYSTEM_RWLOCK
;