]> git.proxmox.com Git - mirror_spl.git/blame - modules/splat/splat-rwlock.c
Prep for for 0.3.0 tag, this is the tag which was used for all
[mirror_spl.git] / modules / splat / splat-rwlock.c
CommitLineData
#include "splat-internal.h"

/* Subsystem id and name/description for the rwlock test suite. */
#define SPLAT_SUBSYSTEM_RWLOCK 0x0700
#define SPLAT_RWLOCK_NAME "rwlock"
#define SPLAT_RWLOCK_DESC "Kernel RW Lock Tests"

/* Individual test ids, names, and descriptions. */
#define SPLAT_RWLOCK_TEST1_ID 0x0701
#define SPLAT_RWLOCK_TEST1_NAME "rwtest1"
#define SPLAT_RWLOCK_TEST1_DESC "Multiple Readers One Writer"

#define SPLAT_RWLOCK_TEST2_ID 0x0702
#define SPLAT_RWLOCK_TEST2_NAME "rwtest2"
#define SPLAT_RWLOCK_TEST2_DESC "Multiple Writers"

#define SPLAT_RWLOCK_TEST3_ID 0x0703
#define SPLAT_RWLOCK_TEST3_NAME "rwtest3"
#define SPLAT_RWLOCK_TEST3_DESC "Owner Verification"

#define SPLAT_RWLOCK_TEST4_ID 0x0704
#define SPLAT_RWLOCK_TEST4_NAME "rwtest4"
#define SPLAT_RWLOCK_TEST4_DESC "Trylock Test"

#define SPLAT_RWLOCK_TEST5_ID 0x0705
#define SPLAT_RWLOCK_TEST5_NAME "rwtest5"
#define SPLAT_RWLOCK_TEST5_DESC "Write Downgrade Test"

#define SPLAT_RWLOCK_TEST6_ID 0x0706
#define SPLAT_RWLOCK_TEST6_NAME "rwtest6"
#define SPLAT_RWLOCK_TEST6_DESC "Read Upgrade Test"

/* Sanity magic for rw_priv_t, base thread name, and thread count. */
#define SPLAT_RWLOCK_TEST_MAGIC 0x115599DDUL
#define SPLAT_RWLOCK_TEST_NAME "rwlock_test"
#define SPLAT_RWLOCK_TEST_COUNT 8

/* Values for rw_priv_t.rw_release: which phase of the test the
 * control thread has released the worker threads into. */
#define SPLAT_RWLOCK_RELEASE_INIT 0
#define SPLAT_RWLOCK_RELEASE_WRITERS 1
#define SPLAT_RWLOCK_RELEASE_READERS 2
f1ca4da6 38
/* Shared state for a single rwlock test run.  The atomic counters are
 * updated under rw_priv_lock so the control thread can sample them as
 * a consistent set from its polling loops. */
typedef struct rw_priv {
        unsigned long rw_magic;         /* SPLAT_RWLOCK_TEST_MAGIC sanity check */
        struct file *rw_file;           /* Handle for splat_vprint() output */
        krwlock_t rwl;                  /* The read/write lock under test */
        spinlock_t rw_priv_lock;        /* Guards consistent counter snapshots */
        wait_queue_head_t rw_waitq;     /* Threads block here between phases */
        atomic_t rw_completed;          /* Threads that finished their work */
        atomic_t rw_acquired;           /* Threads currently holding the rwlock */
        atomic_t rw_waiters;            /* Threads blocked waiting for the rwlock */
        atomic_t rw_release;            /* Release phase, SPLAT_RWLOCK_RELEASE_* */
} rw_priv_t;
50
/* Per-thread argument handed to each worker thread. */
typedef struct rw_thr {
        int rwt_id;                     /* Thread index, used in thread name */
        const char *rwt_name;           /* Test name used in log messages */
        rw_priv_t *rwt_rwp;             /* Shared test state */
        int rwt_rc;                     /* Per-thread result, non-zero = failure */
} rw_thr_t;
57
/* Sleep for 'delay' jiffies.  TASK_INTERRUPTIBLE is set first so the
 * schedule_timeout() sleep may be cut short by a pending signal. */
static inline void
splat_rwlock_sleep(signed long delay)
{
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(delay);
}
64
7c50328b 65#define splat_rwlock_lock_and_test(lock,test) \
f1ca4da6 66({ \
67 int ret = 0; \
68 \
69 spin_lock(lock); \
70 ret = (test) ? 1 : 0; \
71 spin_unlock(lock); \
72 ret; \
73})
74
7c50328b 75void splat_init_rw_priv(rw_priv_t *rwv, struct file *file)
f1ca4da6 76{
7c50328b 77 rwv->rw_magic = SPLAT_RWLOCK_TEST_MAGIC;
f1ca4da6 78 rwv->rw_file = file;
79 spin_lock_init(&rwv->rw_priv_lock);
80 init_waitqueue_head(&rwv->rw_waitq);
81 atomic_set(&rwv->rw_completed, 0);
82 atomic_set(&rwv->rw_acquired, 0);
83 atomic_set(&rwv->rw_waiters, 0);
7c50328b 84 atomic_set(&rwv->rw_release, SPLAT_RWLOCK_RELEASE_INIT);
85
f1ca4da6 86 /* Initialize the read/write lock */
7c50328b 87 rw_init(&rwv->rwl, SPLAT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
f1ca4da6 88}
89
/* Writer thread for test1 (also reused by test4).  After a random
 * 0-255ms startup delay it takes the rwlock as RW_WRITER and holds it
 * until the control thread sets rw_release to RELEASE_WRITERS.  All
 * counter updates are done under rw_priv_lock so the control thread
 * sees consistent snapshots.  Always returns 0; failures in this test
 * are detected by the control thread via the counters. */
int
splat_rwlock_test1_writer_thread(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwv = rwt->rwt_rwp;
        uint8_t rnd = 0;
        char name[16];

        ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
        snprintf(name, sizeof(name), "%s%d",
                 SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
        daemonize(name);
        /* Random delay to shuffle thread startup order */
        get_random_bytes((void *)&rnd, 1);
        splat_rwlock_sleep(rnd * HZ / 1000);

        spin_lock(&rwv->rw_priv_lock);
        splat_vprint(rwv->rw_file, rwt->rwt_name,
                     "%s writer thread trying to acquire rwlock with "
                     "%d holding lock and %d waiting\n",
                     name, atomic_read(&rwv->rw_acquired),
                     atomic_read(&rwv->rw_waiters));
        atomic_inc(&rwv->rw_waiters);
        spin_unlock(&rwv->rw_priv_lock);

        /* Take the semaphore for writing
         * release it when we are told to */
        rw_enter(&rwv->rwl, RW_WRITER);

        spin_lock(&rwv->rw_priv_lock);
        atomic_dec(&rwv->rw_waiters);
        atomic_inc(&rwv->rw_acquired);
        splat_vprint(rwv->rw_file, rwt->rwt_name,
                     "%s writer thread acquired rwlock with "
                     "%d holding lock and %d waiting\n",
                     name, atomic_read(&rwv->rw_acquired),
                     atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Wait here until the control thread
         * says we can release the write lock */
        wait_event_interruptible(rwv->rw_waitq,
                splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
                        atomic_read(&rwv->rw_release) ==
                        SPLAT_RWLOCK_RELEASE_WRITERS));
        spin_lock(&rwv->rw_priv_lock);
        atomic_inc(&rwv->rw_completed);
        atomic_dec(&rwv->rw_acquired);
        splat_vprint(rwv->rw_file, rwt->rwt_name,
                     "%s writer thread dropped rwlock with "
                     "%d holding lock and %d waiting\n",
                     name, atomic_read(&rwv->rw_acquired),
                     atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Release the semaphore */
        rw_exit(&rwv->rwl);
        return 0;
}
148
/* Reader thread for test1.  Waits until some thread (the writer) has
 * acquired the rwlock, then blocks in rw_enter(RW_READER) behind it.
 * Once the lock is obtained it holds it until the control thread sets
 * rw_release to RELEASE_READERS.  Always returns 0. */
int
splat_rwlock_test1_reader_thread(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwv = rwt->rwt_rwp;
        uint8_t rnd = 0;
        char name[16];

        ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
        snprintf(name, sizeof(name), "%s%d",
                 SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
        daemonize(name);
        /* Random delay to shuffle thread startup order */
        get_random_bytes((void *)&rnd, 1);
        splat_rwlock_sleep(rnd * HZ / 1000);

        /* Don't try and and take the semaphore until
         * someone else has already acquired it */
        wait_event_interruptible(rwv->rw_waitq,
                splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
                        atomic_read(&rwv->rw_acquired) > 0));

        spin_lock(&rwv->rw_priv_lock);
        splat_vprint(rwv->rw_file, rwt->rwt_name,
                     "%s reader thread trying to acquire rwlock with "
                     "%d holding lock and %d waiting\n",
                     name, atomic_read(&rwv->rw_acquired),
                     atomic_read(&rwv->rw_waiters));
        atomic_inc(&rwv->rw_waiters);
        spin_unlock(&rwv->rw_priv_lock);

        /* Take the semaphore for reading
         * release it when we are told to */
        rw_enter(&rwv->rwl, RW_READER);

        spin_lock(&rwv->rw_priv_lock);
        atomic_dec(&rwv->rw_waiters);
        atomic_inc(&rwv->rw_acquired);
        splat_vprint(rwv->rw_file, rwt->rwt_name,
                     "%s reader thread acquired rwlock with "
                     "%d holding lock and %d waiting\n",
                     name, atomic_read(&rwv->rw_acquired),
                     atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Wait here until the control thread
         * says we can release the read lock */
        wait_event_interruptible(rwv->rw_waitq,
                splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
                        atomic_read(&rwv->rw_release) ==
                        SPLAT_RWLOCK_RELEASE_READERS));

        spin_lock(&rwv->rw_priv_lock);
        atomic_inc(&rwv->rw_completed);
        atomic_dec(&rwv->rw_acquired);
        splat_vprint(rwv->rw_file, rwt->rwt_name,
                     "%s reader thread dropped rwlock with "
                     "%d holding lock and %d waiting\n",
                     name, atomic_read(&rwv->rw_acquired),
                     atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Release the semaphore */
        rw_exit(&rwv->rwl);
        return 0;
}
214
215static int
7c50328b 216splat_rwlock_test1(struct file *file, void *arg)
f1ca4da6 217{
218 int i, count = 0, rc = 0;
7c50328b 219 long pids[SPLAT_RWLOCK_TEST_COUNT];
220 rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
f1ca4da6 221 rw_priv_t rwv;
222
7c50328b 223 /* Initialize private data including the rwlock */
224 splat_init_rw_priv(&rwv, file);
f1ca4da6 225
226 /* Create some threads, the exact number isn't important just as
227 * long as we know how many we managed to create and should expect. */
7c50328b 228 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
f1ca4da6 229 rwt[i].rwt_rwp = &rwv;
230 rwt[i].rwt_id = i;
7c50328b 231 rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME;
f1ca4da6 232 rwt[i].rwt_rc = 0;
233
234 /* The first thread will be a writer */
235 if (i == 0) {
7c50328b 236 pids[i] = kernel_thread(splat_rwlock_test1_writer_thread,
f1ca4da6 237 &rwt[i], 0);
238 } else {
7c50328b 239 pids[i] = kernel_thread(splat_rwlock_test1_reader_thread,
f1ca4da6 240 &rwt[i], 0);
241 }
7c50328b 242
f1ca4da6 243 if (pids[i] >= 0) {
244 count++;
245 }
246 }
247
248 /* Once the writer has the lock, release the readers */
7c50328b 249 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, atomic_read(&rwv.rw_acquired) <= 0)) {
250 splat_rwlock_sleep(1 * HZ);
f1ca4da6 251 }
252 wake_up_interruptible(&rwv.rw_waitq);
253
254 /* Ensure that there is only 1 writer and all readers are waiting */
7c50328b 255 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
f1ca4da6 256 atomic_read(&rwv.rw_acquired) != 1 ||
257 atomic_read(&rwv.rw_waiters) !=
7c50328b 258 SPLAT_RWLOCK_TEST_COUNT - 1)) {
f1ca4da6 259
7c50328b 260 splat_rwlock_sleep(1 * HZ);
f1ca4da6 261 }
262 /* Relase the writer */
263 spin_lock(&rwv.rw_priv_lock);
7c50328b 264 atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS);
f1ca4da6 265 spin_unlock(&rwv.rw_priv_lock);
266 wake_up_interruptible(&rwv.rw_waitq);
267
268 /* Now ensure that there are multiple reader threads holding the lock */
7c50328b 269 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
f1ca4da6 270 atomic_read(&rwv.rw_acquired) <= 1)) {
7c50328b 271 splat_rwlock_sleep(1 * HZ);
f1ca4da6 272 }
273 /* Release the readers */
274 spin_lock(&rwv.rw_priv_lock);
7c50328b 275 atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_READERS);
f1ca4da6 276 spin_unlock(&rwv.rw_priv_lock);
277 wake_up_interruptible(&rwv.rw_waitq);
278
279 /* Wait for the test to complete */
7c50328b 280 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
f1ca4da6 281 atomic_read(&rwv.rw_acquired) != 0 ||
282 atomic_read(&rwv.rw_waiters) != 0)) {
7c50328b 283 splat_rwlock_sleep(1 * HZ);
f1ca4da6 284
285 }
286
287 rw_destroy(&rwv.rwl);
288 return rc;
289}
290
/* Writer thread for test2.  Registers itself as a waiter, blocks until
 * the control thread releases all writers at once, then takes the
 * rwlock as RW_WRITER, simulates work with a random sleep, and records
 * in rwt_rc whether any other thread held the lock at the same time
 * (which would indicate broken writer exclusion). */
int
splat_rwlock_test2_writer_thread(void *arg)
{
        rw_thr_t *rwt = (rw_thr_t *)arg;
        rw_priv_t *rwv = rwt->rwt_rwp;
        uint8_t rnd = 0;
        char name[16];

        ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
        snprintf(name, sizeof(name), "%s%d",
                 SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
        daemonize(name);
        /* Random delay to shuffle thread startup order */
        get_random_bytes((void *)&rnd, 1);
        splat_rwlock_sleep(rnd * HZ / 1000);

        /* Here just increment the waiters count even if we are not
         * exactly about to call rw_enter().  Not really a big deal
         * since more than likely will be true when we simulate work
         * later on */
        spin_lock(&rwv->rw_priv_lock);
        splat_vprint(rwv->rw_file, rwt->rwt_name,
                     "%s writer thread trying to acquire rwlock with "
                     "%d holding lock and %d waiting\n",
                     name, atomic_read(&rwv->rw_acquired),
                     atomic_read(&rwv->rw_waiters));
        atomic_inc(&rwv->rw_waiters);
        spin_unlock(&rwv->rw_priv_lock);

        /* Wait here until the control thread
         * says we can acquire the write lock */
        wait_event_interruptible(rwv->rw_waitq,
                splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
                        atomic_read(&rwv->rw_release) ==
                        SPLAT_RWLOCK_RELEASE_WRITERS));

        /* Take the semaphore for writing */
        rw_enter(&rwv->rwl, RW_WRITER);

        spin_lock(&rwv->rw_priv_lock);
        atomic_dec(&rwv->rw_waiters);
        atomic_inc(&rwv->rw_acquired);
        splat_vprint(rwv->rw_file, rwt->rwt_name,
                     "%s writer thread acquired rwlock with "
                     "%d holding lock and %d waiting\n",
                     name, atomic_read(&rwv->rw_acquired),
                     atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        /* Give up the processor for a bit to simulate
         * doing some work while taking the write lock */
        splat_rwlock_sleep(rnd * HZ / 1000);

        /* Ensure that we are the only one writing; rw_acquired is
         * atomic so an unlocked read is safe here */
        if (atomic_read(&rwv->rw_acquired) > 1) {
                rwt->rwt_rc = 1;
        } else {
                rwt->rwt_rc = 0;
        }

        spin_lock(&rwv->rw_priv_lock);
        atomic_inc(&rwv->rw_completed);
        atomic_dec(&rwv->rw_acquired);
        splat_vprint(rwv->rw_file, rwt->rwt_name,
                     "%s writer thread dropped rwlock with "
                     "%d holding lock and %d waiting\n",
                     name, atomic_read(&rwv->rw_acquired),
                     atomic_read(&rwv->rw_waiters));
        spin_unlock(&rwv->rw_priv_lock);

        rw_exit(&rwv->rwl);

        return 0;
}
364
365static int
7c50328b 366splat_rwlock_test2(struct file *file, void *arg)
f1ca4da6 367{
368 int i, count = 0, rc = 0;
7c50328b 369 long pids[SPLAT_RWLOCK_TEST_COUNT];
370 rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
f1ca4da6 371 rw_priv_t rwv;
372
7c50328b 373 /* Initialize private data including the rwlock */
374 splat_init_rw_priv(&rwv, file);
f1ca4da6 375
376 /* Create some threads, the exact number isn't important just as
377 * long as we know how many we managed to create and should expect. */
7c50328b 378 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
f1ca4da6 379 rwt[i].rwt_rwp = &rwv;
380 rwt[i].rwt_id = i;
7c50328b 381 rwt[i].rwt_name = SPLAT_RWLOCK_TEST2_NAME;
f1ca4da6 382 rwt[i].rwt_rc = 0;
383
384 /* The first thread will be a writer */
7c50328b 385 pids[i] = kernel_thread(splat_rwlock_test2_writer_thread,
f1ca4da6 386 &rwt[i], 0);
387
388 if (pids[i] >= 0) {
389 count++;
390 }
391 }
392
393 /* Wait for writers to get queued up */
7c50328b 394 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
395 atomic_read(&rwv.rw_waiters) < SPLAT_RWLOCK_TEST_COUNT)) {
396 splat_rwlock_sleep(1 * HZ);
f1ca4da6 397 }
398 /* Relase the writers */
399 spin_lock(&rwv.rw_priv_lock);
7c50328b 400 atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS);
f1ca4da6 401 spin_unlock(&rwv.rw_priv_lock);
402 wake_up_interruptible(&rwv.rw_waitq);
403
404 /* Wait for the test to complete */
7c50328b 405 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
f1ca4da6 406 atomic_read(&rwv.rw_acquired) != 0 ||
407 atomic_read(&rwv.rw_waiters) != 0)) {
7c50328b 408 splat_rwlock_sleep(1 * HZ);
f1ca4da6 409 }
410
411 /* If any of the write threads ever acquired the lock
412 * while another thread had it, make sure we return
413 * an error */
7c50328b 414 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
f1ca4da6 415 if (rwt[i].rwt_rc) {
416 rc++;
417 }
418 }
419
420 rw_destroy(&rwv.rwl);
421 return rc;
422}
423
424static int
7c50328b 425splat_rwlock_test3(struct file *file, void *arg)
f1ca4da6 426{
427 kthread_t *owner;
428 rw_priv_t rwv;
429 int rc = 0;
430
431 /* Initialize private data
432 * including the rwlock */
7c50328b 433 splat_init_rw_priv(&rwv, file);
f1ca4da6 434
435 /* Take the rwlock for writing */
436 rw_enter(&rwv.rwl, RW_WRITER);
437 owner = rw_owner(&rwv.rwl);
438 if (current != owner) {
7c50328b 439 splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should "
f1ca4da6 440 "be owned by pid %d but is owned by pid %d\n",
441 current->pid, owner ? owner->pid : -1);
442 rc = -EINVAL;
443 goto out;
444 }
445
446 /* Release the rwlock */
447 rw_exit(&rwv.rwl);
448 owner = rw_owner(&rwv.rwl);
449 if (owner) {
7c50328b 450 splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should not "
f1ca4da6 451 "be owned but is owned by pid %d\n", owner->pid);
452 rc = -EINVAL;
453 goto out;
454 }
455
456 /* Take the rwlock for reading.
457 * Should not have an owner */
458 rw_enter(&rwv.rwl, RW_READER);
459 owner = rw_owner(&rwv.rwl);
460 if (owner) {
7c50328b 461 splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should not "
f1ca4da6 462 "be owned but is owned by pid %d\n", owner->pid);
463 /* Release the rwlock */
464 rw_exit(&rwv.rwl);
465 rc = -EINVAL;
466 goto out;
467 }
468
469 /* Release the rwlock */
470 rw_exit(&rwv.rwl);
471
472out:
473 rw_destroy(&rwv.rwl);
474 return rc;
475}
476
477int
7c50328b 478splat_rwlock_test4_reader_thread(void *arg)
f1ca4da6 479{
480 rw_thr_t *rwt = (rw_thr_t *)arg;
481 rw_priv_t *rwv = rwt->rwt_rwp;
482 uint8_t rnd = 0;
483 char name[16];
484
7c50328b 485 ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
f1ca4da6 486 snprintf(name, sizeof(name), "%s%d",
7c50328b 487 SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
f1ca4da6 488 daemonize(name);
489 get_random_bytes((void *)&rnd, 1);
7c50328b 490 splat_rwlock_sleep(rnd * HZ / 1000);
f1ca4da6 491
492 /* Don't try and and take the semaphore until
493 * someone else has already acquired it */
494 wait_event_interruptible(rwv->rw_waitq,
7c50328b 495 splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
f1ca4da6 496 atomic_read(&rwv->rw_acquired) > 0));
497
498 spin_lock(&rwv->rw_priv_lock);
7c50328b 499 splat_vprint(rwv->rw_file, rwt->rwt_name,
f1ca4da6 500 "%s reader thread trying to acquire rwlock with "
501 "%d holding lock and %d waiting\n",
502 name, atomic_read(&rwv->rw_acquired),
503 atomic_read(&rwv->rw_waiters));
504 spin_unlock(&rwv->rw_priv_lock);
505
506 /* Take the semaphore for reading
507 * release it when we are told to */
508 rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER);
509
510 /* Here we acquired the lock this is a
511 * failure since the writer should be
512 * holding the lock */
513 if (rwt->rwt_rc == 1) {
514 spin_lock(&rwv->rw_priv_lock);
515 atomic_inc(&rwv->rw_acquired);
7c50328b 516 splat_vprint(rwv->rw_file, rwt->rwt_name,
f1ca4da6 517 "%s reader thread acquired rwlock with "
518 "%d holding lock and %d waiting\n",
519 name, atomic_read(&rwv->rw_acquired),
520 atomic_read(&rwv->rw_waiters));
521 spin_unlock(&rwv->rw_priv_lock);
7c50328b 522
f1ca4da6 523 spin_lock(&rwv->rw_priv_lock);
524 atomic_dec(&rwv->rw_acquired);
7c50328b 525 splat_vprint(rwv->rw_file, rwt->rwt_name,
f1ca4da6 526 "%s reader thread dropped rwlock with "
527 "%d holding lock and %d waiting\n",
528 name, atomic_read(&rwv->rw_acquired),
529 atomic_read(&rwv->rw_waiters));
530 spin_unlock(&rwv->rw_priv_lock);
7c50328b 531
f1ca4da6 532 /* Release the semaphore */
533 rw_exit(&rwv->rwl);
534 }
535 /* Here we know we didn't block and didn't
536 * acquire the rwlock for reading */
537 else {
538 spin_lock(&rwv->rw_priv_lock);
539 atomic_inc(&rwv->rw_completed);
7c50328b 540 splat_vprint(rwv->rw_file, rwt->rwt_name,
f1ca4da6 541 "%s reader thread could not acquire rwlock with "
542 "%d holding lock and %d waiting\n",
543 name, atomic_read(&rwv->rw_acquired),
544 atomic_read(&rwv->rw_waiters));
545 spin_unlock(&rwv->rw_priv_lock);
546 }
547
548 return 0;
549}
550
551static int
7c50328b 552splat_rwlock_test4(struct file *file, void *arg)
f1ca4da6 553{
554 int i, count = 0, rc = 0;
7c50328b 555 long pids[SPLAT_RWLOCK_TEST_COUNT];
556 rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
f1ca4da6 557 rw_priv_t rwv;
558
559 /* Initialize private data
560 * including the rwlock */
7c50328b 561 splat_init_rw_priv(&rwv, file);
f1ca4da6 562
563 /* Create some threads, the exact number isn't important just as
564 * long as we know how many we managed to create and should expect. */
7c50328b 565 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
f1ca4da6 566 rwt[i].rwt_rwp = &rwv;
567 rwt[i].rwt_id = i;
7c50328b 568 rwt[i].rwt_name = SPLAT_RWLOCK_TEST4_NAME;
f1ca4da6 569 rwt[i].rwt_rc = 0;
570
571 /* The first thread will be a writer */
572 if (i == 0) {
573 /* We can reuse the test1 writer thread here */
7c50328b 574 pids[i] = kernel_thread(splat_rwlock_test1_writer_thread,
f1ca4da6 575 &rwt[i], 0);
576 } else {
7c50328b 577 pids[i] = kernel_thread(splat_rwlock_test4_reader_thread,
f1ca4da6 578 &rwt[i], 0);
579 }
580
581 if (pids[i] >= 0) {
582 count++;
583 }
584 }
585
586 /* Once the writer has the lock, release the readers */
7c50328b 587 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
f1ca4da6 588 atomic_read(&rwv.rw_acquired) <= 0)) {
7c50328b 589 splat_rwlock_sleep(1 * HZ);
f1ca4da6 590 }
591 wake_up_interruptible(&rwv.rw_waitq);
592
593 /* Make sure that the reader threads complete */
7c50328b 594 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
595 atomic_read(&rwv.rw_completed) != SPLAT_RWLOCK_TEST_COUNT - 1)) {
596 splat_rwlock_sleep(1 * HZ);
f1ca4da6 597 }
598 /* Release the writer */
599 spin_lock(&rwv.rw_priv_lock);
7c50328b 600 atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS);
f1ca4da6 601 spin_unlock(&rwv.rw_priv_lock);
602 wake_up_interruptible(&rwv.rw_waitq);
603
604 /* Wait for the test to complete */
7c50328b 605 while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
f1ca4da6 606 atomic_read(&rwv.rw_acquired) != 0 ||
607 atomic_read(&rwv.rw_waiters) != 0)) {
7c50328b 608 splat_rwlock_sleep(1 * HZ);
f1ca4da6 609 }
610
611 /* If any of the reader threads ever acquired the lock
612 * while another thread had it, make sure we return
613 * an error since the rw_tryenter() should have failed */
7c50328b 614 for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
f1ca4da6 615 if (rwt[i].rwt_rc) {
616 rc++;
617 }
618 }
619
620 rw_destroy(&rwv.rwl);
621 return rc;
622}
623
624static int
7c50328b 625splat_rwlock_test5(struct file *file, void *arg)
f1ca4da6 626{
627 kthread_t *owner;
628 rw_priv_t rwv;
629 int rc = 0;
630
631 /* Initialize private data
632 * including the rwlock */
7c50328b 633 splat_init_rw_priv(&rwv, file);
f1ca4da6 634
635 /* Take the rwlock for writing */
636 rw_enter(&rwv.rwl, RW_WRITER);
637 owner = rw_owner(&rwv.rwl);
638 if (current != owner) {
7c50328b 639 splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "rwlock should "
f1ca4da6 640 "be owned by pid %d but is owned by pid %d\n",
641 current->pid, owner ? owner->pid : -1);
642 rc = -EINVAL;
643 goto out;
644 }
645
646 /* Make sure that the downgrade
647 * worked properly */
648 rw_downgrade(&rwv.rwl);
649
650 owner = rw_owner(&rwv.rwl);
651 if (owner) {
7c50328b 652 splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "rwlock should not "
f1ca4da6 653 "be owned but is owned by pid %d\n", owner->pid);
654 /* Release the rwlock */
655 rw_exit(&rwv.rwl);
656 rc = -EINVAL;
657 goto out;
658 }
659
660 /* Release the rwlock */
661 rw_exit(&rwv.rwl);
662
663out:
664 rw_destroy(&rwv.rwl);
665 return rc;
666}
667
668static int
7c50328b 669splat_rwlock_test6(struct file *file, void *arg)
f1ca4da6 670{
671 kthread_t *owner;
672 rw_priv_t rwv;
673 int rc = 0;
674
675 /* Initialize private data
676 * including the rwlock */
7c50328b 677 splat_init_rw_priv(&rwv, file);
f1ca4da6 678
679 /* Take the rwlock for reading */
680 rw_enter(&rwv.rwl, RW_READER);
681 owner = rw_owner(&rwv.rwl);
682 if (owner) {
7c50328b 683 splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should not "
f1ca4da6 684 "be owned but is owned by pid %d\n", owner->pid);
685 rc = -EINVAL;
686 goto out;
687 }
688
689 /* Make sure that the upgrade
690 * worked properly */
691 rc = !rw_tryupgrade(&rwv.rwl);
692
693 owner = rw_owner(&rwv.rwl);
694 if (rc || current != owner) {
7c50328b 695 splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should "
f1ca4da6 696 "be owned by pid %d but is owned by pid %d "
697 "trylock rc %d\n",
698 current->pid, owner ? owner->pid : -1, rc);
699 rc = -EINVAL;
700 goto out;
701 }
702
703 /* Release the rwlock */
704 rw_exit(&rwv.rwl);
705
706out:
707 rw_destroy(&rwv.rwl);
708 return rc;
709}
710
7c50328b 711splat_subsystem_t *
712splat_rwlock_init(void)
f1ca4da6 713{
7c50328b 714 splat_subsystem_t *sub;
f1ca4da6 715
716 sub = kmalloc(sizeof(*sub), GFP_KERNEL);
717 if (sub == NULL)
718 return NULL;
719
720 memset(sub, 0, sizeof(*sub));
7c50328b 721 strncpy(sub->desc.name, SPLAT_RWLOCK_NAME, SPLAT_NAME_SIZE);
722 strncpy(sub->desc.desc, SPLAT_RWLOCK_DESC, SPLAT_DESC_SIZE);
f1ca4da6 723 INIT_LIST_HEAD(&sub->subsystem_list);
724 INIT_LIST_HEAD(&sub->test_list);
725 spin_lock_init(&sub->test_lock);
7c50328b 726 sub->desc.id = SPLAT_SUBSYSTEM_RWLOCK;
727
728 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST1_NAME, SPLAT_RWLOCK_TEST1_DESC,
729 SPLAT_RWLOCK_TEST1_ID, splat_rwlock_test1);
730 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST2_NAME, SPLAT_RWLOCK_TEST2_DESC,
731 SPLAT_RWLOCK_TEST2_ID, splat_rwlock_test2);
732 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST3_NAME, SPLAT_RWLOCK_TEST3_DESC,
733 SPLAT_RWLOCK_TEST3_ID, splat_rwlock_test3);
734 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST4_NAME, SPLAT_RWLOCK_TEST4_DESC,
735 SPLAT_RWLOCK_TEST4_ID, splat_rwlock_test4);
736 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST5_NAME, SPLAT_RWLOCK_TEST5_DESC,
737 SPLAT_RWLOCK_TEST5_ID, splat_rwlock_test5);
738 SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST6_NAME, SPLAT_RWLOCK_TEST6_DESC,
739 SPLAT_RWLOCK_TEST6_ID, splat_rwlock_test6);
f1ca4da6 740
741 return sub;
742}
743
/* Unregister all rwlock tests (in reverse registration order) and
 * free the subsystem descriptor allocated by splat_rwlock_init(). */
void
splat_rwlock_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST1_ID);
        kfree(sub);
}
756
757int
7c50328b 758splat_rwlock_id(void) {
759 return SPLAT_SUBSYSTEM_RWLOCK;
f1ca4da6 760}