kernel/locking/locktorture.c

/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 * Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
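
/*
 * Example invocation (illustrative only; any of the module parameters
 * above may be combined):
 *
 *	modprobe locktorture torture_type=mutex_lock \
 *		nwriters_stress=16 stat_interval=30 verbose=1
 *
 * The test then runs until "rmmod locktorture", at which point the
 * final statistics are printed to the console.
 */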

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};
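
/*
 * IS_ENABLED(MODULE) evaluates to 1 when this file is built as a module,
 * so torture_runnable defaults to starting the test at module-init time
 * for modular builds, and to 0 when locktorture is built in.
 */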
int torture_runnable = IS_ENABLED(MODULE);
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags;	/* for irq spinlocks */
	const char *name;
};
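
/*
 * A NULL ->readlock marks an exclusive-only primitive: no reader
 * kthreads are created and the reader statistics are skipped entirely.
 * The ->flags field is scratch space for the _irqsave variants, which
 * must carry the saved interrupt state from writelock() to
 * writeunlock(); it is protected by the lock itself.
 */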

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};
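
/*
 * Because the "lock" above never excludes anything, running with
 * torture_type=lock_busted should quickly trigger the WARN_ON_ONCE()
 * checks in the writer kthreads and report nonzero Fail counts in the
 * statistics, which makes it a handy sanity check of locktorture itself.
 */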

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}
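
/*
 * A note on the delay arithmetic above: torture_random() is roughly
 * uniform, so !(torture_random(trsp) % N) holds about once every N
 * calls.  With N = nrealwriters_stress * 2000 * longdelay_ms, each
 * writer takes the long 100 ms delay about once per
 * 200,000 * nrealwriters_stress acquisitions, keeping the system-wide
 * rate of long delays independent of the number of writer kthreads.
 */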

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};
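
/*
 * Stashing the saved interrupt state in the shared cxt.cur_ops->flags
 * field is safe only because it is written immediately after acquiring
 * the lock and consumed just before releasing that same lock, so
 * concurrent writers can never race on it.  The rwlock _irq variants
 * below rely on the same convention.
 */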

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WW_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}
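
/*
 * The loop above follows the usual wait/wound backoff protocol: on
 * -EDEADLK, every mutex acquired so far is released, the contended
 * mutex is then taken with ww_mutex_lock_slow() (which sleeps until
 * it becomes available), and it is moved to the head of the list so
 * that the remaining mutexes are (re)acquired after it.
 */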

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};
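
/*
 * Note that the rwsem delays above always busy-wait via mdelay() rather
 * than sleeping, so every acquisition holds the semaphore for a fixed
 * wall-clock interval: 10 ms (common) or 1 s (rare) for writers, and
 * 50 ms (common) or 200 ms (rare) for readers.
 */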

static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};
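
/*
 * percpu_rw_semaphore heavily favors readers: percpu_down_read() is
 * nearly free in the absence of writers, while percpu_down_write()
 * must wait for all pre-existing readers to drain.  Reusing the rwsem
 * delay callbacks above therefore exercises a rather different
 * fast-path/slow-path mix than the plain rwsem test.
 */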

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = false;
	int i, n_stress;
	long max = 0;
	long min = statp[0].n_lock_acquired;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
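
/*
 * The resulting line looks like this (the numbers are illustrative
 * only):
 *
 *	Writes: Total: 93746064 Max/Min: 11821019/11266497  Fail: 0
 *
 * "???" is appended after Max/Min when the per-thread acquisition
 * counts look suspiciously imbalanced (max more than twice min), and
 * "!!!" is appended when any thread recorded an acquisition failure.
 */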

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d\n",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d\n",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	kfree(cxt.lrsa);

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */

	lock_is_write_held = 0;
	cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
	if (cxt.lwsa == NULL) {
		VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < cxt.nrealwriters_stress; i++) {
		cxt.lwsa[i].n_lock_fail = 0;
		cxt.lwsa[i].n_lock_acquired = 0;
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		lock_is_read_held = 0;
		cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
		if (cxt.lrsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
			firsterr = -ENOMEM;
			kfree(cxt.lwsa);
			cxt.lwsa = NULL;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealreaders_stress; i++) {
			cxt.lrsa[i].n_lock_fail = 0;
			cxt.lrsa[i].n_lock_acquired = 0;
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or the user could even be allowed
	 * to choose the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);