/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;
struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

int torture_runnable = IS_ENABLED(MODULE);
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
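
/*
 * Each writer kthread loops over task_boost(), writelock(), write_delay()
 * and writeunlock(); each reader kthread loops over readlock(),
 * read_delay() and readunlock().  A NULL readlock() marks the primitive
 * as write-only, and init(), when present, runs once before any locking
 * kthreads are spawned.
 */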
struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}
static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);
static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}
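
/*
 * Note the scaling in the delay probabilities above: the denominators grow
 * with cxt.nrealwriters_stress, so the expected rate of injected long
 * delays across the test as a whole stays roughly constant as more locking
 * kthreads are added.
 */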
static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);
static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}
static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};
static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}
static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};
#include <linux/ww_mutex.h>
static DEFINE_WW_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}
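
/*
 * The loop above is the standard wait/wound retry protocol:
 * ww_mutex_lock() returning -EDEADLK means this context was wounded, so
 * every lock already held is dropped, the contended lock is reacquired
 * with ww_mutex_lock_slow(), and that lock is moved to the head of the
 * list so the next pass takes it first.
 */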
static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}
static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}
static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}
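
/*
 * Toggling writers between SCHED_FIFO and SCHED_NORMAL as above exercises
 * the rtmutex priority-inheritance machinery: a boosted waiter forces a
 * lower-priority lock holder to be temporarily boosted, so both the
 * pi-chain walk and the deboost paths get coverage.
 */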
static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}
static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif
static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};
static struct percpu_rw_semaphore pcpu_rwsem;

void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
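
/*
 * The lock_is_write_held/lock_is_read_held flags are how the test detects
 * broken exclusion: a writer that finds either flag already set while it
 * believes it holds the write lock counts an n_lock_fail via the
 * WARN_ON_ONCE()s above.  The deliberately broken "lock_busted" type is
 * expected to trip these checks.
 */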
/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}
/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = 0;
	int i, n_stress;
	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
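
/*
 * The resulting line looks like (illustrative numbers):
 *
 *	Writes:  Total: 93746064  Max/Min: 48298/21073   Fail: 0
 *
 * "???" is appended to Max/Min when the busiest thread acquired the lock
 * more than twice as often as the least busy one, and "!!!" flags any
 * n_lock_fail events.
 */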
/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}
static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	kfree(cxt.lrsa);

end:
	torture_cleanup_end();
}
static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (nwriters_stress == 0 && nreaders_stress == 0) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif
	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = 0;
		cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			lock_is_read_held = 0;
			cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}
	if (nwriters_stress) {
		writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose the
	 * policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);