kernel/locking/locktorture.c

/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
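
/*
 * Typical usage (illustrative values; see
 * Documentation/locking/locktorture.txt for the full parameter list):
 *
 *	modprobe locktorture torture_type=mutex_lock nwriters_stress=4 \
 *		stat_interval=30
 *
 * The test then runs until "rmmod locktorture", which prints the
 * final statistics.
 */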

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

static int torture_runnable = IS_ENABLED(MODULE);
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags;	/* for irq spinlocks */
	const char *name;
};
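
/*
 * A NULL ->readlock marks an exclusive-only lock type: no reader
 * kthreads are created for it and no reader statistics are allocated.
 */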

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa;	/* writer statistics */
	struct lock_stress_stats *lrsa;	/* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};

/*
 * Definitions for lock torture testing.
 */

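/*
 * The "busted" variants below intentionally provide no mutual exclusion
 * at all.  They exist so the failure-detection machinery can itself be
 * validated: running torture_type=lock_busted is expected to produce
 * n_lock_fail counts and WARN()s.
 */
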
static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

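/*
 * A note on the delay heuristics above: each modulus scales with the
 * number of stress kthreads, so the expected system-wide rate of long
 * mdelay()s stays roughly constant as the thread count grows, rather
 * than multiplying the induced contention.
 */
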
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

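/*
 * The saved interrupt state lives in the ops structure rather than on
 * the stack because acquisition and release happen in separate
 * functions.  This is safe for the exclusive paths, since only one
 * kthread at a time can be between writelock() and writeunlock().
 */
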
static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

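/*
 * The wait/wound mutex test below takes three mutexes of the same ww
 * class under a single acquire context.  If one of the ww_mutex_lock()
 * calls returns -EDEADLK ("wounded"), all locks acquired so far are
 * released, the contended lock is then taken with ww_mutex_lock_slow(),
 * and it is moved to the head of the list so that the retry acquires it
 * first, which is the standard ww_mutex backoff protocol.
 */
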
static DEFINE_WW_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000;	/* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations.  When the
		 * task tries to take the lock, the rtmutex code will
		 * account for the new priority, and do any corresponding
		 * pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

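/*
 * Unlike the other lock types, the percpu rw_semaphore needs runtime
 * initialization of its per-CPU state, hence the ->init hook below.
 */
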
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

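/*
 * How failures are detected: each holder sets lock_is_write_held (or
 * lock_is_read_held) right after acquiring and clears it just before
 * releasing.  A writer that finds either flag already set on acquisition
 * knows that mutual exclusion was violated and bumps n_lock_fail;
 * readers likewise check that no writer is inside the critical section.
 */
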
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = false;
	int i, n_stress;
	long max = 0;
	long min = statp[0].n_lock_acquired;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

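/*
 * The resulting line looks like this (illustrative values):
 *
 *	Writes:  Total: 94042  Max/Min: 9032/8439   Fail: 0
 *
 * "???" after Max/Min flags a large imbalance between the per-kthread
 * acquisition counts; "!!!" flags acquisition failures, which also
 * increment cxt.n_lock_torture_errors and make the final test report
 * read FAILURE.
 */
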
/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]" : "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.  As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	kfree(cxt.lrsa);

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

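	/*
	 * cxt.debug_lock only affects reporting: lock debugging (for
	 * example, lockdep) adds overhead that can skew the statistics,
	 * so such runs are tagged "[debug]" in the printed banner.
	 */
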
	/* Initialize the statistics so that each run gets its own numbers. */

	lock_is_write_held = 0;
	cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
				 sizeof(*cxt.lwsa), GFP_KERNEL);
	if (cxt.lwsa == NULL) {
		VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < cxt.nrealwriters_stress; i++) {
		cxt.lwsa[i].n_lock_fail = 0;
		cxt.lwsa[i].n_lock_acquired = 0;
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers.  We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		lock_is_read_held = 0;
		cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
					 sizeof(*cxt.lrsa), GFP_KERNEL);
		if (cxt.lrsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
			firsterr = -ENOMEM;
			kfree(cxt.lwsa);
			cxt.lwsa = NULL;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealreaders_stress; i++) {
			cxt.lrsa[i].n_lock_fail = 0;
			cxt.lrsa[i].n_lock_acquired = 0;
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kcalloc(cxt.nrealwriters_stress,
			       sizeof(writer_tasks[0]), GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]), GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage by creating their kthreads first.  This can be
	 * modified for very specific needs, or even let the user choose
	 * the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);