// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

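/*
 * Worked example, derived from the definitions above: a reader state of
 * 0x122 decodes as SRCU index 1 in the bits above RCUTORTURE_RDR_SHIFT,
 * plus RCUTORTURE_RDR_IRQ (0x02) and RCUTORTURE_RDR_RCU (0x20), that is,
 * an SRCU reader currently extended by both an irq-disabled region and a
 * nested RCU read-side critical section.
 */
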
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

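/*
 * For example, given shutdown_secs of 120, the aggressive CPU-hog tests
 * run during roughly the first 90 seconds and stand down for the final
 * 30 * HZ jiffies before the scheduled shutdown time.
 */
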
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

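/*
 * Both moduli above scale with nrealreaders, so the aggregate rate of
 * long and short delays across all readers stays roughly constant as
 * readers are added: with the defaults, each call has about a
 * 1-in-(nrealreaders * 600000) chance of taking the 300-millisecond
 * mdelay() path.
 */
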
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

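/*
 * For example, with RCU_TORTURE_PIPE_LEN of 10, a removed element ages
 * through ten grace periods before rtort_mbtest is cleared and the
 * element is reported ready to free; a reader that still sees the element
 * with a large pipe count is evidence of a too-short grace period.
 */
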
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

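/*
 * Migrating the updater onto each online CPU in turn is a grace period
 * only because CONFIG_PREEMPT=n readers cannot be preempted: once the
 * updater has run on a given CPU, any reader that was running there
 * beforehand must have completed.
 */
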
static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

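/*
 * The smp_store_release() above pairs with the smp_load_acquire() calls
 * in rcu_torture_boost(), which poll ->inflight to decide when to post
 * the next callback.
 */
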
static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

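/*
 * Threshold arithmetic: with the default test_boost_duration of 4 seconds,
 * a callback outstanding for more than 4 * HZ - HZ / 2 jiffies (3.5
 * seconds) counts as a boost failure.
 */
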
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track if the test failed already in this test interval? */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1, in
		 * this case the boost check would never happen in the above
		 * loop so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No updates primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer") &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop())
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning or end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}

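/*
 * With randmask1 roughly uniform, the !(randmask1 & 0x7) test above means
 * about one extension in eight keeps many bits of the maximal mask, while
 * the remainder select at most one of the RCUTORTURE_RDR_NBITS bits,
 * falling back to RCUTORTURE_RDR_RCU when the chosen bit is not available.
 */
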
/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep)) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}

/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
static unsigned long rcu_launder_gp_seq_start;

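/*
 * Bucket arithmetic: with FWD_CBS_HIST_DIV of 10, each histogram bucket
 * covers HZ / 10 jiffies (100 milliseconds), so N_LAUNDERS_HIST works out
 * to 2 * 8 * 10 = 160 buckets, enough for twice MAX_FWD_CB_JIFFIES.
 */
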
static void rcu_torture_fwd_cb_hist(void)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
		if (n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rcu_fwd_startat);
	gps_old = rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}

/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rcu_fwd_lock, flags);
	rfcpp = rcu_fwd_cb_tail;
	rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(n_launders_hist))
		i = ARRAY_SIZE(n_launders_hist) - 1;
	n_launders_hist[i].n_launders++;
	n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}

// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
{
	if (IS_ENABLED(CONFIG_PREEMPT) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
		// Real call_rcu() floods hit userspace, so emulate that.
		if (need_resched() || (iter & 0xfff))
			schedule();
	} else {
		// No userspace emulation: CB invocation throttles call_rcu()
		cond_resched();
	}
}

/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(void)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rcu_fwd_lock, flags);
		rfcp = rcu_fwd_cb_head;
		if (!rfcp) {
			spin_unlock_irqrestore(&rcu_fwd_lock, flags);
			break;
		}
		rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rcu_fwd_cb_head)
			rcu_fwd_cb_tail = &rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
		rcu_torture_fwd_prog_cond_resched(freed);
	}
	return freed;
}

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			rcu_torture_fwd_prog_cond_resched(1);
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !shutdown_time_arrived() &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}

/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
	unsigned long cver;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */
	if (!cur_ops->call)
		return; /* Can't do call_rcu() fwd prog without ->call. */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	n_launders_cb = 0;
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
		n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rcu_launder_gp_seq_start = gps;
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree();

	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
	    !shutdown_time_arrived()) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist();
	}
	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}


/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist();
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier(); /* Wait for callbacks posted before the emergency stop. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier(); /* And again to catch any stragglers. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};

/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		register_oom_notifier(&rcutorture_oom_nb);
		rcu_torture_fwd_prog_nr(&tested, &tested_tries);
		rcu_torture_fwd_prog_cr();
		unregister_oom_notifier(&rcutorture_oom_nb);

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	return torture_create_kthread(rcu_torture_fwd_prog,
				      NULL, fwd_prog_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
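
/*
 * A stripped-down sketch of the release/acquire handshake used above:
 * the driver kthread publishes its setup stores with smp_store_release()
 * of the phase flag, and each callback kthread's smp_load_acquire() of
 * that flag orders the flag check before everything done afterward.
 * Hypothetical ex_-prefixed names; this is not rcutorture code.
 */
static bool ex_phase;
static int ex_payload;

static void __maybe_unused ex_publish(int val)
{
	ex_payload = val;			 /* Setup store first... */
	smp_store_release(&ex_phase, !ex_phase); /* ...then flip the phase. */
}

static int __maybe_unused ex_consume(bool *lastphase)
{
	bool newphase = smp_load_acquire(&ex_phase);

	if (newphase == *lastphase)
		return -EAGAIN;	/* No new phase published yet. */
	*lastphase = newphase;
	return ex_payload;	/* Acquire guarantees ex_publish()'s store is seen. */
}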

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
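
/*
 * The driver above validates the usage rule that rcu_barrier() and its
 * flavor-specific equivalents exist to enforce.  A sketch of that rule
 * as it applies to an ordinary module (hypothetical ex_ names, not part
 * of rcutorture):
 */
static struct rcu_head ex_unload_rh;

static void ex_unload_cb(struct rcu_head *rhp)
{
	/* Clean up whatever ex_unload_rh is embedded in. */
}

static void __maybe_unused ex_module_unload(void)
{
	call_rcu(&ex_unload_rh, ex_unload_cb);
	rcu_barrier();	/* Wait for ex_unload_cb() to finish before the
			 * module text containing it can be freed. */
}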

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	show_rcu_gp_kthreads();
	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
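
/*
 * The init_rcu_head_on_stack()/destroy_rcu_head_on_stack() pairing used
 * above is what lets debug-objects tell a legitimate on-stack rcu_head
 * from a corrupted one.  A minimal sketch of correct on-stack usage
 * follows (hypothetical ex_ names; roughly what synchronize_rcu()-style
 * waiting does internally):
 */
struct ex_stack_waiter {
	struct rcu_head rh;
	struct completion done;
};

static void ex_stack_cb(struct rcu_head *rhp)
{
	complete(&container_of(rhp, struct ex_stack_waiter, rh)->done);
}

static void __maybe_unused ex_wait_one_gp(void)
{
	struct ex_stack_waiter w;

	init_rcu_head_on_stack(&w.rh);	/* Tell debug-objects: on-stack is OK. */
	init_completion(&w.done);
	call_rcu(&w.rh, ex_stack_cb);
	wait_for_completion(&w.done);	/* Callback has run; frame may now die. */
	destroy_rcu_head_on_stack(&w.rh);
}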

/* Rate-limited sync(): invoked once per 4096 calls via torture_onoff_init(). */
static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops, &trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
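
/*
 * Example invocation (illustrative only; parameter values are arbitrary):
 *
 *	modprobe rcutorture torture_type=srcu fwd_progress=1 \
 *		fwd_progress_holdoff=60
 *
 * torture_type selects an entry from torture_ops[] in rcu_torture_init(),
 * and the fwd_progress* parameters govern the forward-progress kthread
 * spawned by rcu_torture_fwd_prog_init().
 */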