// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
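
/*
 * Illustrative sketch (not part of the test): a reader-state word packs
 * the RCUTORTURE_RDR_* protection flags into its low-order bits and the
 * index returned by ->readlock() (e.g., from srcu_read_lock()) into the
 * bits at and above RCUTORTURE_RDR_SHIFT.  Hypothetical decoders, for
 * illustration only:
 */
static inline int __maybe_unused rcutorture_sketch_rdr_flags(int readstate)
{
	return readstate & RCUTORTURE_RDR_MASK;	  /* Protection bits. */
}

static inline int __maybe_unused rcutorture_sketch_rdr_idx(int readstate)
{
	return readstate >> RCUTORTURE_RDR_SHIFT; /* ->readlock() index. */
}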
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};
/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
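
/*
 * Illustrative sketch (not part of the test): the writer pairs these
 * helpers by allocating a fresh element, publishing it, and deferring
 * the free of the displaced element until a grace period has elapsed.
 * The names below are placeholders for the real writer's locals:
 *
 *	struct rcu_torture *rp = rcu_torture_alloc();
 *
 *	if (rp) {
 *		rcu_assign_pointer(rcu_torture_current, rp);
 *		cur_ops->deferred_free(old_rp); // Frees via rcu_torture_cb().
 *	}
 */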
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
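
/*
 * Illustrative sketch (not part of this file's excerpt): the module's
 * init code matches the torture_type string against each registered
 * ops vector's ->name and points cur_ops at the winner, roughly like
 * the following (hypothetical local array and loop):
 *
 *	static struct rcu_torture_ops *torture_ops[] = {
 *		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
 *		&busted_srcud_ops, &tasks_ops,
 *	};
 *
 *	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
 *		cur_ops = torture_ops[i];
 *		if (strcmp(torture_type, cur_ops->name) == 0)
 *			break;
 *	}
 */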
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}
static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
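
/*
 * Worked example of the probabilities above (assuming nrealreaders is
 * 16): the long delay fires about once per 16 * 2000 * 300, or roughly
 * one in ten million calls, while the short delay fires about once per
 * 16 * 2 * 200 = 6400 calls, so long-running readers stay rare while
 * still occurring often enough to stress force_quiescent_state().
 */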
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}
static unsigned long rcu_no_completed(void)
{
	return 0;
}
static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}
static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}
static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}
static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}
static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}
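
/*
 * Illustrative note (not part of the test): unlike vanilla RCU, an SRCU
 * reader must carry the index from lock to matching unlock, which is
 * why the ops vector threads ->readlock()'s return value through to
 * ->readunlock().  The reader idiom looks like this:
 *
 *	int idx = srcu_read_lock(srcu_ctlp);
 *	// ... read-side critical section ...
 *	srcu_read_unlock(srcu_ctlp, idx);
 */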
static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}
static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};
/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};
/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};
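
/*
 * Illustrative summary (not part of the test): the ->inflight flag is
 * the whole detection mechanism.  The boost kthread sets it to 1 when
 * posting a callback and the callback clears it, so a callback still
 * in flight long past its deadline implies the callback was starved,
 * presumably by priority inversion:
 *
 *	smp_store_release(&rbi.inflight, 1);
 *	call_rcu(&rbi.rcu, rcu_torture_boost_cb);
 *	// ... later, smp_load_acquire(&rbi.inflight) still being 1
 *	// past the deadline counts as a boost failure.
 */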
static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track if the test failed already in this test interval? */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1, in
		 * this case the boost check would never happen in the above
		 * loop so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No updates primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer"))
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free))
					WARN_ON_ONCE(1);
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}
/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning or end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}
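
/*
 * Worked example (hypothetical values): entering with *readstate == 0
 * and newstate == (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RCU) computes
 * statesnew == 0x21 and statesold == 0, so the function disables bh,
 * enters the flavor's reader via ->readlock(), and stores the returned
 * index in the bits above RCUTORTURE_RDR_SHIFT.  A later call with
 * newstate == 0 reverses both protections and clears the state word.
 */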
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}
/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Most of the time only one bit, occasionally lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    !(mask & cur_ops->ext_irq_conflict) &&
	    (oldmask & cur_ops->ext_irq_conflict))
		mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
	return mask ?: RCUTORTURE_RDR_RCU;
}
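
/*
 * Illustrative note (hypothetical values): if oldmask has
 * RCUTORTURE_RDR_BH set and the freshly chosen mask picks
 * RCUTORTURE_RDR_IRQ without either bh bit, the function forces both
 * bh bits back on, because re-enabling bh with interrupts disabled is
 * forbidden.  And a zero mask falls back to RCUTORTURE_RDR_RCU so that
 * at least plain reader protection is always in force.
 */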
/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
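
/*
 * Worked example of the bias: with RCUTORTURE_RDR_MAX_LOOPS == 0x7,
 * OR-ing i with i >> 3 before masking means a zero result requires six
 * consecutive zero random bits, so (i | (i >> 3)) & 0x7 lands on 7 far
 * more often than on 0, and the +1 yields between 1 and 8 loops.
 */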
/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}
static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep)) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}
static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}
static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}
static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}
/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}
/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};
/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}
/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
static unsigned long rcu_launder_gp_seq_start;
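
/*
 * Worked example of the bucket arithmetic: each histogram bucket covers
 * HZ / FWD_CBS_HIST_DIV jiffies, that is, a tenth of a second, so
 * N_LAUNDERS_HIST works out to 2 * 8 * 10 == 160 buckets, enough to
 * span twice the MAX_FWD_CB_JIFFIES test duration.
 */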
static void rcu_torture_fwd_cb_hist(void)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
		if (n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rcu_fwd_startat);
	gps_old = rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}
/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rcu_fwd_lock, flags);
	rfcpp = rcu_fwd_cb_tail;
	rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(n_launders_hist))
		i = ARRAY_SIZE(n_launders_hist) - 1;
	n_launders_hist[i].n_launders++;
	n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}
/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(void)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rcu_fwd_lock, flags);
		rfcp = rcu_fwd_cb_head;
		if (!rfcp)
			break;
		rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rcu_fwd_cb_head)
			rcu_fwd_cb_tail = &rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
	}
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
	return freed;
}

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
}
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
	unsigned long cver;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	n_launders_cb = 0;
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
		n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rcu_launder_gp_seq_start = gps;
	while (time_before(jiffies, stopat) &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		cond_resched();
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree();

	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist();
	}
}
/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist();
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat) / 2));
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	return NOTIFY_OK;
}
static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};
1873 static int rcu_torture_fwd_prog(void *args
)
1876 int tested_tries
= 0;
1878 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
1879 rcu_bind_current_to_nocb();
1880 if (!IS_ENABLED(CONFIG_SMP
) || !IS_ENABLED(CONFIG_RCU_BOOST
))
1881 set_user_nice(current
, MAX_NICE
);
1883 schedule_timeout_interruptible(fwd_progress_holdoff
* HZ
);
1884 WRITE_ONCE(rcu_fwd_emergency_stop
, false);
1885 register_oom_notifier(&rcutorture_oom_nb
);
1886 rcu_torture_fwd_prog_nr(&tested
, &tested_tries
);
1887 rcu_torture_fwd_prog_cr();
1888 unregister_oom_notifier(&rcutorture_oom_nb
);
1890 /* Avoid slow periods, better to test when busy. */
1891 stutter_wait("rcu_torture_fwd_prog");
1892 } while (!torture_must_stop());
1893 /* Short runs might not contain a valid forward-progress attempt. */
1894 WARN_ON(!tested
&& tested_tries
>= 5);
1895 pr_alert("%s: tested %d tested_tries %d\n", __func__
, tested
, tested_tries
);
1896 torture_kthread_stopping("rcu_torture_fwd_prog");

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	return torture_create_kthread(rcu_torture_fwd_prog, NULL, fwd_prog_task);
}
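
/*
 * The following kthreads and callbacks test the guarantee underlying
 * rcu_barrier(): upon return, all callbacks posted before the call to
 * rcu_barrier() must have been invoked.  Each of the n_barrier_cbs
 * kthreads posts one callback per test phase; the driver kthread then
 * invokes ->cb_barrier() and checks that every callback actually ran.
 */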

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = 0;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
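
/*
 * Note the memory-ordering handshake: the driver's smp_store_release()
 * of barrier_phase below pairs with the smp_load_acquire() above, so
 * that each callback kthread sees the atomic_set() initializations
 * performed before the phase flip.
 */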

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}
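
/*
 * Cleanup must stop each barrier kthread before freeing the arrays that
 * those kthreads are still indexing into, hence the kthread stops below
 * precede the kfree() calls.
 */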

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
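
/*
 * Boost testing requires either test_boost=2 (forced) or test_boost=1
 * with a flavor whose ->can_boost flag is set, and it further requires
 * that the RCU grace-period kthreads run at a real-time priority of at
 * least 2 (see the rcutree.kthread_prio kernel boot parameter).
 */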

static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}
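
/*
 * The duplicate-callback test below stacks preempt_disable(),
 * rcu_read_lock(), and local_irq_disable() so that both call_rcu()
 * invocations on rh2 are as likely as possible to land in the same
 * grace period, which is what debug-objects needs in order to detect
 * the duplication.
 */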

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
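
/*
 * The following is handed to torture_onoff_init() below, and gives the
 * RCU flavor under test an occasional synchronous grace period during
 * CPU-hotplug operations: with the 0xfff mask, only one call in 4096
 * actually invokes ->sync().
 */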

static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
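
/*
 * Module initialization.  The flavor under test is selected with the
 * torture_type module parameter, which must match one of the names in
 * torture_ops[] below, for example:
 *
 *	modprobe rcutorture torture_type=srcud nreaders=4
 *
 * (nreaders here is illustrative; see the torture_param() definitions
 * for the full list of knobs.)
 */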

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;
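
	/*
	 * Any kthread-creation failure below branches to the unwind
	 * label, which invokes rcu_torture_cleanup(); that path must
	 * tolerate a partially initialized test, which is why cleanup
	 * checks each task pointer before stopping the corresponding
	 * kthread.
	 */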

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);