/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."
#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (ie: that we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode.  This way we can
 * notice an extended quiescent state to other CPUs that started a grace
 * period.  Otherwise we would delay any grace period as long as we run
 * in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif
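
/*
 * Illustrative usage sketch (editorial example, not part of the upstream
 * file): a function that must only be called with RCU-sched protection can
 * assert that via lockdep.  The function name is hypothetical.
 */
static void __maybe_unused example_requires_rcu_sched(void)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "example_requires_rcu_sched() needs rcu_read_lock_sched() protection");
	/* ... access data protected by RCU-sched here ... */
}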
#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the time period during boot from
 * when the first task is spawned until the rcu_set_runtime_mode()
 * core_initcall() is invoked, at which point everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);
/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
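
/*
 * Illustrative usage sketch (editorial example, not part of the upstream
 * file): a sleepable path that temporarily wants expedited grace periods
 * can bracket its synchronous waits with the pair above.  The function
 * name is hypothetical.
 */
static void __maybe_unused example_expedited_region(void)
{
	rcu_expedite_gp();	/* Subsequent synchronize_rcu() calls are expedited. */
	synchronize_rcu();
	rcu_unexpedite_gp();	/* Restore normal (non-expedited) behavior. */
}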
/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */
/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}
#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */
#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;  /* Keep IRQ handlers happy. */
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */
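
/*
 * Illustrative reader sketch (editorial example, not part of the upstream
 * file): rcu_read_lock()/rcu_read_unlock() map onto the primitives above
 * (or roughly onto preempt_disable()/preempt_enable() for non-preemptible
 * RCU).  The structure, global pointer, and function names are hypothetical.
 */
struct example_conf {
	int threshold;
};
static struct example_conf __rcu *example_conf_ptr;

static int __maybe_unused example_read_threshold(void)
{
	struct example_conf *conf;
	int ret = 0;

	rcu_read_lock();			/* Begin read-side critical section. */
	conf = rcu_dereference(example_conf_ptr);
	if (conf)
		ret = conf->threshold;
	rcu_read_unlock();			/* End read-side critical section. */
	return ret;
}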
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
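
/*
 * Illustrative usage sketch (editorial example, not part of the upstream
 * file): rcu_read_lock_held() is typically consumed via
 * rcu_dereference_check(), so that lockdep complains when an RCU-protected
 * pointer is fetched outside a reader and without the update-side lock.
 * The structure, pointer, lock, and function names are hypothetical.
 */
struct example_node {
	int data;
};
static DEFINE_SPINLOCK(example_node_lock);
static struct example_node __rcu *example_node_ptr;

static __maybe_unused struct example_node *example_get_node(void)
{
	/* Legal under rcu_read_lock() or while holding example_node_lock. */
	return rcu_dereference_check(example_node_ptr,
				     lockdep_is_held(&example_node_lock));
}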
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
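
/*
 * Illustrative reader sketch (editorial example, not part of the upstream
 * file): an RCU-bh read-side critical section, the kind of region that
 * rcu_read_lock_bh_held() is meant to validate.  The structure, pointer,
 * and function names are hypothetical.
 */
struct example_stat {
	unsigned long packets;
};
static struct example_stat __rcu *example_stat_ptr;

static unsigned long __maybe_unused example_read_packets(void)
{
	struct example_stat *st;
	unsigned long packets = 0;

	rcu_read_lock_bh();			/* Also disables softirq processing. */
	st = rcu_dereference_bh(example_stat_ptr);
	if (st)
		packets = st->packets;
	rcu_read_unlock_bh();
	return packets;
}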
/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);
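
/*
 * Illustrative sketch (editorial example, not part of the upstream file):
 * the synchronous-wait pattern that wakeme_after_rcu() supports, and that
 * __wait_rcu_gp() below implements for one or more flavors at once.  The
 * function name is hypothetical; the waiter must be able to sleep.
 */
static void __maybe_unused example_synchronous_wait(void)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);	/* Tell debugobjects about the on-stack head. */
	init_completion(&rs.completion);
	call_rcu(&rs.head, wakeme_after_rcu);	/* Completion fires after a grace period. */
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}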
void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
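
/*
 * Illustrative sketch (editorial example, not part of the upstream file):
 * callers normally reach __wait_rcu_gp() through the wait_rcu_gp() wrapper
 * macro from <linux/rcupdate_wait.h>, which builds the on-stack arrays from
 * the callback functions passed in.  The function name is hypothetical.
 */
static void __maybe_unused example_wait_one_grace_period(void)
{
	might_sleep();
	wait_rcu_gp(call_rcu);	/* Roughly what non-expedited synchronize_rcu() does. */
}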
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static struct task_struct *rcu_tasks_kthread_ptr;
/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), entry into idle, or transition to usermode
 * execution.  As such, there are no read-side primitives analogous to
 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
 * to determine that all tasks have passed through a safe state, not so
 * much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
		wake_up(&rcu_tasks_cbs_wq);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
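
/*
 * Illustrative usage sketch (editorial example, not part of the upstream
 * file): a tracing-style user embeds a struct rcu_head in the object it
 * wants to retire and frees it from the callback once every task has
 * passed through a voluntary context switch, idle, or usermode execution.
 * The structure and function names are hypothetical, and kfree() here
 * assumes <linux/slab.h> is available.
 */
struct example_trampoline {
	struct rcu_head rh;
	void *insns;
};

static void example_free_trampoline(struct rcu_head *rhp)
{
	struct example_trampoline *tr = container_of(rhp, struct example_trampoline, rh);

	kfree(tr);	/* No task can still be executing in the trampoline. */
}

static void __maybe_unused example_retire_trampoline(struct example_trampoline *tr)
{
	/* Caller has already unpublished all pointers leading to @tr. */
	call_rcu_tasks(&tr->rh, example_free_trampoline);
}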
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
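
/*
 * Illustrative usage sketch (editorial example, not part of the upstream
 * file): the classic remove-then-wait-then-reclaim sequence for code that
 * tasks might be executing without any explicit reader markers.  The
 * function name is hypothetical; the caller must be able to sleep.
 */
static void __maybe_unused example_remove_tracing_hook(void (**hook)(void))
{
	WRITE_ONCE(*hook, NULL);	/* Unpublish: no new callers will find the hook. */
	synchronize_rcu_tasks();	/* Wait out tasks already executing in it. */
	/* Now the old hook's trampoline or text can be freed or patched. */
}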
/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}
/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop. */
		schedule_timeout_uninterruptible(HZ/10);
	}
}
/* Spawn rcu_tasks_kthread() at core_initcall() time. */
static int __init rcu_spawn_tasks_kthread(void)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	return 0;
}
core_initcall(rcu_spawn_tasks_kthread);
/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void)
{
	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
	preempt_enable();
}

#endif /* #ifdef CONFIG_TASKS_RCU */
#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	else
		pr_info("\tTasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */
#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */
#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */