kernel/rcu/update.c
1 /*
2 * Read-Copy Update mechanism for mutual exclusion
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2001
19 *
20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21 * Manfred Spraul <manfred@colorfullife.com>
22 *
23 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
24 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
25 * Papers:
26 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
27 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
28 *
29 * For detailed explanation of Read-Copy Update mechanism see -
30 * http://lse.sourceforge.net/locking/rcupdate.html
31 *
32 */
33 #include <linux/types.h>
34 #include <linux/kernel.h>
35 #include <linux/init.h>
36 #include <linux/spinlock.h>
37 #include <linux/smp.h>
38 #include <linux/interrupt.h>
39 #include <linux/sched.h>
40 #include <linux/atomic.h>
41 #include <linux/bitops.h>
42 #include <linux/percpu.h>
43 #include <linux/notifier.h>
44 #include <linux/cpu.h>
45 #include <linux/mutex.h>
46 #include <linux/export.h>
47 #include <linux/hardirq.h>
48 #include <linux/delay.h>
49 #include <linux/module.h>
50 #include <linux/kthread.h>
51 #include <linux/tick.h>
52
53 #define CREATE_TRACE_POINTS
54
55 #include "rcu.h"
56
57 MODULE_ALIAS("rcupdate");
58 #ifdef MODULE_PARAM_PREFIX
59 #undef MODULE_PARAM_PREFIX
60 #endif
61 #define MODULE_PARAM_PREFIX "rcupdate."
62
63 #ifndef CONFIG_TINY_RCU
64 module_param(rcu_expedited, int, 0);
65 module_param(rcu_normal, int, 0);
66 static int rcu_normal_after_boot;
67 module_param(rcu_normal_after_boot, int, 0);
68 #endif /* #ifndef CONFIG_TINY_RCU */
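
/*
 * Because MODULE_PARAM_PREFIX is "rcupdate.", the parameters above are
 * set from the kernel boot command line with that prefix, for example
 * (illustrative settings, not defaults):
 *
 *	rcupdate.rcu_expedited=1           expedite grace periods
 *	rcupdate.rcu_normal=1              force normal grace periods
 *	rcupdate.rcu_normal_after_boot=1   switch to normal once boot ends
 *
 * As noted below, rcu_normal overrides rcu_expedited when both are set.
 */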
69
70 #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
71 /**
72 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
73 *
74 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
75 * RCU-sched read-side critical section. In the absence of
76 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
77 * critical section unless it can prove otherwise. Note that disabling
78 * of preemption (including disabling irqs) counts as an RCU-sched
79 * read-side critical section. This is useful for debug checks in functions
80 * that require that they be called within an RCU-sched read-side
81 * critical section.
82 *
83 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
84 * and while lockdep is disabled.
85 *
86 * Note that if the CPU is in the idle loop from an RCU point of
87 * view (i.e., we are in the section between rcu_idle_enter() and
88 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
89 * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
90 * that are in such a section, considering these as in extended quiescent
91 * state, so such a CPU is effectively never in an RCU read-side critical
92 * section regardless of what RCU primitives it invokes. This state of
93 * affairs is required --- we need to keep an RCU-free window in idle
94 * where the CPU may possibly enter into low power mode. This way, CPUs
95 * that have started a grace period can observe the idle CPU's extended
96 * quiescent state. Otherwise we would delay any grace period for as
97 * long as we run in the idle task.
98 *
99 * Similarly, we avoid claiming an SRCU read lock held if the current
100 * CPU is offline.
101 */
102 int rcu_read_lock_sched_held(void)
103 {
104 int lockdep_opinion = 0;
105
106 if (!debug_lockdep_rcu_enabled())
107 return 1;
108 if (!rcu_is_watching())
109 return 0;
110 if (!rcu_lockdep_current_cpu_online())
111 return 0;
112 if (debug_locks)
113 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
114 return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
115 }
116 EXPORT_SYMBOL(rcu_read_lock_sched_held);
117 #endif
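
/*
 * Illustrative use of rcu_read_lock_sched_held() as a lockdep-based
 * debug check (hypothetical helper and pointer, not part of this file):
 *
 *	static struct foo *foo_peek(void)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *				 "foo_peek() needs rcu_read_lock_sched()");
 *		return rcu_dereference_sched(global_foo);
 *	}
 *
 * Regions with preemption or irqs disabled also satisfy the check, as
 * described above.
 */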
118
119 #ifndef CONFIG_TINY_RCU
120
121 /*
122 * Should expedited grace-period primitives always fall back to their
123 * non-expedited counterparts? Intended for use within RCU. Note
124 * that if the user specifies both rcu_expedited and rcu_normal, then
125 * rcu_normal wins.
126 */
127 bool rcu_gp_is_normal(void)
128 {
129 return READ_ONCE(rcu_normal);
130 }
131
132 static atomic_t rcu_expedited_nesting =
133 ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
134
135 /*
136 * Should normal grace-period primitives be expedited? Intended for
137 * use within RCU. Note that this function takes the rcu_expedited
138 * sysfs/boot variable into account as well as the rcu_expedite_gp()
139 * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
140 * returns false is a -really- bad idea.
141 */
142 bool rcu_gp_is_expedited(void)
143 {
144 return rcu_expedited || atomic_read(&rcu_expedited_nesting);
145 }
146 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
147
148 /**
149 * rcu_expedite_gp - Expedite future RCU grace periods
150 *
151 * After a call to this function, future calls to synchronize_rcu() and
152 * friends act as if the corresponding synchronize_rcu_expedited() function
153 * had instead been called.
154 */
155 void rcu_expedite_gp(void)
156 {
157 atomic_inc(&rcu_expedited_nesting);
158 }
159 EXPORT_SYMBOL_GPL(rcu_expedite_gp);
160
161 /**
162 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
163 *
164 * Undo a prior call to rcu_expedite_gp(). If all prior calls to
165 * rcu_expedite_gp() are undone by subsequent calls to rcu_unexpedite_gp(),
166 * and if the rcu_expedited sysfs/boot parameter is not set, then all
167 * subsequent calls to synchronize_rcu() and friends will return to
168 * their normal non-expedited behavior.
169 */
170 void rcu_unexpedite_gp(void)
171 {
172 atomic_dec(&rcu_expedited_nesting);
173 }
174 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
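
/*
 * Illustrative pairing of rcu_expedite_gp() and rcu_unexpedite_gp()
 * (hypothetical caller): the requests nest, so each expedite must be
 * balanced by exactly one unexpedite.
 *
 *	rcu_expedite_gp();		later synchronize_rcu() calls act expedited
 *	do_latency_sensitive_work();	hypothetical work
 *	rcu_unexpedite_gp();		back to normal, unless other requests remain
 */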
175
176 /*
177 * Inform RCU of the end of the in-kernel boot sequence.
178 */
179 void rcu_end_inkernel_boot(void)
180 {
181 if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
182 rcu_unexpedite_gp();
183 if (rcu_normal_after_boot)
184 WRITE_ONCE(rcu_normal, 1);
185 }
186
187 #endif /* #ifndef CONFIG_TINY_RCU */
188
189 #ifdef CONFIG_PREEMPT_RCU
190
191 /*
192 * Preemptible RCU implementation for rcu_read_lock().
193 * Just increment ->rcu_read_lock_nesting, shared state will be updated
194 * if we block.
195 */
196 void __rcu_read_lock(void)
197 {
198 current->rcu_read_lock_nesting++;
199 barrier(); /* critical section after entry code. */
200 }
201 EXPORT_SYMBOL_GPL(__rcu_read_lock);
202
203 /*
204 * Preemptible RCU implementation for rcu_read_unlock().
205 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
206 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
207 * invoke rcu_read_unlock_special() to clean up after a context switch
208 * in an RCU read-side critical section and other special cases.
209 */
210 void __rcu_read_unlock(void)
211 {
212 struct task_struct *t = current;
213
214 if (t->rcu_read_lock_nesting != 1) {
215 --t->rcu_read_lock_nesting;
216 } else {
217 barrier(); /* critical section before exit code. */
218 t->rcu_read_lock_nesting = INT_MIN;
219 barrier(); /* assign before ->rcu_read_unlock_special load */
220 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
221 rcu_read_unlock_special(t);
222 barrier(); /* ->rcu_read_unlock_special load before assign */
223 t->rcu_read_lock_nesting = 0;
224 }
225 #ifdef CONFIG_PROVE_LOCKING
226 {
227 int rrln = READ_ONCE(t->rcu_read_lock_nesting);
228
229 WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
230 }
231 #endif /* #ifdef CONFIG_PROVE_LOCKING */
232 }
233 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
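
/*
 * Sketch of the nesting behavior for a preemptible-RCU reader, which
 * reaches the functions above via rcu_read_lock()/rcu_read_unlock()
 * (hypothetical protected pointer gp):
 *
 *	rcu_read_lock();		->rcu_read_lock_nesting: 0 -> 1
 *	rcu_read_lock();		->rcu_read_lock_nesting: 1 -> 2
 *	p = rcu_dereference(gp);
 *	rcu_read_unlock();		->rcu_read_lock_nesting: 2 -> 1
 *	rcu_read_unlock();		outermost exit; may invoke
 *					rcu_read_unlock_special()
 */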
234
235 #endif /* #ifdef CONFIG_PREEMPT_RCU */
236
237 #ifdef CONFIG_DEBUG_LOCK_ALLOC
238 static struct lock_class_key rcu_lock_key;
239 struct lockdep_map rcu_lock_map =
240 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
241 EXPORT_SYMBOL_GPL(rcu_lock_map);
242
243 static struct lock_class_key rcu_bh_lock_key;
244 struct lockdep_map rcu_bh_lock_map =
245 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
246 EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
247
248 static struct lock_class_key rcu_sched_lock_key;
249 struct lockdep_map rcu_sched_lock_map =
250 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
251 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
252
253 static struct lock_class_key rcu_callback_key;
254 struct lockdep_map rcu_callback_map =
255 STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
256 EXPORT_SYMBOL_GPL(rcu_callback_map);
257
258 int notrace debug_lockdep_rcu_enabled(void)
259 {
260 return rcu_scheduler_active && debug_locks &&
261 current->lockdep_recursion == 0;
262 }
263 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
264
265 /**
266 * rcu_read_lock_held() - might we be in RCU read-side critical section?
267 *
268 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
269 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
270 * this assumes we are in an RCU read-side critical section unless it can
271 * prove otherwise. This is useful for debug checks in functions that
272 * require that they be called within an RCU read-side critical section.
273 *
274 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
275 * and while lockdep is disabled.
276 *
277 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
278 * occur in the same context, for example, it is illegal to invoke
279 * rcu_read_unlock() in process context if the matching rcu_read_lock()
280 * was invoked from within an irq handler.
281 *
282 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
283 * offline from an RCU perspective, so check for those as well.
284 */
285 int rcu_read_lock_held(void)
286 {
287 if (!debug_lockdep_rcu_enabled())
288 return 1;
289 if (!rcu_is_watching())
290 return 0;
291 if (!rcu_lockdep_current_cpu_online())
292 return 0;
293 return lock_is_held(&rcu_lock_map);
294 }
295 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
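
/*
 * rcu_read_lock_held() is typically used as part of the condition
 * passed to rcu_dereference_check(), for example (hypothetical data
 * structure and lock):
 *
 *	p = rcu_dereference_check(foo->bar,
 *				  lockdep_is_held(&foo->lock) ||
 *				  rcu_read_lock_held());
 *
 * This documents that ->bar may be dereferenced either while holding
 * foo->lock or from within an RCU read-side critical section.
 */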
296
297 /**
298 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
299 *
300 * Check for bottom half being disabled, which covers both the
301 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases. Note that if someone uses
302 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
303 * will show the situation. This is useful for debug checks in functions
304 * that require that they be called within an RCU read-side critical
305 * section.
306 *
307 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
308 *
309 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
310 * offline from an RCU perspective, so check for those as well.
311 */
312 int rcu_read_lock_bh_held(void)
313 {
314 if (!debug_lockdep_rcu_enabled())
315 return 1;
316 if (!rcu_is_watching())
317 return 0;
318 if (!rcu_lockdep_current_cpu_online())
319 return 0;
320 return in_softirq() || irqs_disabled();
321 }
322 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
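
/*
 * The analogous illustrative check for the RCU-bh flavor (hypothetical
 * pointer gp):
 *
 *	p = rcu_dereference_bh_check(gp, rcu_read_lock_bh_held());
 */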
323
324 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
325
326 /**
327 * wakeme_after_rcu() - Callback function to awaken a task after grace period
328 * @head: Pointer to rcu_head member within rcu_synchronize structure
329 *
330 * Awaken the corresponding task now that a grace period has elapsed.
331 */
332 void wakeme_after_rcu(struct rcu_head *head)
333 {
334 struct rcu_synchronize *rcu;
335
336 rcu = container_of(head, struct rcu_synchronize, head);
337 complete(&rcu->completion);
338 }
339 EXPORT_SYMBOL_GPL(wakeme_after_rcu);
340
341 void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
342 struct rcu_synchronize *rs_array)
343 {
344 int i;
345
346 /* Initialize and register callbacks for each flavor specified. */
347 for (i = 0; i < n; i++) {
348 if (checktiny &&
349 (crcu_array[i] == call_rcu ||
350 crcu_array[i] == call_rcu_bh)) {
351 might_sleep();
352 continue;
353 }
354 init_rcu_head_on_stack(&rs_array[i].head);
355 init_completion(&rs_array[i].completion);
356 (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
357 }
358
359 /* Wait for all callbacks to be invoked. */
360 for (i = 0; i < n; i++) {
361 if (checktiny &&
362 (crcu_array[i] == call_rcu ||
363 crcu_array[i] == call_rcu_bh))
364 continue;
365 wait_for_completion(&rs_array[i].completion);
366 destroy_rcu_head_on_stack(&rs_array[i].head);
367 }
368 }
369 EXPORT_SYMBOL_GPL(__wait_rcu_gp);
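
/*
 * __wait_rcu_gp() is normally reached through the wait_rcu_gp() and
 * synchronize_rcu_mult() wrappers.  A minimal sketch of the underlying
 * wait-for-one-grace-period pattern for a single flavor (illustrative
 * only):
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_rcu_head_on_stack(&rcu.head);
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);	wake us after a grace period
 *	wait_for_completion(&rcu.completion);	block until the callback runs
 *	destroy_rcu_head_on_stack(&rcu.head);
 */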
370
371 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
372 void init_rcu_head(struct rcu_head *head)
373 {
374 debug_object_init(head, &rcuhead_debug_descr);
375 }
376
377 void destroy_rcu_head(struct rcu_head *head)
378 {
379 debug_object_free(head, &rcuhead_debug_descr);
380 }
381
382 /*
383 * fixup_activate is called when:
384 * - an active object is activated
385 * - an unknown object is activated (might be a statically initialized object)
386 * Activation is performed internally by call_rcu().
387 */
388 static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
389 {
390 struct rcu_head *head = addr;
391
392 switch (state) {
393
394 case ODEBUG_STATE_NOTAVAILABLE:
395 /*
396 * This is not really a fixup. We just make sure that it is
397 * tracked in the object tracker.
398 */
399 debug_object_init(head, &rcuhead_debug_descr);
400 debug_object_activate(head, &rcuhead_debug_descr);
401 return 0;
402 default:
403 return 1;
404 }
405 }
406
407 /**
408 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
409 * @head: pointer to rcu_head structure to be initialized
410 *
411 * This function informs debugobjects of a new rcu_head structure that
412 * has been allocated as an auto variable on the stack. This function
413 * is not required for rcu_head structures that are statically defined or
414 * that are dynamically allocated on the heap. This function has no
415 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
416 */
417 void init_rcu_head_on_stack(struct rcu_head *head)
418 {
419 debug_object_init_on_stack(head, &rcuhead_debug_descr);
420 }
421 EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
422
423 /**
424 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
425 * @head: pointer to rcu_head structure to be destroyed
426 *
427 * This function informs debugobjects that an on-stack rcu_head structure
428 * is about to go out of scope. As with init_rcu_head_on_stack(), this
429 * function is not required for rcu_head structures that are statically
430 * defined or that are dynamically allocated on the heap. Also as with
431 * init_rcu_head_on_stack(), this function has no effect for
432 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
433 */
434 void destroy_rcu_head_on_stack(struct rcu_head *head)
435 {
436 debug_object_free(head, &rcuhead_debug_descr);
437 }
438 EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
439
440 struct debug_obj_descr rcuhead_debug_descr = {
441 .name = "rcu_head",
442 .fixup_activate = rcuhead_fixup_activate,
443 };
444 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
445 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
446
447 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
448 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
449 unsigned long secs,
450 unsigned long c_old, unsigned long c)
451 {
452 trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
453 }
454 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
455 #else
456 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
457 do { } while (0)
458 #endif
459
460 #ifdef CONFIG_RCU_STALL_COMMON
461
462 #ifdef CONFIG_PROVE_RCU
463 #define RCU_STALL_DELAY_DELTA (5 * HZ)
464 #else
465 #define RCU_STALL_DELAY_DELTA 0
466 #endif
467
468 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
469 static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
470
471 module_param(rcu_cpu_stall_suppress, int, 0644);
472 module_param(rcu_cpu_stall_timeout, int, 0644);
473
474 int rcu_jiffies_till_stall_check(void)
475 {
476 int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
477
478 /*
479 * Limit check must be consistent with the Kconfig limits
480 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
481 */
482 if (till_stall_check < 3) {
483 WRITE_ONCE(rcu_cpu_stall_timeout, 3);
484 till_stall_check = 3;
485 } else if (till_stall_check > 300) {
486 WRITE_ONCE(rcu_cpu_stall_timeout, 300);
487 till_stall_check = 300;
488 }
489 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
490 }
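
/*
 * Worked example: with the usual Kconfig default of
 * CONFIG_RCU_CPU_STALL_TIMEOUT=21 and HZ=1000, this returns
 * 21 * 1000 = 21000 jiffies, plus 5 * HZ when CONFIG_PROVE_RCU adds
 * RCU_STALL_DELAY_DELTA.  Out-of-range runtime settings of
 * rcu_cpu_stall_timeout are clamped to the [3, 300] second range above.
 */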
491
492 void rcu_sysrq_start(void)
493 {
494 if (!rcu_cpu_stall_suppress)
495 rcu_cpu_stall_suppress = 2;
496 }
497
498 void rcu_sysrq_end(void)
499 {
500 if (rcu_cpu_stall_suppress == 2)
501 rcu_cpu_stall_suppress = 0;
502 }
503
504 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
505 {
506 rcu_cpu_stall_suppress = 1;
507 return NOTIFY_DONE;
508 }
509
510 static struct notifier_block rcu_panic_block = {
511 .notifier_call = rcu_panic,
512 };
513
514 static int __init check_cpu_stall_init(void)
515 {
516 atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
517 return 0;
518 }
519 early_initcall(check_cpu_stall_init);
520
521 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
522
523 #ifdef CONFIG_TASKS_RCU
524
525 /*
526 * Simple variant of RCU whose quiescent states are voluntary context switch,
527 * user-space execution, and idle. As such, grace periods can take one good
528 * long time. There are no read-side primitives similar to rcu_read_lock()
529 * and rcu_read_unlock() because this implementation is intended to get
530 * the system into a safe state for some of the manipulations involved in
531 * tracing and the like. Finally, this implementation does not support
532 * high call_rcu_tasks() rates from multiple CPUs. If this is required,
533 * per-CPU callback lists will be needed.
534 */
535
536 /* Global list of callbacks and associated lock. */
537 static struct rcu_head *rcu_tasks_cbs_head;
538 static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
539 static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
540 static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
541
542 /* Track exiting tasks in order to allow them to be waited for. */
543 DEFINE_SRCU(tasks_rcu_exit_srcu);
544
545 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
546 static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
547 module_param(rcu_task_stall_timeout, int, 0644);
548
549 static void rcu_spawn_tasks_kthread(void);
550
551 /*
552 * Post an RCU-tasks callback. First call must be from process context
553 * after the scheduler is fully operational.
554 */
555 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
556 {
557 unsigned long flags;
558 bool needwake;
559
560 rhp->next = NULL;
561 rhp->func = func;
562 raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
563 needwake = !rcu_tasks_cbs_head;
564 *rcu_tasks_cbs_tail = rhp;
565 rcu_tasks_cbs_tail = &rhp->next;
566 raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
567 if (needwake) {
568 rcu_spawn_tasks_kthread();
569 wake_up(&rcu_tasks_cbs_wq);
570 }
571 }
572 EXPORT_SYMBOL_GPL(call_rcu_tasks);
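
/*
 * Illustrative call_rcu_tasks() usage (hypothetical structure and
 * callback): embed an rcu_head in the object and free it only after no
 * task can still be preempted inside a code region that references it.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu_tasks(&old_foo->rh, foo_free_cb);
 */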
573
574 /**
575 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
576 *
577 * Control will return to the caller some time after a full rcu-tasks
578 * grace period has elapsed, in other words after all currently
579 * executing rcu-tasks read-side critical sections have completed. These
580 * read-side critical sections are delimited by calls to schedule(),
581 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
582 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
583 *
584 * This is a very specialized primitive, intended only for a few uses in
585 * tracing and other situations requiring manipulation of function
586 * preambles and profiling hooks. The synchronize_rcu_tasks() function
587 * is not (yet) intended for heavy use from multiple CPUs.
588 *
589 * Note that this guarantee implies further memory-ordering guarantees.
590 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
591 * each CPU is guaranteed to have executed a full memory barrier since the
592 * end of its last RCU-tasks read-side critical section whose beginning
593 * preceded the call to synchronize_rcu_tasks(). In addition, each CPU
594 * having an RCU-tasks read-side critical section that extends beyond
595 * the return from synchronize_rcu_tasks() is guaranteed to have executed
596 * a full memory barrier after the beginning of synchronize_rcu_tasks()
597 * and before the beginning of that RCU-tasks read-side critical section.
598 * Note that these guarantees include CPUs that are offline, idle, or
599 * executing in user mode, as well as CPUs that are executing in the kernel.
600 *
601 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
602 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
603 * to have executed a full memory barrier during the execution of
604 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
605 * (but again only if the system has more than one CPU).
606 */
607 void synchronize_rcu_tasks(void)
608 {
609 /* Complain if the scheduler has not started. */
610 RCU_LOCKDEP_WARN(!rcu_scheduler_active,
611 "synchronize_rcu_tasks called too soon");
612
613 /* Wait for the grace period. */
614 wait_rcu_gp(call_rcu_tasks);
615 }
616 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
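
/*
 * Illustrative synchronous use (hypothetical tracing-style caller): once
 * a trampoline has been unlinked so that no new tasks can enter it, wait
 * for tasks that might still be executing in it to pass through a
 * voluntary context switch, user-space execution, or idle, and only then
 * free it.
 *
 *	unlink_trampoline(tramp);	hypothetical helper
 *	synchronize_rcu_tasks();	wait for an RCU-tasks grace period
 *	free_trampoline(tramp);		hypothetical helper
 */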
617
618 /**
619 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
620 *
621 * Although the current implementation is guaranteed to wait, it is not
622 * obligated to, for example, if there are no pending callbacks.
623 */
624 void rcu_barrier_tasks(void)
625 {
626 /* There is only one callback queue, so this is easy. ;-) */
627 synchronize_rcu_tasks();
628 }
629 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
630
631 /* See if tasks are still holding out, complain if so. */
632 static void check_holdout_task(struct task_struct *t,
633 bool needreport, bool *firstreport)
634 {
635 int cpu;
636
637 if (!READ_ONCE(t->rcu_tasks_holdout) ||
638 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
639 !READ_ONCE(t->on_rq) ||
640 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
641 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
642 WRITE_ONCE(t->rcu_tasks_holdout, false);
643 list_del_init(&t->rcu_tasks_holdout_list);
644 put_task_struct(t);
645 return;
646 }
647 if (!needreport)
648 return;
649 if (*firstreport) {
650 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
651 *firstreport = false;
652 }
653 cpu = task_cpu(t);
654 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
655 t, ".I"[is_idle_task(t)],
656 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
657 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
658 t->rcu_tasks_idle_cpu, cpu);
659 sched_show_task(t);
660 }
661
662 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
663 static int __noreturn rcu_tasks_kthread(void *arg)
664 {
665 unsigned long flags;
666 struct task_struct *g, *t;
667 unsigned long lastreport;
668 struct rcu_head *list;
669 struct rcu_head *next;
670 LIST_HEAD(rcu_tasks_holdouts);
671
672 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
673 housekeeping_affine(current);
674
675 /*
676 * Each pass through the following loop makes one check for
677 * newly arrived callbacks, and, if there are some, waits for
678 * one RCU-tasks grace period and then invokes the callbacks.
679 * This loop is terminated by the system going down. ;-)
680 */
681 for (;;) {
682
683 /* Pick up any new callbacks. */
684 raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
685 list = rcu_tasks_cbs_head;
686 rcu_tasks_cbs_head = NULL;
687 rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
688 raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
689
690 /* If there were none, wait a bit and start over. */
691 if (!list) {
692 wait_event_interruptible(rcu_tasks_cbs_wq,
693 rcu_tasks_cbs_head);
694 if (!rcu_tasks_cbs_head) {
695 WARN_ON(signal_pending(current));
696 schedule_timeout_interruptible(HZ/10);
697 }
698 continue;
699 }
700
701 /*
702 * Wait for all pre-existing t->on_rq and t->nvcsw
703 * transitions to complete. Invoking synchronize_sched()
704 * suffices because all these transitions occur with
705 * interrupts disabled. Without this synchronize_sched(),
706 * a read-side critical section that started before the
707 * grace period might be incorrectly seen as having started
708 * after the grace period.
709 *
710 * This synchronize_sched() also dispenses with the
711 * need for a memory barrier on the first store to
712 * ->rcu_tasks_holdout, as it forces the store to happen
713 * after the beginning of the grace period.
714 */
715 synchronize_sched();
716
717 /*
718 * There were callbacks, so we need to wait for an
719 * RCU-tasks grace period. Start off by scanning
720 * the task list for tasks that are not already
721 * voluntarily blocked. Mark these tasks and make
722 * a list of them in rcu_tasks_holdouts.
723 */
724 rcu_read_lock();
725 for_each_process_thread(g, t) {
726 if (t != current && READ_ONCE(t->on_rq) &&
727 !is_idle_task(t)) {
728 get_task_struct(t);
729 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
730 WRITE_ONCE(t->rcu_tasks_holdout, true);
731 list_add(&t->rcu_tasks_holdout_list,
732 &rcu_tasks_holdouts);
733 }
734 }
735 rcu_read_unlock();
736
737 /*
738 * Wait for tasks that are in the process of exiting.
739 * This does only part of the job, ensuring that all
740 * tasks that were previously exiting reach the point
741 * where they have disabled preemption, allowing the
742 * later synchronize_sched() to finish the job.
743 */
744 synchronize_srcu(&tasks_rcu_exit_srcu);
745
746 /*
747 * Each pass through the following loop scans the list
748 * of holdout tasks, removing any that are no longer
749 * holdouts. When the list is empty, we are done.
750 */
751 lastreport = jiffies;
752 while (!list_empty(&rcu_tasks_holdouts)) {
753 bool firstreport;
754 bool needreport;
755 int rtst;
756 struct task_struct *t1;
757
758 schedule_timeout_interruptible(HZ);
759 rtst = READ_ONCE(rcu_task_stall_timeout);
760 needreport = rtst > 0 &&
761 time_after(jiffies, lastreport + rtst);
762 if (needreport)
763 lastreport = jiffies;
764 firstreport = true;
765 WARN_ON(signal_pending(current));
766 list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
767 rcu_tasks_holdout_list) {
768 check_holdout_task(t, needreport, &firstreport);
769 cond_resched();
770 }
771 }
772
773 /*
774 * Because ->on_rq and ->nvcsw are not guaranteed
775 * to have full memory barriers prior to them in the
776 * schedule() path, memory reordering on other CPUs could
777 * cause their RCU-tasks read-side critical sections to
778 * extend past the end of the grace period. However,
779 * because these ->nvcsw updates are carried out with
780 * interrupts disabled, we can use synchronize_sched()
781 * to force the needed ordering on all such CPUs.
782 *
783 * This synchronize_sched() also confines all
784 * ->rcu_tasks_holdout accesses to be within the grace
785 * period, avoiding the need for memory barriers for
786 * ->rcu_tasks_holdout accesses.
787 *
788 * In addition, this synchronize_sched() waits for exiting
789 * tasks to complete their final preempt_disable() region
790 * of execution, cleaning up after the synchronize_srcu()
791 * above.
792 */
793 synchronize_sched();
794
795 /* Invoke the callbacks. */
796 while (list) {
797 next = list->next;
798 local_bh_disable();
799 list->func(list);
800 local_bh_enable();
801 list = next;
802 cond_resched();
803 }
804 schedule_timeout_uninterruptible(HZ/10);
805 }
806 }
807
808 /* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
809 static void rcu_spawn_tasks_kthread(void)
810 {
811 static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
812 static struct task_struct *rcu_tasks_kthread_ptr;
813 struct task_struct *t;
814
815 if (READ_ONCE(rcu_tasks_kthread_ptr)) {
816 smp_mb(); /* Ensure caller sees full kthread. */
817 return;
818 }
819 mutex_lock(&rcu_tasks_kthread_mutex);
820 if (rcu_tasks_kthread_ptr) {
821 mutex_unlock(&rcu_tasks_kthread_mutex);
822 return;
823 }
824 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
825 BUG_ON(IS_ERR(t));
826 smp_mb(); /* Ensure others see full kthread. */
827 WRITE_ONCE(rcu_tasks_kthread_ptr, t);
828 mutex_unlock(&rcu_tasks_kthread_mutex);
829 }
830
831 #endif /* #ifdef CONFIG_TASKS_RCU */
832
833 #ifdef CONFIG_PROVE_RCU
834
835 /*
836 * Early boot self test parameters, one for each flavor
837 */
838 static bool rcu_self_test;
839 static bool rcu_self_test_bh;
840 static bool rcu_self_test_sched;
841
842 module_param(rcu_self_test, bool, 0444);
843 module_param(rcu_self_test_bh, bool, 0444);
844 module_param(rcu_self_test_sched, bool, 0444);
845
846 static int rcu_self_test_counter;
847
848 static void test_callback(struct rcu_head *r)
849 {
850 rcu_self_test_counter++;
851 pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
852 }
853
854 static void early_boot_test_call_rcu(void)
855 {
856 static struct rcu_head head;
857
858 call_rcu(&head, test_callback);
859 }
860
861 static void early_boot_test_call_rcu_bh(void)
862 {
863 static struct rcu_head head;
864
865 call_rcu_bh(&head, test_callback);
866 }
867
868 static void early_boot_test_call_rcu_sched(void)
869 {
870 static struct rcu_head head;
871
872 call_rcu_sched(&head, test_callback);
873 }
874
875 void rcu_early_boot_tests(void)
876 {
877 pr_info("Running RCU self tests\n");
878
879 if (rcu_self_test)
880 early_boot_test_call_rcu();
881 if (rcu_self_test_bh)
882 early_boot_test_call_rcu_bh();
883 if (rcu_self_test_sched)
884 early_boot_test_call_rcu_sched();
885 }
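
/*
 * The self tests above are requested from the boot command line, for
 * example (all three flavors):
 *
 *	rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_bh=1
 *	rcupdate.rcu_self_test_sched=1
 *
 * rcu_verify_early_boot_tests() below then checks at late_initcall()
 * time that each requested callback has actually run.
 */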
886
887 static int rcu_verify_early_boot_tests(void)
888 {
889 int ret = 0;
890 int early_boot_test_counter = 0;
891
892 if (rcu_self_test) {
893 early_boot_test_counter++;
894 rcu_barrier();
895 }
896 if (rcu_self_test_bh) {
897 early_boot_test_counter++;
898 rcu_barrier_bh();
899 }
900 if (rcu_self_test_sched) {
901 early_boot_test_counter++;
902 rcu_barrier_sched();
903 }
904
905 if (rcu_self_test_counter != early_boot_test_counter) {
906 WARN_ON(1);
907 ret = -1;
908 }
909
910 return ret;
911 }
912 late_initcall(rcu_verify_early_boot_tests);
913 #else
914 void rcu_early_boot_tests(void) {}
915 #endif /* CONFIG_PROVE_RCU */