/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *          http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

MODULE_ALIAS("rcupdate");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

module_param(rcu_expedited, int, 0);

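/*
 * Illustrative note (not in the original file): because of the
 * MODULE_PARAM_PREFIX above, this parameter is set under the "rcupdate."
 * prefix even when this code is built in, e.g. on the kernel command
 * line:
 *
 *      rcupdate.rcu_expedited=1
 */
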
#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting != 1) {
                --t->rcu_read_lock_nesting;
        } else {
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
                t->rcu_read_lock_nesting = 0;
        }
#ifdef CONFIG_PROVE_LOCKING
        {
                int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
        }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */

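/*
 * Usage sketch (illustration only, not part of the original file): under
 * CONFIG_PREEMPT_RCU, rcu_read_lock() and rcu_read_unlock() map onto the
 * two functions above, e.g. with a hypothetical RCU-protected pointer gp:
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(gp);
 *      if (p)
 *              do_something_with(p);
 *      rcu_read_unlock();
 */
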
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active && debug_locks &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU case and the !CONFIG_PROVE_RCU case.  Note that if
 * someone uses rcu_read_lock_bh(), but then later enables BH, lockdep
 * (if enabled) will show the situation.  This is useful for debug checks
 * in functions that require that they be called within an RCU read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (!rcu_is_watching())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

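/*
 * Debug-check sketch (illustration only): a function that must be called
 * within an RCU-bh read-side critical section can use the helper above,
 * here with a hypothetical foo_ptr:
 *
 *      static struct foo *get_foo_bh(void)
 *      {
 *              WARN_ON_ONCE(!rcu_read_lock_bh_held());
 *              return rcu_dereference_bh(foo_ptr);
 *      }
 */
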
struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        crf(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);

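/*
 * Illustration (not part of the original file): each RCU flavor can build
 * its synchronous grace-period wait by passing its own callback-posting
 * function, as synchronize_rcu_tasks() below does:
 *
 *      wait_rcu_gp(call_rcu_tasks);    (from synchronize_rcu_tasks())
 *
 * and, roughly, wait_rcu_gp(call_rcu), wait_rcu_gp(call_rcu_bh), and
 * wait_rcu_gp(call_rcu_sched) for the other flavors.
 */
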
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct rcu_head *head = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup.  We just make sure that it is
                 * tracked in the object tracker.
                 */
                debug_object_init(head, &rcuhead_debug_descr);
                debug_object_activate(head, &rcuhead_debug_descr);
                return 0;
        default:
                return 1;
        }
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be cleaned up
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

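/*
 * Lifetime sketch (illustration only): wait_rcu_gp() above is this file's
 * user of the pair; any other on-stack rcu_head follows the same pattern,
 * with some_callback being hypothetical:
 *
 *      struct rcu_head rh;
 *
 *      init_rcu_head_on_stack(&rh);
 *      call_rcu(&rh, some_callback);
 *      (wait for some_callback to be invoked before returning)
 *      destroy_rcu_head_on_stack(&rh);
 */
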
struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .fixup_activate = rcuhead_fixup_activate,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA          (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA          0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

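/*
 * Worked example (not in the original file): with HZ=1000, a
 * rcu_cpu_stall_timeout of 21 seconds, and CONFIG_PROVE_RCU=y, the
 * function above returns 21 * 1000 + 5 * 1000 = 26000 jiffies;
 * without CONFIG_PROVE_RCU it returns 21000 jiffies.
 */
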
void rcu_sysrq_start(void)
{
        if (!rcu_cpu_stall_suppress)
                rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
        if (rcu_cpu_stall_suppress == 2)
                rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);

/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
{
        unsigned long flags;
        bool needwake;

        rhp->next = NULL;
        rhp->func = func;
        raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
        needwake = !rcu_tasks_cbs_head;
        *rcu_tasks_cbs_tail = rhp;
        rcu_tasks_cbs_tail = &rhp->next;
        raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
        if (needwake) {
                rcu_spawn_tasks_kthread();
                wake_up(&rcu_tasks_cbs_wq);
        }
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

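/*
 * Usage sketch (illustration only): a hypothetical tracer that frees a
 * trampoline only after no task can still be executing inside it:
 *
 *      struct tramp {                  (hypothetical structure)
 *              struct rcu_head rh;
 *              ...
 *      };
 *
 *      static void tramp_free_cb(struct rcu_head *rhp)
 *      {
 *              kfree(container_of(rhp, struct tramp, rh));
 *      }
 *
 *      call_rcu_tasks(&tp->rh, tramp_free_cb);
 */
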
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
        /* Complain if the scheduler has not started. */
        rcu_lockdep_assert(rcu_scheduler_active,
                           "synchronize_rcu_tasks called too soon");

        /* Wait for the grace period. */
        wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
        /* There is only one callback queue, so this is easy.  ;-) */
        synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

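/*
 * Illustration (not part of the original file): code that can sleep may
 * instead use the synchronous form, e.g. when tearing down hypothetical
 * trampolines:
 *
 *      unregister_my_trampolines();    (hypothetical)
 *      synchronize_rcu_tasks();        (no task still runs in them)
 *      free_my_trampolines();          (hypothetical)
 */
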
/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
                               bool needreport, bool *firstreport)
{
        if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
            t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
            !ACCESS_ONCE(t->on_rq)) {
                ACCESS_ONCE(t->rcu_tasks_holdout) = false;
                list_del_rcu(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
        }
        if (!needreport)
                return;
        if (*firstreport) {
                pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
                *firstreport = false;
        }
        sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
        unsigned long flags;
        struct task_struct *g, *t;
        unsigned long lastreport;
        struct rcu_head *list;
        struct rcu_head *next;
        LIST_HEAD(rcu_tasks_holdouts);

        /* FIXME: Add housekeeping affinity. */

        /*
         * Each pass through the following loop makes one check for
         * newly arrived callbacks, and, if there are some, waits for
         * one RCU-tasks grace period and then invokes the callbacks.
         * This loop is terminated by the system going down.  ;-)
         */
        for (;;) {

                /* Pick up any new callbacks. */
                raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
                list = rcu_tasks_cbs_head;
                rcu_tasks_cbs_head = NULL;
                rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
                raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

                /* If there were none, wait a bit and start over. */
                if (!list) {
                        wait_event_interruptible(rcu_tasks_cbs_wq,
                                                 rcu_tasks_cbs_head);
                        if (!rcu_tasks_cbs_head) {
                                WARN_ON(signal_pending(current));
                                schedule_timeout_interruptible(HZ/10);
                        }
                        continue;
                }

                /*
                 * Wait for all pre-existing t->on_rq and t->nvcsw
                 * transitions to complete.  Invoking synchronize_sched()
                 * suffices because all these transitions occur with
                 * interrupts disabled.  Without this synchronize_sched(),
                 * a read-side critical section that started before the
                 * grace period might be incorrectly seen as having started
                 * after the grace period.
                 *
                 * This synchronize_sched() also dispenses with the
                 * need for a memory barrier on the first store to
                 * ->rcu_tasks_holdout, as it forces the store to happen
                 * after the beginning of the grace period.
                 */
                synchronize_sched();

                /*
                 * There were callbacks, so we need to wait for an
                 * RCU-tasks grace period.  Start off by scanning
                 * the task list for tasks that are not already
                 * voluntarily blocked.  Mark these tasks and make
                 * a list of them in rcu_tasks_holdouts.
                 */
                rcu_read_lock();
                for_each_process_thread(g, t) {
                        if (t != current && ACCESS_ONCE(t->on_rq) &&
                            !is_idle_task(t)) {
                                get_task_struct(t);
                                t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
                                ACCESS_ONCE(t->rcu_tasks_holdout) = true;
                                list_add(&t->rcu_tasks_holdout_list,
                                         &rcu_tasks_holdouts);
                        }
                }
                rcu_read_unlock();

                /*
                 * Wait for tasks that are in the process of exiting.
                 * This does only part of the job, ensuring that all
                 * tasks that were previously exiting reach the point
                 * where they have disabled preemption, allowing the
                 * later synchronize_sched() to finish the job.
                 */
                synchronize_srcu(&tasks_rcu_exit_srcu);

                /*
                 * Each pass through the following loop scans the list
                 * of holdout tasks, removing any that are no longer
                 * holdouts.  When the list is empty, we are done.
                 */
                lastreport = jiffies;
                while (!list_empty(&rcu_tasks_holdouts)) {
                        bool firstreport;
                        bool needreport;
                        int rtst;

                        schedule_timeout_interruptible(HZ);
                        rtst = ACCESS_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
                        if (needreport)
                                lastreport = jiffies;
                        firstreport = true;
                        WARN_ON(signal_pending(current));
                        rcu_read_lock();
                        list_for_each_entry_rcu(t, &rcu_tasks_holdouts,
                                                rcu_tasks_holdout_list)
                                check_holdout_task(t, needreport, &firstreport);
                        rcu_read_unlock();
                }

                /*
                 * Because ->on_rq and ->nvcsw are not guaranteed to have
                 * full memory barriers prior to them in the schedule()
                 * path, memory reordering on other CPUs could cause their
                 * RCU-tasks read-side critical sections to extend past the
                 * end of the grace period.  However, because these ->nvcsw
                 * updates are carried out with interrupts disabled, we can
                 * use synchronize_sched() to force the needed ordering on
                 * all such CPUs.
                 *
                 * This synchronize_sched() also confines all
                 * ->rcu_tasks_holdout accesses to be within the grace
                 * period, avoiding the need for memory barriers for
                 * ->rcu_tasks_holdout accesses.
                 *
                 * In addition, this synchronize_sched() waits for exiting
                 * tasks to complete their final preempt_disable() region
                 * of execution, cleaning up after the synchronize_srcu()
                 * above.
                 */
                synchronize_sched();

                /* Invoke the callbacks. */
                while (list) {
                        next = list->next;
                        local_bh_disable();
                        list->func(list);
                        local_bh_enable();
                        list = next;
                        cond_resched();
                }
                schedule_timeout_uninterruptible(HZ/10);
        }
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
        static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
        static struct task_struct *rcu_tasks_kthread_ptr;
        struct task_struct *t;

        if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) {
                smp_mb(); /* Ensure caller sees full kthread. */
                return;
        }
        mutex_lock(&rcu_tasks_kthread_mutex);
        if (rcu_tasks_kthread_ptr) {
                mutex_unlock(&rcu_tasks_kthread_mutex);
                return;
        }
        t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
        BUG_ON(IS_ERR(t));
        smp_mb(); /* Ensure others see full kthread. */
        ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
        mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */