/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *         http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (ie: that we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode.  This way we can
 * notice an extended quiescent state to other CPUs that started a grace
 * period.  Otherwise we would delay any grace period as long as we run in
 * the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
        int lockdep_opinion = 0;

        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (!rcu_is_watching())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        if (debug_locks)
                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
        return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif
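
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * must run inside an RCU-sched read-side critical section can use the
 * predicate above in a lockdep assertion before dereferencing a protected
 * pointer.  The names my_consume(), my_global_foo, and do_something_with()
 * below are hypothetical.
 *
 *      static void my_consume(void)
 *      {
 *              struct foo *p;
 *
 *              RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *                               "my_consume() called outside rcu_read_lock_sched()");
 *              p = rcu_dereference_sched(my_global_foo);
 *              do_something_with(p);
 *      }
 */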

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.
 */
bool rcu_gp_is_normal(void)
{
        return READ_ONCE(rcu_normal);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting =
        ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable into account as well as the rcu_expedite_gp()
 * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
 * returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
        return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
        atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
        atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
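
/*
 * Illustrative usage sketch (not part of the original file): code that
 * temporarily needs short grace-period latencies can bracket the region
 * with the expedite/unexpedite pair, so that the intervening
 * synchronize_rcu() behaves like synchronize_rcu_expedited().  The function
 * name my_fast_reconfig() is hypothetical.
 *
 *      static void my_fast_reconfig(void)
 *      {
 *              rcu_expedite_gp();
 *              synchronize_rcu();
 *              rcu_unexpedite_gp();
 *      }
 */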

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
        if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
                rcu_unexpedite_gp();
        if (rcu_normal_after_boot)
                WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting != 1) {
                --t->rcu_read_lock_nesting;
        } else {
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
                t->rcu_read_lock_nesting = 0;
        }
#ifdef CONFIG_PROVE_LOCKING
        {
                int rrln = READ_ONCE(t->rcu_read_lock_nesting);

                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
        }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */
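
/*
 * Illustrative reader sketch (not part of the original file): the two
 * primitives above back the rcu_read_lock()/rcu_read_unlock() pair used by
 * a typical preemptible-RCU reader.  The structure and pointer names
 * (struct foo, my_foo_ptr, do_something_with()) are hypothetical.
 *
 *      struct foo *p;
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(my_foo_ptr);
 *      if (p)
 *              do_something_with(p->field);
 *      rcu_read_unlock();
 */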

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active && debug_locks &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (!rcu_is_watching())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
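
/*
 * Illustrative usage sketch (not part of the original file): lockdep-based
 * accessors such as rcu_dereference_check() typically combine the predicate
 * above with a lock check, so that either an RCU reader or the update-side
 * lock satisfies the debug condition.  The names gp and my_lock below are
 * hypothetical.
 *
 *      p = rcu_dereference_check(gp, rcu_read_lock_held() ||
 *                                    lockdep_is_held(&my_lock));
 */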

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU case and the !CONFIG_PROVE_RCU case.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (!rcu_is_watching())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
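
/*
 * Illustrative usage sketch (not part of the original file): a softirq-path
 * accessor can use the RCU-bh flavor of the checked dereference in the same
 * way as the rcu_read_lock_held() example above.  The names bh_gp and
 * my_bh_lock are hypothetical.
 *
 *      p = rcu_dereference_bh_check(bh_gp, lockdep_is_held(&my_bh_lock));
 */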

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
                   struct rcu_synchronize *rs_array)
{
        int i;

        /* Initialize and register callbacks for each flavor specified. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu ||
                     crcu_array[i] == call_rcu_bh)) {
                        might_sleep();
                        continue;
                }
                init_rcu_head_on_stack(&rs_array[i].head);
                init_completion(&rs_array[i].completion);
                (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
        }

        /* Wait for all callbacks to be invoked. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu ||
                     crcu_array[i] == call_rcu_bh))
                        continue;
                wait_for_completion(&rs_array[i].completion);
                destroy_rcu_head_on_stack(&rs_array[i].head);
        }
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
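
/*
 * Illustrative usage sketch (not part of the original file): callers normally
 * reach __wait_rcu_gp() through the wait_rcu_gp()/_wait_rcu_gp() macros in
 * rcupdate.h, which build the arrays from the call_rcu()-style functions they
 * are passed, roughly along these lines:
 *
 *      call_rcu_func_t crcu_array[] = { call_rcu_tasks };
 *      struct rcu_synchronize rs_array[ARRAY_SIZE(crcu_array)];
 *
 *      __wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), ARRAY_SIZE(crcu_array),
 *                    crcu_array, rs_array);
 *
 * This is how synchronize_rcu_tasks() later in this file waits for an
 * RCU-tasks grace period via wait_rcu_gp(call_rcu_tasks).
 */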

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
        return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
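
/*
 * Illustrative usage sketch (not part of the original file): a function that
 * blocks for a grace period using an on-stack rcu_head brackets the callback
 * with the two helpers above, just as __wait_rcu_gp() does:
 *
 *      struct rcu_synchronize rcu;
 *
 *      init_rcu_head_on_stack(&rcu.head);
 *      init_completion(&rcu.completion);
 *      call_rcu(&rcu.head, wakeme_after_rcu);
 *      wait_for_completion(&rcu.completion);
 *      destroy_rcu_head_on_stack(&rcu.head);
 */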

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA        (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA        0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 3);
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 300);
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
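
/*
 * Worked example (not part of the original file): assuming the usual Kconfig
 * default of a 21-second stall timeout and HZ=1000, the function above
 * returns 21 * 1000 = 21000 jiffies, plus an extra 5 * HZ = 5000 jiffies of
 * slack when CONFIG_PROVE_RCU is enabled.  Out-of-range module-parameter
 * values are first clamped to the [3, 300] second range.
 */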

void rcu_sysrq_start(void)
{
        if (!rcu_cpu_stall_suppress)
                rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
        if (rcu_cpu_stall_suppress == 2)
                rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;

/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
        unsigned long flags;
        bool needwake;
        bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

        rhp->next = NULL;
        rhp->func = func;
        raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
        needwake = !rcu_tasks_cbs_head;
        *rcu_tasks_cbs_tail = rhp;
        rcu_tasks_cbs_tail = &rhp->next;
        raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
        /* We can't create the thread unless interrupts are enabled. */
        if ((needwake && havetask) ||
            (!havetask && !irqs_disabled_flags(flags))) {
                rcu_spawn_tasks_kthread();
                wake_up(&rcu_tasks_cbs_wq);
        }
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
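
/*
 * Illustrative usage sketch (not part of the original file): a tracing-style
 * user might queue a callback to free a dynamically allocated trampoline once
 * every task has passed through a voluntary context switch, user-space
 * execution, or idle.  The structure and function names (struct my_tramp,
 * my_tramp_free_cb) are hypothetical.
 *
 *      static void my_tramp_free_cb(struct rcu_head *rhp)
 *      {
 *              struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *              kfree(tp);
 *      }
 *
 *      call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */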

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
        /* Complain if the scheduler has not started. */
        RCU_LOCKDEP_WARN(!rcu_scheduler_active,
                         "synchronize_rcu_tasks called too soon");

        /* Wait for the grace period. */
        wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
        /* There is only one callback queue, so this is easy.  ;-) */
        synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
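
/*
 * Illustrative usage sketch (not part of the original file): the synchronous
 * form suits teardown paths that must unpublish a tracing trampoline, wait
 * until no task can still be executing in it, and only then free it.  The
 * names unregister_my_trampoline() and tp are hypothetical.
 *
 *      unregister_my_trampoline(tp);
 *      synchronize_rcu_tasks();
 *      kfree(tp);
 */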

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
                               bool needreport, bool *firstreport)
{
        int cpu;

        if (!READ_ONCE(t->rcu_tasks_holdout) ||
            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
            !READ_ONCE(t->on_rq) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                WRITE_ONCE(t->rcu_tasks_holdout, false);
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
        }
        if (!needreport)
                return;
        if (*firstreport) {
                pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
                *firstreport = false;
        }
        cpu = task_cpu(t);
        pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
                 t, ".I"[is_idle_task(t)],
                 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
                 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                 t->rcu_tasks_idle_cpu, cpu);
        sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
        unsigned long flags;
        struct task_struct *g, *t;
        unsigned long lastreport;
        struct rcu_head *list;
        struct rcu_head *next;
        LIST_HEAD(rcu_tasks_holdouts);

        /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
        housekeeping_affine(current);

        /*
         * Each pass through the following loop makes one check for
         * newly arrived callbacks, and, if there are some, waits for
         * one RCU-tasks grace period and then invokes the callbacks.
         * This loop is terminated by the system going down.  ;-)
         */
        for (;;) {

                /* Pick up any new callbacks. */
                raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
                list = rcu_tasks_cbs_head;
                rcu_tasks_cbs_head = NULL;
                rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
                raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

                /* If there were none, wait a bit and start over. */
                if (!list) {
                        wait_event_interruptible(rcu_tasks_cbs_wq,
                                                 rcu_tasks_cbs_head);
                        if (!rcu_tasks_cbs_head) {
                                WARN_ON(signal_pending(current));
                                schedule_timeout_interruptible(HZ/10);
                        }
                        continue;
                }

                /*
                 * Wait for all pre-existing t->on_rq and t->nvcsw
                 * transitions to complete.  Invoking synchronize_sched()
                 * suffices because all these transitions occur with
                 * interrupts disabled.  Without this synchronize_sched(),
                 * a read-side critical section that started before the
                 * grace period might be incorrectly seen as having started
                 * after the grace period.
                 *
                 * This synchronize_sched() also dispenses with the
                 * need for a memory barrier on the first store to
                 * ->rcu_tasks_holdout, as it forces the store to happen
                 * after the beginning of the grace period.
                 */
                synchronize_sched();

                /*
                 * There were callbacks, so we need to wait for an
                 * RCU-tasks grace period.  Start off by scanning
                 * the task list for tasks that are not already
                 * voluntarily blocked.  Mark these tasks and make
                 * a list of them in rcu_tasks_holdouts.
                 */
                rcu_read_lock();
                for_each_process_thread(g, t) {
                        if (t != current && READ_ONCE(t->on_rq) &&
                            !is_idle_task(t)) {
                                get_task_struct(t);
                                t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                                WRITE_ONCE(t->rcu_tasks_holdout, true);
                                list_add(&t->rcu_tasks_holdout_list,
                                         &rcu_tasks_holdouts);
                        }
                }
                rcu_read_unlock();

                /*
                 * Wait for tasks that are in the process of exiting.
                 * This does only part of the job, ensuring that all
                 * tasks that were previously exiting reach the point
                 * where they have disabled preemption, allowing the
                 * later synchronize_sched() to finish the job.
                 */
                synchronize_srcu(&tasks_rcu_exit_srcu);

                /*
                 * Each pass through the following loop scans the list
                 * of holdout tasks, removing any that are no longer
                 * holdouts.  When the list is empty, we are done.
                 */
                lastreport = jiffies;
                while (!list_empty(&rcu_tasks_holdouts)) {
                        bool firstreport;
                        bool needreport;
                        int rtst;
                        struct task_struct *t1;

                        schedule_timeout_interruptible(HZ);
                        rtst = READ_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
                        if (needreport)
                                lastreport = jiffies;
                        firstreport = true;
                        WARN_ON(signal_pending(current));
                        list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
                                                 rcu_tasks_holdout_list) {
                                check_holdout_task(t, needreport, &firstreport);
                                cond_resched();
                        }
                }

                /*
                 * Because ->on_rq and ->nvcsw are not guaranteed
                 * to have full memory barriers prior to them in the
                 * schedule() path, memory reordering on other CPUs could
                 * cause their RCU-tasks read-side critical sections to
                 * extend past the end of the grace period.  However,
                 * because these ->nvcsw updates are carried out with
                 * interrupts disabled, we can use synchronize_sched()
                 * to force the needed ordering on all such CPUs.
                 *
                 * This synchronize_sched() also confines all
                 * ->rcu_tasks_holdout accesses to be within the grace
                 * period, avoiding the need for memory barriers for
                 * ->rcu_tasks_holdout accesses.
                 *
                 * In addition, this synchronize_sched() waits for exiting
                 * tasks to complete their final preempt_disable() region
                 * of execution, cleaning up after the synchronize_srcu()
                 * above.
                 */
                synchronize_sched();

                /* Invoke the callbacks. */
                while (list) {
                        next = list->next;
                        local_bh_disable();
                        list->func(list);
                        local_bh_enable();
                        list = next;
                        cond_resched();
                }
                schedule_timeout_uninterruptible(HZ/10);
        }
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
        static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
        struct task_struct *t;

        if (READ_ONCE(rcu_tasks_kthread_ptr)) {
                smp_mb(); /* Ensure caller sees full kthread. */
                return;
        }
        mutex_lock(&rcu_tasks_kthread_mutex);
        if (rcu_tasks_kthread_ptr) {
                mutex_unlock(&rcu_tasks_kthread_mutex);
                return;
        }
        t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
        BUG_ON(IS_ERR(t));
        smp_mb(); /* Ensure others see full kthread. */
        WRITE_ONCE(rcu_tasks_kthread_ptr, t);
        mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
        rcu_self_test_counter++;
        pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
        static struct rcu_head head;

        call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
        static struct rcu_head head;

        call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
        static struct rcu_head head;

        call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
        pr_info("Running RCU self tests\n");

        if (rcu_self_test)
                early_boot_test_call_rcu();
        if (rcu_self_test_bh)
                early_boot_test_call_rcu_bh();
        if (rcu_self_test_sched)
                early_boot_test_call_rcu_sched();
}

static int rcu_verify_early_boot_tests(void)
{
        int ret = 0;
        int early_boot_test_counter = 0;

        if (rcu_self_test) {
                early_boot_test_counter++;
                rcu_barrier();
        }
        if (rcu_self_test_bh) {
                early_boot_test_counter++;
                rcu_barrier_bh();
        }
        if (rcu_self_test_sched) {
                early_boot_test_counter++;
                rcu_barrier_sched();
        }

        if (rcu_self_test_counter != early_boot_test_counter) {
                WARN_ON(1);
                ret = -1;
        }

        return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */