// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *          http://lse.sourceforge.net/locking/rcupdate.html
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */

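/*
 * Note on usage: because of the MODULE_PARAM_PREFIX defined above, the
 * parameters declared in this file appear on the kernel command line
 * under the "rcupdate." prefix.  For example, a boot line containing
 *
 *      rcupdate.rcu_normal_after_boot=1
 *
 * asks that grace periods revert to normal (non-expedited) behavior once
 * the in-kernel boot sequence ends.  (The value shown is only an
 * illustration; accepted values follow the module_param() types used in
 * this file.)
 */
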
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret: Best guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must be ignored, in which case *ret contains
 * the best guess described below.  Otherwise returns false, in which
 * case *ret tells the caller nothing and the caller should instead
 * consult lockdep.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set *ret to nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (that
 * is, in the section between rcu_idle_enter() and rcu_idle_exit()), then
 * rcu_read_lock_held() sets *ret to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering them to be in an extended quiescent state,
 * so such a CPU is effectively never in an RCU read-side critical section
 * regardless of what RCU primitives it invokes.  This state of affairs is
 * required --- we need to keep an RCU-free window in idle where the CPU may
 * possibly enter into low power mode.  This way, CPUs that have started a
 * grace period can notice our extended quiescent state.  Otherwise we would
 * delay any grace period for as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an RCU read lock held if the current
 * CPU is offline.
 */
static bool rcu_read_lock_held_common(bool *ret)
{
        if (!debug_lockdep_rcu_enabled()) {
                *ret = 1;
                return true;
        }
        if (!rcu_is_watching()) {
                *ret = 0;
                return true;
        }
        if (!rcu_lockdep_current_cpu_online()) {
                *ret = 0;
                return true;
        }
        return false;
}

int rcu_read_lock_sched_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the period of boot between when the
 * first task is spawned and when the rcu_set_runtime_mode()
 * core_initcall() is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
        return READ_ONCE(rcu_normal) &&
               rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
        return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
        atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
        atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);

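/*
 * Usage sketch (illustrative only; example_expedited_window() is a
 * placeholder, not a kernel symbol): a caller that wants fast grace
 * periods for a bounded window simply brackets that window with
 * rcu_expedite_gp()/rcu_unexpedite_gp().  Nesting is handled by the
 * atomic counter above.
 */
static void example_expedited_window(void)
{
        rcu_expedite_gp();      /* Subsequent synchronize_rcu() calls are expedited. */
        /* ... work that must not wait for long grace periods ... */
        synchronize_rcu();      /* Behaves like synchronize_rcu_expedited(). */
        rcu_unexpedite_gp();    /* Normal behavior resumes once nesting drops to zero. */
}
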
/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
        rcu_unexpedite_gp();
        if (rcu_normal_after_boot)
                WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
        if (!IS_ENABLED(CONFIG_PROVE_RCU))
                return;
        synchronize_rcu();
        synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
        rcu_test_sync_prims();
        rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
        rcu_test_sync_prims();
        return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

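/*
 * Usage sketch (illustrative only; the example_* names are placeholders,
 * not kernel symbols): rcu_read_lock_held() is typically consumed through
 * RCU_LOCKDEP_WARN() or rcu_dereference_check() so that lockdep flags
 * callers that touch RCU-protected data outside a read-side critical
 * section.
 */
struct example_data {
        int val;
};
static struct example_data __rcu *example_ptr;

static int example_read_val(void)
{
        struct example_data *p;

        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "example_read_val() must be called under rcu_read_lock()");
        p = rcu_dereference(example_ptr);
        return p ? p->val : -1;
}
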
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

int rcu_read_lock_any_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        if (lock_is_held(&rcu_lock_map) ||
            lock_is_held(&rcu_bh_lock_map) ||
            lock_is_held(&rcu_sched_lock_map))
                return 1;
        return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
                   struct rcu_synchronize *rs_array)
{
        int i;
        int j;

        /* Initialize and register callbacks for each crcu_array element. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu)) {
                        might_sleep();
                        continue;
                }
                init_rcu_head_on_stack(&rs_array[i].head);
                init_completion(&rs_array[i].completion);
                for (j = 0; j < i; j++)
                        if (crcu_array[j] == crcu_array[i])
                                break;
                if (j == i)
                        (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
        }

        /* Wait for all callbacks to be invoked. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu))
                        continue;
                for (j = 0; j < i; j++)
                        if (crcu_array[j] == crcu_array[i])
                                break;
                if (j == i)
                        wait_for_completion(&rs_array[i].completion);
                destroy_rcu_head_on_stack(&rs_array[i].head);
        }
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

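/*
 * Usage sketch (illustrative only; example_wait_one_gp() is a placeholder,
 * not a kernel symbol): a synchronous grace-period wait can be built on
 * __wait_rcu_gp() by passing a one-element array naming the call_rcu()-style
 * function to wait on; the wait_rcu_gp() macro in the rcupdate headers
 * expands to essentially this pattern.
 */
static void example_wait_one_gp(void)
{
        call_rcu_func_t crcu_array[] = { call_rcu };
        struct rcu_synchronize rs_array[ARRAY_SIZE(crcu_array)];

        __wait_rcu_gp(false, ARRAY_SIZE(crcu_array), crcu_array, rs_array);
}
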
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
        return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
        int ret;

        ret = sched_setaffinity(pid, in_mask);
        WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context
 * switch, cond_resched_rcu_qs(), user-space execution, and idle.
 * As such, grace periods can take one good long time.  There are no
 * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
 * because this implementation is intended to get the system into a safe
 * state for some of the manipulations involved in tracing and the like.
 * Finally, this implementation does not support high call_rcu_tasks()
 * rates from multiple CPUs.  If this is required, per-CPU callback lists
 * will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static struct task_struct *rcu_tasks_kthread_ptr;

/**
 * call_rcu_tasks() - Queue an RCU-tasks callback for invocation after a grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
        unsigned long flags;
        bool needwake;

        rhp->next = NULL;
        rhp->func = func;
        raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
        needwake = !rcu_tasks_cbs_head;
        *rcu_tasks_cbs_tail = rhp;
        rcu_tasks_cbs_tail = &rhp->next;
        raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
        /* We can't create the thread unless interrupts are enabled. */
        if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
                wake_up(&rcu_tasks_cbs_wq);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

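/*
 * Usage sketch (illustrative only; the example_tramp names are placeholders,
 * not kernel symbols): a tracing-style user embeds an rcu_head in the object
 * describing a trampoline or patched instruction sequence, and queues its
 * release so that it happens only after every task has passed through a
 * voluntary context switch, idle, or usermode execution.
 */
struct example_tramp {
        struct rcu_head rh;
        /* ... code-patching state ... */
};

static void example_tramp_free(struct rcu_head *rhp)
{
        /* kfree() would require <linux/slab.h>, not otherwise needed here. */
        kfree(container_of(rhp, struct example_tramp, rh));
}

static void example_tramp_release(struct example_tramp *tp)
{
        /* Unlink tp so no new tasks can enter it, then queue the free. */
        call_rcu_tasks(&tp->rh, example_tramp_free);
}
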
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
        /* Complain if the scheduler has not started. */
        RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
                         "synchronize_rcu_tasks called too soon");

        /* Wait for the grace period. */
        wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

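/*
 * Usage sketch (illustrative only; the example_* names are placeholders,
 * not kernel symbols): the synchronous form suits callers that can sleep
 * and want to retire a dynamically installed handler or trampoline in
 * place rather than queueing a callback.
 */
static void (*example_handler_slot)(void);

static void example_retire_handler(void)
{
        WRITE_ONCE(example_handler_slot, NULL); /* No new tasks can enter the old code. */
        synchronize_rcu_tasks();                /* Wait out tasks already inside it. */
        /* The old handler's code or trampoline may now be freed or reused. */
}
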
/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
        /* There is only one callback queue, so this is easy.  ;-) */
        synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
                               bool needreport, bool *firstreport)
{
        int cpu;

        if (!READ_ONCE(t->rcu_tasks_holdout) ||
            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
            !READ_ONCE(t->on_rq) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                WRITE_ONCE(t->rcu_tasks_holdout, false);
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
        }
        rcu_request_urgent_qs_task(t);
        if (!needreport)
                return;
        if (*firstreport) {
                pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
                *firstreport = false;
        }
        cpu = task_cpu(t);
        pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
                 t, ".I"[is_idle_task(t)],
                 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
                 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                 t->rcu_tasks_idle_cpu, cpu);
        sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
        unsigned long flags;
        struct task_struct *g, *t;
        unsigned long lastreport;
        struct rcu_head *list;
        struct rcu_head *next;
        LIST_HEAD(rcu_tasks_holdouts);
        int fract;

        /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
        housekeeping_affine(current, HK_FLAG_RCU);

        /*
         * Each pass through the following loop makes one check for
         * newly arrived callbacks, and, if there are some, waits for
         * one RCU-tasks grace period and then invokes the callbacks.
         * This loop is terminated by the system going down.  ;-)
         */
        for (;;) {

                /* Pick up any new callbacks. */
                raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
                list = rcu_tasks_cbs_head;
                rcu_tasks_cbs_head = NULL;
                rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
                raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

                /* If there were none, wait a bit and start over. */
                if (!list) {
                        wait_event_interruptible(rcu_tasks_cbs_wq,
                                                 rcu_tasks_cbs_head);
                        if (!rcu_tasks_cbs_head) {
                                WARN_ON(signal_pending(current));
                                schedule_timeout_interruptible(HZ/10);
                        }
                        continue;
                }

                /*
                 * Wait for all pre-existing t->on_rq and t->nvcsw
                 * transitions to complete.  Invoking synchronize_rcu()
                 * suffices because all these transitions occur with
                 * interrupts disabled.  Without this synchronize_rcu(),
                 * a read-side critical section that started before the
                 * grace period might be incorrectly seen as having started
                 * after the grace period.
                 *
                 * This synchronize_rcu() also dispenses with the
                 * need for a memory barrier on the first store to
                 * ->rcu_tasks_holdout, as it forces the store to happen
                 * after the beginning of the grace period.
                 */
                synchronize_rcu();

                /*
                 * There were callbacks, so we need to wait for an
                 * RCU-tasks grace period.  Start off by scanning
                 * the task list for tasks that are not already
                 * voluntarily blocked.  Mark these tasks and make
                 * a list of them in rcu_tasks_holdouts.
                 */
                rcu_read_lock();
                for_each_process_thread(g, t) {
                        if (t != current && READ_ONCE(t->on_rq) &&
                            !is_idle_task(t)) {
                                get_task_struct(t);
                                t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                                WRITE_ONCE(t->rcu_tasks_holdout, true);
                                list_add(&t->rcu_tasks_holdout_list,
                                         &rcu_tasks_holdouts);
                        }
                }
                rcu_read_unlock();

                /*
                 * Wait for tasks that are in the process of exiting.
                 * This does only part of the job, ensuring that all
                 * tasks that were previously exiting reach the point
                 * where they have disabled preemption, allowing the
                 * later synchronize_rcu() to finish the job.
                 */
                synchronize_srcu(&tasks_rcu_exit_srcu);

                /*
                 * Each pass through the following loop scans the list
                 * of holdout tasks, removing any that are no longer
                 * holdouts.  When the list is empty, we are done.
                 */
                lastreport = jiffies;

                /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
                fract = 10;

                for (;;) {
                        bool firstreport;
                        bool needreport;
                        int rtst;
                        struct task_struct *t1;

                        if (list_empty(&rcu_tasks_holdouts))
                                break;

                        /* Slowly back off waiting for holdouts. */
                        schedule_timeout_interruptible(HZ/fract);

                        if (fract > 1)
                                fract--;

                        rtst = READ_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
                        if (needreport)
                                lastreport = jiffies;
                        firstreport = true;
                        WARN_ON(signal_pending(current));
                        list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
                                                 rcu_tasks_holdout_list) {
                                check_holdout_task(t, needreport, &firstreport);
                                cond_resched();
                        }
                }

                /*
                 * Because ->on_rq and ->nvcsw are not guaranteed
                 * to have full memory barriers prior to them in the
                 * schedule() path, memory reordering on other CPUs could
                 * cause their RCU-tasks read-side critical sections to
                 * extend past the end of the grace period.  However,
                 * because these ->nvcsw updates are carried out with
                 * interrupts disabled, we can use synchronize_rcu()
                 * to force the needed ordering on all such CPUs.
                 *
                 * This synchronize_rcu() also confines all
                 * ->rcu_tasks_holdout accesses to be within the grace
                 * period, avoiding the need for memory barriers for
                 * ->rcu_tasks_holdout accesses.
                 *
                 * In addition, this synchronize_rcu() waits for exiting
                 * tasks to complete their final preempt_disable() region
                 * of execution, cleaning up after the synchronize_srcu()
                 * above.
                 */
                synchronize_rcu();

                /* Invoke the callbacks. */
                while (list) {
                        next = list->next;
                        local_bh_disable();
                        list->func(list);
                        local_bh_enable();
                        list = next;
                        cond_resched();
                }
                /* Paranoid sleep to keep this from entering a tight loop. */
                schedule_timeout_uninterruptible(HZ/10);
        }
}

/* Spawn rcu_tasks_kthread() at core_initcall() time. */
static int __init rcu_spawn_tasks_kthread(void)
{
        struct task_struct *t;

        t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
        if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
                return 0;
        smp_mb(); /* Ensure others see full kthread. */
        WRITE_ONCE(rcu_tasks_kthread_ptr, t);
        return 0;
}
core_initcall(rcu_spawn_tasks_kthread);

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void)
{
        preempt_disable();
        current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
        preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void)
{
        preempt_disable();
        __srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
        preempt_enable();
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
        if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
                pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
        else
                pr_info("\tTasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
        rcu_self_test_counter++;
        pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);

static void early_boot_test_call_rcu(void)
{
        static struct rcu_head head;
        static struct rcu_head shead;

        call_rcu(&head, test_callback);
        if (IS_ENABLED(CONFIG_SRCU))
                call_srcu(&early_srcu, &shead, test_callback);
}

void rcu_early_boot_tests(void)
{
        pr_info("Running RCU self tests\n");

        if (rcu_self_test)
                early_boot_test_call_rcu();
        rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
        int ret = 0;
        int early_boot_test_counter = 0;

        if (rcu_self_test) {
                early_boot_test_counter++;
                rcu_barrier();
                if (IS_ENABLED(CONFIG_SRCU)) {
                        early_boot_test_counter++;
                        srcu_barrier(&early_srcu);
                }
        }
        if (rcu_self_test_counter != early_boot_test_counter) {
                WARN_ON(1);
                ret = -1;
        }

        return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
        if (rcu_normal)
                pr_info("\tNo expedited grace period (rcu_normal).\n");
        else if (rcu_normal_after_boot)
                pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
        else if (rcu_expedited)
                pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
        if (rcu_cpu_stall_suppress)
                pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
        if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
                pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
        rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */