/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/stop_machine.h>

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesc_completed = rdp->gpnum - 1;
	barrier(); /* ->passed_quiesc_completed before ->passed_quiesc. */
	rdp->passed_quiesc = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the ->blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * online!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_qs(smp_processor_id());

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempted_readers(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
		 */
		if (empty)
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		else
			rcu_report_unblock_qs_rnp(rnp, flags);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && !rcu_preempted_readers_exp(rnp))
			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

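/*
 * For illustration, a minimal reader-side usage sketch of the primitives
 * above, via the public rcu_read_lock()/rcu_read_unlock() wrappers.  The
 * "struct foo" and "global_foo" names are hypothetical placeholders
 * assumed to be defined elsewhere, not part of this file:
 *
 *	struct foo {
 *		int a;
 *	};
 *	struct foo __rcu *global_foo;
 *
 *	int read_foo_a(void)
 *	{
 *		int a;
 *
 *		rcu_read_lock();
 *		a = rcu_dereference(global_foo)->a;
 *		rcu_read_unlock();
 *		return a;
 *	}
 *
 * Under CONFIG_TREE_PREEMPT_RCU, the lock/unlock pair reduces to the
 * ->rcu_read_lock_nesting increment and decrement shown above.
 */
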
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempted_readers(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;

	if (!rcu_preempted_readers(rnp))
		return;
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		printk(" P%d", t->pid);
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.  (Stall deadlines are compared with signed wraparound
 * arithmetic, so jiffies + ULONG_MAX / 2 is as far into the future
 * as a deadline can be pushed.)
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempted_readers(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempted_readers(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}
	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);

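/*
 * A minimal call_rcu() usage sketch.  The "struct foo" type, its embedded
 * rcu_head, and the helper names are hypothetical placeholders, not part
 * of this file:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	static void retire_foo(struct foo *fp)
 *	{
 *		call_rcu(&fp->rcu, free_foo_rcu);
 *	}
 *
 * The kfree() then happens only after a full grace period, once all
 * pre-existing readers have released their references to *fp.
 */
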
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	if (!rcu_scheduler_active)
		return;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

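/*
 * A minimal updater-side sketch using synchronize_rcu().  The global_foo
 * pointer, foo_lock, and update_foo() names are hypothetical placeholders,
 * assuming "static DEFINE_SPINLOCK(foo_lock);" defined elsewhere:
 *
 *	static void update_foo(struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		spin_lock(&foo_lock);
 *		old_fp = rcu_dereference_protected(global_foo,
 *						   lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(global_foo, new_fp);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();
 *		kfree(old_fp);
 *	}
 *
 * Readers that fetched old_fp before the rcu_assign_pointer() may still
 * be using it; synchronize_rcu() waits for all of them before the kfree().
 */
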
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp))
			break;
		if (rnp->parent == NULL) {
			wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int must_wait = 0;

	raw_spin_lock(&rnp->lock); /* irqs already disabled */
	if (!list_empty(&rnp->blkd_tasks)) {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		must_wait = 1;
	}
	raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for these lists to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

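/*
 * As a usage note: synchronize_rcu_expedited() is a drop-in replacement
 * for synchronize_rcu() in the updater sketch earlier in this file when
 * grace-period latency matters more than CPU overhead; the ordering
 * guarantees are the same, only the cost model differs.
 */
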
/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from the dying CPU to some other
 * online CPU.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
	rcu_send_cbs_to_online(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	return;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifndef CONFIG_SMP

void synchronize_sched_expedited(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */

static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary.  Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}

/*
 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs, and is thus not recommended for
 * any sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any
 * lock that is acquired by a CPU-hotplug notifier.  Failing to
 * observe this restriction will result in deadlock.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves
 * of the ticket-lock word.  Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the old value,
 * then attempts to stop all the CPUs.  If this succeeds, then each
 * CPU will have executed a context switch, resulting in an RCU-sched
 * grace period.  We are then done, so we use atomic_cmpxchg() to
 * update sync_sched_expedited_done to match our snapshot -- but
 * only if someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value
 * of sync_sched_expedited_done.  If it has advanced past our
 * initial snapshot, then someone else must have forced a grace period
 * some time after we took our snapshot.  In this case, our work is
 * done for us, and we can simply return.  Otherwise, we try again,
 * but keep our initial snapshot for purposes of checking for someone
 * doing our work for us.
 *
 * If we fail too many times in a row, we fall back to synchronize_sched().
 */
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later.  Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period.  We subtract
		 * 1 to get the same token that the last incrementer got.
		 * We retry after they started, so our grace period works
		 * for them, and they started after our first try, so their
		 * grace period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started) - 1;
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did beat us to the punch.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
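
/*
 * A worked example of the ticket protocol above, assuming two racing
 * tasks: with started = done = 0, task A increments
 * sync_sched_expedited_started to 1 (firstsnap = 1) and task B then
 * increments it to 2 (firstsnap = 2).  Suppose A's try_stop_cpus() fails
 * but B's succeeds: every CPU has then context-switched after both tasks
 * entered, so B advances sync_sched_expedited_done to 2.  When A retries
 * and reads done = 2 >= its firstsnap of 1, B's grace period is known to
 * cover A's as well, and A returns without stopping the CPUs again.
 */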

#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we have preemptible RCU, just check whether this CPU needs
 * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_needs_cpu_quick_check(cpu);
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
 * entry is not configured, so we never do need to.
 */
static void rcu_needs_cpu_flush(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#define RCU_NEEDS_CPU_FLUSHES 5
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we are not supporting preemptible RCU, attempt to accelerate
 * any current grace periods so that RCU no longer needs this CPU, but
 * only if all other CPUs are already in dynticks-idle mode.  This will
 * allow the CPU cores to be powered down immediately, as opposed to after
 * waiting many milliseconds for grace periods to elapse.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_kthread() to cause rcu_process_callbacks() to be invoked
 * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
 */
int rcu_needs_cpu(int cpu)
{
	int c = 0;
	int snap;
	int thatcpu;

	/* Check for being in the holdoff period. */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
		return rcu_needs_cpu_quick_check(cpu);

	/* Don't bother unless we are the last non-dyntick-idle CPU. */
	for_each_online_cpu(thatcpu) {
		if (thatcpu == cpu)
			continue;
		snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
						     thatcpu).dynticks);
		smp_mb(); /* Order sampling of snap with end of grace period. */
		if ((snap & 0x1) != 0) {
			per_cpu(rcu_dyntick_drain, cpu) = 0;
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
			return rcu_needs_cpu_quick_check(cpu);
		}
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		return rcu_needs_cpu_quick_check(cpu);
	}

	/* Do one step pushing remaining RCU callbacks through. */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
	}

	/* If RCU callbacks are still pending, RCU still needs this CPU. */
	if (c)
		invoke_rcu_kthread();
	return c;
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.
 */
static void rcu_needs_cpu_flush(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
		return;
	local_irq_save(flags);
	(void)rcu_needs_cpu(cpu);
	local_irq_restore(flags);
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */