/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#ifndef CONFIG_RCU_CPU_STALL_DETECTOR
	printk(KERN_INFO
	       "\tRCU-based detection of stalled CPUs is disabled.\n");
#endif
#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

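/*
 * Two bits of the per-task ->rcu_read_unlock_special word are used below:
 * RCU_READ_UNLOCK_NEED_QS indicates that the RCU core is waiting for the
 * task's outermost rcu_read_unlock() to report a quiescent state, and
 * RCU_READ_UNLOCK_BLOCKED indicates that the task blocked (was preempted)
 * within its RCU read-side critical section and is therefore queued on an
 * rcu_node structure's blocked_tasks[] list.
 */
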
/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesc_completed = rdp->gpnum - 1;
	barrier();
	rdp->passed_quiesc = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	int phase;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
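		/*
		 * If this CPU has already passed through a quiescent state
		 * for the current grace period (its bit is clear in
		 * rnp->qsmask), this task can block only the next grace
		 * period, so index by gpnum + 1; otherwise it blocks the
		 * current one, so index by gpnum.  The low-order bit of the
		 * result selects the blocked_tasks[] list.
		 */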
		phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	ACCESS_ONCE(current->rcu_read_lock_nesting)++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	int phase = rnp->gpnum & 0x1;

	return !list_empty(&rnp->blocked_tasks[phase]) ||
	       !list_empty(&rnp->blocked_tasks[phase + 2]);
}

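/*
 * Layout of the ->blocked_tasks[] array: newly blocked readers are queued
 * on element 0 or 1, selected by the low-order bit of the grace-period
 * number they are blocking.  When an expedited grace period starts, these
 * lists are spliced onto elements 2 and 3 (see sync_rcu_preempt_exp_init()),
 * so a reader blocking a normal grace period may end up on either list,
 * which is why rcu_preempted_readers() checks both [phase] and [phase + 2].
 */
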
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
		}
		empty = !rcu_preempted_readers(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
		 */
		if (empty)
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		else
			rcu_report_unblock_qs_rnp(rnp, flags);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && !rcu_preempted_readers_exp(rnp))
			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct list_head *lp;
	int phase;
	struct task_struct *t;

	if (rcu_preempted_readers(rnp)) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		phase = rnp->gpnum & 0x1;
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			sched_show_task(t);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	struct list_head *lp;
	int phase;
	struct task_struct *t;

	if (rcu_preempted_readers(rnp)) {
		phase = rnp->gpnum & 0x1;
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
	}
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempted_readers(rnp));
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns non-zero if there were tasks blocking the current RCU grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	int i;
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *tp;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}
	WARN_ON_ONCE(rnp != rdp->mynode &&
		     (!list_empty(&rnp->blocked_tasks[0]) ||
		      !list_empty(&rnp->blocked_tasks[1]) ||
		      !list_empty(&rnp->blocked_tasks[2]) ||
		      !list_empty(&rnp->blocked_tasks[3])));

	/*
	 * Move tasks up to root rcu_node.  Rely on the fact that the
	 * root rcu_node can be at most one ahead of the rest of the
	 * rcu_nodes in terms of ->gpnum value.  This fact allows us to
	 * move the blocked_tasks[] array directly, element by element.
	 */
	if (rcu_preempted_readers(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	for (i = 0; i < 4; i++) {
		lp = &rnp->blocked_tasks[i];
		lp_root = &rnp_root->blocked_tasks[i];
		while (!list_empty(lp)) {
			tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
			raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
			list_del(&tp->rcu_node_entry);
			tp->rcu_blocked_node = rnp_root;
			list_add(&tp->rcu_node_entry, lp_root);
			raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */
		}
	}
	return retval;
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	if (!rcu_scheduler_active)
		return;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

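/*
 * Illustrative (hypothetical) use of the above: a typical updater unlinks
 * an element from an RCU-protected structure, waits for a grace period,
 * and only then frees it:
 *
 *	spin_lock(&mylock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&mylock);
 *	synchronize_rcu();	(all pre-existing readers have now finished)
 *	kfree(p);
 *
 * Here mylock, p, and ->list are placeholder names, not identifiers
 * defined in this file.
 */
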
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blocked_tasks[2]) ||
	       !list_empty(&rnp->blocked_tasks[3]);
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp))
			break;
		if (rnp->parent == NULL) {
			wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int must_wait;

	raw_spin_lock(&rnp->lock); /* irqs already disabled */
	list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
	list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
	must_wait = rcu_preempted_readers_exp(rnp);
	raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blocked_tasks[] lists, move all entries from the first set of
 * ->blocked_tasks[] lists to the second set, and finally wait for this
 * second set to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* Force all RCU readers onto blocked_tasks[]. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blocked_tasks[] lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blocked_tasks[] lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptable RCU's callbacks to ->orphan_cbs_list.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
	rcu_send_cbs_to_orphanage(&rcu_preempt_state);
}

/*
 * Initialize preemptable RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptable RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	return;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptable RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
}

/*
 * Because preemptable RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we have preemptible RCU, just check whether this CPU needs
 * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_needs_cpu_quick_check(cpu);
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
 * entry is not configured, so we never do need to.
 */
static void rcu_needs_cpu_flush(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#define RCU_NEEDS_CPU_FLUSHES 5
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);

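/*
 * rcu_dyntick_drain counts down the remaining callback-flush passes
 * (starting at RCU_NEEDS_CPU_FLUSHES) that rcu_needs_cpu() will make
 * before giving up, and rcu_dyntick_holdoff records the jiffies value
 * at which it gave up, so that no further attempts are made during
 * that same jiffy.
 */
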
/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we are not supporting preemptible RCU, attempt to accelerate
 * any current grace periods so that RCU no longer needs this CPU, but
 * only if all other CPUs are already in dynticks-idle mode.  This will
 * allow the CPU cores to be powered down immediately, as opposed to after
 * waiting many milliseconds for grace periods to elapse.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do a
 * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
 * The per-cpu rcu_dyntick_drain variable controls the sequencing.
 */
int rcu_needs_cpu(int cpu)
{
	int c = 0;
	int snap;
	int snap_nmi;
	int thatcpu;

	/* Check for being in the holdoff period. */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
		return rcu_needs_cpu_quick_check(cpu);

	/* Don't bother unless we are the last non-dyntick-idle CPU. */
	for_each_online_cpu(thatcpu) {
		if (thatcpu == cpu)
			continue;
		snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
		snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
		smp_mb(); /* Order sampling of snap with end of grace period. */
		if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
			per_cpu(rcu_dyntick_drain, cpu) = 0;
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
			return rcu_needs_cpu_quick_check(cpu);
		}
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		return rcu_needs_cpu_quick_check(cpu);
	}

	/* Do one step pushing remaining RCU callbacks through. */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
	}

	/* If RCU callbacks are still pending, RCU still needs this CPU. */
	if (c)
		raise_softirq(RCU_SOFTIRQ);
	return c;
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.
 */
static void rcu_needs_cpu_flush(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
		return;
	local_irq_save(flags);
	(void)rcu_needs_cpu(cpu);
	local_irq_restore(flags);
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */