Commit | Line | Data |
---|---|---|
f41d911f PM |
1 | /* |
2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | |
3 | * Internal non-public definitions that provide either classic | |
6cc68793 | 4 | * or preemptible semantics. |
f41d911f PM |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
19 | * | |
20 | * Copyright Red Hat, 2009 | |
21 | * Copyright IBM Corporation, 2009 | |
22 | * | |
23 | * Author: Ingo Molnar <mingo@elte.hu> | |
24 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> | |
25 | */ | |
26 | ||
d9a3da06 | 27 | #include <linux/delay.h> |
7b27d547 | 28 | #include <linux/stop_machine.h> |
f41d911f | 29 | |
26845c28 PM |
30 | /* |
31 | * Check the RCU kernel configuration parameters and print informative | |
32 | * messages about anything out of the ordinary. If you like #ifdef, you | |
33 | * will love this function. | |
34 | */ | |
35 | static void __init rcu_bootup_announce_oddness(void) | |
36 | { | |
37 | #ifdef CONFIG_RCU_TRACE | |
38 | printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n"); | |
39 | #endif | |
40 | #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32) | |
41 | printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n", | |
42 | CONFIG_RCU_FANOUT); | |
43 | #endif | |
44 | #ifdef CONFIG_RCU_FANOUT_EXACT | |
45 | printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n"); | |
46 | #endif | |
47 | #ifdef CONFIG_RCU_FAST_NO_HZ | |
48 | printk(KERN_INFO | |
49 | "\tRCU dyntick-idle grace-period acceleration is enabled.\n"); | |
50 | #endif | |
51 | #ifdef CONFIG_PROVE_RCU | |
52 | printk(KERN_INFO "\tRCU lockdep checking is enabled.\n"); | |
53 | #endif | |
54 | #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE | |
55 | printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); | |
56 | #endif | |
81a294c4 | 57 | #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE) |
26845c28 PM |
58 | printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); |
59 | #endif | |
60 | #if NUM_RCU_LVL_4 != 0 | |
61 | printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n"); | |
62 | #endif | |
63 | } | |
64 | ||
f41d911f PM |
65 | #ifdef CONFIG_TREE_PREEMPT_RCU |
66 | ||
e99033c5 | 67 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt); |
f41d911f | 68 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); |
27f4d280 | 69 | static struct rcu_state *rcu_state = &rcu_preempt_state; |
f41d911f | 70 | |
10f39bb1 | 71 | static void rcu_read_unlock_special(struct task_struct *t); |
d9a3da06 PM |
72 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); |
73 | ||
f41d911f PM |
74 | /* |
75 | * Tell them what RCU they are running. | |
76 | */ | |
0e0fc1c2 | 77 | static void __init rcu_bootup_announce(void) |
f41d911f | 78 | { |
6cc68793 | 79 | printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n"); |
26845c28 | 80 | rcu_bootup_announce_oddness(); |
f41d911f PM |
81 | } |
82 | ||
83 | /* | |
84 | * Return the number of RCU-preempt batches processed thus far | |
85 | * for debug and statistics. | |
86 | */ | |
87 | long rcu_batches_completed_preempt(void) | |
88 | { | |
89 | return rcu_preempt_state.completed; | |
90 | } | |
91 | EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt); | |
92 | ||
93 | /* | |
94 | * Return the number of RCU batches processed thus far for debug & stats. | |
95 | */ | |
96 | long rcu_batches_completed(void) | |
97 | { | |
98 | return rcu_batches_completed_preempt(); | |
99 | } | |
100 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | |
101 | ||
bf66f18e PM |
102 | /* |
103 | * Force a quiescent state for preemptible RCU. | |
104 | */ | |
105 | void rcu_force_quiescent_state(void) | |
106 | { | |
107 | force_quiescent_state(&rcu_preempt_state, 0); | |
108 | } | |
109 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | |
110 | ||
f41d911f | 111 | /* |
6cc68793 | 112 | * Record a preemptible-RCU quiescent state for the specified CPU. Note |
f41d911f PM |
113 | * that this just means that the task currently running on the CPU is |
114 | * not in a quiescent state. There might be any number of tasks blocked | |
115 | * while in an RCU read-side critical section. | |
25502a6c PM |
116 | * |
117 | * Unlike the other rcu_*_qs() functions, callers of this function |
118 | * must disable irqs in order to protect the assignment to | |
119 | * ->rcu_read_unlock_special. | |
f41d911f | 120 | */ |
c3422bea | 121 | static void rcu_preempt_qs(int cpu) |
f41d911f PM |
122 | { |
123 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | |
25502a6c | 124 | |
e4cc1f22 | 125 | rdp->passed_quiesce_gpnum = rdp->gpnum; |
c3422bea | 126 | barrier(); |
e4cc1f22 | 127 | if (rdp->passed_quiesce == 0) |
d4c08f2a | 128 | trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs"); |
e4cc1f22 | 129 | rdp->passed_quiesce = 1; |
25502a6c | 130 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; |
f41d911f PM |
131 | } |
132 | ||
133 | /* | |
c3422bea PM |
134 | * We have entered the scheduler, and the current task might soon be |
135 | * context-switched away from. If this task is in an RCU read-side | |
136 | * critical section, we will no longer be able to rely on the CPU to | |
12f5f524 PM |
137 | * record that fact, so we enqueue the task on the blkd_tasks list. |
138 | * The task will dequeue itself when it exits the outermost enclosing | |
139 | * RCU read-side critical section. Therefore, the current grace period | |
140 | * cannot be permitted to complete until the blkd_tasks list entries | |
141 | * predating the current grace period drain, in other words, until | |
142 | * rnp->gp_tasks becomes NULL. | |
c3422bea PM |
143 | * |
144 | * Caller must disable preemption. | |
f41d911f | 145 | */ |
c3422bea | 146 | static void rcu_preempt_note_context_switch(int cpu) |
f41d911f PM |
147 | { |
148 | struct task_struct *t = current; | |
c3422bea | 149 | unsigned long flags; |
f41d911f PM |
150 | struct rcu_data *rdp; |
151 | struct rcu_node *rnp; | |
152 | ||
10f39bb1 | 153 | if (t->rcu_read_lock_nesting > 0 && |
f41d911f PM |
154 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { |
155 | ||
156 | /* Possibly blocking in an RCU read-side critical section. */ | |
394f99a9 | 157 | rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu); |
f41d911f | 158 | rnp = rdp->mynode; |
1304afb2 | 159 | raw_spin_lock_irqsave(&rnp->lock, flags); |
f41d911f | 160 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; |
86848966 | 161 | t->rcu_blocked_node = rnp; |
f41d911f PM |
162 | |
163 | /* | |
164 | * If this CPU has already checked in, then this task | |
165 | * will hold up the next grace period rather than the | |
166 | * current grace period. Queue the task accordingly. | |
167 | * If the task is queued for the current grace period | |
168 | * (i.e., this CPU has not yet passed through a quiescent | |
169 | * state for the current grace period), then as long | |
170 | * as that task remains queued, the current grace period | |
12f5f524 PM |
171 | * cannot end. Note that there is some uncertainty as |
172 | * to exactly when the current grace period started. | |
173 | * We take a conservative approach, which can result | |
174 | * in unnecessarily waiting on tasks that started very | |
175 | * slightly after the current grace period began. C'est | |
176 | * la vie!!! | |
b0e165c0 PM |
177 | * |
178 | * But first, note that the current CPU must still be | |
179 | * on line! | |
f41d911f | 180 | */ |
b0e165c0 | 181 | WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0); |
e7d8842e | 182 | WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); |
12f5f524 PM |
183 | if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) { |
184 | list_add(&t->rcu_node_entry, rnp->gp_tasks->prev); | |
185 | rnp->gp_tasks = &t->rcu_node_entry; | |
27f4d280 PM |
186 | #ifdef CONFIG_RCU_BOOST |
187 | if (rnp->boost_tasks != NULL) | |
188 | rnp->boost_tasks = rnp->gp_tasks; | |
189 | #endif /* #ifdef CONFIG_RCU_BOOST */ | |
12f5f524 PM |
190 | } else { |
191 | list_add(&t->rcu_node_entry, &rnp->blkd_tasks); | |
192 | if (rnp->qsmask & rdp->grpmask) | |
193 | rnp->gp_tasks = &t->rcu_node_entry; | |
194 | } | |
d4c08f2a PM |
195 | trace_rcu_preempt_task(rdp->rsp->name, |
196 | t->pid, | |
197 | (rnp->qsmask & rdp->grpmask) | |
198 | ? rnp->gpnum | |
199 | : rnp->gpnum + 1); | |
1304afb2 | 200 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
10f39bb1 PM |
201 | } else if (t->rcu_read_lock_nesting < 0 && |
202 | t->rcu_read_unlock_special) { | |
203 | ||
204 | /* | |
205 | * Complete exit from RCU read-side critical section on | |
206 | * behalf of preempted instance of __rcu_read_unlock(). | |
207 | */ | |
208 | rcu_read_unlock_special(t); | |
f41d911f PM |
209 | } |
210 | ||
211 | /* | |
212 | * Either we were not in an RCU read-side critical section to | |
213 | * begin with, or we have now recorded that critical section | |
214 | * globally. Either way, we can now note a quiescent state | |
215 | * for this CPU. Again, if we were in an RCU read-side critical | |
216 | * section, and if that critical section was blocking the current | |
217 | * grace period, then the fact that the task has been enqueued | |
218 | * means that we continue to block the current grace period. | |
219 | */ | |
e7d8842e | 220 | local_irq_save(flags); |
25502a6c | 221 | rcu_preempt_qs(cpu); |
e7d8842e | 222 | local_irq_restore(flags); |
f41d911f PM |
223 | } |
224 | ||
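The queuing logic above leaves each leaf rcu_node with a single ->blkd_tasks list and a set of pointers into it. The following picture is an illustrative sketch only (the task names T1..T4 are made up, and the comment is not part of this file):

```c
/*
 * Tasks are added at the head of ->blkd_tasks (or immediately before
 * ->gp_tasks when they also block the current grace period), so the
 * list runs from most recently blocked to least recently blocked:
 *
 *   blkd_tasks --> T4 --> T3 --> T2 --> T1
 *                         ^
 *                         +-- gp_tasks: T3, T2, and T1 block the
 *                             current grace period; T4 blocked after
 *                             this CPU had already passed its quiescent
 *                             state, so it blocks only the next grace
 *                             period.
 *
 * ->exp_tasks and (under CONFIG_RCU_BOOST) ->boost_tasks point into
 * this same list in the same way for expedited grace periods and for
 * priority boosting, respectively.
 */
```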
225 | /* | |
6cc68793 | 226 | * Tree-preemptible RCU implementation for rcu_read_lock(). |
f41d911f PM |
227 | * Just increment ->rcu_read_lock_nesting, shared state will be updated |
228 | * if we block. | |
229 | */ | |
230 | void __rcu_read_lock(void) | |
231 | { | |
80dcf60e | 232 | current->rcu_read_lock_nesting++; |
f41d911f PM |
233 | barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ |
234 | } | |
235 | EXPORT_SYMBOL_GPL(__rcu_read_lock); | |
236 | ||
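For reference, the public rcu_read_lock()/rcu_read_unlock() wrappers in rcupdate.h end up in __rcu_read_lock()/__rcu_read_unlock() when CONFIG_TREE_PREEMPT_RCU=y. A minimal reader sketch; struct foo, global_foo, and read_foo_value() are illustrative names, not part of this file:

```c
struct foo {
	int value;
};

static struct foo __rcu *global_foo;

static int read_foo_value(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();			/* may be preempted under preemptible RCU */
	p = rcu_dereference(global_foo);	/* fetch the RCU-protected pointer */
	if (p != NULL)
		val = p->value;
	rcu_read_unlock();			/* ends the read-side critical section */
	return val;
}
```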
fc2219d4 PM |
237 | /* |
238 | * Check for preempted RCU readers blocking the current grace period | |
239 | * for the specified rcu_node structure. If the caller needs a reliable | |
240 | * answer, it must hold the rcu_node's ->lock. | |
241 | */ | |
27f4d280 | 242 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) |
fc2219d4 | 243 | { |
12f5f524 | 244 | return rnp->gp_tasks != NULL; |
fc2219d4 PM |
245 | } |
246 | ||
b668c9cf PM |
247 | /* |
248 | * Record a quiescent state for all tasks that were previously queued | |
249 | * on the specified rcu_node structure and that were blocking the current | |
250 | * RCU grace period. The caller must hold the specified rnp->lock with | |
251 | * irqs disabled, and this lock is released upon return, but irqs remain | |
252 | * disabled. | |
253 | */ | |
d3f6bad3 | 254 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) |
b668c9cf PM |
255 | __releases(rnp->lock) |
256 | { | |
257 | unsigned long mask; | |
258 | struct rcu_node *rnp_p; | |
259 | ||
27f4d280 | 260 | if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { |
1304afb2 | 261 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
b668c9cf PM |
262 | return; /* Still need more quiescent states! */ |
263 | } | |
264 | ||
265 | rnp_p = rnp->parent; | |
266 | if (rnp_p == NULL) { | |
267 | /* | |
268 | * Either there is only one rcu_node in the tree, | |
269 | * or tasks were kicked up to root rcu_node due to | |
270 | * CPUs going offline. | |
271 | */ | |
d3f6bad3 | 272 | rcu_report_qs_rsp(&rcu_preempt_state, flags); |
b668c9cf PM |
273 | return; |
274 | } | |
275 | ||
276 | /* Report up the rest of the hierarchy. */ | |
277 | mask = rnp->grpmask; | |
1304afb2 PM |
278 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
279 | raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */ | |
d3f6bad3 | 280 | rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags); |
b668c9cf PM |
281 | } |
282 | ||
12f5f524 PM |
283 | /* |
284 | * Advance a ->blkd_tasks-list pointer to the next entry, returning |
285 | * NULL if the end of the list has been reached. |
286 | */ | |
287 | static struct list_head *rcu_next_node_entry(struct task_struct *t, | |
288 | struct rcu_node *rnp) | |
289 | { | |
290 | struct list_head *np; | |
291 | ||
292 | np = t->rcu_node_entry.next; | |
293 | if (np == &rnp->blkd_tasks) | |
294 | np = NULL; | |
295 | return np; | |
296 | } | |
297 | ||
b668c9cf PM |
298 | /* |
299 | * Handle special cases during rcu_read_unlock(), such as needing to | |
300 | * notify the RCU core or having blocked during the RCU read-side |
301 | * critical section. |
302 | */ | |
be0e1e21 | 303 | static noinline void rcu_read_unlock_special(struct task_struct *t) |
f41d911f PM |
304 | { |
305 | int empty; | |
d9a3da06 | 306 | int empty_exp; |
f41d911f | 307 | unsigned long flags; |
12f5f524 | 308 | struct list_head *np; |
82e78d80 PM |
309 | #ifdef CONFIG_RCU_BOOST |
310 | struct rt_mutex *rbmp = NULL; | |
311 | #endif /* #ifdef CONFIG_RCU_BOOST */ | |
f41d911f PM |
312 | struct rcu_node *rnp; |
313 | int special; | |
314 | ||
315 | /* NMI handlers cannot block and cannot safely manipulate state. */ | |
316 | if (in_nmi()) | |
317 | return; | |
318 | ||
319 | local_irq_save(flags); | |
320 | ||
321 | /* | |
322 | * If RCU core is waiting for this CPU to exit critical section, | |
323 | * let it know that we have done so. | |
324 | */ | |
325 | special = t->rcu_read_unlock_special; | |
326 | if (special & RCU_READ_UNLOCK_NEED_QS) { | |
c3422bea | 327 | rcu_preempt_qs(smp_processor_id()); |
f41d911f PM |
328 | } |
329 | ||
330 | /* Hardware IRQ handlers cannot block. */ | |
ec433f0c | 331 | if (in_irq() || in_serving_softirq()) { |
f41d911f PM |
332 | local_irq_restore(flags); |
333 | return; | |
334 | } | |
335 | ||
336 | /* Clean up if blocked during RCU read-side critical section. */ | |
337 | if (special & RCU_READ_UNLOCK_BLOCKED) { | |
338 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED; | |
339 | ||
dd5d19ba PM |
340 | /* |
341 | * Remove this task from the list it blocked on. The | |
342 | * task can migrate while we acquire the lock, but at | |
343 | * most one time. So at most two passes through loop. | |
344 | */ | |
345 | for (;;) { | |
86848966 | 346 | rnp = t->rcu_blocked_node; |
1304afb2 | 347 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ |
86848966 | 348 | if (rnp == t->rcu_blocked_node) |
dd5d19ba | 349 | break; |
1304afb2 | 350 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
dd5d19ba | 351 | } |
27f4d280 | 352 | empty = !rcu_preempt_blocked_readers_cgp(rnp); |
d9a3da06 PM |
353 | empty_exp = !rcu_preempted_readers_exp(rnp); |
354 | smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ | |
12f5f524 | 355 | np = rcu_next_node_entry(t, rnp); |
f41d911f | 356 | list_del_init(&t->rcu_node_entry); |
82e78d80 | 357 | t->rcu_blocked_node = NULL; |
d4c08f2a PM |
358 | trace_rcu_unlock_preempted_task("rcu_preempt", |
359 | rnp->gpnum, t->pid); | |
12f5f524 PM |
360 | if (&t->rcu_node_entry == rnp->gp_tasks) |
361 | rnp->gp_tasks = np; | |
362 | if (&t->rcu_node_entry == rnp->exp_tasks) | |
363 | rnp->exp_tasks = np; | |
27f4d280 PM |
364 | #ifdef CONFIG_RCU_BOOST |
365 | if (&t->rcu_node_entry == rnp->boost_tasks) | |
366 | rnp->boost_tasks = np; | |
82e78d80 PM |
367 | /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */ |
368 | if (t->rcu_boost_mutex) { | |
369 | rbmp = t->rcu_boost_mutex; | |
370 | t->rcu_boost_mutex = NULL; | |
7765be2f | 371 | } |
27f4d280 | 372 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
f41d911f PM |
373 | |
374 | /* | |
375 | * If this was the last task on the current list, and if | |
376 | * we aren't waiting on any CPUs, report the quiescent state. | |
d3f6bad3 | 377 | * Note that rcu_report_unblock_qs_rnp() releases rnp->lock. |
f41d911f | 378 | */ |
d4c08f2a PM |
379 | if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { |
380 | trace_rcu_quiescent_state_report("preempt_rcu", | |
381 | rnp->gpnum, | |
382 | 0, rnp->qsmask, | |
383 | rnp->level, | |
384 | rnp->grplo, | |
385 | rnp->grphi, | |
386 | !!rnp->gp_tasks); | |
d3f6bad3 | 387 | rcu_report_unblock_qs_rnp(rnp, flags); |
d4c08f2a PM |
388 | } else |
389 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
d9a3da06 | 390 | |
27f4d280 PM |
391 | #ifdef CONFIG_RCU_BOOST |
392 | /* Unboost if we were boosted. */ | |
82e78d80 PM |
393 | if (rbmp) |
394 | rt_mutex_unlock(rbmp); | |
27f4d280 PM |
395 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
396 | ||
d9a3da06 PM |
397 | /* |
398 | * If this was the last task on the expedited lists, | |
399 | * then we need to report up the rcu_node hierarchy. | |
400 | */ | |
401 | if (!empty_exp && !rcu_preempted_readers_exp(rnp)) | |
402 | rcu_report_exp_rnp(&rcu_preempt_state, rnp); | |
b668c9cf PM |
403 | } else { |
404 | local_irq_restore(flags); | |
f41d911f | 405 | } |
f41d911f PM |
406 | } |
407 | ||
408 | /* | |
6cc68793 | 409 | * Tree-preemptible RCU implementation for rcu_read_unlock(). |
f41d911f PM |
410 | * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost |
411 | * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then | |
412 | * invoke rcu_read_unlock_special() to clean up after a context switch | |
413 | * in an RCU read-side critical section and other special cases. | |
414 | */ | |
415 | void __rcu_read_unlock(void) | |
416 | { | |
417 | struct task_struct *t = current; | |
418 | ||
10f39bb1 PM |
419 | if (t->rcu_read_lock_nesting != 1) |
420 | --t->rcu_read_lock_nesting; | |
421 | else { | |
6206ab9b | 422 | barrier(); /* critical section before exit code. */ |
10f39bb1 PM |
423 | t->rcu_read_lock_nesting = INT_MIN; |
424 | barrier(); /* assign before ->rcu_read_unlock_special load */ | |
be0e1e21 PM |
425 | if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) |
426 | rcu_read_unlock_special(t); | |
10f39bb1 PM |
427 | barrier(); /* ->rcu_read_unlock_special load before assign */ |
428 | t->rcu_read_lock_nesting = 0; | |
be0e1e21 | 429 | } |
cba8244a | 430 | #ifdef CONFIG_PROVE_LOCKING |
10f39bb1 PM |
431 | { |
432 | int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); | |
433 | ||
434 | WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); | |
435 | } | |
cba8244a | 436 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
f41d911f PM |
437 | } |
438 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); | |
439 | ||
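Read-side critical sections nest, and only the outermost rcu_read_unlock() takes the slow path above. A sketch of the nesting counter; nested_reader() is an illustrative name, and the comments show the corresponding ->rcu_read_lock_nesting values:

```c
static void nested_reader(void)
{
	rcu_read_lock();	/* ->rcu_read_lock_nesting: 0 -> 1 */
	rcu_read_lock();	/* 1 -> 2 (nested, cheap increment) */
	/* ... read-side accesses ... */
	rcu_read_unlock();	/* 2 -> 1, fast path only */
	rcu_read_unlock();	/* outermost: may invoke rcu_read_unlock_special() */
}
```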
1ed509a2 PM |
440 | #ifdef CONFIG_RCU_CPU_STALL_VERBOSE |
441 | ||
442 | /* | |
443 | * Dump detailed information for all tasks blocking the current RCU | |
444 | * grace period on the specified rcu_node structure. | |
445 | */ | |
446 | static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) | |
447 | { | |
448 | unsigned long flags; | |
1ed509a2 PM |
449 | struct task_struct *t; |
450 | ||
27f4d280 | 451 | if (!rcu_preempt_blocked_readers_cgp(rnp)) |
12f5f524 PM |
452 | return; |
453 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
454 | t = list_entry(rnp->gp_tasks, | |
455 | struct task_struct, rcu_node_entry); | |
456 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) | |
457 | sched_show_task(t); | |
458 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
1ed509a2 PM |
459 | } |
460 | ||
461 | /* | |
462 | * Dump detailed information for all tasks blocking the current RCU | |
463 | * grace period. | |
464 | */ | |
465 | static void rcu_print_detail_task_stall(struct rcu_state *rsp) | |
466 | { | |
467 | struct rcu_node *rnp = rcu_get_root(rsp); | |
468 | ||
469 | rcu_print_detail_task_stall_rnp(rnp); | |
470 | rcu_for_each_leaf_node(rsp, rnp) | |
471 | rcu_print_detail_task_stall_rnp(rnp); | |
472 | } | |
473 | ||
474 | #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */ | |
475 | ||
476 | static void rcu_print_detail_task_stall(struct rcu_state *rsp) | |
477 | { | |
478 | } | |
479 | ||
480 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */ | |
481 | ||
f41d911f PM |
482 | /* |
483 | * Scan the current list of tasks blocked within RCU read-side critical | |
484 | * sections, printing out the tid of each. | |
485 | */ | |
9bc8b558 | 486 | static int rcu_print_task_stall(struct rcu_node *rnp) |
f41d911f | 487 | { |
f41d911f | 488 | struct task_struct *t; |
9bc8b558 | 489 | int ndetected = 0; |
f41d911f | 490 | |
27f4d280 | 491 | if (!rcu_preempt_blocked_readers_cgp(rnp)) |
9bc8b558 | 492 | return 0; |
12f5f524 PM |
493 | t = list_entry(rnp->gp_tasks, |
494 | struct task_struct, rcu_node_entry); | |
9bc8b558 | 495 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { |
12f5f524 | 496 | printk(" P%d", t->pid); |
9bc8b558 PM |
497 | ndetected++; |
498 | } | |
499 | return ndetected; | |
f41d911f PM |
500 | } |
501 | ||
53d84e00 PM |
502 | /* |
503 | * Suppress preemptible RCU's CPU stall warnings by pushing the | |
504 | * time of the next stall-warning message comfortably far into the | |
505 | * future. | |
506 | */ | |
507 | static void rcu_preempt_stall_reset(void) | |
508 | { | |
509 | rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2; | |
510 | } | |
511 | ||
b0e165c0 PM |
512 | /* |
513 | * Check that the list of blocked tasks for the newly completed grace | |
514 | * period is in fact empty. It is a serious bug to complete a grace | |
515 | * period that still has RCU readers blocked! This function must be | |
516 | * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock | |
517 | * must be held by the caller. | |
12f5f524 PM |
518 | * |
519 | * Also, if there are blocked tasks on the list, they automatically | |
520 | * block the newly created grace period, so set up ->gp_tasks accordingly. | |
b0e165c0 PM |
521 | */ |
522 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |
523 | { | |
27f4d280 | 524 | WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); |
12f5f524 PM |
525 | if (!list_empty(&rnp->blkd_tasks)) |
526 | rnp->gp_tasks = rnp->blkd_tasks.next; | |
28ecd580 | 527 | WARN_ON_ONCE(rnp->qsmask); |
b0e165c0 PM |
528 | } |
529 | ||
33f76148 PM |
530 | #ifdef CONFIG_HOTPLUG_CPU |
531 | ||
dd5d19ba PM |
532 | /* |
533 | * Handle tasklist migration for the case in which all CPUs covered by the |
534 | * specified rcu_node have gone offline. Move them up to the root | |
535 | * rcu_node. The reason for not just moving them to the immediate | |
536 | * parent is to remove the need for rcu_read_unlock_special() to | |
537 | * make more than two attempts to acquire the target rcu_node's lock. | |
b668c9cf PM |
538 | * |
237c80c5 PM |
539 | * Returns 1 if there was previously a task blocking the current grace |
540 | * period on the specified rcu_node structure. |
543 | * | |
dd5d19ba PM |
544 | * The caller must hold rnp->lock with irqs disabled. |
545 | */ | |
237c80c5 PM |
546 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
547 | struct rcu_node *rnp, | |
548 | struct rcu_data *rdp) | |
dd5d19ba | 549 | { |
dd5d19ba PM |
550 | struct list_head *lp; |
551 | struct list_head *lp_root; | |
d9a3da06 | 552 | int retval = 0; |
dd5d19ba | 553 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
12f5f524 | 554 | struct task_struct *t; |
dd5d19ba | 555 | |
86848966 PM |
556 | if (rnp == rnp_root) { |
557 | WARN_ONCE(1, "Last CPU thought to be offlined?"); | |
237c80c5 | 558 | return 0; /* Shouldn't happen: at least one CPU online. */ |
86848966 | 559 | } |
12f5f524 PM |
560 | |
561 | /* If we are on an internal node, complain bitterly. */ | |
562 | WARN_ON_ONCE(rnp != rdp->mynode); | |
dd5d19ba PM |
563 | |
564 | /* | |
12f5f524 PM |
565 | * Move tasks up to root rcu_node. Don't try to get fancy for |
566 | * this corner-case operation -- just put this node's tasks | |
567 | * at the head of the root node's list, and update the root node's | |
568 | * ->gp_tasks and ->exp_tasks pointers to those of this node's, | |
569 | * if non-NULL. This might result in waiting for more tasks than | |
570 | * absolutely necessary, but this is a good performance/complexity | |
571 | * tradeoff. | |
dd5d19ba | 572 | */ |
27f4d280 | 573 | if (rcu_preempt_blocked_readers_cgp(rnp)) |
d9a3da06 PM |
574 | retval |= RCU_OFL_TASKS_NORM_GP; |
575 | if (rcu_preempted_readers_exp(rnp)) | |
576 | retval |= RCU_OFL_TASKS_EXP_GP; | |
12f5f524 PM |
577 | lp = &rnp->blkd_tasks; |
578 | lp_root = &rnp_root->blkd_tasks; | |
579 | while (!list_empty(lp)) { | |
580 | t = list_entry(lp->next, typeof(*t), rcu_node_entry); | |
581 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ | |
582 | list_del(&t->rcu_node_entry); | |
583 | t->rcu_blocked_node = rnp_root; | |
584 | list_add(&t->rcu_node_entry, lp_root); | |
585 | if (&t->rcu_node_entry == rnp->gp_tasks) | |
586 | rnp_root->gp_tasks = rnp->gp_tasks; | |
587 | if (&t->rcu_node_entry == rnp->exp_tasks) | |
588 | rnp_root->exp_tasks = rnp->exp_tasks; | |
27f4d280 PM |
589 | #ifdef CONFIG_RCU_BOOST |
590 | if (&t->rcu_node_entry == rnp->boost_tasks) | |
591 | rnp_root->boost_tasks = rnp->boost_tasks; | |
592 | #endif /* #ifdef CONFIG_RCU_BOOST */ | |
12f5f524 | 593 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ |
dd5d19ba | 594 | } |
27f4d280 PM |
595 | |
596 | #ifdef CONFIG_RCU_BOOST | |
597 | /* In case root is being boosted and leaf is not. */ | |
598 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ | |
599 | if (rnp_root->boost_tasks != NULL && | |
600 | rnp_root->boost_tasks != rnp_root->gp_tasks) | |
601 | rnp_root->boost_tasks = rnp_root->gp_tasks; | |
602 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ | |
603 | #endif /* #ifdef CONFIG_RCU_BOOST */ | |
604 | ||
12f5f524 PM |
605 | rnp->gp_tasks = NULL; |
606 | rnp->exp_tasks = NULL; | |
237c80c5 | 607 | return retval; |
dd5d19ba PM |
608 | } |
609 | ||
33f76148 | 610 | /* |
6cc68793 | 611 | * Do CPU-offline processing for preemptible RCU. |
33f76148 PM |
612 | */ |
613 | static void rcu_preempt_offline_cpu(int cpu) | |
614 | { | |
615 | __rcu_offline_cpu(cpu, &rcu_preempt_state); | |
616 | } | |
617 | ||
618 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
619 | ||
f41d911f PM |
620 | /* |
621 | * Check for a quiescent state from the current CPU. When a task blocks, | |
622 | * the task is recorded in the corresponding CPU's rcu_node structure, | |
623 | * which is checked elsewhere. | |
624 | * | |
625 | * Caller must disable hard irqs. | |
626 | */ | |
627 | static void rcu_preempt_check_callbacks(int cpu) | |
628 | { | |
629 | struct task_struct *t = current; | |
630 | ||
631 | if (t->rcu_read_lock_nesting == 0) { | |
c3422bea | 632 | rcu_preempt_qs(cpu); |
f41d911f PM |
633 | return; |
634 | } | |
10f39bb1 PM |
635 | if (t->rcu_read_lock_nesting > 0 && |
636 | per_cpu(rcu_preempt_data, cpu).qs_pending) | |
c3422bea | 637 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; |
f41d911f PM |
638 | } |
639 | ||
640 | /* | |
6cc68793 | 641 | * Process callbacks for preemptible RCU. |
f41d911f PM |
642 | */ |
643 | static void rcu_preempt_process_callbacks(void) | |
644 | { | |
645 | __rcu_process_callbacks(&rcu_preempt_state, | |
646 | &__get_cpu_var(rcu_preempt_data)); | |
647 | } | |
648 | ||
a46e0899 PM |
649 | #ifdef CONFIG_RCU_BOOST |
650 | ||
09223371 SL |
651 | static void rcu_preempt_do_callbacks(void) |
652 | { | |
653 | rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data)); | |
654 | } | |
655 | ||
a46e0899 PM |
656 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
657 | ||
f41d911f | 658 | /* |
6cc68793 | 659 | * Queue a preemptible-RCU callback for invocation after a grace period. |
f41d911f PM |
660 | */ |
661 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |
662 | { | |
663 | __call_rcu(head, func, &rcu_preempt_state); | |
664 | } | |
665 | EXPORT_SYMBOL_GPL(call_rcu); | |
666 | ||
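A typical caller removes an element from an RCU-protected structure and hands its embedded rcu_head to call_rcu(); the callback then runs after a grace period. This is only a sketch: struct foo, foo_reclaim(), and foo_remove() are illustrative names, not part of this file.

```c
struct foo {
	struct list_head list;
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);	/* safe: no reader can still reference fp */
}

static void foo_remove(struct foo *fp)
{
	/* Caller holds the update-side lock protecting the list. */
	list_del_rcu(&fp->list);
	call_rcu(&fp->rcu, foo_reclaim);	/* defer the free past a grace period */
}
```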
6ebb237b PM |
667 | /** |
668 | * synchronize_rcu - wait until a grace period has elapsed. | |
669 | * | |
670 | * Control will return to the caller some time after a full grace | |
671 | * period has elapsed, in other words after all currently executing RCU | |
77d8485a PM |
672 | * read-side critical sections have completed. Note, however, that |
673 | * upon return from synchronize_rcu(), the caller might well be executing | |
674 | * concurrently with new RCU read-side critical sections that began while | |
675 | * synchronize_rcu() was waiting. RCU read-side critical sections are | |
676 | * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. | |
6ebb237b PM |
677 | */ |
678 | void synchronize_rcu(void) | |
679 | { | |
6ebb237b PM |
680 | if (!rcu_scheduler_active) |
681 | return; | |
2c42818e | 682 | wait_rcu_gp(call_rcu); |
6ebb237b PM |
683 | } |
684 | EXPORT_SYMBOL_GPL(synchronize_rcu); | |
685 | ||
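synchronize_rcu() is the blocking counterpart to call_rcu(): unlink, wait for a grace period, then free directly. Again a sketch, reusing the illustrative struct foo from the call_rcu() example; foo_lock and foo_replace() are likewise made-up names:

```c
static DEFINE_SPINLOCK(foo_lock);

static void foo_replace(struct foo *old_fp, struct foo *new_fp)
{
	spin_lock(&foo_lock);
	list_replace_rcu(&old_fp->list, &new_fp->list);	/* readers see old or new */
	spin_unlock(&foo_lock);
	synchronize_rcu();	/* wait for pre-existing readers to drop old_fp */
	kfree(old_fp);
}
```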
d9a3da06 PM |
686 | static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); |
687 | static long sync_rcu_preempt_exp_count; | |
688 | static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); | |
689 | ||
690 | /* | |
691 | * Return non-zero if there are any tasks in RCU read-side critical | |
692 | * sections blocking the current preemptible-RCU expedited grace period. | |
693 | * If there is no preemptible-RCU expedited grace period currently in | |
694 | * progress, returns zero unconditionally. | |
695 | */ | |
696 | static int rcu_preempted_readers_exp(struct rcu_node *rnp) | |
697 | { | |
12f5f524 | 698 | return rnp->exp_tasks != NULL; |
d9a3da06 PM |
699 | } |
700 | ||
701 | /* | |
702 | * Return non-zero if there is no RCU expedited grace period in progress |
703 | * for the specified rcu_node structure, in other words, if all CPUs and | |
704 | * tasks covered by the specified rcu_node structure have done their bit | |
705 | * for the current expedited grace period. Works only for preemptible | |
706 | * RCU -- other RCU implementations use other means. |
707 | * | |
708 | * Caller must hold sync_rcu_preempt_exp_mutex. | |
709 | */ | |
710 | static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) | |
711 | { | |
712 | return !rcu_preempted_readers_exp(rnp) && | |
713 | ACCESS_ONCE(rnp->expmask) == 0; | |
714 | } | |
715 | ||
716 | /* | |
717 | * Report the exit from RCU read-side critical section for the last task | |
718 | * that queued itself during or before the current expedited preemptible-RCU | |
719 | * grace period. This event is reported either to the rcu_node structure on | |
720 | * which the task was queued or to one of that rcu_node structure's ancestors, | |
721 | * recursively up the tree. (Calm down, calm down, we do the recursion | |
722 | * iteratively!) | |
723 | * | |
724 | * Caller must hold sync_rcu_preempt_exp_mutex. | |
725 | */ | |
726 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | |
727 | { | |
728 | unsigned long flags; | |
729 | unsigned long mask; | |
730 | ||
1304afb2 | 731 | raw_spin_lock_irqsave(&rnp->lock, flags); |
d9a3da06 | 732 | for (;;) { |
131906b0 PM |
733 | if (!sync_rcu_preempt_exp_done(rnp)) { |
734 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
d9a3da06 | 735 | break; |
131906b0 | 736 | } |
d9a3da06 | 737 | if (rnp->parent == NULL) { |
131906b0 | 738 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
d9a3da06 PM |
739 | wake_up(&sync_rcu_preempt_exp_wq); |
740 | break; | |
741 | } | |
742 | mask = rnp->grpmask; | |
1304afb2 | 743 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
d9a3da06 | 744 | rnp = rnp->parent; |
1304afb2 | 745 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
d9a3da06 PM |
746 | rnp->expmask &= ~mask; |
747 | } | |
d9a3da06 PM |
748 | } |
749 | ||
750 | /* | |
751 | * Snapshot the tasks blocking the newly started preemptible-RCU expedited | |
752 | * grace period for the specified rcu_node structure. If there are no such | |
753 | * tasks, report it up the rcu_node hierarchy. | |
754 | * | |
755 | * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock. | |
756 | */ | |
757 | static void | |
758 | sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) | |
759 | { | |
1217ed1b | 760 | unsigned long flags; |
12f5f524 | 761 | int must_wait = 0; |
d9a3da06 | 762 | |
1217ed1b PM |
763 | raw_spin_lock_irqsave(&rnp->lock, flags); |
764 | if (list_empty(&rnp->blkd_tasks)) | |
765 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
766 | else { | |
12f5f524 | 767 | rnp->exp_tasks = rnp->blkd_tasks.next; |
1217ed1b | 768 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ |
12f5f524 PM |
769 | must_wait = 1; |
770 | } | |
d9a3da06 PM |
771 | if (!must_wait) |
772 | rcu_report_exp_rnp(rsp, rnp); | |
773 | } | |
774 | ||
019129d5 | 775 | /* |
d9a3da06 PM |
776 | * Wait for an rcu-preempt grace period, but expedite it. The basic idea |
777 | * is to invoke synchronize_sched_expedited() to push all the tasks to | |
12f5f524 | 778 | * the ->blkd_tasks lists and wait for these lists to drain. |
019129d5 PM |
779 | */ |
780 | void synchronize_rcu_expedited(void) | |
781 | { | |
d9a3da06 PM |
782 | unsigned long flags; |
783 | struct rcu_node *rnp; | |
784 | struct rcu_state *rsp = &rcu_preempt_state; | |
785 | long snap; | |
786 | int trycount = 0; | |
787 | ||
788 | smp_mb(); /* Caller's modifications seen first by other CPUs. */ | |
789 | snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1; | |
790 | smp_mb(); /* Above access cannot bleed into critical section. */ | |
791 | ||
792 | /* | |
793 | * Acquire lock, falling back to synchronize_rcu() if too many | |
794 | * lock-acquisition failures. Of course, if someone does the | |
795 | * expedited grace period for us, just leave. | |
796 | */ | |
797 | while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { | |
798 | if (trycount++ < 10) | |
799 | udelay(trycount * num_online_cpus()); | |
800 | else { | |
801 | synchronize_rcu(); | |
802 | return; | |
803 | } | |
804 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | |
805 | goto mb_ret; /* Others did our work for us. */ | |
806 | } | |
807 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | |
808 | goto unlock_mb_ret; /* Others did our work for us. */ | |
809 | ||
12f5f524 | 810 | /* force all RCU readers onto ->blkd_tasks lists. */ |
d9a3da06 PM |
811 | synchronize_sched_expedited(); |
812 | ||
1304afb2 | 813 | raw_spin_lock_irqsave(&rsp->onofflock, flags); |
d9a3da06 PM |
814 | |
815 | /* Initialize ->expmask for all non-leaf rcu_node structures. */ | |
816 | rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { | |
1304afb2 | 817 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ |
d9a3da06 | 818 | rnp->expmask = rnp->qsmaskinit; |
1304afb2 | 819 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
d9a3da06 PM |
820 | } |
821 | ||
12f5f524 | 822 | /* Snapshot current state of ->blkd_tasks lists. */ |
d9a3da06 PM |
823 | rcu_for_each_leaf_node(rsp, rnp) |
824 | sync_rcu_preempt_exp_init(rsp, rnp); | |
825 | if (NUM_RCU_NODES > 1) | |
826 | sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); | |
827 | ||
1304afb2 | 828 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); |
d9a3da06 | 829 | |
12f5f524 | 830 | /* Wait for snapshotted ->blkd_tasks lists to drain. */ |
d9a3da06 PM |
831 | rnp = rcu_get_root(rsp); |
832 | wait_event(sync_rcu_preempt_exp_wq, | |
833 | sync_rcu_preempt_exp_done(rnp)); | |
834 | ||
835 | /* Clean up and exit. */ | |
836 | smp_mb(); /* ensure expedited GP seen before counter increment. */ | |
837 | ACCESS_ONCE(sync_rcu_preempt_exp_count)++; | |
838 | unlock_mb_ret: | |
839 | mutex_unlock(&sync_rcu_preempt_exp_mutex); | |
840 | mb_ret: | |
841 | smp_mb(); /* ensure subsequent action seen after grace period. */ | |
019129d5 PM |
842 | } |
843 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | |
844 | ||
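synchronize_rcu_expedited() is a drop-in replacement for synchronize_rcu() that trades much higher CPU overhead (it disturbs every CPU via synchronize_sched_expedited()) for lower grace-period latency, so it is best reserved for genuinely latency-sensitive paths. A hypothetical example; foo_unpublish() and slot are illustrative names:

```c
static void foo_unpublish(struct foo __rcu **slot, struct foo *fp)
{
	rcu_assign_pointer(*slot, NULL);	/* unpublish the structure */
	synchronize_rcu_expedited();		/* short wait, but disturbs all CPUs */
	kfree(fp);				/* no reader can still see fp */
}
```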
f41d911f | 845 | /* |
6cc68793 | 846 | * Check to see if there is any immediate preemptible-RCU-related work |
f41d911f PM |
847 | * to be done. |
848 | */ | |
849 | static int rcu_preempt_pending(int cpu) | |
850 | { | |
851 | return __rcu_pending(&rcu_preempt_state, | |
852 | &per_cpu(rcu_preempt_data, cpu)); | |
853 | } | |
854 | ||
855 | /* | |
6cc68793 | 856 | * Does preemptible RCU need the CPU to stay out of dynticks mode? |
f41d911f PM |
857 | */ |
858 | static int rcu_preempt_needs_cpu(int cpu) | |
859 | { | |
860 | return !!per_cpu(rcu_preempt_data, cpu).nxtlist; | |
861 | } | |
862 | ||
e74f4c45 PM |
863 | /** |
864 | * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. | |
865 | */ | |
866 | void rcu_barrier(void) | |
867 | { | |
868 | _rcu_barrier(&rcu_preempt_state, call_rcu); | |
869 | } | |
870 | EXPORT_SYMBOL_GPL(rcu_barrier); | |
871 | ||
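rcu_barrier() waits for callbacks already queued by call_rcu(), not for readers; the canonical user is module unload, which must not let the module text disappear while queued callbacks still point into it. Sketch with illustrative names (foo_exit(), foo_unregister()):

```c
static void __exit foo_exit(void)
{
	foo_unregister();	/* stop queueing new call_rcu() callbacks (illustrative) */
	rcu_barrier();		/* wait for already-queued callbacks to finish */
	/* Only now is it safe for the module's callback functions to go away. */
}
module_exit(foo_exit);
```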
f41d911f | 872 | /* |
6cc68793 | 873 | * Initialize preemptible RCU's per-CPU data. |
f41d911f PM |
874 | */ |
875 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |
876 | { | |
877 | rcu_init_percpu_data(cpu, &rcu_preempt_state, 1); | |
878 | } | |
879 | ||
e74f4c45 | 880 | /* |
6cc68793 | 881 | * Move preemptible RCU's callbacks from the dying CPU to an online CPU. |
e74f4c45 | 882 | */ |
29494be7 | 883 | static void rcu_preempt_send_cbs_to_online(void) |
e74f4c45 | 884 | { |
29494be7 | 885 | rcu_send_cbs_to_online(&rcu_preempt_state); |
e74f4c45 PM |
886 | } |
887 | ||
1eba8f84 | 888 | /* |
6cc68793 | 889 | * Initialize preemptible RCU's state structures. |
1eba8f84 PM |
890 | */ |
891 | static void __init __rcu_init_preempt(void) | |
892 | { | |
394f99a9 | 893 | rcu_init_one(&rcu_preempt_state, &rcu_preempt_data); |
1eba8f84 PM |
894 | } |
895 | ||
f41d911f | 896 | /* |
6cc68793 | 897 | * Check for a task exiting while in a preemptible-RCU read-side |
f41d911f PM |
898 | * critical section, clean up if so. No need to issue warnings, |
899 | * as debug_check_no_locks_held() already does this if lockdep | |
900 | * is enabled. | |
901 | */ | |
902 | void exit_rcu(void) | |
903 | { | |
904 | struct task_struct *t = current; | |
905 | ||
906 | if (t->rcu_read_lock_nesting == 0) | |
907 | return; | |
908 | t->rcu_read_lock_nesting = 1; | |
13491a0e | 909 | __rcu_read_unlock(); |
f41d911f PM |
910 | } |
911 | ||
912 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | |
913 | ||
27f4d280 PM |
914 | static struct rcu_state *rcu_state = &rcu_sched_state; |
915 | ||
f41d911f PM |
916 | /* |
917 | * Tell them what RCU they are running. | |
918 | */ | |
0e0fc1c2 | 919 | static void __init rcu_bootup_announce(void) |
f41d911f PM |
920 | { |
921 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | |
26845c28 | 922 | rcu_bootup_announce_oddness(); |
f41d911f PM |
923 | } |
924 | ||
925 | /* | |
926 | * Return the number of RCU batches processed thus far for debug & stats. | |
927 | */ | |
928 | long rcu_batches_completed(void) | |
929 | { | |
930 | return rcu_batches_completed_sched(); | |
931 | } | |
932 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | |
933 | ||
bf66f18e PM |
934 | /* |
935 | * Force a quiescent state for RCU, which, because there is no preemptible | |
936 | * RCU, becomes the same as rcu-sched. | |
937 | */ | |
938 | void rcu_force_quiescent_state(void) | |
939 | { | |
940 | rcu_sched_force_quiescent_state(); | |
941 | } | |
942 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | |
943 | ||
f41d911f | 944 | /* |
6cc68793 | 945 | * Because preemptible RCU does not exist, we never have to check for |
f41d911f PM |
946 | * CPUs being in quiescent states. |
947 | */ | |
c3422bea | 948 | static void rcu_preempt_note_context_switch(int cpu) |
f41d911f PM |
949 | { |
950 | } | |
951 | ||
fc2219d4 | 952 | /* |
6cc68793 | 953 | * Because preemptible RCU does not exist, there are never any preempted |
fc2219d4 PM |
954 | * RCU readers. |
955 | */ | |
27f4d280 | 956 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) |
fc2219d4 PM |
957 | { |
958 | return 0; | |
959 | } | |
960 | ||
b668c9cf PM |
961 | #ifdef CONFIG_HOTPLUG_CPU |
962 | ||
963 | /* Because preemptible RCU does not exist, no quieting of tasks. */ | |
d3f6bad3 | 964 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) |
b668c9cf | 965 | { |
1304afb2 | 966 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
b668c9cf PM |
967 | } |
968 | ||
969 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
970 | ||
1ed509a2 | 971 | /* |
6cc68793 | 972 | * Because preemptible RCU does not exist, we never have to check for |
1ed509a2 PM |
973 | * tasks blocked within RCU read-side critical sections. |
974 | */ | |
975 | static void rcu_print_detail_task_stall(struct rcu_state *rsp) | |
976 | { | |
977 | } | |
978 | ||
f41d911f | 979 | /* |
6cc68793 | 980 | * Because preemptible RCU does not exist, we never have to check for |
f41d911f PM |
981 | * tasks blocked within RCU read-side critical sections. |
982 | */ | |
9bc8b558 | 983 | static int rcu_print_task_stall(struct rcu_node *rnp) |
f41d911f | 984 | { |
9bc8b558 | 985 | return 0; |
f41d911f PM |
986 | } |
987 | ||
53d84e00 PM |
988 | /* |
989 | * Because preemptible RCU does not exist, there is no need to suppress | |
990 | * its CPU stall warnings. | |
991 | */ | |
992 | static void rcu_preempt_stall_reset(void) | |
993 | { | |
994 | } | |
995 | ||
b0e165c0 | 996 | /* |
6cc68793 | 997 | * Because there is no preemptible RCU, there can be no readers blocked, |
49e29126 PM |
998 | * so there is no need to check for blocked tasks. So check only for |
999 | * bogus qsmask values. | |
b0e165c0 PM |
1000 | */ |
1001 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |
1002 | { | |
49e29126 | 1003 | WARN_ON_ONCE(rnp->qsmask); |
b0e165c0 PM |
1004 | } |
1005 | ||
33f76148 PM |
1006 | #ifdef CONFIG_HOTPLUG_CPU |
1007 | ||
dd5d19ba | 1008 | /* |
6cc68793 | 1009 | * Because preemptible RCU does not exist, it never needs to migrate |
237c80c5 PM |
1010 | * tasks that were blocked within RCU read-side critical sections, and |
1011 | * such non-existent tasks cannot possibly have been blocking the current | |
1012 | * grace period. | |
dd5d19ba | 1013 | */ |
237c80c5 PM |
1014 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
1015 | struct rcu_node *rnp, | |
1016 | struct rcu_data *rdp) | |
dd5d19ba | 1017 | { |
237c80c5 | 1018 | return 0; |
dd5d19ba PM |
1019 | } |
1020 | ||
33f76148 | 1021 | /* |
6cc68793 | 1022 | * Because preemptible RCU does not exist, it never needs CPU-offline |
33f76148 PM |
1023 | * processing. |
1024 | */ | |
1025 | static void rcu_preempt_offline_cpu(int cpu) | |
1026 | { | |
1027 | } | |
1028 | ||
1029 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1030 | ||
f41d911f | 1031 | /* |
6cc68793 | 1032 | * Because preemptible RCU does not exist, it never has any callbacks |
f41d911f PM |
1033 | * to check. |
1034 | */ | |
1eba8f84 | 1035 | static void rcu_preempt_check_callbacks(int cpu) |
f41d911f PM |
1036 | { |
1037 | } | |
1038 | ||
1039 | /* | |
6cc68793 | 1040 | * Because preemptible RCU does not exist, it never has any callbacks |
f41d911f PM |
1041 | * to process. |
1042 | */ | |
1eba8f84 | 1043 | static void rcu_preempt_process_callbacks(void) |
f41d911f PM |
1044 | { |
1045 | } | |
1046 | ||
019129d5 PM |
1047 | /* |
1048 | * Wait for an rcu-preempt grace period, but make it happen quickly. | |
6cc68793 | 1049 | * Because preemptible RCU does not exist, map to rcu-sched. |
019129d5 PM |
1050 | */ |
1051 | void synchronize_rcu_expedited(void) | |
1052 | { | |
1053 | synchronize_sched_expedited(); | |
1054 | } | |
1055 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | |
1056 | ||
d9a3da06 PM |
1057 | #ifdef CONFIG_HOTPLUG_CPU |
1058 | ||
1059 | /* | |
6cc68793 | 1060 | * Because preemptible RCU does not exist, there is never any need to |
d9a3da06 PM |
1061 | * report on tasks preempted in RCU read-side critical sections during |
1062 | * expedited RCU grace periods. | |
1063 | */ | |
1064 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | |
1065 | { | |
1066 | return; | |
1067 | } | |
1068 | ||
1069 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1070 | ||
f41d911f | 1071 | /* |
6cc68793 | 1072 | * Because preemptible RCU does not exist, it never has any work to do. |
f41d911f PM |
1073 | */ |
1074 | static int rcu_preempt_pending(int cpu) | |
1075 | { | |
1076 | return 0; | |
1077 | } | |
1078 | ||
1079 | /* | |
6cc68793 | 1080 | * Because preemptible RCU does not exist, it never needs any CPU. |
f41d911f PM |
1081 | */ |
1082 | static int rcu_preempt_needs_cpu(int cpu) | |
1083 | { | |
1084 | return 0; | |
1085 | } | |
1086 | ||
e74f4c45 | 1087 | /* |
6cc68793 | 1088 | * Because preemptible RCU does not exist, rcu_barrier() is just |
e74f4c45 PM |
1089 | * another name for rcu_barrier_sched(). |
1090 | */ | |
1091 | void rcu_barrier(void) | |
1092 | { | |
1093 | rcu_barrier_sched(); | |
1094 | } | |
1095 | EXPORT_SYMBOL_GPL(rcu_barrier); | |
1096 | ||
f41d911f | 1097 | /* |
6cc68793 | 1098 | * Because preemptible RCU does not exist, there is no per-CPU |
f41d911f PM |
1099 | * data to initialize. |
1100 | */ | |
1101 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |
1102 | { | |
1103 | } | |
1104 | ||
e74f4c45 | 1105 | /* |
6cc68793 | 1106 | * Because there is no preemptible RCU, there are no callbacks to move. |
e74f4c45 | 1107 | */ |
29494be7 | 1108 | static void rcu_preempt_send_cbs_to_online(void) |
e74f4c45 PM |
1109 | { |
1110 | } | |
1111 | ||
1eba8f84 | 1112 | /* |
6cc68793 | 1113 | * Because preemptible RCU does not exist, it need not be initialized. |
1eba8f84 PM |
1114 | */ |
1115 | static void __init __rcu_init_preempt(void) | |
1116 | { | |
1117 | } | |
1118 | ||
f41d911f | 1119 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
8bd93a2c | 1120 | |
27f4d280 PM |
1121 | #ifdef CONFIG_RCU_BOOST |
1122 | ||
1123 | #include "rtmutex_common.h" | |
1124 | ||
0ea1f2eb PM |
1125 | #ifdef CONFIG_RCU_TRACE |
1126 | ||
1127 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) | |
1128 | { | |
1129 | if (list_empty(&rnp->blkd_tasks)) | |
1130 | rnp->n_balk_blkd_tasks++; | |
1131 | else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) | |
1132 | rnp->n_balk_exp_gp_tasks++; | |
1133 | else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL) | |
1134 | rnp->n_balk_boost_tasks++; | |
1135 | else if (rnp->gp_tasks != NULL && rnp->qsmask != 0) | |
1136 | rnp->n_balk_notblocked++; | |
1137 | else if (rnp->gp_tasks != NULL && | |
a9f4793d | 1138 | ULONG_CMP_LT(jiffies, rnp->boost_time)) |
0ea1f2eb PM |
1139 | rnp->n_balk_notyet++; |
1140 | else | |
1141 | rnp->n_balk_nos++; | |
1142 | } | |
1143 | ||
1144 | #else /* #ifdef CONFIG_RCU_TRACE */ | |
1145 | ||
1146 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) | |
1147 | { | |
1148 | } | |
1149 | ||
1150 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | |
1151 | ||
5342e269 PM |
1152 | static struct lock_class_key rcu_boost_class; |
1153 | ||
27f4d280 PM |
1154 | /* |
1155 | * Carry out RCU priority boosting on the task indicated by ->exp_tasks | |
1156 | * or ->boost_tasks, advancing the pointer to the next task in the | |
1157 | * ->blkd_tasks list. | |
1158 | * | |
1159 | * Note that irqs must be enabled: boosting the task can block. | |
1160 | * Returns 1 if there are more tasks needing to be boosted. | |
1161 | */ | |
1162 | static int rcu_boost(struct rcu_node *rnp) | |
1163 | { | |
1164 | unsigned long flags; | |
1165 | struct rt_mutex mtx; | |
1166 | struct task_struct *t; | |
1167 | struct list_head *tb; | |
1168 | ||
1169 | if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) | |
1170 | return 0; /* Nothing left to boost. */ | |
1171 | ||
1172 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1173 | ||
1174 | /* | |
1175 | * Recheck under the lock: all tasks in need of boosting | |
1176 | * might exit their RCU read-side critical sections on their own. | |
1177 | */ | |
1178 | if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { | |
1179 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
1180 | return 0; | |
1181 | } | |
1182 | ||
1183 | /* | |
1184 | * Preferentially boost tasks blocking expedited grace periods. | |
1185 | * This cannot starve the normal grace periods because a second | |
1186 | * expedited grace period must boost all blocked tasks, including | |
1187 | * those blocking the pre-existing normal grace period. | |
1188 | */ | |
0ea1f2eb | 1189 | if (rnp->exp_tasks != NULL) { |
27f4d280 | 1190 | tb = rnp->exp_tasks; |
0ea1f2eb PM |
1191 | rnp->n_exp_boosts++; |
1192 | } else { | |
27f4d280 | 1193 | tb = rnp->boost_tasks; |
0ea1f2eb PM |
1194 | rnp->n_normal_boosts++; |
1195 | } | |
1196 | rnp->n_tasks_boosted++; | |
27f4d280 PM |
1197 | |
1198 | /* | |
1199 | * We boost task t by manufacturing an rt_mutex that appears to | |
1200 | * be held by task t. We leave a pointer to that rt_mutex where | |
1201 | * task t can find it, and task t will release the mutex when it | |
1202 | * exits its outermost RCU read-side critical section. Then | |
1203 | * simply acquiring this artificial rt_mutex will boost task | |
1204 | * t's priority. (Thanks to tglx for suggesting this approach!) | |
1205 | * | |
1206 | * Note that task t must acquire rnp->lock to remove itself from | |
1207 | * the ->blkd_tasks list, which it will do from exit() if from | |
1208 | * nowhere else. We therefore are guaranteed that task t will | |
1209 | * stay around at least until we drop rnp->lock. Note that | |
1210 | * rnp->lock also resolves races between our priority boosting | |
1211 | * and task t's exiting its outermost RCU read-side critical | |
1212 | * section. | |
1213 | */ | |
1214 | t = container_of(tb, struct task_struct, rcu_node_entry); | |
1215 | rt_mutex_init_proxy_locked(&mtx, t); | |
5342e269 PM |
1216 | /* Avoid lockdep false positives. This rt_mutex is its own thing. */ |
1217 | lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class, | |
1218 | "rcu_boost_mutex"); | |
27f4d280 | 1219 | t->rcu_boost_mutex = &mtx; |
27f4d280 PM |
1220 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1221 | rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */ | |
1222 | rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ | |
1223 | ||
1224 | return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL; | |
1225 | } | |
1226 | ||
1227 | /* | |
1228 | * Timer handler to initiate waking up of boost kthreads that | |
1229 | * have yielded the CPU due to excessive numbers of tasks to | |
1230 | * boost. We wake up the per-rcu_node kthread, which in turn | |
1231 | * will wake up the booster kthread. | |
1232 | */ | |
1233 | static void rcu_boost_kthread_timer(unsigned long arg) | |
1234 | { | |
1217ed1b | 1235 | invoke_rcu_node_kthread((struct rcu_node *)arg); |
27f4d280 PM |
1236 | } |
1237 | ||
1238 | /* | |
1239 | * Priority-boosting kthread. One per leaf rcu_node and one for the | |
1240 | * root rcu_node. | |
1241 | */ | |
1242 | static int rcu_boost_kthread(void *arg) | |
1243 | { | |
1244 | struct rcu_node *rnp = (struct rcu_node *)arg; | |
1245 | int spincnt = 0; | |
1246 | int more2boost; | |
1247 | ||
385680a9 | 1248 | trace_rcu_utilization("Start boost kthread@init"); |
27f4d280 | 1249 | for (;;) { |
d71df90e | 1250 | rnp->boost_kthread_status = RCU_KTHREAD_WAITING; |
385680a9 | 1251 | trace_rcu_utilization("End boost kthread@rcu_wait"); |
08bca60a | 1252 | rcu_wait(rnp->boost_tasks || rnp->exp_tasks); |
385680a9 | 1253 | trace_rcu_utilization("Start boost kthread@rcu_wait"); |
d71df90e | 1254 | rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; |
27f4d280 PM |
1255 | more2boost = rcu_boost(rnp); |
1256 | if (more2boost) | |
1257 | spincnt++; | |
1258 | else | |
1259 | spincnt = 0; | |
1260 | if (spincnt > 10) { | |
385680a9 | 1261 | trace_rcu_utilization("End boost kthread@rcu_yield"); |
27f4d280 | 1262 | rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp); |
385680a9 | 1263 | trace_rcu_utilization("Start boost kthread@rcu_yield"); |
27f4d280 PM |
1264 | spincnt = 0; |
1265 | } | |
1266 | } | |
1217ed1b | 1267 | /* NOTREACHED */ |
385680a9 | 1268 | trace_rcu_utilization("End boost kthread@notreached"); |
27f4d280 PM |
1269 | return 0; |
1270 | } | |
1271 | ||
1272 | /* | |
1273 | * Check to see if it is time to start boosting RCU readers that are | |
1274 | * blocking the current grace period, and, if so, tell the per-rcu_node | |
1275 | * kthread to start boosting them. If there is an expedited grace | |
1276 | * period in progress, it is always time to boost. | |
1277 | * | |
1217ed1b PM |
1278 | * The caller must hold rnp->lock, which this function releases, |
1279 | * but irqs remain disabled. The ->boost_kthread_task is immortal, | |
1280 | * so we don't need to worry about it going away. | |
27f4d280 | 1281 | */ |
1217ed1b | 1282 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) |
27f4d280 PM |
1283 | { |
1284 | struct task_struct *t; | |
1285 | ||
0ea1f2eb PM |
1286 | if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { |
1287 | rnp->n_balk_exp_gp_tasks++; | |
1217ed1b | 1288 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27f4d280 | 1289 | return; |
0ea1f2eb | 1290 | } |
27f4d280 PM |
1291 | if (rnp->exp_tasks != NULL || |
1292 | (rnp->gp_tasks != NULL && | |
1293 | rnp->boost_tasks == NULL && | |
1294 | rnp->qsmask == 0 && | |
1295 | ULONG_CMP_GE(jiffies, rnp->boost_time))) { | |
1296 | if (rnp->exp_tasks == NULL) | |
1297 | rnp->boost_tasks = rnp->gp_tasks; | |
1217ed1b | 1298 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27f4d280 PM |
1299 | t = rnp->boost_kthread_task; |
1300 | if (t != NULL) | |
1301 | wake_up_process(t); | |
1217ed1b | 1302 | } else { |
0ea1f2eb | 1303 | rcu_initiate_boost_trace(rnp); |
1217ed1b PM |
1304 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1305 | } | |
27f4d280 PM |
1306 | } |
1307 | ||
a46e0899 PM |
1308 | /* |
1309 | * Wake up the per-CPU kthread to invoke RCU callbacks. | |
1310 | */ | |
1311 | static void invoke_rcu_callbacks_kthread(void) | |
1312 | { | |
1313 | unsigned long flags; | |
1314 | ||
1315 | local_irq_save(flags); | |
1316 | __this_cpu_write(rcu_cpu_has_work, 1); | |
1eb52121 SL |
1317 | if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && |
1318 | current != __this_cpu_read(rcu_cpu_kthread_task)) | |
1319 | wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); | |
a46e0899 PM |
1320 | local_irq_restore(flags); |
1321 | } | |
1322 | ||
0f962a5e PM |
1323 | /* |
1324 | * Set the affinity of the boost kthread. The CPU-hotplug locks are | |
1325 | * held, so no one should be messing with the existence of the boost | |
1326 | * kthread. | |
1327 | */ | |
27f4d280 PM |
1328 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, |
1329 | cpumask_var_t cm) | |
1330 | { | |
27f4d280 PM |
1331 | struct task_struct *t; |
1332 | ||
27f4d280 PM |
1333 | t = rnp->boost_kthread_task; |
1334 | if (t != NULL) | |
1335 | set_cpus_allowed_ptr(rnp->boost_kthread_task, cm); | |
27f4d280 PM |
1336 | } |
1337 | ||
1338 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) | |
1339 | ||
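As a worked example (assuming the usual Kconfig default of CONFIG_RCU_BOOST_DELAY=500 milliseconds and HZ=250; the real values come from the kernel configuration), this macro evaluates to DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, so priority boosting is not considered until a grace period is roughly half a second old.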
1340 | /* | |
1341 | * Do priority-boost accounting for the start of a new grace period. | |
1342 | */ | |
1343 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | |
1344 | { | |
1345 | rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; | |
1346 | } | |
1347 | ||
27f4d280 PM |
1348 | /* |
1349 | * Create an RCU-boost kthread for the specified node if one does not | |
1350 | * already exist. We only create this kthread for preemptible RCU. | |
1351 | * Returns zero if all is well, a negated errno otherwise. | |
1352 | */ | |
1353 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | |
1354 | struct rcu_node *rnp, | |
1355 | int rnp_index) | |
1356 | { | |
1357 | unsigned long flags; | |
1358 | struct sched_param sp; | |
1359 | struct task_struct *t; | |
1360 | ||
1361 | if (&rcu_preempt_state != rsp) | |
1362 | return 0; | |
a46e0899 | 1363 | rsp->boost = 1; |
27f4d280 PM |
1364 | if (rnp->boost_kthread_task != NULL) |
1365 | return 0; | |
1366 | t = kthread_create(rcu_boost_kthread, (void *)rnp, | |
1367 | "rcub%d", rnp_index); | |
1368 | if (IS_ERR(t)) | |
1369 | return PTR_ERR(t); | |
1370 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1371 | rnp->boost_kthread_task = t; | |
1372 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
27f4d280 PM |
1373 | sp.sched_priority = RCU_KTHREAD_PRIO; |
1374 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
9a432736 | 1375 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ |
27f4d280 PM |
1376 | return 0; |
1377 | } | |
1378 | ||
f8b7fc6b PM |
1379 | #ifdef CONFIG_HOTPLUG_CPU |
1380 | ||
1381 | /* | |
1382 | * Stop RCU's per-CPU kthread when its CPU goes offline. | 
1383 | */ | |
1384 | static void rcu_stop_cpu_kthread(int cpu) | |
1385 | { | |
1386 | struct task_struct *t; | |
1387 | ||
1388 | /* Stop the CPU's kthread. */ | |
1389 | t = per_cpu(rcu_cpu_kthread_task, cpu); | |
1390 | if (t != NULL) { | |
1391 | per_cpu(rcu_cpu_kthread_task, cpu) = NULL; | |
1392 | kthread_stop(t); | |
1393 | } | |
1394 | } | |
1395 | ||
1396 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1397 | ||
1398 | static void rcu_kthread_do_work(void) | |
1399 | { | |
1400 | rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); | |
1401 | rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); | |
1402 | rcu_preempt_do_callbacks(); | |
1403 | } | |
1404 | ||
1405 | /* | |
1406 | * Wake up the specified per-rcu_node-structure kthread. | |
1407 | * Because the per-rcu_node kthreads are immortal, we don't need | |
1408 | * to do anything to keep them alive. | |
1409 | */ | |
1410 | static void invoke_rcu_node_kthread(struct rcu_node *rnp) | |
1411 | { | |
1412 | struct task_struct *t; | |
1413 | ||
1414 | t = rnp->node_kthread_task; | |
1415 | if (t != NULL) | |
1416 | wake_up_process(t); | |
1417 | } | |
1418 | ||
1419 | /* | |
1420 | * Set the specified CPU's kthread to run RT or not, as specified by | |
1421 | * the to_rt argument. The CPU-hotplug locks are held, so the task | |
1422 | * is not going away. | |
1423 | */ | |
1424 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | |
1425 | { | |
1426 | int policy; | |
1427 | struct sched_param sp; | |
1428 | struct task_struct *t; | |
1429 | ||
1430 | t = per_cpu(rcu_cpu_kthread_task, cpu); | |
1431 | if (t == NULL) | |
1432 | return; | |
1433 | if (to_rt) { | |
1434 | policy = SCHED_FIFO; | |
1435 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1436 | } else { | |
1437 | policy = SCHED_NORMAL; | |
1438 | sp.sched_priority = 0; | |
1439 | } | |
1440 | sched_setscheduler_nocheck(t, policy, &sp); | |
1441 | } | |
1442 | ||
1443 | /* | |
1444 | * Timer handler to initiate waking up per-CPU kthreads that | 
1445 | * have yielded the CPU due to excess numbers of RCU callbacks. | |
1446 | * We wake up the per-rcu_node kthread, which in turn will wake up | |
1447 | * the booster kthread. | |
1448 | */ | |
1449 | static void rcu_cpu_kthread_timer(unsigned long arg) | |
1450 | { | |
1451 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); | |
1452 | struct rcu_node *rnp = rdp->mynode; | |
1453 | ||
1454 | atomic_or(rdp->grpmask, &rnp->wakemask); | |
1455 | invoke_rcu_node_kthread(rnp); | |
1456 | } | |
1457 | ||
1458 | /* | |
1459 | * Drop to non-real-time priority and yield, but only after posting a | |
1460 | * timer that will cause us to regain our real-time priority if we | |
1461 | * remain preempted. Either way, we restore our real-time priority | |
1462 | * before returning. | |
1463 | */ | |
1464 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg) | |
1465 | { | |
1466 | struct sched_param sp; | |
1467 | struct timer_list yield_timer; | |
1468 | ||
1469 | setup_timer_on_stack(&yield_timer, f, arg); | |
1470 | mod_timer(&yield_timer, jiffies + 2); | |
1471 | sp.sched_priority = 0; | |
1472 | sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); | |
1473 | set_user_nice(current, 19); | |
1474 | schedule(); | |
1475 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1476 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); | |
1477 | del_timer(&yield_timer); | |
1478 | } | |
1479 | ||
1480 | /* | |
1481 | * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. | |
1482 | * This can happen while the corresponding CPU is either coming online | |
1483 | * or going offline. We cannot wait until the CPU is fully online | |
1484 | * before starting the kthread, because the various notifier functions | |
1485 | * can wait for RCU grace periods. So we park rcu_cpu_kthread() until | |
1486 | * the corresponding CPU is online. | |
1487 | * | |
1488 | * Return 1 if the kthread needs to stop, 0 otherwise. | |
1489 | * | |
1490 | * Caller must disable bh. This function can momentarily enable it. | |
1491 | */ | |
1492 | static int rcu_cpu_kthread_should_stop(int cpu) | |
1493 | { | |
1494 | while (cpu_is_offline(cpu) || | |
1495 | !cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu)) || | |
1496 | smp_processor_id() != cpu) { | |
1497 | if (kthread_should_stop()) | |
1498 | return 1; | |
1499 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; | |
1500 | per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); | |
1501 | local_bh_enable(); | |
1502 | schedule_timeout_uninterruptible(1); | |
1503 | if (!cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu))) | |
1504 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | |
1505 | local_bh_disable(); | |
1506 | } | |
1507 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | |
1508 | return 0; | |
1509 | } | |
1510 | ||
1511 | /* | |
1512 | * Per-CPU kernel thread that invokes RCU callbacks. This replaces the | |
e0f23060 PM |
1513 | * RCU softirq used in flavors and configurations of RCU that do not |
1514 | * support RCU priority boosting. | |
f8b7fc6b PM |
1515 | */ |
1516 | static int rcu_cpu_kthread(void *arg) | |
1517 | { | |
1518 | int cpu = (int)(long)arg; | |
1519 | unsigned long flags; | |
1520 | int spincnt = 0; | |
1521 | unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); | |
1522 | char work; | |
1523 | char *workp = &per_cpu(rcu_cpu_has_work, cpu); | |
1524 | ||
385680a9 | 1525 | trace_rcu_utilization("Start CPU kthread@init"); |
f8b7fc6b PM |
1526 | for (;;) { |
1527 | *statusp = RCU_KTHREAD_WAITING; | |
385680a9 | 1528 | trace_rcu_utilization("End CPU kthread@rcu_wait"); |
f8b7fc6b | 1529 | rcu_wait(*workp != 0 || kthread_should_stop()); |
385680a9 | 1530 | trace_rcu_utilization("Start CPU kthread@rcu_wait"); |
f8b7fc6b PM |
1531 | local_bh_disable(); |
1532 | if (rcu_cpu_kthread_should_stop(cpu)) { | |
1533 | local_bh_enable(); | |
1534 | break; | |
1535 | } | |
1536 | *statusp = RCU_KTHREAD_RUNNING; | |
1537 | per_cpu(rcu_cpu_kthread_loops, cpu)++; | |
1538 | local_irq_save(flags); | |
1539 | work = *workp; | |
1540 | *workp = 0; | |
1541 | local_irq_restore(flags); | |
1542 | if (work) | |
1543 | rcu_kthread_do_work(); | |
1544 | local_bh_enable(); | |
1545 | if (*workp != 0) | |
1546 | spincnt++; | |
1547 | else | |
1548 | spincnt = 0; | |
1549 | if (spincnt > 10) { | |
1550 | *statusp = RCU_KTHREAD_YIELDING; | |
385680a9 | 1551 | trace_rcu_utilization("End CPU kthread@rcu_yield"); |
f8b7fc6b | 1552 | rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); |
385680a9 | 1553 | trace_rcu_utilization("Start CPU kthread@rcu_yield"); |
f8b7fc6b PM |
1554 | spincnt = 0; |
1555 | } | |
1556 | } | |
1557 | *statusp = RCU_KTHREAD_STOPPED; | |
385680a9 | 1558 | trace_rcu_utilization("End CPU kthread@term"); |
f8b7fc6b PM |
1559 | return 0; |
1560 | } | |
1561 | ||
1562 | /* | |
1563 | * Spawn a per-CPU kthread, setting up affinity and priority. | |
1564 | * Because the CPU hotplug lock is held, no other CPU will be attempting | |
1565 | * to manipulate rcu_cpu_kthread_task. There might be another CPU | |
1566 | * attempting to access it during boot, but the locking in kthread_bind() | |
1567 | * will enforce sufficient ordering. | |
1568 | * | |
1569 | * Please note that we cannot simply refuse to wake up the per-CPU | |
1570 | * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state, | |
1571 | * which can result in softlockup complaints if the task ends up being | |
1572 | * idle for more than a couple of minutes. | |
1573 | * | |
1574 | * Note also that we cannot bind the per-CPU kthread to its | 
1575 | * CPU until that CPU is fully online. We also cannot wait until the | |
1576 | * CPU is fully online before we create its per-CPU kthread, as this would | |
1577 | * deadlock the system when CPU notifiers tried waiting for grace | |
1578 | * periods. So we bind the per-CPU kthread to its CPU only if the CPU | |
1579 | * is online. If its CPU is not yet fully online, then the code in | |
1580 | * rcu_cpu_kthread() will wait until it is fully online, and then do | |
1581 | * the binding. | |
1582 | */ | |
1583 | static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) | |
1584 | { | |
1585 | struct sched_param sp; | |
1586 | struct task_struct *t; | |
1587 | ||
b0d30417 | 1588 | if (!rcu_scheduler_fully_active || |
f8b7fc6b PM |
1589 | per_cpu(rcu_cpu_kthread_task, cpu) != NULL) |
1590 | return 0; | |
1f288094 ED |
1591 | t = kthread_create_on_node(rcu_cpu_kthread, |
1592 | (void *)(long)cpu, | |
1593 | cpu_to_node(cpu), | |
1594 | "rcuc%d", cpu); | |
f8b7fc6b PM |
1595 | if (IS_ERR(t)) |
1596 | return PTR_ERR(t); | |
1597 | if (cpu_online(cpu)) | |
1598 | kthread_bind(t, cpu); | |
1599 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | |
1600 | WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); | |
1601 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1602 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
1603 | per_cpu(rcu_cpu_kthread_task, cpu) = t; | |
1604 | wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ | |
1605 | return 0; | |
1606 | } | |
1607 | ||
1608 | /* | |
1609 | * Per-rcu_node kthread, which is in charge of waking up the per-CPU | |
1610 | * kthreads when needed. We ignore requests to wake up kthreads | |
1611 | * for offline CPUs, which is OK because force_quiescent_state() | |
1612 | * takes care of this case. | |
1613 | */ | |
1614 | static int rcu_node_kthread(void *arg) | |
1615 | { | |
1616 | int cpu; | |
1617 | unsigned long flags; | |
1618 | unsigned long mask; | |
1619 | struct rcu_node *rnp = (struct rcu_node *)arg; | |
1620 | struct sched_param sp; | |
1621 | struct task_struct *t; | |
1622 | ||
1623 | for (;;) { | |
1624 | rnp->node_kthread_status = RCU_KTHREAD_WAITING; | |
1625 | rcu_wait(atomic_read(&rnp->wakemask) != 0); | |
1626 | rnp->node_kthread_status = RCU_KTHREAD_RUNNING; | |
1627 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1628 | mask = atomic_xchg(&rnp->wakemask, 0); | |
1629 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | |
1630 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { | |
1631 | if ((mask & 0x1) == 0) | |
1632 | continue; | |
1633 | preempt_disable(); | |
1634 | t = per_cpu(rcu_cpu_kthread_task, cpu); | |
1635 | if (!cpu_online(cpu) || t == NULL) { | |
1636 | preempt_enable(); | |
1637 | continue; | |
1638 | } | |
1639 | per_cpu(rcu_cpu_has_work, cpu) = 1; | |
1640 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1641 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
1642 | preempt_enable(); | |
1643 | } | |
1644 | } | |
1645 | /* NOTREACHED */ | |
1646 | rnp->node_kthread_status = RCU_KTHREAD_STOPPED; | |
1647 | return 0; | |
1648 | } | |
1649 | ||
1650 | /* | |
1651 | * Set the per-rcu_node kthread's affinity to cover all CPUs that are | |
1652 | * served by the rcu_node in question. The CPU hotplug lock is still | |
1653 | * held, so the value of rnp->qsmaskinit will be stable. | |
1654 | * | |
1655 | * We don't include outgoingcpu in the affinity set; use -1 if there is | 
1656 | * no outgoing CPU. If there are no CPUs left in the affinity set, | |
1657 | * this function allows the kthread to execute on any CPU. | |
1658 | */ | |
1659 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | |
1660 | { | |
1661 | cpumask_var_t cm; | |
1662 | int cpu; | |
1663 | unsigned long mask = rnp->qsmaskinit; | |
1664 | ||
1665 | if (rnp->node_kthread_task == NULL) | |
1666 | return; | |
1667 | if (!alloc_cpumask_var(&cm, GFP_KERNEL)) | |
1668 | return; | |
1669 | cpumask_clear(cm); | |
1670 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | |
1671 | if ((mask & 0x1) && cpu != outgoingcpu) | |
1672 | cpumask_set_cpu(cpu, cm); | |
1673 | if (cpumask_weight(cm) == 0) { | |
1674 | cpumask_setall(cm); | |
1675 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) | |
1676 | cpumask_clear_cpu(cpu, cm); | |
1677 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | |
1678 | } | |
1679 | set_cpus_allowed_ptr(rnp->node_kthread_task, cm); | |
1680 | rcu_boost_kthread_setaffinity(rnp, cm); | |
1681 | free_cpumask_var(cm); | |
1682 | } | |
1683 | ||
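A condensed sketch of the bitmask-to-cpumask walk performed above; the CPU range, qsmaskinit value, and outgoing CPU below are made up for illustration:

	/* Illustration only: a leaf rcu_node covering CPUs 4-7 with CPUs 4 and 6 online. */
	int grplo = 4, grphi = 7, outgoingcpu = 6, cpu;
	unsigned long mask = 0x5;	/* bit i corresponds to CPU grplo + i */

	for (cpu = grplo; cpu <= grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			pr_info("CPU %d stays in the affinity mask\n", cpu);	/* prints only CPU 4 */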
1684 | /* | |
1685 | * Spawn a per-rcu_node kthread, setting priority and affinity. | |
1686 | * Called during boot before online/offline can happen, or, if | |
1687 | * during runtime, with the main CPU-hotplug locks held. So only | |
1688 | * one of these can be executing at a time. | |
1689 | */ | |
1690 | static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, | |
1691 | struct rcu_node *rnp) | |
1692 | { | |
1693 | unsigned long flags; | |
1694 | int rnp_index = rnp - &rsp->node[0]; | |
1695 | struct sched_param sp; | |
1696 | struct task_struct *t; | |
1697 | ||
b0d30417 | 1698 | if (!rcu_scheduler_fully_active || |
f8b7fc6b PM |
1699 | rnp->qsmaskinit == 0) |
1700 | return 0; | |
1701 | if (rnp->node_kthread_task == NULL) { | |
1702 | t = kthread_create(rcu_node_kthread, (void *)rnp, | |
1703 | "rcun%d", rnp_index); | |
1704 | if (IS_ERR(t)) | |
1705 | return PTR_ERR(t); | |
1706 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1707 | rnp->node_kthread_task = t; | |
1708 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
1709 | sp.sched_priority = 99; | |
1710 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
1711 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ | |
1712 | } | |
1713 | return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); | |
1714 | } | |
1715 | ||
1716 | /* | |
1717 | * Spawn all kthreads -- called as soon as the scheduler is running. | |
1718 | */ | |
1719 | static int __init rcu_spawn_kthreads(void) | |
1720 | { | |
1721 | int cpu; | |
1722 | struct rcu_node *rnp; | |
1723 | ||
b0d30417 | 1724 | rcu_scheduler_fully_active = 1; |
f8b7fc6b PM |
1725 | for_each_possible_cpu(cpu) { |
1726 | per_cpu(rcu_cpu_has_work, cpu) = 0; | |
1727 | if (cpu_online(cpu)) | |
1728 | (void)rcu_spawn_one_cpu_kthread(cpu); | |
1729 | } | |
1730 | rnp = rcu_get_root(rcu_state); | |
1731 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | |
1732 | if (NUM_RCU_NODES > 1) { | |
1733 | rcu_for_each_leaf_node(rcu_state, rnp) | |
1734 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | |
1735 | } | |
1736 | return 0; | |
1737 | } | |
1738 | early_initcall(rcu_spawn_kthreads); | |
1739 | ||
1740 | static void __cpuinit rcu_prepare_kthreads(int cpu) | |
1741 | { | |
1742 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | |
1743 | struct rcu_node *rnp = rdp->mynode; | |
1744 | ||
1745 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | |
b0d30417 | 1746 | if (rcu_scheduler_fully_active) { |
f8b7fc6b PM |
1747 | (void)rcu_spawn_one_cpu_kthread(cpu); |
1748 | if (rnp->node_kthread_task == NULL) | |
1749 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | |
1750 | } | |
1751 | } | |
1752 | ||
27f4d280 PM |
1753 | #else /* #ifdef CONFIG_RCU_BOOST */ |
1754 | ||
1217ed1b | 1755 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) |
27f4d280 | 1756 | { |
1217ed1b | 1757 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27f4d280 PM |
1758 | } |
1759 | ||
a46e0899 | 1760 | static void invoke_rcu_callbacks_kthread(void) |
27f4d280 | 1761 | { |
a46e0899 | 1762 | WARN_ON_ONCE(1); |
27f4d280 PM |
1763 | } |
1764 | ||
1765 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | |
1766 | { | |
1767 | } | |
1768 | ||
f8b7fc6b PM |
1769 | #ifdef CONFIG_HOTPLUG_CPU |
1770 | ||
1771 | static void rcu_stop_cpu_kthread(int cpu) | |
1772 | { | |
1773 | } | |
1774 | ||
1775 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1776 | ||
1777 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | |
1778 | { | |
1779 | } | |
1780 | ||
1781 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | |
1782 | { | |
1783 | } | |
1784 | ||
b0d30417 PM |
1785 | static int __init rcu_scheduler_really_started(void) |
1786 | { | |
1787 | rcu_scheduler_fully_active = 1; | |
1788 | return 0; | |
1789 | } | |
1790 | early_initcall(rcu_scheduler_really_started); | |
1791 | ||
f8b7fc6b PM |
1792 | static void __cpuinit rcu_prepare_kthreads(int cpu) |
1793 | { | |
1794 | } | |
1795 | ||
27f4d280 PM |
1796 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ |
1797 | ||
7b27d547 LJ |
1798 | #ifndef CONFIG_SMP |
1799 | ||
1800 | void synchronize_sched_expedited(void) | |
1801 | { | |
1802 | cond_resched(); | |
1803 | } | |
1804 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | |
1805 | ||
1806 | #else /* #ifndef CONFIG_SMP */ | |
1807 | ||
e27fc964 TH |
1808 | static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0); |
1809 | static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0); | |
7b27d547 LJ |
1810 | |
1811 | static int synchronize_sched_expedited_cpu_stop(void *data) | |
1812 | { | |
1813 | /* | |
1814 | * There must be a full memory barrier on each affected CPU | |
1815 | * between the time that try_stop_cpus() is called and the | |
1816 | * time that it returns. | |
1817 | * | |
1818 | * In the current initial implementation of cpu_stop, the | |
1819 | * above condition is already met when the control reaches | |
1820 | * this point and the following smp_mb() is not strictly | |
1821 | * necessary. Do smp_mb() anyway for documentation and | |
1822 | * robustness against future implementation changes. | |
1823 | */ | |
1824 | smp_mb(); /* See above comment block. */ | |
1825 | return 0; | |
1826 | } | |
1827 | ||
1828 | /* | |
1829 | * Wait for an rcu-sched grace period to elapse, but use a "big hammer" | 
1830 | * approach to force the grace period to end quickly. This consumes | 
1831 | * significant time on all CPUs, and is thus not recommended for | |
1832 | * any sort of common-case code. | |
1833 | * | |
1834 | * Note that it is illegal to call this function while holding any | |
1835 | * lock that is acquired by a CPU-hotplug notifier. Failing to | |
1836 | * observe this restriction will result in deadlock. | |
db3a8920 | 1837 | * |
e27fc964 TH |
1838 | * This implementation can be thought of as an application of ticket |
1839 | * locking to RCU, with sync_sched_expedited_started and | |
1840 | * sync_sched_expedited_done taking on the roles of the halves | |
1841 | * of the ticket-lock word. Each task atomically increments | |
1842 | * sync_sched_expedited_started upon entry, snapshotting the old value, | |
1843 | * then attempts to stop all the CPUs. If this succeeds, then each | |
1844 | * CPU will have executed a context switch, resulting in an RCU-sched | |
1845 | * grace period. We are then done, so we use atomic_cmpxchg() to | |
1846 | * update sync_sched_expedited_done to match our snapshot -- but | |
1847 | * only if someone else has not already advanced past our snapshot. | |
1848 | * | |
1849 | * On the other hand, if try_stop_cpus() fails, we check the value | |
1850 | * of sync_sched_expedited_done. If it has advanced past our | |
1851 | * initial snapshot, then someone else must have forced a grace period | |
1852 | * some time after we took our snapshot. In this case, our work is | |
1853 | * done for us, and we can simply return. Otherwise, we try again, | |
1854 | * but keep our initial snapshot for purposes of checking for someone | |
1855 | * doing our work for us. | |
1856 | * | |
1857 | * If we fail too many times in a row, we fall back to synchronize_sched(). | |
7b27d547 LJ |
1858 | */ |
1859 | void synchronize_sched_expedited(void) | |
1860 | { | |
e27fc964 | 1861 | int firstsnap, s, snap, trycount = 0; |
7b27d547 | 1862 | |
e27fc964 TH |
1863 | /* Note that atomic_inc_return() implies a full memory barrier. */ | 
1864 | firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started); | |
7b27d547 | 1865 | get_online_cpus(); |
e27fc964 TH |
1866 | |
1867 | /* | |
1868 | * Each pass through the following loop attempts to force a | |
1869 | * context switch on each CPU. | |
1870 | */ | |
7b27d547 LJ |
1871 | while (try_stop_cpus(cpu_online_mask, |
1872 | synchronize_sched_expedited_cpu_stop, | |
1873 | NULL) == -EAGAIN) { | |
1874 | put_online_cpus(); | |
e27fc964 TH |
1875 | |
1876 | /* No joy, try again later. Or just synchronize_sched(). */ | |
7b27d547 LJ |
1877 | if (trycount++ < 10) |
1878 | udelay(trycount * num_online_cpus()); | |
1879 | else { | |
1880 | synchronize_sched(); | |
1881 | return; | |
1882 | } | |
e27fc964 TH |
1883 | |
1884 | /* Check to see if someone else did our work for us. */ | |
1885 | s = atomic_read(&sync_sched_expedited_done); | |
1886 | if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) { | |
7b27d547 LJ |
1887 | smp_mb(); /* ensure test happens before caller kfree */ |
1888 | return; | |
1889 | } | |
e27fc964 TH |
1890 | |
1891 | /* | |
1892 | * Refetching sync_sched_expedited_started allows later | |
1893 | * callers to piggyback on our grace period. We subtract | |
1894 | * 1 to get the same token that the last incrementer got. | |
1895 | * We retry after they started, so our grace period works | |
1896 | * for them, and they started after our first try, so their | |
1897 | * grace period works for us. | |
1898 | */ | |
7b27d547 | 1899 | get_online_cpus(); |
e27fc964 TH |
1900 | snap = atomic_read(&sync_sched_expedited_started) - 1; |
1901 | smp_mb(); /* ensure read is before try_stop_cpus(). */ | |
7b27d547 | 1902 | } |
e27fc964 TH |
1903 | |
1904 | /* | |
1905 | * Everyone up to our most recent fetch is covered by our grace | |
1906 | * period. Update the counter, but only if our work is still | |
1907 | * relevant -- which it won't be if someone who started later | |
1908 | * than we did beat us to the punch. | |
1909 | */ | |
1910 | do { | |
1911 | s = atomic_read(&sync_sched_expedited_done); | |
1912 | if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) { | |
1913 | smp_mb(); /* ensure test happens before caller kfree */ | |
1914 | break; | |
1915 | } | |
1916 | } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s); | |
1917 | ||
7b27d547 LJ |
1918 | put_online_cpus(); |
1919 | } | |
1920 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | |
1921 | ||
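The ticket analogy described in the comment block above can be sketched in isolation; the sketch keeps the two real counters and the final update loop but replaces the CPU-stopping machinery with a placeholder, and the function name is hypothetical:

	/* Simplified sketch of the ticket scheme; illustration only. */
	static void expedited_ticket_sketch(void)
	{
		int snap = atomic_inc_return(&sync_sched_expedited_started);	/* take a ticket */
		int s;

		/* ... force a context switch on every online CPU here ... */

		do {
			s = atomic_read(&sync_sched_expedited_done);
			if (UINT_CMP_GE((unsigned)s, (unsigned)snap))
				break;	/* a later caller already advanced "done" past us */
		} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
	}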
1922 | #endif /* #else #ifndef CONFIG_SMP */ | |
1923 | ||
8bd93a2c PM |
1924 | #if !defined(CONFIG_RCU_FAST_NO_HZ) |
1925 | ||
1926 | /* | |
1927 | * Check to see if any future RCU-related work will need to be done | |
1928 | * by the current CPU, even if none need be done immediately, returning | |
1929 | * 1 if so. This function is part of the RCU implementation; it is -not- | |
1930 | * an exported member of the RCU API. | |
1931 | * | |
1932 | * Because we have preemptible RCU, just check whether this CPU needs | |
1933 | * any flavor of RCU. Do not chew up lots of CPU cycles with preemption | |
1934 | * disabled in a most-likely vain attempt to cause RCU not to need this CPU. | |
1935 | */ | |
1936 | int rcu_needs_cpu(int cpu) | |
1937 | { | |
1938 | return rcu_needs_cpu_quick_check(cpu); | |
1939 | } | |
1940 | ||
a47cd880 PM |
1941 | /* |
1942 | * Check to see if we need to continue a callback-flush operation to | 
1943 | * allow the last CPU to enter dyntick-idle mode. But fast dyntick-idle | 
1944 | * entry is not configured, so we never need to. | 
1945 | */ | |
1946 | static void rcu_needs_cpu_flush(void) | |
1947 | { | |
1948 | } | |
1949 | ||
8bd93a2c PM |
1950 | #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
1951 | ||
1952 | #define RCU_NEEDS_CPU_FLUSHES 5 | |
a47cd880 | 1953 | static DEFINE_PER_CPU(int, rcu_dyntick_drain); |
71da8132 | 1954 | static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); |
8bd93a2c PM |
1955 | |
1956 | /* | |
1957 | * Check to see if any future RCU-related work will need to be done | |
1958 | * by the current CPU, even if none need be done immediately, returning | |
1959 | * 1 if so. This function is part of the RCU implementation; it is -not- | |
1960 | * an exported member of the RCU API. | |
1961 | * | |
1962 | * Because we are not supporting preemptible RCU, attempt to accelerate | |
1963 | * any current grace periods so that RCU no longer needs this CPU, but | |
1964 | * only if all other CPUs are already in dynticks-idle mode. This will | |
1965 | * allow the CPU cores to be powered down immediately, as opposed to after | |
1966 | * waiting many milliseconds for grace periods to elapse. | |
a47cd880 PM |
1967 | * |
1968 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | |
1969 | * disabled, we do one pass of force_quiescent_state(), then do an | 
a46e0899 | 1970 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked |
27f4d280 | 1971 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. |
8bd93a2c PM |
1972 | */ |
1973 | int rcu_needs_cpu(int cpu) | |
1974 | { | |
a47cd880 | 1975 | int c = 0; |
77e38ed3 | 1976 | int snap; |
8bd93a2c PM |
1977 | int thatcpu; |
1978 | ||
622ea685 PM |
1979 | /* Check for being in the holdoff period. */ |
1980 | if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) | |
1981 | return rcu_needs_cpu_quick_check(cpu); | |
1982 | ||
8bd93a2c | 1983 | /* Don't bother unless we are the last non-dyntick-idle CPU. */ |
77e38ed3 PM |
1984 | for_each_online_cpu(thatcpu) { |
1985 | if (thatcpu == cpu) | |
1986 | continue; | |
23b5c8fa PM |
1987 | snap = atomic_add_return(0, &per_cpu(rcu_dynticks, |
1988 | thatcpu).dynticks); | |
77e38ed3 | 1989 | smp_mb(); /* Order sampling of snap with end of grace period. */ |
23b5c8fa | 1990 | if ((snap & 0x1) != 0) { |
a47cd880 | 1991 | per_cpu(rcu_dyntick_drain, cpu) = 0; |
71da8132 | 1992 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; |
8bd93a2c | 1993 | return rcu_needs_cpu_quick_check(cpu); |
8bd93a2c | 1994 | } |
77e38ed3 | 1995 | } |
a47cd880 PM |
1996 | |
1997 | /* Check and update the rcu_dyntick_drain sequencing. */ | |
1998 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { | |
1999 | /* First time through, initialize the counter. */ | |
2000 | per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES; | |
2001 | } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { | |
2002 | /* We have hit the limit, so time to give up. */ | |
71da8132 | 2003 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; |
a47cd880 PM |
2004 | return rcu_needs_cpu_quick_check(cpu); |
2005 | } | |
2006 | ||
2007 | /* Do one step pushing remaining RCU callbacks through. */ | |
2008 | if (per_cpu(rcu_sched_data, cpu).nxtlist) { | |
2009 | rcu_sched_qs(cpu); | |
2010 | force_quiescent_state(&rcu_sched_state, 0); | |
2011 | c = c || per_cpu(rcu_sched_data, cpu).nxtlist; | |
2012 | } | |
2013 | if (per_cpu(rcu_bh_data, cpu).nxtlist) { | |
2014 | rcu_bh_qs(cpu); | |
2015 | force_quiescent_state(&rcu_bh_state, 0); | |
2016 | c = c || per_cpu(rcu_bh_data, cpu).nxtlist; | |
8bd93a2c PM |
2017 | } |
2018 | ||
2019 | /* If RCU callbacks are still pending, RCU still needs this CPU. */ | |
622ea685 | 2020 | if (c) |
a46e0899 | 2021 | invoke_rcu_core(); |
8bd93a2c PM |
2022 | return c; |
2023 | } | |
2024 | ||
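The holdoff/drain bookkeeping used above can be boiled down to a small helper; this is an editorial sketch, and the helper name and signature are hypothetical:

	/* Illustration only: budget RCU_NEEDS_CPU_FLUSHES attempts, then hold off
	 * for the remainder of the current jiffy. */
	static int drain_budget_sketch(int *drain, unsigned long *holdoff)
	{
		if (*holdoff == jiffies)
			return 0;			/* already gave up during this jiffy */
		if (*drain <= 0)
			*drain = RCU_NEEDS_CPU_FLUSHES;	/* first attempt: arm the budget */
		else if (--*drain <= 0) {
			*holdoff = jiffies;		/* budget exhausted: start the holdoff */
			return 0;
		}
		return 1;				/* OK to keep pushing callbacks through */
	}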
a47cd880 PM |
2025 | /* |
2026 | * Check to see if we need to continue a callback-flush operation to | 
2027 | * allow the last CPU to enter dyntick-idle mode. | |
2028 | */ | |
2029 | static void rcu_needs_cpu_flush(void) | |
2030 | { | |
2031 | int cpu = smp_processor_id(); | |
71da8132 | 2032 | unsigned long flags; |
a47cd880 PM |
2033 | |
2034 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) | |
2035 | return; | |
71da8132 | 2036 | local_irq_save(flags); |
a47cd880 | 2037 | (void)rcu_needs_cpu(cpu); |
71da8132 | 2038 | local_irq_restore(flags); |
a47cd880 PM |
2039 | } |
2040 | ||
8bd93a2c | 2041 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |