rcu: fix tracing bug thinko on boost-balk attribution
1 /*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic
4 * or preemptable semantics.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * Copyright Red Hat, 2009
21 * Copyright IBM Corporation, 2009
22 *
23 * Author: Ingo Molnar <mingo@elte.hu>
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25 */
26
27 #include <linux/delay.h>
28 #include <linux/stop_machine.h>
29
30 /*
31 * Check the RCU kernel configuration parameters and print informative
32 * messages about anything out of the ordinary. If you like #ifdef, you
33 * will love this function.
34 */
35 static void __init rcu_bootup_announce_oddness(void)
36 {
37 #ifdef CONFIG_RCU_TRACE
38 printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
39 #endif
40 #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
41 printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
42 CONFIG_RCU_FANOUT);
43 #endif
44 #ifdef CONFIG_RCU_FANOUT_EXACT
45 printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
46 #endif
47 #ifdef CONFIG_RCU_FAST_NO_HZ
48 printk(KERN_INFO
49 "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
50 #endif
51 #ifdef CONFIG_PROVE_RCU
52 printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
53 #endif
54 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
55 printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
56 #endif
57 #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
58 printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
59 #endif
60 #if NUM_RCU_LVL_4 != 0
61 printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
62 #endif
63 }
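/*
 * For example, a kernel built with CONFIG_RCU_TRACE=y and CONFIG_PROVE_RCU=y
 * (a hypothetical configuration) would get the following lines from the
 * function above, in addition to the rcu_bootup_announce() banner:
 *
 *	RCU debugfs-based tracing is enabled.
 *	RCU lockdep checking is enabled.
 */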
64
65 #ifdef CONFIG_TREE_PREEMPT_RCU
66
67 struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
68 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
69 static struct rcu_state *rcu_state = &rcu_preempt_state;
70
71 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
72
73 /*
74 * Tell them what RCU they are running.
75 */
76 static void __init rcu_bootup_announce(void)
77 {
78 printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
79 rcu_bootup_announce_oddness();
80 }
81
82 /*
83 * Return the number of RCU-preempt batches processed thus far
84 * for debug and statistics.
85 */
86 long rcu_batches_completed_preempt(void)
87 {
88 return rcu_preempt_state.completed;
89 }
90 EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
91
92 /*
93 * Return the number of RCU batches processed thus far for debug & stats.
94 */
95 long rcu_batches_completed(void)
96 {
97 return rcu_batches_completed_preempt();
98 }
99 EXPORT_SYMBOL_GPL(rcu_batches_completed);
100
101 /*
102 * Force a quiescent state for preemptible RCU.
103 */
104 void rcu_force_quiescent_state(void)
105 {
106 force_quiescent_state(&rcu_preempt_state, 0);
107 }
108 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
109
110 /*
111 * Record a preemptable-RCU quiescent state for the specified CPU. Note
112 * that this just means that the CPU has passed through a quiescent
113 * state; there might still be any number of tasks blocked while in an
114 * RCU read-side critical section, holding up the current grace period.
115 *
116 * Unlike the other rcu_*_qs() functions, callers of this function
117 * must disable irqs in order to protect the assignment to
118 * ->rcu_read_unlock_special.
119 */
120 static void rcu_preempt_qs(int cpu)
121 {
122 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
123
124 rdp->passed_quiesc_completed = rdp->gpnum - 1;
125 barrier();
126 rdp->passed_quiesc = 1;
127 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
128 }
129
130 /*
131 * We have entered the scheduler, and the current task might soon be
132 * context-switched away from. If this task is in an RCU read-side
133 * critical section, we will no longer be able to rely on the CPU to
134 * record that fact, so we enqueue the task on the blkd_tasks list.
135 * The task will dequeue itself when it exits the outermost enclosing
136 * RCU read-side critical section. Therefore, the current grace period
137 * cannot be permitted to complete until the blkd_tasks list entries
138 * predating the current grace period drain, in other words, until
139 * rnp->gp_tasks becomes NULL.
140 *
141 * Caller must disable preemption.
142 */
143 static void rcu_preempt_note_context_switch(int cpu)
144 {
145 struct task_struct *t = current;
146 unsigned long flags;
147 struct rcu_data *rdp;
148 struct rcu_node *rnp;
149
150 if (t->rcu_read_lock_nesting &&
151 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
152
153 /* Possibly blocking in an RCU read-side critical section. */
154 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
155 rnp = rdp->mynode;
156 raw_spin_lock_irqsave(&rnp->lock, flags);
157 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
158 t->rcu_blocked_node = rnp;
159
160 /*
161 * If this CPU has already checked in, then this task
162 * will hold up the next grace period rather than the
163 * current grace period. Queue the task accordingly.
164 * If the task is queued for the current grace period
165 * (i.e., this CPU has not yet passed through a quiescent
166 * state for the current grace period), then as long
167 * as that task remains queued, the current grace period
168 * cannot end. Note that there is some uncertainty as
169 * to exactly when the current grace period started.
170 * We take a conservative approach, which can result
171 * in unnecessarily waiting on tasks that started very
172 * slightly after the current grace period began. C'est
173 * la vie!!!
174 *
175 * But first, note that the current CPU must still be
176 * on line!
177 */
178 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
179 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
180 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
181 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
182 rnp->gp_tasks = &t->rcu_node_entry;
183 #ifdef CONFIG_RCU_BOOST
184 if (rnp->boost_tasks != NULL)
185 rnp->boost_tasks = rnp->gp_tasks;
186 #endif /* #ifdef CONFIG_RCU_BOOST */
187 } else {
188 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
189 if (rnp->qsmask & rdp->grpmask)
190 rnp->gp_tasks = &t->rcu_node_entry;
191 }
192 raw_spin_unlock_irqrestore(&rnp->lock, flags);
193 }
194
195 /*
196 * Either we were not in an RCU read-side critical section to
197 * begin with, or we have now recorded that critical section
198 * globally. Either way, we can now note a quiescent state
199 * for this CPU. Again, if we were in an RCU read-side critical
200 * section, and if that critical section was blocking the current
201 * grace period, then the fact that the task has been enqueued
202 * means that we continue to block the current grace period.
203 */
204 local_irq_save(flags);
205 rcu_preempt_qs(cpu);
206 local_irq_restore(flags);
207 }
208
209 /*
210 * Tree-preemptable RCU implementation for rcu_read_lock().
211 * Just increment ->rcu_read_lock_nesting, shared state will be updated
212 * if we block.
213 */
214 void __rcu_read_lock(void)
215 {
216 current->rcu_read_lock_nesting++;
217 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
218 }
219 EXPORT_SYMBOL_GPL(__rcu_read_lock);
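/*
 * Illustrative sketch (not part of this file): a typical RCU reader
 * brackets its accesses with rcu_read_lock()/rcu_read_unlock(), which
 * may nest.  The struct, pointer, and function names below are made up
 * for the example.
 *
 *	struct foo {
 *		int a;
 *	};
 *	struct foo __rcu *global_foo;
 *
 *	int read_foo_a(void)
 *	{
 *		struct foo *p;
 *		int ret;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(global_foo);
 *		ret = p ? p->a : -1;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 */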
220
221 /*
222 * Check for preempted RCU readers blocking the current grace period
223 * for the specified rcu_node structure. If the caller needs a reliable
224 * answer, it must hold the rcu_node's ->lock.
225 */
226 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
227 {
228 return rnp->gp_tasks != NULL;
229 }
230
231 /*
232 * Record a quiescent state for all tasks that were previously queued
233 * on the specified rcu_node structure and that were blocking the current
234 * RCU grace period. The caller must hold the specified rnp->lock with
235 * irqs disabled, and this lock is released upon return, but irqs remain
236 * disabled.
237 */
238 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
239 __releases(rnp->lock)
240 {
241 unsigned long mask;
242 struct rcu_node *rnp_p;
243
244 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
245 raw_spin_unlock_irqrestore(&rnp->lock, flags);
246 return; /* Still need more quiescent states! */
247 }
248
249 rnp_p = rnp->parent;
250 if (rnp_p == NULL) {
251 /*
252 * Either there is only one rcu_node in the tree,
253 * or tasks were kicked up to root rcu_node due to
254 * CPUs going offline.
255 */
256 rcu_report_qs_rsp(&rcu_preempt_state, flags);
257 return;
258 }
259
260 /* Report up the rest of the hierarchy. */
261 mask = rnp->grpmask;
262 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
263 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
264 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
265 }
266
267 /*
268 * Advance a ->blkd_tasks-list pointer to the next entry, returning
269 * NULL instead if this is the last entry on the list.
270 */
271 static struct list_head *rcu_next_node_entry(struct task_struct *t,
272 struct rcu_node *rnp)
273 {
274 struct list_head *np;
275
276 np = t->rcu_node_entry.next;
277 if (np == &rnp->blkd_tasks)
278 np = NULL;
279 return np;
280 }
281
282 /*
283 * Handle special cases during rcu_read_unlock(), such as needing to
284 * notify RCU core processing or the task having blocked during the RCU
285 * read-side critical section.
286 */
287 static void rcu_read_unlock_special(struct task_struct *t)
288 {
289 int empty;
290 int empty_exp;
291 unsigned long flags;
292 struct list_head *np;
293 struct rcu_node *rnp;
294 int special;
295
296 /* NMI handlers cannot block and cannot safely manipulate state. */
297 if (in_nmi())
298 return;
299
300 local_irq_save(flags);
301
302 /*
303 * If RCU core is waiting for this CPU to exit critical section,
304 * let it know that we have done so.
305 */
306 special = t->rcu_read_unlock_special;
307 if (special & RCU_READ_UNLOCK_NEED_QS) {
308 rcu_preempt_qs(smp_processor_id());
309 }
310
311 /* Hardware IRQ handlers cannot block. */
312 if (in_irq()) {
313 local_irq_restore(flags);
314 return;
315 }
316
317 /* Clean up if blocked during RCU read-side critical section. */
318 if (special & RCU_READ_UNLOCK_BLOCKED) {
319 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
320
321 /*
322 * Remove this task from the list it blocked on. The
323 * task can migrate while we acquire the lock, but at
324 * most one time. So at most two passes through the loop.
325 */
326 for (;;) {
327 rnp = t->rcu_blocked_node;
328 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
329 if (rnp == t->rcu_blocked_node)
330 break;
331 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
332 }
333 empty = !rcu_preempt_blocked_readers_cgp(rnp);
334 empty_exp = !rcu_preempted_readers_exp(rnp);
335 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
336 np = rcu_next_node_entry(t, rnp);
337 list_del_init(&t->rcu_node_entry);
338 if (&t->rcu_node_entry == rnp->gp_tasks)
339 rnp->gp_tasks = np;
340 if (&t->rcu_node_entry == rnp->exp_tasks)
341 rnp->exp_tasks = np;
342 #ifdef CONFIG_RCU_BOOST
343 if (&t->rcu_node_entry == rnp->boost_tasks)
344 rnp->boost_tasks = np;
345 #endif /* #ifdef CONFIG_RCU_BOOST */
346 t->rcu_blocked_node = NULL;
347
348 /*
349 * If this was the last task on the current list, and if
350 * we aren't waiting on any CPUs, report the quiescent state.
351 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
352 */
353 if (empty)
354 raw_spin_unlock_irqrestore(&rnp->lock, flags);
355 else
356 rcu_report_unblock_qs_rnp(rnp, flags);
357
358 #ifdef CONFIG_RCU_BOOST
359 /* Unboost if we were boosted. */
360 if (special & RCU_READ_UNLOCK_BOOSTED) {
361 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
362 rt_mutex_unlock(t->rcu_boost_mutex);
363 t->rcu_boost_mutex = NULL;
364 }
365 #endif /* #ifdef CONFIG_RCU_BOOST */
366
367 /*
368 * If this was the last task on the expedited lists,
369 * then we need to report up the rcu_node hierarchy.
370 */
371 if (!empty_exp && !rcu_preempted_readers_exp(rnp))
372 rcu_report_exp_rnp(&rcu_preempt_state, rnp);
373 } else {
374 local_irq_restore(flags);
375 }
376 }
377
378 /*
379 * Tree-preemptable RCU implementation for rcu_read_unlock().
380 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
381 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
382 * invoke rcu_read_unlock_special() to clean up after a context switch
383 * in an RCU read-side critical section and other special cases.
384 */
385 void __rcu_read_unlock(void)
386 {
387 struct task_struct *t = current;
388
389 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
390 --t->rcu_read_lock_nesting;
391 barrier(); /* decrement before load of ->rcu_read_unlock_special */
392 if (t->rcu_read_lock_nesting == 0 &&
393 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
394 rcu_read_unlock_special(t);
395 #ifdef CONFIG_PROVE_LOCKING
396 WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
397 #endif /* #ifdef CONFIG_PROVE_LOCKING */
398 }
399 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
400
401 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
402
403 /*
404 * Dump detailed information for all tasks blocking the current RCU
405 * grace period on the specified rcu_node structure.
406 */
407 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
408 {
409 unsigned long flags;
410 struct task_struct *t;
411
412 if (!rcu_preempt_blocked_readers_cgp(rnp))
413 return;
414 raw_spin_lock_irqsave(&rnp->lock, flags);
415 t = list_entry(rnp->gp_tasks,
416 struct task_struct, rcu_node_entry);
417 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
418 sched_show_task(t);
419 raw_spin_unlock_irqrestore(&rnp->lock, flags);
420 }
421
422 /*
423 * Dump detailed information for all tasks blocking the current RCU
424 * grace period.
425 */
426 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
427 {
428 struct rcu_node *rnp = rcu_get_root(rsp);
429
430 rcu_print_detail_task_stall_rnp(rnp);
431 rcu_for_each_leaf_node(rsp, rnp)
432 rcu_print_detail_task_stall_rnp(rnp);
433 }
434
435 #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
436
437 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
438 {
439 }
440
441 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
442
443 /*
444 * Scan the current list of tasks blocked within RCU read-side critical
445 * sections, printing out the tid of each.
446 */
447 static void rcu_print_task_stall(struct rcu_node *rnp)
448 {
449 struct task_struct *t;
450
451 if (!rcu_preempt_blocked_readers_cgp(rnp))
452 return;
453 t = list_entry(rnp->gp_tasks,
454 struct task_struct, rcu_node_entry);
455 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
456 printk(" P%d", t->pid);
457 }
458
459 /*
460 * Suppress preemptible RCU's CPU stall warnings by pushing the
461 * time of the next stall-warning message comfortably far into the
462 * future.
463 */
464 static void rcu_preempt_stall_reset(void)
465 {
466 rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
467 }
468
469 /*
470 * Check that the list of blocked tasks for the newly completed grace
471 * period is in fact empty. It is a serious bug to complete a grace
472 * period that still has RCU readers blocked! This function must be
473 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
474 * must be held by the caller.
475 *
476 * Also, if there are blocked tasks on the list, they automatically
477 * block the newly created grace period, so set up ->gp_tasks accordingly.
478 */
479 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
480 {
481 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
482 if (!list_empty(&rnp->blkd_tasks))
483 rnp->gp_tasks = rnp->blkd_tasks.next;
484 WARN_ON_ONCE(rnp->qsmask);
485 }
486
487 #ifdef CONFIG_HOTPLUG_CPU
488
489 /*
490 * Handle tasklist migration for the case in which all CPUs covered by the
491 * specified rcu_node have gone offline. Move them up to the root
492 * rcu_node. The reason for not just moving them to the immediate
493 * parent is to remove the need for rcu_read_unlock_special() to
494 * make more than two attempts to acquire the target rcu_node's lock.
495 * Returns a bit mask indicating whether tasks were previously blocking
496 * the current normal grace period (RCU_OFL_TASKS_NORM_GP) and/or the
497 * current expedited grace period (RCU_OFL_TASKS_EXP_GP) on the
498 * specified rcu_node structure, or zero if no tasks were blocking
499 * either grace period.
500 *
501 * The caller must hold rnp->lock with irqs disabled.
502 */
503 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
504 struct rcu_node *rnp,
505 struct rcu_data *rdp)
506 {
507 struct list_head *lp;
508 struct list_head *lp_root;
509 int retval = 0;
510 struct rcu_node *rnp_root = rcu_get_root(rsp);
511 struct task_struct *t;
512
513 if (rnp == rnp_root) {
514 WARN_ONCE(1, "Last CPU thought to be offlined?");
515 return 0; /* Shouldn't happen: at least one CPU online. */
516 }
517
518 /* If we are on an internal node, complain bitterly. */
519 WARN_ON_ONCE(rnp != rdp->mynode);
520
521 /*
522 * Move tasks up to root rcu_node. Don't try to get fancy for
523 * this corner-case operation -- just put this node's tasks
524 * at the head of the root node's list, and update the root node's
525 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
526 * if non-NULL. This might result in waiting for more tasks than
527 * absolutely necessary, but this is a good performance/complexity
528 * tradeoff.
529 */
530 if (rcu_preempt_blocked_readers_cgp(rnp))
531 retval |= RCU_OFL_TASKS_NORM_GP;
532 if (rcu_preempted_readers_exp(rnp))
533 retval |= RCU_OFL_TASKS_EXP_GP;
534 lp = &rnp->blkd_tasks;
535 lp_root = &rnp_root->blkd_tasks;
536 while (!list_empty(lp)) {
537 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
538 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
539 list_del(&t->rcu_node_entry);
540 t->rcu_blocked_node = rnp_root;
541 list_add(&t->rcu_node_entry, lp_root);
542 if (&t->rcu_node_entry == rnp->gp_tasks)
543 rnp_root->gp_tasks = rnp->gp_tasks;
544 if (&t->rcu_node_entry == rnp->exp_tasks)
545 rnp_root->exp_tasks = rnp->exp_tasks;
546 #ifdef CONFIG_RCU_BOOST
547 if (&t->rcu_node_entry == rnp->boost_tasks)
548 rnp_root->boost_tasks = rnp->boost_tasks;
549 #endif /* #ifdef CONFIG_RCU_BOOST */
550 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
551 }
552
553 #ifdef CONFIG_RCU_BOOST
554 /* In case root is being boosted and leaf is not. */
555 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
556 if (rnp_root->boost_tasks != NULL &&
557 rnp_root->boost_tasks != rnp_root->gp_tasks)
558 rnp_root->boost_tasks = rnp_root->gp_tasks;
559 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
560 #endif /* #ifdef CONFIG_RCU_BOOST */
561
562 rnp->gp_tasks = NULL;
563 rnp->exp_tasks = NULL;
564 return retval;
565 }
566
567 /*
568 * Do CPU-offline processing for preemptable RCU.
569 */
570 static void rcu_preempt_offline_cpu(int cpu)
571 {
572 __rcu_offline_cpu(cpu, &rcu_preempt_state);
573 }
574
575 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
576
577 /*
578 * Check for a quiescent state from the current CPU. When a task blocks,
579 * the task is recorded in the corresponding CPU's rcu_node structure,
580 * which is checked elsewhere.
581 *
582 * Caller must disable hard irqs.
583 */
584 static void rcu_preempt_check_callbacks(int cpu)
585 {
586 struct task_struct *t = current;
587
588 if (t->rcu_read_lock_nesting == 0) {
589 rcu_preempt_qs(cpu);
590 return;
591 }
592 if (per_cpu(rcu_preempt_data, cpu).qs_pending)
593 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
594 }
595
596 /*
597 * Process callbacks for preemptable RCU.
598 */
599 static void rcu_preempt_process_callbacks(void)
600 {
601 __rcu_process_callbacks(&rcu_preempt_state,
602 &__get_cpu_var(rcu_preempt_data));
603 }
604
605 /*
606 * Queue a preemptable-RCU callback for invocation after a grace period.
607 */
608 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
609 {
610 __call_rcu(head, func, &rcu_preempt_state);
611 }
612 EXPORT_SYMBOL_GPL(call_rcu);
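/*
 * Usage sketch (hypothetical names, not part of this file): a call_rcu()
 * user typically embeds an rcu_head in the protected structure and frees
 * the structure from the callback, which runs only after a grace period
 * has elapsed.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	...
 *	call_rcu(&old_foo->rcu, foo_reclaim);
 */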
613
614 /**
615 * synchronize_rcu - wait until a grace period has elapsed.
616 *
617 * Control will return to the caller some time after a full grace
618 * period has elapsed, in other words after all currently executing RCU
619 * read-side critical sections have completed. Note, however, that
620 * upon return from synchronize_rcu(), the caller might well be executing
621 * concurrently with new RCU read-side critical sections that began while
622 * synchronize_rcu() was waiting. RCU read-side critical sections are
623 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
624 */
625 void synchronize_rcu(void)
626 {
627 struct rcu_synchronize rcu;
628
629 if (!rcu_scheduler_active)
630 return;
631
632 init_rcu_head_on_stack(&rcu.head);
633 init_completion(&rcu.completion);
634 /* Will wake me after RCU finished. */
635 call_rcu(&rcu.head, wakeme_after_rcu);
636 /* Wait for it. */
637 wait_for_completion(&rcu.completion);
638 destroy_rcu_head_on_stack(&rcu.head);
639 }
640 EXPORT_SYMBOL_GPL(synchronize_rcu);
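/*
 * Usage sketch (hypothetical names): a common update pattern unlinks the
 * old version under a lock, publishes the new one, waits for a grace
 * period, and only then frees the old version, since no pre-existing
 * reader can still hold a reference to it once synchronize_rcu() returns.
 *
 *	spin_lock(&foo_lock);
 *	old_foo = rcu_dereference_protected(global_foo,
 *					    lockdep_is_held(&foo_lock));
 *	rcu_assign_pointer(global_foo, new_foo);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(old_foo);
 */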
641
642 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
643 static long sync_rcu_preempt_exp_count;
644 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
645
646 /*
647 * Return non-zero if there are any tasks in RCU read-side critical
648 * sections blocking the current preemptible-RCU expedited grace period.
649 * If there is no preemptible-RCU expedited grace period currently in
650 * progress, returns zero unconditionally.
651 */
652 static int rcu_preempted_readers_exp(struct rcu_node *rnp)
653 {
654 return rnp->exp_tasks != NULL;
655 }
656
657 /*
658 * Return non-zero if there is no RCU expedited grace period in progress
659 * for the specified rcu_node structure, in other words, if all CPUs and
660 * tasks covered by the specified rcu_node structure have done their bit
661 * for the current expedited grace period. Works only for preemptible
662 * RCU -- other RCU implementations use other means.
663 *
664 * Caller must hold sync_rcu_preempt_exp_mutex.
665 */
666 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
667 {
668 return !rcu_preempted_readers_exp(rnp) &&
669 ACCESS_ONCE(rnp->expmask) == 0;
670 }
671
672 /*
673 * Report the exit from RCU read-side critical section for the last task
674 * that queued itself during or before the current expedited preemptible-RCU
675 * grace period. This event is reported either to the rcu_node structure on
676 * which the task was queued or to one of that rcu_node structure's ancestors,
677 * recursively up the tree. (Calm down, calm down, we do the recursion
678 * iteratively!)
679 *
680 * Caller must hold sync_rcu_preempt_exp_mutex.
681 */
682 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
683 {
684 unsigned long flags;
685 unsigned long mask;
686
687 raw_spin_lock_irqsave(&rnp->lock, flags);
688 for (;;) {
689 if (!sync_rcu_preempt_exp_done(rnp))
690 break;
691 if (rnp->parent == NULL) {
692 wake_up(&sync_rcu_preempt_exp_wq);
693 break;
694 }
695 mask = rnp->grpmask;
696 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
697 rnp = rnp->parent;
698 raw_spin_lock(&rnp->lock); /* irqs already disabled */
699 rnp->expmask &= ~mask;
700 }
701 raw_spin_unlock_irqrestore(&rnp->lock, flags);
702 }
703
704 /*
705 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
706 * grace period for the specified rcu_node structure. If there are no such
707 * tasks, report it up the rcu_node hierarchy.
708 *
709 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
710 */
711 static void
712 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
713 {
714 int must_wait = 0;
715
716 raw_spin_lock(&rnp->lock); /* irqs already disabled */
717 if (!list_empty(&rnp->blkd_tasks)) {
718 rnp->exp_tasks = rnp->blkd_tasks.next;
719 rcu_initiate_boost(rnp);
720 must_wait = 1;
721 }
722 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
723 if (!must_wait)
724 rcu_report_exp_rnp(rsp, rnp);
725 }
726
727 /*
728 * Wait for an rcu-preempt grace period, but expedite it. The basic idea
729 * is to invoke synchronize_sched_expedited() to push all the tasks to
730 * the ->blkd_tasks lists and wait for these lists to drain.
731 */
732 void synchronize_rcu_expedited(void)
733 {
734 unsigned long flags;
735 struct rcu_node *rnp;
736 struct rcu_state *rsp = &rcu_preempt_state;
737 long snap;
738 int trycount = 0;
739
740 smp_mb(); /* Caller's modifications seen first by other CPUs. */
741 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
742 smp_mb(); /* Above access cannot bleed into critical section. */
743
744 /*
745 * Acquire lock, falling back to synchronize_rcu() if too many
746 * lock-acquisition failures. Of course, if someone does the
747 * expedited grace period for us, just leave.
748 */
749 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
750 if (trycount++ < 10)
751 udelay(trycount * num_online_cpus());
752 else {
753 synchronize_rcu();
754 return;
755 }
756 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
757 goto mb_ret; /* Others did our work for us. */
758 }
759 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
760 goto unlock_mb_ret; /* Others did our work for us. */
761
762 /* force all RCU readers onto ->blkd_tasks lists. */
763 synchronize_sched_expedited();
764
765 raw_spin_lock_irqsave(&rsp->onofflock, flags);
766
767 /* Initialize ->expmask for all non-leaf rcu_node structures. */
768 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
769 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
770 rnp->expmask = rnp->qsmaskinit;
771 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
772 }
773
774 /* Snapshot current state of ->blkd_tasks lists. */
775 rcu_for_each_leaf_node(rsp, rnp)
776 sync_rcu_preempt_exp_init(rsp, rnp);
777 if (NUM_RCU_NODES > 1)
778 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
779
780 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
781
782 /* Wait for snapshotted ->blkd_tasks lists to drain. */
783 rnp = rcu_get_root(rsp);
784 wait_event(sync_rcu_preempt_exp_wq,
785 sync_rcu_preempt_exp_done(rnp));
786
787 /* Clean up and exit. */
788 smp_mb(); /* ensure expedited GP seen before counter increment. */
789 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
790 unlock_mb_ret:
791 mutex_unlock(&sync_rcu_preempt_exp_mutex);
792 mb_ret:
793 smp_mb(); /* ensure subsequent action seen after grace period. */
794 }
795 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
796
797 /*
798 * Check to see if there is any immediate preemptable-RCU-related work
799 * to be done.
800 */
801 static int rcu_preempt_pending(int cpu)
802 {
803 return __rcu_pending(&rcu_preempt_state,
804 &per_cpu(rcu_preempt_data, cpu));
805 }
806
807 /*
808 * Does preemptable RCU need the CPU to stay out of dynticks mode?
809 */
810 static int rcu_preempt_needs_cpu(int cpu)
811 {
812 return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
813 }
814
815 /**
816 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
817 */
818 void rcu_barrier(void)
819 {
820 _rcu_barrier(&rcu_preempt_state, call_rcu);
821 }
822 EXPORT_SYMBOL_GPL(rcu_barrier);
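/*
 * Usage sketch (hypothetical module): because rcu_barrier() waits only
 * for callbacks already posted by call_rcu(), a module that posts such
 * callbacks should invoke it on its unload path so that no callback can
 * run after the module text is gone.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();
 *		rcu_barrier();
 *	}
 */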
823
824 /*
825 * Initialize preemptable RCU's per-CPU data.
826 */
827 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
828 {
829 rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
830 }
831
832 /*
833 * Move preemptable RCU's callbacks from dying CPU to other online CPU.
834 */
835 static void rcu_preempt_send_cbs_to_online(void)
836 {
837 rcu_send_cbs_to_online(&rcu_preempt_state);
838 }
839
840 /*
841 * Initialize preemptable RCU's state structures.
842 */
843 static void __init __rcu_init_preempt(void)
844 {
845 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
846 }
847
848 /*
849 * Check for a task exiting while in a preemptable-RCU read-side
850 * critical section, clean up if so. No need to issue warnings,
851 * as debug_check_no_locks_held() already does this if lockdep
852 * is enabled.
853 */
854 void exit_rcu(void)
855 {
856 struct task_struct *t = current;
857
858 if (t->rcu_read_lock_nesting == 0)
859 return;
860 t->rcu_read_lock_nesting = 1;
861 rcu_read_unlock();
862 }
863
864 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
865
866 static struct rcu_state *rcu_state = &rcu_sched_state;
867
868 /*
869 * Tell them what RCU they are running.
870 */
871 static void __init rcu_bootup_announce(void)
872 {
873 printk(KERN_INFO "Hierarchical RCU implementation.\n");
874 rcu_bootup_announce_oddness();
875 }
876
877 /*
878 * Return the number of RCU batches processed thus far for debug & stats.
879 */
880 long rcu_batches_completed(void)
881 {
882 return rcu_batches_completed_sched();
883 }
884 EXPORT_SYMBOL_GPL(rcu_batches_completed);
885
886 /*
887 * Force a quiescent state for RCU, which, because there is no preemptible
888 * RCU, becomes the same as rcu-sched.
889 */
890 void rcu_force_quiescent_state(void)
891 {
892 rcu_sched_force_quiescent_state();
893 }
894 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
895
896 /*
897 * Because preemptable RCU does not exist, we never have to check for
898 * CPUs being in quiescent states.
899 */
900 static void rcu_preempt_note_context_switch(int cpu)
901 {
902 }
903
904 /*
905 * Because preemptable RCU does not exist, there are never any preempted
906 * RCU readers.
907 */
908 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
909 {
910 return 0;
911 }
912
913 #ifdef CONFIG_HOTPLUG_CPU
914
915 /* Because preemptible RCU does not exist, no quieting of tasks. */
916 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
917 {
918 raw_spin_unlock_irqrestore(&rnp->lock, flags);
919 }
920
921 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
922
923 /*
924 * Because preemptable RCU does not exist, we never have to check for
925 * tasks blocked within RCU read-side critical sections.
926 */
927 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
928 {
929 }
930
931 /*
932 * Because preemptable RCU does not exist, we never have to check for
933 * tasks blocked within RCU read-side critical sections.
934 */
935 static void rcu_print_task_stall(struct rcu_node *rnp)
936 {
937 }
938
939 /*
940 * Because preemptible RCU does not exist, there is no need to suppress
941 * its CPU stall warnings.
942 */
943 static void rcu_preempt_stall_reset(void)
944 {
945 }
946
947 /*
948 * Because there is no preemptable RCU, there can be no readers blocked,
949 * so there is no need to check for blocked tasks. So check only for
950 * bogus qsmask values.
951 */
952 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
953 {
954 WARN_ON_ONCE(rnp->qsmask);
955 }
956
957 #ifdef CONFIG_HOTPLUG_CPU
958
959 /*
960 * Because preemptable RCU does not exist, it never needs to migrate
961 * tasks that were blocked within RCU read-side critical sections, and
962 * such non-existent tasks cannot possibly have been blocking the current
963 * grace period.
964 */
965 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
966 struct rcu_node *rnp,
967 struct rcu_data *rdp)
968 {
969 return 0;
970 }
971
972 /*
973 * Because preemptable RCU does not exist, it never needs CPU-offline
974 * processing.
975 */
976 static void rcu_preempt_offline_cpu(int cpu)
977 {
978 }
979
980 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
981
982 /*
983 * Because preemptable RCU does not exist, it never has any callbacks
984 * to check.
985 */
986 static void rcu_preempt_check_callbacks(int cpu)
987 {
988 }
989
990 /*
991 * Because preemptable RCU does not exist, it never has any callbacks
992 * to process.
993 */
994 static void rcu_preempt_process_callbacks(void)
995 {
996 }
997
998 /*
999 * Wait for an rcu-preempt grace period, but make it happen quickly.
1000 * But because preemptable RCU does not exist, map to rcu-sched.
1001 */
1002 void synchronize_rcu_expedited(void)
1003 {
1004 synchronize_sched_expedited();
1005 }
1006 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1007
1008 #ifdef CONFIG_HOTPLUG_CPU
1009
1010 /*
1011 * Because preemptable RCU does not exist, there is never any need to
1012 * report on tasks preempted in RCU read-side critical sections during
1013 * expedited RCU grace periods.
1014 */
1015 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
1016 {
1017 return;
1018 }
1019
1020 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1021
1022 /*
1023 * Because preemptable RCU does not exist, it never has any work to do.
1024 */
1025 static int rcu_preempt_pending(int cpu)
1026 {
1027 return 0;
1028 }
1029
1030 /*
1031 * Because preemptable RCU does not exist, it never needs any CPU.
1032 */
1033 static int rcu_preempt_needs_cpu(int cpu)
1034 {
1035 return 0;
1036 }
1037
1038 /*
1039 * Because preemptable RCU does not exist, rcu_barrier() is just
1040 * another name for rcu_barrier_sched().
1041 */
1042 void rcu_barrier(void)
1043 {
1044 rcu_barrier_sched();
1045 }
1046 EXPORT_SYMBOL_GPL(rcu_barrier);
1047
1048 /*
1049 * Because preemptable RCU does not exist, there is no per-CPU
1050 * data to initialize.
1051 */
1052 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
1053 {
1054 }
1055
1056 /*
1057 * Because there is no preemptable RCU, there are no callbacks to move.
1058 */
1059 static void rcu_preempt_send_cbs_to_online(void)
1060 {
1061 }
1062
1063 /*
1064 * Because preemptable RCU does not exist, it need not be initialized.
1065 */
1066 static void __init __rcu_init_preempt(void)
1067 {
1068 }
1069
1070 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1071
1072 #ifdef CONFIG_RCU_BOOST
1073
1074 #include "rtmutex_common.h"
1075
1076 #ifdef CONFIG_RCU_TRACE
1077
1078 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1079 {
1080 if (list_empty(&rnp->blkd_tasks))
1081 rnp->n_balk_blkd_tasks++;
1082 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1083 rnp->n_balk_exp_gp_tasks++;
1084 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1085 rnp->n_balk_boost_tasks++;
1086 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1087 rnp->n_balk_notblocked++;
1088 else if (rnp->gp_tasks != NULL &&
1089 ULONG_CMP_LT(jiffies, rnp->boost_time))
1090 rnp->n_balk_notyet++;
1091 else
1092 rnp->n_balk_nos++;
1093 }
1094
1095 #else /* #ifdef CONFIG_RCU_TRACE */
1096
1097 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1098 {
1099 }
1100
1101 #endif /* #else #ifdef CONFIG_RCU_TRACE */
1102
1103 /*
1104 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1105 * or ->boost_tasks, advancing the pointer to the next task in the
1106 * ->blkd_tasks list.
1107 *
1108 * Note that irqs must be enabled: boosting the task can block.
1109 * Returns 1 if there are more tasks needing to be boosted.
1110 */
1111 static int rcu_boost(struct rcu_node *rnp)
1112 {
1113 unsigned long flags;
1114 struct rt_mutex mtx;
1115 struct task_struct *t;
1116 struct list_head *tb;
1117
1118 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1119 return 0; /* Nothing left to boost. */
1120
1121 raw_spin_lock_irqsave(&rnp->lock, flags);
1122
1123 /*
1124 * Recheck under the lock: all tasks in need of boosting
1125 * might exit their RCU read-side critical sections on their own.
1126 */
1127 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1128 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1129 return 0;
1130 }
1131
1132 /*
1133 * Preferentially boost tasks blocking expedited grace periods.
1134 * This cannot starve the normal grace periods because a second
1135 * expedited grace period must boost all blocked tasks, including
1136 * those blocking the pre-existing normal grace period.
1137 */
1138 if (rnp->exp_tasks != NULL) {
1139 tb = rnp->exp_tasks;
1140 rnp->n_exp_boosts++;
1141 } else {
1142 tb = rnp->boost_tasks;
1143 rnp->n_normal_boosts++;
1144 }
1145 rnp->n_tasks_boosted++;
1146
1147 /*
1148 * We boost task t by manufacturing an rt_mutex that appears to
1149 * be held by task t. We leave a pointer to that rt_mutex where
1150 * task t can find it, and task t will release the mutex when it
1151 * exits its outermost RCU read-side critical section. Then
1152 * simply acquiring this artificial rt_mutex will boost task
1153 * t's priority. (Thanks to tglx for suggesting this approach!)
1154 *
1155 * Note that task t must acquire rnp->lock to remove itself from
1156 * the ->blkd_tasks list, which it will do from exit() if from
1157 * nowhere else. We therefore are guaranteed that task t will
1158 * stay around at least until we drop rnp->lock. Note that
1159 * rnp->lock also resolves races between our priority boosting
1160 * and task t's exiting its outermost RCU read-side critical
1161 * section.
1162 */
1163 t = container_of(tb, struct task_struct, rcu_node_entry);
1164 rt_mutex_init_proxy_locked(&mtx, t);
1165 t->rcu_boost_mutex = &mtx;
1166 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
1167 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1168 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1169 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1170
1171 return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
1172 }
1173
1174 /*
1175 * Timer handler to initiate waking up of boost kthreads that
1176 * have yielded the CPU due to excessive numbers of tasks to
1177 * boost. We wake up the per-rcu_node kthread, which in turn
1178 * will wake up the booster kthread.
1179 */
1180 static void rcu_boost_kthread_timer(unsigned long arg)
1181 {
1182 unsigned long flags;
1183 struct rcu_node *rnp = (struct rcu_node *)arg;
1184
1185 raw_spin_lock_irqsave(&rnp->lock, flags);
1186 invoke_rcu_node_kthread(rnp);
1187 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1188 }
1189
1190 /*
1191 * Priority-boosting kthread. One per leaf rcu_node and one for the
1192 * root rcu_node.
1193 */
1194 static int rcu_boost_kthread(void *arg)
1195 {
1196 struct rcu_node *rnp = (struct rcu_node *)arg;
1197 int spincnt = 0;
1198 int more2boost;
1199
1200 for (;;) {
1201 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1202 wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
1203 rnp->exp_tasks ||
1204 kthread_should_stop());
1205 if (kthread_should_stop())
1206 break;
1207 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1208 more2boost = rcu_boost(rnp);
1209 if (more2boost)
1210 spincnt++;
1211 else
1212 spincnt = 0;
1213 if (spincnt > 10) {
1214 rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
1215 spincnt = 0;
1216 }
1217 }
1218 rnp->boost_kthread_status = RCU_KTHREAD_STOPPED;
1219 return 0;
1220 }
1221
1222 /*
1223 * Check to see if it is time to start boosting RCU readers that are
1224 * blocking the current grace period, and, if so, tell the per-rcu_node
1225 * kthread to start boosting them. If there is an expedited grace
1226 * period in progress, it is always time to boost.
1227 *
1228 * The caller must hold rnp->lock.
1229 */
1230 static void rcu_initiate_boost(struct rcu_node *rnp)
1231 {
1232 struct task_struct *t;
1233
1234 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1235 rnp->n_balk_exp_gp_tasks++;
1236 return;
1237 }
1238 if (rnp->exp_tasks != NULL ||
1239 (rnp->gp_tasks != NULL &&
1240 rnp->boost_tasks == NULL &&
1241 rnp->qsmask == 0 &&
1242 ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1243 if (rnp->exp_tasks == NULL)
1244 rnp->boost_tasks = rnp->gp_tasks;
1245 t = rnp->boost_kthread_task;
1246 if (t != NULL)
1247 wake_up_process(t);
1248 } else
1249 rcu_initiate_boost_trace(rnp);
1250 }
1251
1252 /*
1253 * Set the affinity of the boost kthread. The CPU-hotplug locks are
1254 * held, so no one should be messing with the existence of the boost
1255 * kthread.
1256 */
1257 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
1258 cpumask_var_t cm)
1259 {
1260 struct task_struct *t;
1261
1262 t = rnp->boost_kthread_task;
1263 if (t != NULL)
1264 set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
1265 }
1266
1267 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
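/*
 * For example, with a hypothetical CONFIG_RCU_BOOST_DELAY of 500
 * milliseconds and HZ=250, RCU_BOOST_DELAY_JIFFIES works out to
 * DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies of grace-period age
 * before boosting is considered.
 */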
1268
1269 /*
1270 * Do priority-boost accounting for the start of a new grace period.
1271 */
1272 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1273 {
1274 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1275 }
1276
1277 /*
1278 * Initialize the RCU-boost waitqueue.
1279 */
1280 static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
1281 {
1282 init_waitqueue_head(&rnp->boost_wq);
1283 }
1284
1285 /*
1286 * Create an RCU-boost kthread for the specified node if one does not
1287 * already exist. We only create this kthread for preemptible RCU.
1288 * Returns zero if all is well, a negated errno otherwise.
1289 */
1290 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1291 struct rcu_node *rnp,
1292 int rnp_index)
1293 {
1294 unsigned long flags;
1295 struct sched_param sp;
1296 struct task_struct *t;
1297
1298 if (&rcu_preempt_state != rsp)
1299 return 0;
1300 if (rnp->boost_kthread_task != NULL)
1301 return 0;
1302 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1303 "rcub%d", rnp_index);
1304 if (IS_ERR(t))
1305 return PTR_ERR(t);
1306 raw_spin_lock_irqsave(&rnp->lock, flags);
1307 rnp->boost_kthread_task = t;
1308 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1309 wake_up_process(t);
1310 sp.sched_priority = RCU_KTHREAD_PRIO;
1311 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1312 return 0;
1313 }
1314
1315 #ifdef CONFIG_HOTPLUG_CPU
1316
1317 static void rcu_stop_boost_kthread(struct rcu_node *rnp)
1318 {
1319 unsigned long flags;
1320 struct task_struct *t;
1321
1322 raw_spin_lock_irqsave(&rnp->lock, flags);
1323 t = rnp->boost_kthread_task;
1324 rnp->boost_kthread_task = NULL;
1325 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1326 if (t != NULL)
1327 kthread_stop(t);
1328 }
1329
1330 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1331
1332 #else /* #ifdef CONFIG_RCU_BOOST */
1333
1334 static void rcu_initiate_boost(struct rcu_node *rnp)
1335 {
1336 }
1337
1338 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
1339 cpumask_var_t cm)
1340 {
1341 }
1342
1343 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1344 {
1345 }
1346
1347 static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
1348 {
1349 }
1350
1351 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1352 struct rcu_node *rnp,
1353 int rnp_index)
1354 {
1355 return 0;
1356 }
1357
1358 #ifdef CONFIG_HOTPLUG_CPU
1359
1360 static void rcu_stop_boost_kthread(struct rcu_node *rnp)
1361 {
1362 }
1363
1364 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1365
1366 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1367
1368 #ifndef CONFIG_SMP
1369
1370 void synchronize_sched_expedited(void)
1371 {
1372 cond_resched();
1373 }
1374 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
1375
1376 #else /* #ifndef CONFIG_SMP */
1377
1378 static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
1379 static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
1380
1381 static int synchronize_sched_expedited_cpu_stop(void *data)
1382 {
1383 /*
1384 * There must be a full memory barrier on each affected CPU
1385 * between the time that try_stop_cpus() is called and the
1386 * time that it returns.
1387 *
1388 * In the current initial implementation of cpu_stop, the
1389 * above condition is already met when the control reaches
1390 * this point and the following smp_mb() is not strictly
1391 * necessary. Do smp_mb() anyway for documentation and
1392 * robustness against future implementation changes.
1393 */
1394 smp_mb(); /* See above comment block. */
1395 return 0;
1396 }
1397
1398 /*
1399 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
1400 * approach to force grace period to end quickly. This consumes
1401 * significant time on all CPUs, and is thus not recommended for
1402 * any sort of common-case code.
1403 *
1404 * Note that it is illegal to call this function while holding any
1405 * lock that is acquired by a CPU-hotplug notifier. Failing to
1406 * observe this restriction will result in deadlock.
1407 *
1408 * This implementation can be thought of as an application of ticket
1409 * locking to RCU, with sync_sched_expedited_started and
1410 * sync_sched_expedited_done taking on the roles of the halves
1411 * of the ticket-lock word. Each task atomically increments
1412 * sync_sched_expedited_started upon entry, snapshotting the old value,
1413 * then attempts to stop all the CPUs. If this succeeds, then each
1414 * CPU will have executed a context switch, resulting in an RCU-sched
1415 * grace period. We are then done, so we use atomic_cmpxchg() to
1416 * update sync_sched_expedited_done to match our snapshot -- but
1417 * only if someone else has not already advanced past our snapshot.
1418 *
1419 * On the other hand, if try_stop_cpus() fails, we check the value
1420 * of sync_sched_expedited_done. If it has advanced past our
1421 * initial snapshot, then someone else must have forced a grace period
1422 * some time after we took our snapshot. In this case, our work is
1423 * done for us, and we can simply return. Otherwise, we try again,
1424 * but keep our initial snapshot for purposes of checking for someone
1425 * doing our work for us.
1426 *
1427 * If we fail too many times in a row, we fall back to synchronize_sched().
1428 */
1429 void synchronize_sched_expedited(void)
1430 {
1431 int firstsnap, s, snap, trycount = 0;
1432
1433 /* Note that atomic_inc_return() implies full memory barrier. */
1434 firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
1435 get_online_cpus();
1436
1437 /*
1438 * Each pass through the following loop attempts to force a
1439 * context switch on each CPU.
1440 */
1441 while (try_stop_cpus(cpu_online_mask,
1442 synchronize_sched_expedited_cpu_stop,
1443 NULL) == -EAGAIN) {
1444 put_online_cpus();
1445
1446 /* No joy, try again later. Or just synchronize_sched(). */
1447 if (trycount++ < 10)
1448 udelay(trycount * num_online_cpus());
1449 else {
1450 synchronize_sched();
1451 return;
1452 }
1453
1454 /* Check to see if someone else did our work for us. */
1455 s = atomic_read(&sync_sched_expedited_done);
1456 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
1457 smp_mb(); /* ensure test happens before caller kfree */
1458 return;
1459 }
1460
1461 /*
1462 * Refetching sync_sched_expedited_started allows later
1463 * callers to piggyback on our grace period. We subtract
1464 * 1 to get the same token that the last incrementer got.
1465 * We retry after they started, so our grace period works
1466 * for them, and they started after our first try, so their
1467 * grace period works for us.
1468 */
1469 get_online_cpus();
1470 snap = atomic_read(&sync_sched_expedited_started) - 1;
1471 smp_mb(); /* ensure read is before try_stop_cpus(). */
1472 }
1473
1474 /*
1475 * Everyone up to our most recent fetch is covered by our grace
1476 * period. Update the counter, but only if our work is still
1477 * relevant -- which it won't be if someone who started later
1478 * than we did beat us to the punch.
1479 */
1480 do {
1481 s = atomic_read(&sync_sched_expedited_done);
1482 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
1483 smp_mb(); /* ensure test happens before caller kfree */
1484 break;
1485 }
1486 } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
1487
1488 put_online_cpus();
1489 }
1490 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
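/*
 * Worked example of the ticket-style counters used above (illustrative
 * only): suppose both counters start at zero.  Task A enters and gets
 * firstsnap = 1, then task B enters and gets firstsnap = 2.  If B's
 * try_stop_cpus() succeeds, every CPU context-switches after B (and
 * therefore after A) entered, and B advances sync_sched_expedited_done
 * to 2.  If A's own attempt then fails with -EAGAIN, A observes
 * done = 2 >= its firstsnap of 1 and returns immediately, because B's
 * grace period also covers all readers that predate A's call.
 */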
1491
1492 #endif /* #else #ifndef CONFIG_SMP */
1493
1494 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1495
1496 /*
1497 * Check to see if any future RCU-related work will need to be done
1498 * by the current CPU, even if none need be done immediately, returning
1499 * 1 if so. This function is part of the RCU implementation; it is -not-
1500 * an exported member of the RCU API.
1501 *
1502 * Because we have preemptible RCU, just check whether this CPU needs
1503 * any flavor of RCU. Do not chew up lots of CPU cycles with preemption
1504 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
1505 */
1506 int rcu_needs_cpu(int cpu)
1507 {
1508 return rcu_needs_cpu_quick_check(cpu);
1509 }
1510
1511 /*
1512 * Check to see if we need to continue a callback-flush operation to
1513 * allow the last CPU to enter dyntick-idle mode. But fast dyntick-idle
1514 * entry is not configured, so we never need to.
1515 */
1516 static void rcu_needs_cpu_flush(void)
1517 {
1518 }
1519
1520 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1521
1522 #define RCU_NEEDS_CPU_FLUSHES 5
1523 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
1524 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
1525
1526 /*
1527 * Check to see if any future RCU-related work will need to be done
1528 * by the current CPU, even if none need be done immediately, returning
1529 * 1 if so. This function is part of the RCU implementation; it is -not-
1530 * an exported member of the RCU API.
1531 *
1532 * Because we are not supporting preemptible RCU, attempt to accelerate
1533 * any current grace periods so that RCU no longer needs this CPU, but
1534 * only if all other CPUs are already in dynticks-idle mode. This will
1535 * allow the CPU cores to be powered down immediately, as opposed to after
1536 * waiting many milliseconds for grace periods to elapse.
1537 *
1538 * Because it is not legal to invoke rcu_process_callbacks() with irqs
1539 * disabled, we do one pass of force_quiescent_state(), then call
1540 * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked
1541 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
1542 */
1543 int rcu_needs_cpu(int cpu)
1544 {
1545 int c = 0;
1546 int snap;
1547 int thatcpu;
1548
1549 /* Check for being in the holdoff period. */
1550 if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
1551 return rcu_needs_cpu_quick_check(cpu);
1552
1553 /* Don't bother unless we are the last non-dyntick-idle CPU. */
1554 for_each_online_cpu(thatcpu) {
1555 if (thatcpu == cpu)
1556 continue;
1557 snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
1558 thatcpu).dynticks);
1559 smp_mb(); /* Order sampling of snap with end of grace period. */
1560 if ((snap & 0x1) != 0) {
1561 per_cpu(rcu_dyntick_drain, cpu) = 0;
1562 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
1563 return rcu_needs_cpu_quick_check(cpu);
1564 }
1565 }
1566
1567 /* Check and update the rcu_dyntick_drain sequencing. */
1568 if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
1569 /* First time through, initialize the counter. */
1570 per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
1571 } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
1572 /* We have hit the limit, so time to give up. */
1573 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
1574 return rcu_needs_cpu_quick_check(cpu);
1575 }
1576
1577 /* Do one step pushing remaining RCU callbacks through. */
1578 if (per_cpu(rcu_sched_data, cpu).nxtlist) {
1579 rcu_sched_qs(cpu);
1580 force_quiescent_state(&rcu_sched_state, 0);
1581 c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
1582 }
1583 if (per_cpu(rcu_bh_data, cpu).nxtlist) {
1584 rcu_bh_qs(cpu);
1585 force_quiescent_state(&rcu_bh_state, 0);
1586 c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
1587 }
1588
1589 /* If RCU callbacks are still pending, RCU still needs this CPU. */
1590 if (c)
1591 invoke_rcu_cpu_kthread();
1592 return c;
1593 }
1594
1595 /*
1596 * Check to see if we need to continue a callback-flush operation to
1597 * allow the last CPU to enter dyntick-idle mode.
1598 */
1599 static void rcu_needs_cpu_flush(void)
1600 {
1601 int cpu = smp_processor_id();
1602 unsigned long flags;
1603
1604 if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
1605 return;
1606 local_irq_save(flags);
1607 (void)rcu_needs_cpu(cpu);
1608 local_irq_restore(flags);
1609 }
1610
1611 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */