/*
 * Read-Copy Update mechanism for mutual exclusion, realtime implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar
 *	for pushing me away from locks and towards counters, and
 *	to Suparna Bhattacharya for pushing me completely away
 *	from atomic instructions on the read side.
 *
 *  - Added handling of Dynamic Ticks
 *      Copyright 2007 - Paul E. McKenney <paulmck@us.ibm.com>
 *                     - Steven Rostedt <srostedt@redhat.com>
 *
 * Papers: http://www.rdrop.com/users/paulmck/RCU
 *
 * Design Document: http://lwn.net/Articles/253651/
 *
 * For detailed explanation of the Read-Copy Update mechanism see -
 *	Documentation/RCU/ *.txt
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/cpumask.h>
#include <linux/rcupreempt_trace.h>

/*
 * Macro that prevents the compiler from reordering accesses, but does
 * absolutely -nothing- to prevent CPUs from reordering.  This is used
 * only to mediate communication between mainline code and hardware
 * interrupt and NMI handlers.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

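/*
 * Illustrative use only (not part of this file): without ACCESS_ONCE()
 * the compiler may cache a flag in a register and never reread it, so a
 * loop such as the hypothetical one below could spin forever once an
 * interrupt handler changes the flag:
 *
 *	while (ACCESS_ONCE(need_more_work))
 *		do_unit_of_work();
 */
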
/*
 * PREEMPT_RCU data structures.
 */

/*
 * GP_STAGES specifies the number of times the state machine has
 * to go through all the rcu_try_flip_states (see below)
 * in a single Grace Period.
 *
 * GP in GP_STAGES stands for Grace Period ;)
 */
#define GP_STAGES 2
struct rcu_data {
	spinlock_t	lock;		/* Protect rcu_data fields. */
	long		completed;	/* Number of last completed batch. */
	int		waitlistcount;
	struct tasklet_struct rcu_tasklet;
	struct rcu_head *nextlist;
	struct rcu_head **nexttail;
	struct rcu_head *waitlist[GP_STAGES];
	struct rcu_head **waittail[GP_STAGES];
	struct rcu_head *donelist;	/* from waitlist & waitschedlist */
	struct rcu_head **donetail;
	long rcu_flipctr[2];
	struct rcu_head *nextschedlist;
	struct rcu_head **nextschedtail;
	struct rcu_head *waitschedlist;
	struct rcu_head **waitschedtail;
	int rcu_sched_sleeping;
#ifdef CONFIG_RCU_TRACE
	struct rcupreempt_trace trace;
#endif /* #ifdef CONFIG_RCU_TRACE */
};

/*
 * States for rcu_try_flip() and friends.
 */

enum rcu_try_flip_states {

	/*
	 * Stay here if nothing is happening.  Flip the counter if something
	 * starts happening.  Denoted by "I".
	 */
	rcu_try_flip_idle_state,

	/*
	 * Wait here for all CPUs to notice that the counter has flipped.  This
	 * prevents the old set of counters from ever being incremented once
	 * we leave this state, which in turn is necessary because we cannot
	 * test any individual counter for zero -- we can only check the sum.
	 * Denoted by "A".
	 */
	rcu_try_flip_waitack_state,

	/*
	 * Wait here for the sum of the old per-CPU counters to reach zero.
	 * Denoted by "Z".
	 */
	rcu_try_flip_waitzero_state,

	/*
	 * Wait here for each of the other CPUs to execute a memory barrier.
	 * This is necessary to ensure that these other CPUs really have
	 * completed executing their RCU read-side critical sections, despite
	 * their CPUs wildly reordering memory.  Denoted by "M".
	 */
	rcu_try_flip_waitmb_state,
};

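/*
 * Summary (comment added for clarity): the grace-period state machine
 * cycles I -> A -> Z -> M and back to I.  A single such cycle is one
 * counter flip, and the interval spanned by GP_STAGES consecutive
 * flips constitutes a full grace period.
 */
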
/*
 * States for rcu_ctrlblk.rcu_sched_sleep.
 */

enum rcu_sched_sleep_states {
	rcu_sched_not_sleeping,	/* Not sleeping, callbacks need GP.  */
	rcu_sched_sleep_prep,	/* Thinking of sleeping, rechecking. */
	rcu_sched_sleeping,	/* Sleeping, awaken if GP needed. */
};

struct rcu_ctrlblk {
	spinlock_t	fliplock;	/* Protect state-machine transitions. */
	long		completed;	/* Number of last completed batch. */
	enum rcu_try_flip_states rcu_try_flip_state; /* The current state of
							the rcu state machine */
	spinlock_t	schedlock;	/* Protect rcu_sched sleep state. */
	enum rcu_sched_sleep_states sched_sleep; /* rcu_sched state. */
	wait_queue_head_t sched_wq;	/* Place for rcu_sched to sleep. */
};

static DEFINE_PER_CPU(struct rcu_data, rcu_data);
static struct rcu_ctrlblk rcu_ctrlblk = {
	.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
	.completed = 0,
	.rcu_try_flip_state = rcu_try_flip_idle_state,
	.schedlock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.schedlock),
	.sched_sleep = rcu_sched_not_sleeping,
	.sched_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_ctrlblk.sched_wq),
};

static struct task_struct *rcu_sched_grace_period_task;

#ifdef CONFIG_RCU_TRACE
static char *rcu_try_flip_state_names[] =
	{ "idle", "waitack", "waitzero", "waitmb" };
#endif /* #ifdef CONFIG_RCU_TRACE */

static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE;

/*
 * Enum and per-CPU flag to determine when each CPU has seen
 * the most recent counter flip.
 */

enum rcu_flip_flag_values {
	rcu_flip_seen,		/* Steady/initial state, last flip seen. */
				/* Only GP detector can update. */
	rcu_flipped		/* Flip just completed, need confirmation. */
				/* Only corresponding CPU can update. */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag)
								= rcu_flip_seen;

/*
 * Enum and per-CPU flag to determine when each CPU has executed the
 * needed memory barrier to fence in memory references from its last RCU
 * read-side critical section in the just-completed grace period.
 */

enum rcu_mb_flag_values {
	rcu_mb_done,		/* Steady/initial state, no mb()s required. */
				/* Only GP detector can update. */
	rcu_mb_needed		/* Flip just completed, need an mb(). */
				/* Only corresponding CPU can update. */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag)
								= rcu_mb_done;

/*
 * RCU_DATA_ME: find the current CPU's rcu_data structure.
 * RCU_DATA_CPU: find the specified CPU's rcu_data structure.
 */
#define RCU_DATA_ME()		(&__get_cpu_var(rcu_data))
#define RCU_DATA_CPU(cpu)	(&per_cpu(rcu_data, cpu))

/*
 * Helper macro for tracing when the appropriate rcu_data is not
 * cached in a local variable, but where the CPU number is so cached.
 */
#define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace));

/*
 * Helper macro for tracing when the appropriate rcu_data is not
 * cached in a local variable.
 */
#define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace));

/*
 * Helper macro for tracing when the appropriate rcu_data is pointed
 * to by a local variable.
 */
#define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace));

#define RCU_SCHED_BATCH_TIME (HZ / 50)

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

void __rcu_read_lock(void)
{
	int idx;
	struct task_struct *t = current;
	int nesting;

	nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
	if (nesting != 0) {

		/* An earlier rcu_read_lock() covers us, just count it. */

		t->rcu_read_lock_nesting = nesting + 1;

	} else {
		unsigned long flags;

		/*
		 * We disable interrupts for the following reasons:
		 * - If we get a scheduling-clock interrupt here, and we
		 *   end up acking the counter flip, it's like a promise
		 *   that we will never increment the old counter again.
		 *   Thus we will break that promise if that
		 *   scheduling-clock interrupt happens between the time
		 *   we pick the .completed field and the time that we
		 *   increment our counter.
		 *
		 * - We don't want to be preempted out here.
		 *
		 * NMIs can still occur, of course, and might themselves
		 * contain rcu_read_lock().
		 */

		local_irq_save(flags);

		/*
		 * Outermost nesting of rcu_read_lock(), so increment
		 * the current counter for the current CPU.  Use volatile
		 * casts to prevent the compiler from reordering.
		 */

		idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1;
		ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++;

		/*
		 * Now that the per-CPU counter has been incremented, we
		 * are protected from races with rcu_read_lock() invoked
		 * from NMI handlers on this CPU.  We can therefore safely
		 * increment the nesting counter, relieving further NMIs
		 * of the need to increment the per-CPU counter.
		 */

		ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1;

		/*
		 * Now that we have prevented any NMIs from storing
		 * to the ->rcu_flipctr_idx, we can safely use it to
		 * remember which counter to decrement in the matching
		 * rcu_read_unlock().
		 */

		ACCESS_ONCE(t->rcu_flipctr_idx) = idx;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

void __rcu_read_unlock(void)
{
	int idx;
	struct task_struct *t = current;
	int nesting;

	nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
	if (nesting > 1) {

		/*
		 * We are still protected by the enclosing rcu_read_lock(),
		 * so simply decrement the counter.
		 */

		t->rcu_read_lock_nesting = nesting - 1;

	} else {
		unsigned long flags;

		/*
		 * Disable local interrupts to prevent the grace-period
		 * detection state machine from seeing us half-done.
		 * NMIs can still occur, of course, and might themselves
		 * contain rcu_read_lock() and rcu_read_unlock().
		 */

		local_irq_save(flags);

		/*
		 * Outermost nesting of rcu_read_unlock(), so we must
		 * decrement the current counter for the current CPU.
		 * This must be done carefully, because NMIs can
		 * occur at any point in this code, and any rcu_read_lock()
		 * and rcu_read_unlock() pairs in the NMI handlers
		 * must interact non-destructively with this code.
		 * Lots of volatile casts, and -very- careful ordering.
		 *
		 * Changes to this code, including this one, must be
		 * inspected, validated, and tested extremely carefully!!!
		 */

		/*
		 * First, pick up the index.
		 */

		idx = ACCESS_ONCE(t->rcu_flipctr_idx);

		/*
		 * Now that we have fetched the counter index, it is
		 * safe to decrement the per-task RCU nesting counter.
		 * After this, any interrupts or NMIs will increment and
		 * decrement the per-CPU counters.
		 */
		ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1;

		/*
		 * It is now safe to decrement this CPU's counter.  NMIs
		 * that occur after this statement will route their
		 * rcu_read_lock() calls through this "else" clause, and
		 * will thus start incrementing the per-CPU counter on
		 * their own.  They will also clobber ->rcu_flipctr_idx,
		 * but that is OK, since we have already fetched it.
		 */

		ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

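/*
 * Illustrative reader-side usage (a sketch, not part of this file;
 * "gbl_ptr", "struct foo", and do_something() are hypothetical):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gbl_ptr);
 *	if (p != NULL)
 *		do_something(p->field);
 *	rcu_read_unlock();
 */
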
/*
 * If a global counter flip has occurred since the last time that we
 * advanced callbacks, advance them.  Hardware interrupts must be
 * disabled when calling this function.
 */
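/*
 * Callback pipeline, in brief (comment added for clarity): callbacks
 * queued by call_rcu() enter on ->nextlist, advance one stage per
 * counter flip through ->waitlist[0] .. ->waitlist[GP_STAGES - 1], and
 * finally land on ->donelist, from which rcu_process_callbacks()
 * invokes them.
 */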
static void __rcu_advance_callbacks(struct rcu_data *rdp)
{
	int cpu;
	int i;
	int wlc = 0;

	if (rdp->completed != rcu_ctrlblk.completed) {
		if (rdp->waitlist[GP_STAGES - 1] != NULL) {
			*rdp->donetail = rdp->waitlist[GP_STAGES - 1];
			rdp->donetail = rdp->waittail[GP_STAGES - 1];
			RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp);
		}
		for (i = GP_STAGES - 2; i >= 0; i--) {
			if (rdp->waitlist[i] != NULL) {
				rdp->waitlist[i + 1] = rdp->waitlist[i];
				rdp->waittail[i + 1] = rdp->waittail[i];
				wlc++;
			} else {
				rdp->waitlist[i + 1] = NULL;
				rdp->waittail[i + 1] =
					&rdp->waitlist[i + 1];
			}
		}
		if (rdp->nextlist != NULL) {
			rdp->waitlist[0] = rdp->nextlist;
			rdp->waittail[0] = rdp->nexttail;
			wlc++;
			rdp->nextlist = NULL;
			rdp->nexttail = &rdp->nextlist;
			RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp);
		} else {
			rdp->waitlist[0] = NULL;
			rdp->waittail[0] = &rdp->waitlist[0];
		}
		rdp->waitlistcount = wlc;
		rdp->completed = rcu_ctrlblk.completed;
	}

	/*
	 * Check to see if this CPU needs to report that it has seen
	 * the most recent counter flip, thereby declaring that all
	 * subsequent rcu_read_lock() invocations will respect this flip.
	 */

	cpu = raw_smp_processor_id();
	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
		smp_mb();  /* Subsequent counter accesses must see new value */
		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
		smp_mb();  /* Subsequent RCU read-side critical sections */
			   /*  seen -after- acknowledgement. */
	}
}

DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
	.dynticks = 1,
};

#ifdef CONFIG_NO_HZ
static DEFINE_PER_CPU(int, rcu_update_flag);

/**
 * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI.
 *
 * If the CPU was idle with dynamic ticks active, this updates the
 * rcu_dyntick_sched.dynticks to let the RCU handling know that the
 * CPU is active.
 */
void rcu_irq_enter(void)
{
	int cpu = smp_processor_id();
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	if (per_cpu(rcu_update_flag, cpu))
		per_cpu(rcu_update_flag, cpu)++;

	/*
	 * Only update if we are coming from a stopped ticks mode
	 * (rcu_dyntick_sched.dynticks is even).
	 */
	if (!in_interrupt() &&
	    (rdssp->dynticks & 0x1) == 0) {
		/*
		 * The following might seem like we could have a race
		 * with NMI/SMIs.  But this really isn't a problem.
		 * Here we do a read/modify/write, and the race happens
		 * when an NMI/SMI comes in after the read and before
		 * the write.  But NMI/SMIs will increment this counter
		 * twice before returning, so the zero bit will not
		 * be corrupted by the NMI/SMI, which is the most
		 * important part.
		 *
		 * The only thing is that we would bring back the counter
		 * to a position that it was in during the NMI/SMI.
		 * But the zero bit would be set, so the rest of the
		 * counter would again be ignored.
		 *
		 * On return from the IRQ, the zero bit of the counter may
		 * be 0 and the counter may hold the same value as at the
		 * return from the NMI/SMI.  If the state machine was so
		 * unlucky as to see that, it still doesn't matter, since
		 * all RCU read-side critical sections on this CPU would
		 * have already completed.
		 */
		rdssp->dynticks++;
		/*
		 * The following memory barrier ensures that any
		 * rcu_read_lock() primitives in the irq handler
		 * are seen by other CPUs to follow the above
		 * increment to rcu_dyntick_sched.dynticks.  This is
		 * required in order for other CPUs to correctly
		 * determine when it is safe to advance the RCU
		 * grace-period state machine.
		 */
		smp_mb(); /* see above block comment. */
		/*
		 * Since we can't determine the dynamic tick mode from
		 * the rcu_dyntick_sched.dynticks after this routine,
		 * we use a second flag to acknowledge that we came
		 * from an idle state with ticks stopped.
		 */
		per_cpu(rcu_update_flag, cpu)++;
		/*
		 * If we take an NMI/SMI now, they will also increment
		 * the rcu_update_flag, and will not update the
		 * rcu_dyntick_sched.dynticks on exit.  That is for
		 * this IRQ to do.
		 */
	}
}

/**
 * rcu_irq_exit - Called from exiting Hard irq context.
 *
 * If the CPU was idle with dynamic ticks active, update the
 * rcu_dyntick_sched.dynticks to let the RCU handling be
 * aware that the CPU is going back to idle with no ticks.
 */
void rcu_irq_exit(void)
{
	int cpu = smp_processor_id();
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	/*
	 * rcu_update_flag is set if we interrupted the CPU
	 * when it was idle with ticks stopped.
	 * Once this occurs, we keep track of interrupt nesting
	 * because an NMI/SMI could also come in, and we still
	 * only want the IRQ that started the increment of the
	 * rcu_dyntick_sched.dynticks to be the one that modifies
	 * it on exit.
	 */
	if (per_cpu(rcu_update_flag, cpu)) {
		if (--per_cpu(rcu_update_flag, cpu))
			return;

		/* This must match the interrupt nesting */
		WARN_ON(in_interrupt());

		/*
		 * If an NMI/SMI happens now we are still
		 * protected by the rcu_dyntick_sched.dynticks being odd.
		 */

		/*
		 * The following memory barrier ensures that any
		 * rcu_read_unlock() primitives in the irq handler
		 * are seen by other CPUs to precede the following
		 * increment to rcu_dyntick_sched.dynticks.  This
		 * is required in order for other CPUs to determine
		 * when it is safe to advance the RCU grace-period
		 * state machine.
		 */
		smp_mb(); /* see above block comment. */
		rdssp->dynticks++;
		WARN_ON(rdssp->dynticks & 0x1);
	}
}

static void dyntick_save_progress_counter(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->dynticks_snap = rdssp->dynticks;
}

static inline int
rcu_try_flip_waitack_needed(int cpu)
{
	long curr;
	long snap;
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	curr = rdssp->dynticks;
	snap = rdssp->dynticks_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU remained in dynticks mode for the entire time
	 * and didn't take any interrupts, NMIs, SMIs, or whatever,
	 * then it cannot be in the middle of an rcu_read_lock(), so
	 * the next rcu_read_lock() it executes must use the new value
	 * of the counter.  So we can safely pretend that this CPU
	 * already acknowledged the counter.
	 */

	if ((curr == snap) && ((curr & 0x1) == 0))
		return 0;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq handlers, then, as above, we can safely pretend
	 * that this CPU already acknowledged the counter.
	 */

	if ((curr - snap) > 2 || (snap & 0x1) == 0)
		return 0;

	/* We need this CPU to explicitly acknowledge the counter flip. */

	return 1;
}
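
/*
 * Worked example of the checks above (comment added for clarity): the
 * low-order bit of ->dynticks is 0 while the CPU is in dynticks-idle
 * with no irq/NMI handler running, and 1 otherwise.  If snap == 4
 * (even), the CPU was idle at snapshot time, so no acknowledgement is
 * needed.  If snap == 5 and curr == 8, then curr - snap > 2, so the CPU
 * must have passed through a complete idle phase since the snapshot,
 * and again no explicit acknowledgement is needed.
 */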
607
static inline int
rcu_try_flip_waitmb_needed(int cpu)
{
	long curr;
	long snap;
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	curr = rdssp->dynticks;
	snap = rdssp->dynticks_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU remained in dynticks mode for the entire time
	 * and didn't take any interrupts, NMIs, SMIs, or whatever,
	 * then it cannot have executed an RCU read-side critical section
	 * during that time, so there is no need for it to execute a
	 * memory barrier.
	 */

	if ((curr == snap) && ((curr & 0x1) == 0))
		return 0;

	/*
	 * If the CPU either entered or exited an outermost interrupt,
	 * SMI, NMI, or whatever handler, then we know that it executed
	 * a memory barrier when doing so.  So we don't need another one.
	 */
	if (curr != snap)
		return 0;

	/* We need the CPU to execute a memory barrier. */

	return 1;
}

static void dyntick_save_progress_counter_sched(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->sched_dynticks_snap = rdssp->dynticks;
}

static int rcu_qsctr_inc_needed_dyntick(int cpu)
{
	long curr;
	long snap;
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	curr = rdssp->dynticks;
	snap = rdssp->sched_dynticks_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU remained in dynticks mode for the entire time
	 * and didn't take any interrupts, NMIs, SMIs, or whatever,
	 * then it cannot be in the middle of an rcu_read_lock(), so
	 * the next rcu_read_lock() it executes must use the new value
	 * of the counter.  Therefore, this CPU has been in a quiescent
	 * state the entire time, and we don't need to wait for it.
	 */

	if ((curr == snap) && ((curr & 0x1) == 0))
		return 0;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq handlers, then, as above, this CPU has already
	 * passed through a quiescent state.
	 */

	if ((curr - snap) > 2 || (snap & 0x1) == 0)
		return 0;

	/* We need this CPU to go through a quiescent state. */

	return 1;
}

#else /* !CONFIG_NO_HZ */

# define dyntick_save_progress_counter(cpu)		do { } while (0)
# define rcu_try_flip_waitack_needed(cpu)		(1)
# define rcu_try_flip_waitmb_needed(cpu)		(1)

# define dyntick_save_progress_counter_sched(cpu)	do { } while (0)
# define rcu_qsctr_inc_needed_dyntick(cpu)		(1)

#endif /* CONFIG_NO_HZ */

static void save_qsctr_sched(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->sched_qs_snap = rdssp->sched_qs;
}

static inline int rcu_qsctr_inc_needed(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	/*
	 * If there has been a quiescent state, no more need to wait
	 * on this CPU.
	 */

	if (rdssp->sched_qs != rdssp->sched_qs_snap) {
		smp_mb(); /* force ordering with cpu entering schedule(). */
		return 0;
	}

	/* We need this CPU to go through a quiescent state. */

	return 1;
}

/*
 * Get here when RCU is idle.  Decide whether we need to
 * move out of idle state, and return non-zero if so.
 * "Straightforward" approach for the moment, might later
 * use callback-list lengths, grace-period duration, or
 * some such to determine when to exit idle state.
 * Might also need a pre-idle test that does not acquire
 * the lock, but let's get the simple case working first...
 */

static int
rcu_try_flip_idle(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_i1);
	if (!rcu_pending(smp_processor_id())) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1);
		return 0;
	}

	/*
	 * Do the flip.
	 */

	RCU_TRACE_ME(rcupreempt_trace_try_flip_g1);
	rcu_ctrlblk.completed++;  /* stands in for rcu_try_flip_g2 */

	/*
	 * Need a memory barrier so that other CPUs see the new
	 * counter value before they see the subsequent change of all
	 * the rcu_flip_flag instances to rcu_flipped.
	 */

	smp_mb();	/* see above block comment. */

	/* Now ask each CPU for acknowledgement of the flip. */

	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
		dyntick_save_progress_counter(cpu);
	}

	return 1;
}

/*
 * Wait for CPUs to acknowledge the flip.
 */

static int
rcu_try_flip_waitack(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
	for_each_cpu_mask(cpu, rcu_cpu_online_map)
		if (rcu_try_flip_waitack_needed(cpu) &&
		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
			return 0;
		}

	/*
	 * Make sure our checks above don't bleed into subsequent
	 * waiting for the sum of the counters to reach zero.
	 */

	smp_mb();	/* see above block comment. */
	RCU_TRACE_ME(rcupreempt_trace_try_flip_a2);
	return 1;
}

/*
 * Wait for collective ``last'' counter to reach zero,
 * then tell all CPUs to do an end-of-grace-period memory barrier.
 */

static int
rcu_try_flip_waitzero(void)
{
	int cpu;
	int lastidx = !(rcu_ctrlblk.completed & 0x1);
	int sum = 0;

	/* Check to see if the sum of the "last" counters is zero. */

	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
	for_each_cpu_mask(cpu, rcu_cpu_online_map)
		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
	if (sum != 0) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
		return 0;
	}

	/*
	 * This ensures that the other CPUs see the call for
	 * memory barriers -after- the sum to zero has been
	 * detected here.
	 */
	smp_mb();  /*  ^^^^^^^^^^^^ */

	/* Call for a memory barrier from each CPU. */
	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
		dyntick_save_progress_counter(cpu);
	}

	RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
	return 1;
}

/*
 * Wait for all CPUs to do their end-of-grace-period memory barrier.
 * Return 1 once all CPUs have done so.
 */

static int
rcu_try_flip_waitmb(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
	for_each_cpu_mask(cpu, rcu_cpu_online_map)
		if (rcu_try_flip_waitmb_needed(cpu) &&
		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
			return 0;
		}

	smp_mb(); /* Ensure that the above checks precede any following flip. */
	RCU_TRACE_ME(rcupreempt_trace_try_flip_m2);
	return 1;
}

/*
 * Attempt a single flip of the counters.  Remember, a single flip does
 * -not- constitute a grace period.  Instead, the interval between
 * at least GP_STAGES consecutive flips is a grace period.
 *
 * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation
 * on a large SMP, they might want to use a hierarchical organization of
 * the per-CPU-counter pairs.
 */
static void rcu_try_flip(void)
{
	unsigned long flags;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_1);
	if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_e1);
		return;
	}

	/*
	 * Take the next transition(s) through the RCU grace-period
	 * flip-counter state machine.
	 */

	switch (rcu_ctrlblk.rcu_try_flip_state) {
	case rcu_try_flip_idle_state:
		if (rcu_try_flip_idle())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitack_state;
		break;
	case rcu_try_flip_waitack_state:
		if (rcu_try_flip_waitack())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitzero_state;
		break;
	case rcu_try_flip_waitzero_state:
		if (rcu_try_flip_waitzero())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitmb_state;
		break;
	case rcu_try_flip_waitmb_state:
		if (rcu_try_flip_waitmb())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_idle_state;
	}
	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
}

/*
 * Check to see if this CPU needs to do a memory barrier in order to
 * ensure that any prior RCU read-side critical sections have committed
 * their counter manipulations and critical-section memory references
 * before declaring the grace period to be completed.
 */
static void rcu_check_mb(int cpu)
{
	if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
		smp_mb();  /* Ensure RCU read-side accesses are visible. */
		per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
	}
}

void rcu_check_callbacks(int cpu, int user)
{
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	/*
	 * If this CPU took its interrupt from user mode or from the
	 * idle loop, and this is not a nested interrupt, then
	 * this CPU has to have exited all prior preempt-disable
	 * sections of code.  So increment the counter to note this.
	 *
	 * The memory barrier is needed to handle the case where
	 * writes from a preempt-disable section of code get reordered
	 * into schedule() by this CPU's write buffer.  So the memory
	 * barrier makes sure that the rcu_qsctr_inc() is seen by other
	 * CPUs to happen after any such write.
	 */

	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		smp_mb();	/* Guard against aggressive schedule(). */
		rcu_qsctr_inc(cpu);
	}

	rcu_check_mb(cpu);
	if (rcu_ctrlblk.completed == rdp->completed)
		rcu_try_flip();
	spin_lock_irqsave(&rdp->lock, flags);
	RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
	__rcu_advance_callbacks(rdp);
	if (rdp->donelist == NULL) {
		spin_unlock_irqrestore(&rdp->lock, flags);
	} else {
		spin_unlock_irqrestore(&rdp->lock, flags);
		raise_softirq(RCU_SOFTIRQ);
	}
}

/*
 * Needed by dynticks, to make sure all RCU processing has finished
 * when we go idle:
 */
void rcu_advance_callbacks(int cpu, int user)
{
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	if (rcu_ctrlblk.completed == rdp->completed) {
		rcu_try_flip();
		if (rcu_ctrlblk.completed == rdp->completed)
			return;
	}
	spin_lock_irqsave(&rdp->lock, flags);
	RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
	__rcu_advance_callbacks(rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
#define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) do { \
		*dsttail = srclist; \
		if (srclist != NULL) { \
			dsttail = srctail; \
			srclist = NULL; \
			srctail = &srclist;\
		} \
	} while (0)

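/*
 * The macro above splices the source list onto the tail of the
 * destination list and leaves the source empty; appending at the
 * tail pointer preserves callback order.  (Summary comment added
 * for clarity.)
 */
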
void rcu_offline_cpu(int cpu)
{
	int i;
	struct rcu_head *list = NULL;
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
	struct rcu_head *schedlist = NULL;
	struct rcu_head **schedtail = &schedlist;
	struct rcu_head **tail = &list;

	/*
	 * Remove all callbacks from the newly dead CPU, retaining order.
	 * Otherwise rcu_barrier() will fail.
	 */

	spin_lock_irqsave(&rdp->lock, flags);
	rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail);
	for (i = GP_STAGES - 1; i >= 0; i--)
		rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i],
					list, tail);
	rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail);
	rcu_offline_cpu_enqueue(rdp->waitschedlist, rdp->waitschedtail,
				schedlist, schedtail);
	rcu_offline_cpu_enqueue(rdp->nextschedlist, rdp->nextschedtail,
				schedlist, schedtail);
	rdp->rcu_sched_sleeping = 0;
	spin_unlock_irqrestore(&rdp->lock, flags);
	rdp->waitlistcount = 0;

	/* Disengage the newly dead CPU from the grace-period computation. */

	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
	rcu_check_mb(cpu);
	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
		smp_mb();  /* Subsequent counter accesses must see new value */
		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
		smp_mb();  /* Subsequent RCU read-side critical sections */
			   /*  seen -after- acknowledgement. */
	}

	RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0];
	RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1];

	RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
	RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;

	cpu_clear(cpu, rcu_cpu_online_map);

	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);

	/*
	 * Place the removed callbacks on the current CPU's queue.
	 * Make them all start a new grace period: simple approach,
	 * in theory could starve a given set of callbacks, but
	 * you would need to be doing some serious CPU hotplugging
	 * to make this happen.  If this becomes a problem, adding
	 * a synchronize_rcu() to the hotplug path would be a simple
	 * fix.
	 */

	local_irq_save(flags);  /* disable preempt till we know what lock. */
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	*rdp->nexttail = list;
	if (list)
		rdp->nexttail = tail;
	*rdp->nextschedtail = schedlist;
	if (schedlist)
		rdp->nextschedtail = schedtail;
	spin_unlock_irqrestore(&rdp->lock, flags);
}

void __devinit rcu_online_cpu(int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;

	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
	cpu_set(cpu, rcu_cpu_online_map);
	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);

	/*
	 * The rcu_sched grace-period processing might have bypassed
	 * this CPU, given that it was not in the rcu_cpu_online_map
	 * when the grace-period scan started.  This means that the
	 * grace-period task might sleep.  So make sure that if this
	 * should happen, the first callback posted to this CPU will
	 * wake up the grace-period task if need be.
	 */

	rdp = RCU_DATA_CPU(cpu);
	spin_lock_irqsave(&rdp->lock, flags);
	rdp->rcu_sched_sleeping = 1;
	spin_unlock_irqrestore(&rdp->lock, flags);
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

void rcu_offline_cpu(int cpu)
{
}

void __devinit rcu_online_cpu(int cpu)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_process_callbacks(struct softirq_action *unused)
{
	unsigned long flags;
	struct rcu_head *next, *list;
	struct rcu_data *rdp;

	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	list = rdp->donelist;
	if (list == NULL) {
		spin_unlock_irqrestore(&rdp->lock, flags);
		return;
	}
	rdp->donelist = NULL;
	rdp->donetail = &rdp->donelist;
	RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
	while (list) {
		next = list->next;
		list->func(list);
		list = next;
		RCU_TRACE_ME(rcupreempt_trace_invoke);
	}
}

void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	__rcu_advance_callbacks(rdp);
	*rdp->nexttail = head;
	rdp->nexttail = &head->next;
	RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
}
EXPORT_SYMBOL_GPL(call_rcu);

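/*
 * Illustrative caller (a sketch, not part of this file; "struct foo",
 * its ->rcu and ->list members, and foo_rcu_free() are hypothetical):
 *
 *	static void foo_rcu_free(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *	...
 *	list_del_rcu(&fp->list);
 *	call_rcu(&fp->rcu, foo_rcu_free);
 *
 * Readers traversing the list under rcu_read_lock() may still hold a
 * reference to *fp, so it is freed only after a grace period elapses.
 */
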
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;
	int wake_gp = 0;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	*rdp->nextschedtail = head;
	rdp->nextschedtail = &head->next;
	if (rdp->rcu_sched_sleeping) {

		/* Grace-period processing might be sleeping... */

		rdp->rcu_sched_sleeping = 0;
		wake_gp = 1;
	}
	spin_unlock_irqrestore(&rdp->lock, flags);
	if (wake_gp) {

		/* Wake up grace-period processing, unless someone beat us. */

		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping)
			wake_gp = 0;
		rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		if (wake_gp)
			wake_up_interruptible(&rcu_ctrlblk.sched_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Wait until all currently running preempt_disable() code segments
 * (including hardware-irq-disable segments) complete.  Note that
 * in -rt this does -not- necessarily result in all currently executing
 * interrupt -handlers- having completed.
 */
synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched)
EXPORT_SYMBOL_GPL(__synchronize_sched);
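
/*
 * Note (added for clarity): synchronize_rcu_xxx() is a generator macro
 * from include/linux/rcupdate.h; as best we can tell for this kernel
 * version, it expands __synchronize_sched() into a function that posts
 * a callback via call_rcu_sched() and blocks on a completion until that
 * callback runs, i.e. until a full rcu_sched grace period has elapsed.
 */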
1185
/*
 * kthread function that manages call_rcu_sched grace periods.
 */
static int rcu_sched_grace_period(void *arg)
{
	int couldsleep;		/* might sleep after current pass. */
	int couldsleepnext = 0;	/* might sleep after next pass. */
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;
	int ret;

	/*
	 * Each pass through the following loop handles one
	 * rcu_sched grace period cycle.
	 */
	do {
		/* Save each CPU's current state. */

		for_each_online_cpu(cpu) {
			dyntick_save_progress_counter_sched(cpu);
			save_qsctr_sched(cpu);
		}

		/*
		 * Sleep for about an RCU grace-period's worth to
		 * allow better batching and to consume less CPU.
		 */
		schedule_timeout_interruptible(RCU_SCHED_BATCH_TIME);

		/*
		 * If there was nothing to do last time, prepare to
		 * sleep at the end of the current grace period cycle.
		 */
		couldsleep = couldsleepnext;
		couldsleepnext = 1;
		if (couldsleep) {
			spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
			rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep;
			spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		}

		/*
		 * Wait on each CPU in turn to have either visited
		 * a quiescent state or been in dynticks-idle mode.
		 */
		for_each_online_cpu(cpu) {
			while (rcu_qsctr_inc_needed(cpu) &&
			       rcu_qsctr_inc_needed_dyntick(cpu)) {
				/* resched_cpu(cpu); @@@ */
				schedule_timeout_interruptible(1);
			}
		}

		/* Advance callbacks for each CPU.  */

		for_each_online_cpu(cpu) {

			rdp = RCU_DATA_CPU(cpu);
			spin_lock_irqsave(&rdp->lock, flags);

			/*
			 * We are running on this CPU irq-disabled, so no
			 * CPU can go offline until we re-enable irqs.
			 * The current CPU might have already gone
			 * offline (between the for_each_online_cpu and
			 * the spin_lock_irqsave), but in that case all its
			 * callback lists will be empty, so no harm done.
			 *
			 * Advance the callbacks!  We share normal RCU's
			 * donelist, since callbacks are invoked the
			 * same way in either case.
			 */
			if (rdp->waitschedlist != NULL) {
				*rdp->donetail = rdp->waitschedlist;
				rdp->donetail = rdp->waitschedtail;

				/*
				 * Next rcu_check_callbacks() will
				 * do the required raise_softirq().
				 */
			}
			if (rdp->nextschedlist != NULL) {
				rdp->waitschedlist = rdp->nextschedlist;
				rdp->waitschedtail = rdp->nextschedtail;
				couldsleep = 0;
				couldsleepnext = 0;
			} else {
				rdp->waitschedlist = NULL;
				rdp->waitschedtail = &rdp->waitschedlist;
			}
			rdp->nextschedlist = NULL;
			rdp->nextschedtail = &rdp->nextschedlist;

			/* Mark sleep intention. */

			rdp->rcu_sched_sleeping = couldsleep;

			spin_unlock_irqrestore(&rdp->lock, flags);
		}

		/* If we saw callbacks on the last scan, go deal with them. */

		if (!couldsleep)
			continue;

		/* Attempt to block... */

		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) {

			/*
			 * Someone posted a callback after we scanned.
			 * Go take care of it.
			 */
			spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
			couldsleepnext = 0;
			continue;
		}

		/* Block until the next person posts a callback. */

		rcu_ctrlblk.sched_sleep = rcu_sched_sleeping;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		ret = 0;
		__wait_event_interruptible(rcu_ctrlblk.sched_wq,
			rcu_ctrlblk.sched_sleep != rcu_sched_sleeping,
			ret);

		/*
		 * Signals would prevent us from sleeping, and we cannot
		 * do much with them in any case.  So flush them.
		 */
		if (ret)
			flush_signals(current);
		couldsleepnext = 0;

	} while (!kthread_should_stop());

	return (0);
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  Assumes that notifiers would take care of handling any
 * outstanding requests from the RCU core.
 *
 * This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	return (rdp->donelist != NULL ||
		!!rdp->waitlistcount ||
		rdp->nextlist != NULL ||
		rdp->nextschedlist != NULL ||
		rdp->waitschedlist != NULL);
}

int rcu_pending(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	/* The CPU has at least one callback queued somewhere. */

	if (rdp->donelist != NULL ||
	    !!rdp->waitlistcount ||
	    rdp->nextlist != NULL ||
	    rdp->nextschedlist != NULL ||
	    rdp->waitschedlist != NULL)
		return 1;

	/* The RCU core needs an acknowledgement from this CPU. */

	if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
	    (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
		return 1;

	/* This CPU has fallen behind the global grace-period number. */

	if (rdp->completed != rcu_ctrlblk.completed)
		return 1;

	/* Nothing needed from this CPU. */

	return 0;
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call = rcu_cpu_notify,
};

void __init __rcu_init(void)
{
	int cpu;
	int i;
	struct rcu_data *rdp;

	printk(KERN_NOTICE "Preemptible RCU implementation.\n");
	for_each_possible_cpu(cpu) {
		rdp = RCU_DATA_CPU(cpu);
		spin_lock_init(&rdp->lock);
		rdp->completed = 0;
		rdp->waitlistcount = 0;
		rdp->nextlist = NULL;
		rdp->nexttail = &rdp->nextlist;
		for (i = 0; i < GP_STAGES; i++) {
			rdp->waitlist[i] = NULL;
			rdp->waittail[i] = &rdp->waitlist[i];
		}
		rdp->donelist = NULL;
		rdp->donetail = &rdp->donelist;
		rdp->rcu_flipctr[0] = 0;
		rdp->rcu_flipctr[1] = 0;
		rdp->nextschedlist = NULL;
		rdp->nextschedtail = &rdp->nextschedlist;
		rdp->waitschedlist = NULL;
		rdp->waitschedtail = &rdp->waitschedlist;
		rdp->rcu_sched_sleeping = 0;
	}
	register_cpu_notifier(&rcu_nb);

	/*
	 * We don't need protection against CPU-Hotplug here since
	 *
	 * a) If a CPU comes online while we are iterating over the
	 *    cpu_online_map below, we would only end up making a
	 *    duplicate call to rcu_online_cpu() which sets the corresponding
	 *    CPU's mask in the rcu_cpu_online_map.
	 *
	 * b) A CPU cannot go offline at this point in time since the user
	 *    does not have access to the sysfs interface, nor do we
	 *    suspend the system.
	 */
	for_each_online_cpu(cpu)
		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu);

	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
}

/*
 * Late-boot-time RCU initialization that must wait until after the
 * scheduler has been initialized.
 */
void __init rcu_init_sched(void)
{
	rcu_sched_grace_period_task = kthread_run(rcu_sched_grace_period,
						  NULL,
						  "rcu_sched_grace_period");
	WARN_ON(IS_ERR(rcu_sched_grace_period_task));
}

#ifdef CONFIG_RCU_TRACE
long *rcupreempt_flipctr(int cpu)
{
	return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
}
EXPORT_SYMBOL_GPL(rcupreempt_flipctr);

int rcupreempt_flip_flag(int cpu)
{
	return per_cpu(rcu_flip_flag, cpu);
}
EXPORT_SYMBOL_GPL(rcupreempt_flip_flag);

int rcupreempt_mb_flag(int cpu)
{
	return per_cpu(rcu_mb_flag, cpu);
}
EXPORT_SYMBOL_GPL(rcupreempt_mb_flag);

char *rcupreempt_try_flip_state_name(void)
{
	return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];
}
EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name);

struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	return &rdp->trace;
}
EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu);

#endif /* #ifdef CONFIG_RCU_TRACE */