/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *          Documentation/RCU
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};
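
/*
 * Note: the -300 initial batch values are presumably chosen so that the
 * batch counters wrap soon after boot, flushing out any bugs in the
 * signed-difference comparisons (rcu_batch_before()/rcu_batch_after())
 * that must cope with counter wraparound.
 */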

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;

#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        int cpu;
        cpumask_t cpumask;
        unsigned long flags;

        set_need_resched();
        spin_lock_irqsave(&rcp->lock, flags);
        if (unlikely(!rcp->signaled)) {
                rcp->signaled = 1;
                /*
                 * Don't send IPI to itself. With irqs disabled,
                 * rdp->cpu is the current cpu.
                 *
                 * cpu_online_map is updated by the _cpu_down()
                 * using __stop_machine(). Since we're in an irqs-disabled
                 * section, __stop_machine() is not executing, hence
                 * the cpu_online_map is stable.
                 *
                 * However, a cpu might have been offlined _just_ before
                 * we disabled irqs while entering here.
                 * And the rcu subsystem might not yet have handled the CPU_DEAD
                 * notification, leading to the offlined cpu's bit
                 * being set in the rcp->cpumask.
                 *
                 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
                 * sending smp_send_reschedule() to an offlined CPU.
                 */
                cpus_and(cpumask, rcp->cpumask, cpu_online_map);
                cpu_clear(rdp->cpu, cpumask);
                for_each_cpu_mask_nr(cpu, cpumask)
                        smp_send_reschedule(cpu);
        }
        spin_unlock_irqrestore(&rcp->lock, flags);
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        set_need_resched();
}
#endif

static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
                                struct rcu_data *rdp)
{
        long batch;

        head->next = NULL;
        smp_mb(); /* Read of rcu->cur must happen after any change by caller. */

        /*
         * Determine the batch number of this callback.
         *
         * Using ACCESS_ONCE to avoid the following error when gcc eliminates
         * local variable "batch" and emits code like this:
         *   1) rdp->batch = rcp->cur + 1 # gets old value
         *   ......
         *   2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
         * then [*nxttail[0], *nxttail[1]) may contain callbacks
         * with batch# == rdp->batch; see the comment for struct rcu_data.
         */
        batch = ACCESS_ONCE(rcp->cur) + 1;

        if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
                /* process callbacks */
                rdp->nxttail[0] = rdp->nxttail[1];
                rdp->nxttail[1] = rdp->nxttail[2];
                if (rcu_batch_after(batch - 1, rdp->batch))
                        rdp->nxttail[0] = rdp->nxttail[2];
        }

        rdp->batch = batch;
        *rdp->nxttail[2] = head;
        rdp->nxttail[2] = &head->next;

        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_ctrlblk);
        }
}
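
/*
 * For reference, the batch-number comparisons above rely on the signed
 * helpers from include/linux/rcuclassic.h, which amount to the following
 * sketch (the header is authoritative):
 *
 *	static inline int rcu_batch_before(long a, long b)
 *	{
 *		return (a - b) < 0;
 *	}
 *
 *	static inline int rcu_batch_after(long a, long b)
 *	{
 *		return (a - b) > 0;
 *	}
 *
 * Comparing the signed difference rather than the raw values keeps the
 * result correct across counter wraparound.
 */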

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        head->func = func;
        local_irq_save(flags);
        __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
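
/*
 * Illustrative update side (a sketch; struct foo, foo_reclaim() and
 * foo_release() are hypothetical): unlink an element from an
 * RCU-protected list, then let call_rcu() free it once a grace period
 * has elapsed.
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	static void foo_release(struct foo *fp)  (caller holds update-side lock)
 *	{
 *		list_del_rcu(&fp->list);
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 */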

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context. These may be nested.
 */
void call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        head->func = func;
        local_irq_save(flags);
        __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
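
/*
 * Illustrative read side corresponding to call_rcu_bh() (a sketch; "gp"
 * and do_something() are hypothetical): a process-context reader must use
 * the _bh variants, since softirq completion is what bounds the grace
 * period here.
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something(p);
 *	rcu_read_unlock_bh();
 */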

/*
 * Return the number of RCU batches processed thus far. Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
        return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU bh batches processed thus far. Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
        return rcu_bh_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/* Raises the softirq for processing rcu_callbacks. */
static inline void raise_rcu_softirq(void)
{
        raise_softirq(RCU_SOFTIRQ);
}

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
        unsigned long flags;
        struct rcu_head *next, *list;
        int count = 0;

        list = rdp->donelist;
        while (list) {
                next = list->next;
                prefetch(next);
                list->func(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }
        rdp->donelist = list;

        local_irq_save(flags);
        rdp->qlen -= count;
        local_irq_restore(flags);
        if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
                raise_rcu_softirq();
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus, they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch() to start the next grace
 *   period (if necessary).
 */
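
/*
 * Illustrative sequence for one grace period (a sketch of the code below,
 * not an additional mechanism):
 *
 *	rcu_start_batch():	rcp->cur++; rcp->cpumask = online cpus
 *				(minus those in nohz_cpu_mask)
 *	1st rcu_check_quiescent_state() on each cpu:
 *				rdp->quiescbatch != rcp->cur, so the cpu
 *				notes the new grace period (qs_pending = 1)
 *	quiescent state (e.g. a context switch): passed_quiesc = 1
 *	next rcu_check_quiescent_state():
 *				passed_quiesc is set, so cpu_quiet()
 *				clears this cpu from rcp->cpumask
 *	last cpu_quiet():	cpumask is empty, so rcp->completed =
 *				rcp->cur, and rcu_start_batch() runs again
 *				if further callbacks are pending
 */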

#ifdef CONFIG_DEBUG_RCU_STALL

static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)
{
        rcp->gp_check = get_seconds() + 3;
}

static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
{
        int cpu;
        long delta;
        unsigned long flags;

        /* Only let one CPU complain about others per time interval. */

        spin_lock_irqsave(&rcp->lock, flags);
        delta = get_seconds() - rcp->gp_check;
        if (delta < 2L || cpus_empty(rcp->cpumask)) {
                spin_unlock_irqrestore(&rcp->lock, flags);
                return;
        }
        rcp->gp_check = get_seconds() + 30;
        spin_unlock_irqrestore(&rcp->lock, flags);

        /* OK, time to rat on our buddy... */

        printk(KERN_ERR "RCU detected CPU stalls:");
        for_each_cpu_mask(cpu, rcp->cpumask)
                printk(" %d", cpu);
        printk(" (detected by %d, t=%lu/%lu)\n",
               smp_processor_id(), get_seconds(), rcp->gp_check);
}

static void print_cpu_stall(struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu)\n",
                        smp_processor_id(), get_seconds(), rcp->gp_check);
        dump_stack();
        spin_lock_irqsave(&rcp->lock, flags);
        if ((long)(get_seconds() - rcp->gp_check) >= 0L)
                rcp->gp_check = get_seconds() + 30;
        spin_unlock_irqrestore(&rcp->lock, flags);
}

static void check_cpu_stall(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        long delta;

        delta = get_seconds() - rcp->gp_check;
        if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0L) {

                /* We haven't checked in, so go dump stack. */

                print_cpu_stall(rcp);

        } else {
                if (!cpus_empty(rcp->cpumask) && delta >= 2L) {
                        /* They had two seconds to dump stack, so complain. */
                        print_other_cpu_stall(rcp);
                }
        }
}

#else /* #ifdef CONFIG_DEBUG_RCU_STALL */

static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)
{
}

static inline void
check_cpu_stall(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
}

#endif /* #else #ifdef CONFIG_DEBUG_RCU_STALL */

/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
        if (rcp->cur != rcp->pending &&
                        rcp->completed == rcp->cur) {
                rcp->cur++;
                record_gp_check_time(rcp);

                /*
                 * Accessing nohz_cpu_mask before incrementing rcp->cur needs
                 * a barrier. Otherwise it can cause tickless idle CPUs to be
                 * included in rcp->cpumask, which will extend grace periods
                 * unnecessarily.
                 */
                smp_mb();
                cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

                rcp->signaled = 0;
        }
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
        cpu_clear(cpu, rcp->cpumask);
        if (cpus_empty(rcp->cpumask)) {
                /* batch completed ! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp);
        }
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        unsigned long flags;

        if (rdp->quiescbatch != rcp->cur) {
                /* start new grace period: */
                rdp->qs_pending = 1;
                rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }

        /* Grace period already completed for this cpu?
         * qs_pending is checked instead of the actual bitmap to avoid
         * cacheline thrashing.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;

        spin_lock_irqsave(&rcp->lock, flags);
        /*
         * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
         * during cpu startup. Ignore the quiescent state.
         */
        if (likely(rdp->quiescbatch == rcp->cur))
                cpu_quiet(rdp->cpu, rcp);

        spin_unlock_irqrestore(&rcp->lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Warning! Helper for rcu_offline_cpu. Do not use elsewhere without reviewing
 * the locking requirements: the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
                                struct rcu_head **tail, long batch)
{
        unsigned long flags;

        if (list) {
                local_irq_save(flags);
                this_rdp->batch = batch;
                *this_rdp->nxttail[2] = list;
                this_rdp->nxttail[2] = tail;
                local_irq_restore(flags);
        }
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
                                struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        unsigned long flags;

        /*
         * If the cpu going offline owns the grace period, we can block
         * indefinitely waiting for it, so flush it here.
         */
        spin_lock_irqsave(&rcp->lock, flags);
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp);
        rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
        spin_unlock(&rcp->lock);

        this_rdp->qlen += rdp->qlen;
        local_irq_restore(flags);
}

static void rcu_offline_cpu(int cpu)
{
        struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
        struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

        __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
                          &per_cpu(rcu_data, cpu));
        __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
                          &per_cpu(rcu_bh_data, cpu));
        put_cpu_var(rcu_data);
        put_cpu_var(rcu_bh_data);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

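/*
 * Reminder of the nxtlist layout manipulated below, per the struct
 * rcu_data comment in include/linux/rcuclassic.h (a sketch; the header
 * is authoritative). With rdp->batch the batch number of the newest
 * entries, when nxtlist is non-NULL:
 *
 *	[nxtlist,     *nxttail[0])	entries with batch# <= rdp->batch - 2
 *	[*nxttail[0], *nxttail[1])	entries with batch# <= rdp->batch - 1
 *	[*nxttail[1], *nxttail[2])	entries with batch# <= rdp->batch
 *
 * __rcu_process_callbacks() advances nxttail[0] over the segments whose
 * grace period has completed, then splices [nxtlist, *nxttail[0]) onto
 * the donelist.
 */
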
/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        unsigned long flags;
        long completed_snap;

        if (rdp->nxtlist) {
                local_irq_save(flags);
                completed_snap = ACCESS_ONCE(rcp->completed);

                /*
                 * move the other grace-period-completed entries to
                 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
                 */
                if (!rcu_batch_before(completed_snap, rdp->batch))
                        rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
                else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
                        rdp->nxttail[0] = rdp->nxttail[1];

                /*
                 * the grace period for entries in
                 * [rdp->nxtlist, *rdp->nxttail[0]) has completed, so
                 * move these entries to the donelist
                 */
                if (rdp->nxttail[0] != &rdp->nxtlist) {
                        *rdp->donetail = rdp->nxtlist;
                        rdp->donetail = rdp->nxttail[0];
                        rdp->nxtlist = *rdp->nxttail[0];
                        *rdp->donetail = NULL;

                        if (rdp->nxttail[1] == rdp->nxttail[0])
                                rdp->nxttail[1] = &rdp->nxtlist;
                        if (rdp->nxttail[2] == rdp->nxttail[0])
                                rdp->nxttail[2] = &rdp->nxtlist;
                        rdp->nxttail[0] = &rdp->nxtlist;
                }

                local_irq_restore(flags);

                if (rcu_batch_after(rdp->batch, rcp->pending)) {
                        unsigned long flags2;

                        /* and start it/schedule start if it's a new batch */
                        spin_lock_irqsave(&rcp->lock, flags2);
                        if (rcu_batch_after(rdp->batch, rcp->pending)) {
                                rcp->pending = rdp->batch;
                                rcu_start_batch(rcp);
                        }
                        spin_unlock_irqrestore(&rcp->lock, flags2);
                }
        }

        rcu_check_quiescent_state(rcp, rdp);
        if (rdp->donelist)
                rcu_do_batch(rdp);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        /*
         * Memory references from any prior RCU read-side critical sections
         * executed by the interrupted code must be seen before any RCU
         * grace-period manipulations below.
         */

        smp_mb(); /* See above block comment. */

        __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
        __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));

        /*
         * Memory references from any later RCU read-side critical sections
         * executed by the interrupted code must be seen after any RCU
         * grace-period manipulations above.
         */

        smp_mb(); /* See above block comment. */
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /* Check for CPU stalls, if enabled. */
        check_cpu_stall(rcp, rdp);

        if (rdp->nxtlist) {
                long completed_snap = ACCESS_ONCE(rcp->completed);

                /*
                 * This cpu has pending rcu entries and the grace period
                 * for them has completed.
                 */
                if (!rcu_batch_before(completed_snap, rdp->batch))
                        return 1;
                if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
                                rdp->nxttail[0] != rdp->nxttail[1])
                        return 1;
                if (rdp->nxttail[0] != &rdp->nxtlist)
                        return 1;

                /*
                 * This cpu has pending rcu entries and the new batch
                 * for them hasn't been started, nor has its start been
                 * scheduled.
                 */
                if (rcu_batch_after(rdp->batch, rcp->pending))
                        return 1;
        }

        /* This cpu has finished callbacks to invoke */
        if (rdp->donelist)
                return 1;

        /* The rcu core waits for a quiescent state from the cpu */
        if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
                return 1;

        /* nothing to do */
        return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so. This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
        return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
                __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so. This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

        return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
}

668 | /* |
669 | * Top-level function driving RCU grace-period detection, normally | |
670 | * invoked from the scheduler-clock interrupt. This function simply | |
671 | * increments counters that are read only from softirq by this same | |
672 | * CPU, so there are no memory barriers required. | |
673 | */ | |
01c1c660 PM |
674 | void rcu_check_callbacks(int cpu, int user) |
675 | { | |
676 | if (user || | |
677 | (idle_cpu(cpu) && !in_softirq() && | |
678 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | |
8db559b8 PM |
679 | |
680 | /* | |
681 | * Get here if this CPU took its interrupt from user | |
682 | * mode or from the idle loop, and if this is not a | |
683 | * nested interrupt. In this case, the CPU is in | |
684 | * a quiescent state, so count it. | |
685 | * | |
686 | * Also do a memory barrier. This is needed to handle | |
687 | * the case where writes from a preempt-disable section | |
688 | * of code get reordered into schedule() by this CPU's | |
689 | * write buffer. The memory barrier makes sure that | |
690 | * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are see | |
691 | * by other CPUs to happen after any such write. | |
692 | */ | |
693 | ||
694 | smp_mb(); /* See above block comment. */ | |
01c1c660 PM |
695 | rcu_qsctr_inc(cpu); |
696 | rcu_bh_qsctr_inc(cpu); | |
8db559b8 PM |
697 | |
698 | } else if (!in_softirq()) { | |
699 | ||
700 | /* | |
701 | * Get here if this CPU did not take its interrupt from | |
702 | * softirq, in other words, if it is not interrupting | |
703 | * a rcu_bh read-side critical section. This is an _bh | |
704 | * critical section, so count it. The memory barrier | |
705 | * is needed for the same reason as is the above one. | |
706 | */ | |
707 | ||
708 | smp_mb(); /* See above block comment. */ | |
01c1c660 | 709 | rcu_bh_qsctr_inc(cpu); |
8db559b8 | 710 | } |
01c1c660 PM |
711 | raise_rcu_softirq(); |
712 | } | |
713 | ||
static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
{
        unsigned long flags;

        spin_lock_irqsave(&rcp->lock, flags);
        memset(rdp, 0, sizeof(*rdp));
        rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
        rdp->donetail = &rdp->donelist;
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;
        rdp->cpu = cpu;
        rdp->blimit = blimit;
        spin_unlock_irqrestore(&rcp->lock, flags);
}

static void __cpuinit rcu_online_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
        rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                rcu_online_cpu(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                rcu_offline_cpu(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
        .notifier_call = rcu_cpu_notify,
};

/*
 * Initializes the rcu mechanism. Assumed to be called early, that is,
 * before the local timer (SMP) or jiffie timer (uniprocessor) is set up.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init __rcu_init(void)
{
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
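
/*
 * When built in, module parameters take the object-file name as their
 * prefix on the kernel command line, so the knobs above should be
 * settable at boot as, e.g. (assuming this file compiles to rcuclassic.o):
 *
 *	rcuclassic.blimit=20 rcuclassic.qhimark=8192 rcuclassic.qlowmark=64
 */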